Compare commits
24 Commits
Author | SHA1 | Date |
---|---|---|
|
a312af9f63 | |
|
eaaef387e5 | |
|
c7f7d1eaa9 | |
|
79fbb6075b | |
|
2184a07227 | |
|
3498a20214 | |
|
19b0e9a3aa | |
|
1dca34e84f | |
|
c5a4552069 | |
|
40a80c8344 | |
|
d68e8de2e8 | |
|
5b40f081f4 | |
|
e47e130299 | |
|
bd1565c204 | |
|
487ccfd774 | |
|
e1ba61f251 | |
|
270f297315 | |
|
0d9983ef82 | |
|
725dd4d30e | |
|
8d7d655e51 | |
|
9456150b7c | |
|
7eae71ed89 | |
|
5804a34fa6 | |
|
744c0348f0 |
|
@ -76,7 +76,7 @@ jobs:
|
|||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@6e04d228eb30da1757ee4e1dd75a0ec73a653e06 # v3.1.1
|
||||
with:
|
||||
cosign-release: 'v2.0.2'
|
||||
cosign-release: 'v2.2.0'
|
||||
|
||||
- uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
|
||||
- uses: docker/setup-buildx-action@4c0219f9ac95b02789c1075625400b2acbff50b1 # v2.9.1
|
||||
|
|
|
@ -51,7 +51,7 @@ jobs:
|
|||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.8.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
|
||||
with:
|
||||
image: quay.io/argoproj/argo-rollouts
|
||||
digest: ${{ needs.controller-image.outputs.image-digest }}
|
||||
|
@ -67,7 +67,7 @@ jobs:
|
|||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.8.0
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
|
||||
with:
|
||||
image: quay.io/argoproj/kubectl-argo-rollouts
|
||||
digest: ${{ needs.plugin-image.outputs.image-digest }}
|
||||
|
@ -170,9 +170,9 @@ jobs:
|
|||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@204a51a57a74d190b284a0ce69b44bc37201f343 # v3.0.3
|
||||
uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
|
||||
with:
|
||||
cosign-release: 'v2.0.2'
|
||||
cosign-release: 'v2.2.0'
|
||||
|
||||
- name: Generate SBOM (spdx)
|
||||
id: spdx-builder
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
version: 2
|
||||
formats: all
|
||||
mkdocs:
|
||||
fail_on_warning: false
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements.txt
|
||||
build:
|
||||
os: "ubuntu-22.04"
|
||||
tools:
|
||||
python: "3.7"
|
|
@ -1,7 +0,0 @@
|
|||
version: 2
|
||||
formats: all
|
||||
mkdocs:
|
||||
fail_on_warning: false
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements.txt
|
|
@ -165,7 +165,7 @@ func newCommand() *cobra.Command {
|
|||
resyncDuration,
|
||||
kubeinformers.WithNamespace(notificationConfigNamespace),
|
||||
kubeinformers.WithTweakListOptions(func(options *metav1.ListOptions) {
|
||||
options.Kind = "Secrete"
|
||||
options.Kind = "Secret"
|
||||
options.FieldSelector = fmt.Sprintf("metadata.name=%s", record.NotificationSecret)
|
||||
}),
|
||||
)
|
||||
|
|
|
@ -463,6 +463,12 @@ func (c *Manager) startLeading(ctx context.Context, rolloutThreadiness, serviceT
|
|||
// Start the informer factories to begin populating the informer caches
|
||||
log.Info("Starting Controllers")
|
||||
|
||||
c.notificationConfigMapInformerFactory.Start(ctx.Done())
|
||||
c.notificationSecretInformerFactory.Start(ctx.Done())
|
||||
if ok := cache.WaitForCacheSync(ctx.Done(), c.configMapSynced, c.secretSynced); !ok {
|
||||
log.Fatalf("failed to wait for configmap/secret caches to sync, exiting")
|
||||
}
|
||||
|
||||
// notice that there is no need to run Start methods in a separate goroutine. (i.e. go kubeInformerFactory.Start(stopCh)
|
||||
// Start method is non-blocking and runs all registered informers in a dedicated goroutine.
|
||||
c.dynamicInformerFactory.Start(ctx.Done())
|
||||
|
@ -471,9 +477,6 @@ func (c *Manager) startLeading(ctx context.Context, rolloutThreadiness, serviceT
|
|||
}
|
||||
c.kubeInformerFactory.Start(ctx.Done())
|
||||
|
||||
c.notificationConfigMapInformerFactory.Start(ctx.Done())
|
||||
c.notificationSecretInformerFactory.Start(ctx.Done())
|
||||
|
||||
c.jobInformerFactory.Start(ctx.Done())
|
||||
|
||||
// Check if Istio installed on cluster before starting dynamicInformerFactory
|
||||
|
|
|
@ -0,0 +1,106 @@
|
|||
# AWS SQS
|
||||
|
||||
## Parameters
|
||||
|
||||
This notification service is capable of sending simple messages to AWS SQS queue.
|
||||
|
||||
* `queue` - name of the queue you are intending to send messages to. Can be overwritten with target destination annotation.
|
||||
* `region` - region of the sqs queue; can be provided via env variable AWS_DEFAULT_REGION
|
||||
* `key` - optional, aws access key must be either referenced from a secret via variable or via env variable AWS_ACCESS_KEY_ID
|
||||
* `secret` - optional, aws access secret must be either referenced from a secret via variable or via env variable AWS_SECRET_ACCESS_KEY
|
||||
* `account` - optional, external accountId of the queue
|
||||
* `endpointUrl` - optional, useful for development with localstack
|
||||
|
||||
## Example
|
||||
|
||||
### Using Secret for credential retrieval:
|
||||
|
||||
Resource Annotation:
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-deployment-ready.awssqs: "overwrite-myqueue"
|
||||
```
|
||||
|
||||
* ConfigMap
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.awssqs: |
|
||||
region: "us-east-2"
|
||||
queue: "myqueue"
|
||||
account: "1234567"
|
||||
key: "$awsaccess_key"
|
||||
secret: "$awsaccess_secret"
|
||||
|
||||
template.deployment-ready: |
|
||||
message: |
|
||||
Deployment {{.obj.metadata.name}} is ready!
|
||||
|
||||
trigger.on-deployment-ready: |
|
||||
- when: any(obj.status.conditions, {.type == 'Available' && .status == 'True'})
|
||||
send: [deployment-ready]
|
||||
- oncePer: obj.metadata.annotations["generation"]
|
||||
|
||||
```
|
||||
Secret
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
awsaccess_key: test
|
||||
awsaccess_secret: test
|
||||
```
|
||||
|
||||
|
||||
### Minimal configuration using AWS Env variables
|
||||
|
||||
Ensure the following list of environmental variables is injected via OIDC, or another method. This assumes SQS is local to the account.
|
||||
You may skip usage of secret for sensitive data and omit other parameters. (Setting parameters via ConfigMap takes precedence.)
|
||||
|
||||
Variables:
|
||||
|
||||
```bash
|
||||
export AWS_ACCESS_KEY_ID="test"
|
||||
export AWS_SECRET_ACCESS_KEY="test"
|
||||
export AWS_DEFAULT_REGION="us-east-1"
|
||||
```
|
||||
|
||||
Resource Annotation:
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-deployment-ready.awssqs: ""
|
||||
```
|
||||
|
||||
* ConfigMap
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.awssqs: |
|
||||
queue: "myqueue"
|
||||
|
||||
template.deployment-ready: |
|
||||
message: |
|
||||
Deployment {{.obj.metadata.name}} is ready!
|
||||
|
||||
trigger.on-deployment-ready: |
|
||||
- when: any(obj.status.conditions, {.type == 'Available' && .status == 'True'})
|
||||
send: [deployment-ready]
|
||||
- oncePer: obj.metadata.annotations["generation"]
|
||||
|
||||
```
|
|
@ -12,7 +12,7 @@ The GitHub notification service changes commit status using [GitHub Apps](https:
|
|||
## Configuration
|
||||
|
||||
1. Create a GitHub Apps using https://github.com/settings/apps/new
|
||||
2. Change repository permissions to enable write commit statuses and/or deployments
|
||||
2. Change repository permissions to enable write commit statuses and/or deployments and/or pull requests comments
|
||||

|
||||
3. Generate a private key, and download it automatically
|
||||

|
||||
|
@ -75,8 +75,17 @@ template.app-deployed: |
|
|||
environmentURL: "https://{{.app.metadata.name}}.example.com"
|
||||
logURL: "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true"
|
||||
requiredContexts: []
|
||||
autoMerge: true
|
||||
pullRequestComment:
|
||||
content: |
|
||||
Application {{.app.metadata.name}} is now running new version of deployments manifests.
|
||||
See more here: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true
|
||||
```
|
||||
|
||||
**Notes**:
|
||||
- If the message is set to 140 characters or more, it will be truncated.
|
||||
- If `github.repoURLPath` and `github.revisionPath` are same as above, they can be omitted.
|
||||
- Automerge is optional and `true` by default for github deployments to ensure the requested ref is up to date with the default branch.
|
||||
Setting this option to `false` is required if you would like to deploy older refs in your default branch.
|
||||
For more information see the [Github Deployment API Docs](https://docs.github.com/en/rest/deployments/deployments?apiVersion=2022-11-28#create-a-deployment).
|
||||
- If `github.pullRequestComment.content` is set to 65536 characters or more, it will be truncated.
|
||||
|
|
|
@ -59,24 +59,27 @@ A card message can be defined as follows:
|
|||
```yaml
|
||||
template.app-sync-succeeded: |
|
||||
googlechat:
|
||||
cards: |
|
||||
cardsV2: |
|
||||
- header:
|
||||
title: ArgoCD Bot Notification
|
||||
sections:
|
||||
- widgets:
|
||||
- textParagraph:
|
||||
- decoratedText:
|
||||
text: The app {{ .app.metadata.name }} has successfully synced!
|
||||
- widgets:
|
||||
- keyValue:
|
||||
- decoratedText:
|
||||
topLabel: Repository
|
||||
content: {{ call .repo.RepoURLToHTTPS .app.spec.source.repoURL }}
|
||||
- keyValue:
|
||||
text: {{ call .repo.RepoURLToHTTPS .app.spec.source.repoURL }}
|
||||
- decoratedText:
|
||||
topLabel: Revision
|
||||
content: {{ .app.spec.source.targetRevision }}
|
||||
- keyValue:
|
||||
text: {{ .app.spec.source.targetRevision }}
|
||||
- decoratedText:
|
||||
topLabel: Author
|
||||
content: {{ (call .repo.GetCommitMetadata .app.status.sync.revision).Author }}
|
||||
text: {{ (call .repo.GetCommitMetadata .app.status.sync.revision).Author }}
|
||||
```
|
||||
All [Card fields](https://developers.google.com/chat/api/reference/rest/v1/cards#Card_1) are supported and can be used
|
||||
in notifications. It is also possible to use the previous (now deprecated) `cards` key to use the legacy card fields,
|
||||
but this is not recommended as Google has deprecated this field and recommends using the newer `cardsV2`.
|
||||
|
||||
The card message can be written in JSON too.
|
||||
|
||||
|
|
|
@ -4,6 +4,12 @@ To be able to create Grafana annotation with argocd-notifications you have to cr
|
|||
|
||||

|
||||
|
||||
Available parameters :
|
||||
|
||||
* `apiURL` - the server url, e.g. https://grafana.example.com
|
||||
* `apiKey` - the API key for the serviceaccount
|
||||
* `insecureSkipVerify` - optional bool, true or false
|
||||
|
||||
1. Login to your Grafana instance as `admin`
|
||||
2. On the left menu, go to Configuration / API Keys
|
||||
3. Click "Add API Key"
|
||||
|
|
|
@ -38,6 +38,7 @@ metadata:
|
|||
|
||||
## Service Types
|
||||
|
||||
* [AwsSqs](./awssqs.md)
|
||||
* [Email](./email.md)
|
||||
* [GitHub](./github.md)
|
||||
* [Slack](./slack.md)
|
||||
|
|
|
@ -29,56 +29,56 @@ The Slack notification service configuration includes following settings:
|
|||
1. Invite your slack bot to this channel **otherwise slack bot won't be able to deliver notifications to this channel**
|
||||
1. Store Oauth access token in `argocd-notifications-secret` secret
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
slack-token: <Oauth-access-token>
|
||||
```
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: <secret-name>
|
||||
stringData:
|
||||
slack-token: <Oauth-access-token>
|
||||
```
|
||||
|
||||
1. Define service type slack in data section of `argocd-notifications-cm` configmap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.slack: |
|
||||
token: $slack-token
|
||||
```
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: <config-map-name>
|
||||
data:
|
||||
service.slack: |
|
||||
token: $slack-token
|
||||
```
|
||||
|
||||
1. Add annotation in application yaml file to enable notifications for specific argocd app. The following example uses the [on-sync-succeeded trigger](../catalog.md#triggers):
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.slack: my_channel
|
||||
```
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-sync-succeeded.slack: my_channel
|
||||
```
|
||||
|
||||
1. Annotation with more than one [trigger](../catalog.md#triggers), with multiple destinations and recipients
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscriptions: |
|
||||
- trigger: [on-scaling-replica-set, on-rollout-updated, on-rollout-step-completed]
|
||||
destinations:
|
||||
- service: slack
|
||||
recipients: [my-channel-1, my-channel-2]
|
||||
- service: email
|
||||
recipients: [recipient-1, recipient-2, recipient-3 ]
|
||||
- trigger: [on-rollout-aborted, on-analysis-run-failed, on-analysis-run-error]
|
||||
destinations:
|
||||
- service: slack
|
||||
recipients: [my-channel-21, my-channel-22]
|
||||
```
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscriptions: |
|
||||
- trigger: [on-scaling-replica-set, on-rollout-updated, on-rollout-step-completed]
|
||||
destinations:
|
||||
- service: slack
|
||||
recipients: [my-channel-1, my-channel-2]
|
||||
- service: email
|
||||
recipients: [recipient-1, recipient-2, recipient-3 ]
|
||||
- trigger: [on-rollout-aborted, on-analysis-run-failed, on-analysis-run-error]
|
||||
destinations:
|
||||
- service: slack
|
||||
recipients: [my-channel-21, my-channel-22]
|
||||
```
|
||||
|
||||
## Templates
|
||||
|
||||
|
|
|
@ -805,7 +805,7 @@ func TestAddInvalidSpec(t *testing.T) {
|
|||
"status":{
|
||||
}
|
||||
}`, nil, cond)
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
}
|
||||
|
||||
func TestKeepInvalidSpec(t *testing.T) {
|
||||
|
@ -852,7 +852,7 @@ func TestUpdateInvalidSpec(t *testing.T) {
|
|||
"status":{
|
||||
}
|
||||
}`, nil, cond)
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
|
||||
}
|
||||
|
||||
|
@ -892,7 +892,7 @@ func TestRemoveInvalidSpec(t *testing.T) {
|
|||
"status":{
|
||||
}
|
||||
}`, templateStatus, cond)
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
|
|
|
@ -282,7 +282,7 @@ func TestSuccessAfterDurationPasses(t *testing.T) {
|
|||
"phase": "Successful"
|
||||
}
|
||||
}`, templateStatuses, cond)
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
}
|
||||
|
||||
// TestDontRequeueWithoutDuration verifies we don't requeue if an experiment does not have
|
||||
|
|
|
@ -42,7 +42,7 @@ func TestCreateMultipleRS(t *testing.T) {
|
|||
"status":{
|
||||
}
|
||||
}`, templateStatus, cond)
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
}
|
||||
|
||||
func TestCreateMissingRS(t *testing.T) {
|
||||
|
@ -72,7 +72,7 @@ func TestCreateMissingRS(t *testing.T) {
|
|||
generateTemplatesStatus("bar", 0, 0, v1alpha1.TemplateStatusProgressing, now()),
|
||||
generateTemplatesStatus("baz", 0, 0, v1alpha1.TemplateStatusProgressing, now()),
|
||||
}
|
||||
assert.Equal(t, calculatePatch(e, expectedPatch, templateStatuses, cond), patch)
|
||||
assert.JSONEq(t, calculatePatch(e, expectedPatch, templateStatuses, cond), patch)
|
||||
}
|
||||
|
||||
func TestTemplateHasMultipleRS(t *testing.T) {
|
||||
|
|
58
go.mod
58
go.mod
|
@ -3,11 +3,11 @@ module github.com/argoproj/argo-rollouts
|
|||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/antonmedv/expr v1.13.0
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20230712163936-39dfcb66f902
|
||||
github.com/antonmedv/expr v1.15.3
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20231011160156-2d2d1a75dbee
|
||||
github.com/argoproj/pkg v0.13.6
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.33
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.2
|
||||
github.com/aws/aws-sdk-go-v2/config v1.19.1
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.27.2
|
||||
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.0
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
|
@ -36,7 +36,7 @@ require (
|
|||
github.com/stretchr/testify v1.8.4
|
||||
github.com/tj/assert v0.0.3
|
||||
github.com/valyala/fasttemplate v1.2.2
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130
|
||||
google.golang.org/grpc v1.57.0
|
||||
google.golang.org/protobuf v1.31.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
|
@ -57,7 +57,7 @@ require (
|
|||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.19.1 // indirect
|
||||
cloud.google.com/go/compute v1.20.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
|
||||
|
@ -82,16 +82,17 @@ require (
|
|||
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
|
||||
github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20220708192748-b73dcb041214 // indirect
|
||||
github.com/aws/aws-sdk-go v1.44.116 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.38 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.39 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.2 // indirect
|
||||
github.com/aws/smithy-go v1.14.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect
|
||||
github.com/aws/smithy-go v1.15.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
|
@ -120,10 +121,13 @@ require (
|
|||
github.com/google/go-github/v53 v53.0.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.4 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gregdel/pushover v1.1.0 // indirect
|
||||
github.com/gregdel/pushover v1.2.1 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-hclog v0.14.1 // indirect
|
||||
|
@ -162,7 +166,7 @@ require (
|
|||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/shopspring/decimal v1.2.0 // indirect
|
||||
github.com/slack-go/slack v0.12.2 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/cast v1.5.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect
|
||||
|
@ -170,21 +174,23 @@ require (
|
|||
github.com/valyala/fastjson v1.6.3 // indirect
|
||||
github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0 // indirect
|
||||
github.com/xlab/treeprint v1.1.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
|
||||
golang.org/x/crypto v0.7.0 // indirect
|
||||
golang.org/x/crypto v0.11.0 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
golang.org/x/net v0.10.0 // indirect
|
||||
golang.org/x/oauth2 v0.8.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
golang.org/x/term v0.8.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/net v0.12.0 // indirect
|
||||
golang.org/x/oauth2 v0.10.0 // indirect
|
||||
golang.org/x/sys v0.10.0 // indirect
|
||||
golang.org/x/term v0.10.0 // indirect
|
||||
golang.org/x/text v0.11.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.6.0 // indirect
|
||||
gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect
|
||||
gomodules.xyz/notify v0.1.1 // indirect
|
||||
google.golang.org/api v0.132.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
|
||||
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
|
|
127
go.sum
127
go.sum
|
@ -19,8 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
|
|||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
|
||||
cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
|
||||
cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg=
|
||||
cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
||||
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
|
@ -91,11 +91,11 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
|||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antonmedv/expr v1.13.0 h1:8YrTtlCzlOtXw+hpeCLDLL2uo0C0k6jmYpYTGws5c5w=
|
||||
github.com/antonmedv/expr v1.13.0/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
|
||||
github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI=
|
||||
github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
|
||||
github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20230712163936-39dfcb66f902 h1:JnW6RNwSxFwf4qQf3d6n+LhTODzmrLpDx2mQMPYzKf8=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20230712163936-39dfcb66f902/go.mod h1:W//xreL6/AGmJdh6SyvmJhOZ1VweW6DBm8qSBx7NO1M=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20231011160156-2d2d1a75dbee h1:ZYILioq4v6OIsr7uh0Pcx7JY4KpJ9qs8qbjRqM6HWMY=
|
||||
github.com/argoproj/notifications-engine v0.4.1-0.20231011160156-2d2d1a75dbee/go.mod h1:VG9FXG0ddIVGc7NcSTRapaUjCPCYqOji//z6mmBYwCE=
|
||||
github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM=
|
||||
github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
|
@ -103,34 +103,44 @@ github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z
|
|||
github.com/aws/aws-sdk-go v1.44.39/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.116 h1:NpLIhcvLWXJZAEwvPj3TDHeqp7DleK6ZUVYyW01WNHY=
|
||||
github.com/aws/aws-sdk-go v1.44.116/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.1 h1:rZBf5DWr7YGrnlTK4kgDQGn1ltqOg5orCYb/UhOFZkg=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||
github.com/aws/aws-sdk-go-v2 v1.20.1/go.mod h1:NU06lETsFm8fUC6ZjhgDpVBcGZTFQ6XM+LZWZxMI4ac=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.33 h1:JKcw5SFxFW/rpM4mOPjv0VQ11E2kxW13F3exWOy7VZU=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.33/go.mod h1:hXO/l9pgY3K5oZJldamP0pbZHdPqqk+4/maa7DSD3cA=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.32 h1:lIH1eKPcCY1ylR4B6PkBGRWMHO3aVenOKJHWiS4/G2w=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.32/go.mod h1:lL8U3v/Y79YRG69WlAho0OHIKUXCyFvSXaIvfo81sls=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.8 h1:DK/9C+UN/X+1+Wm8pqaDksQr2tSLzq+8X1/rI/ZxKEQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.8/go.mod h1:ce7BgLQfYr5hQFdy67oX2svto3ufGtm6oBvmsHScI1Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.38 h1:c8ed/T9T2K5I+h/JzmF5tpI46+OODQ74dzmdo+QnaMg=
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.19.1 h1:oe3vqcGftyk40icfLymhhhNysAwk0NfiwkDi2GTPMXs=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.19.1/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.38/go.mod h1:qggunOChCMu9ZF/UkAfhTz25+U2rLVb3ya0Ua6TTfCA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.32 h1:hNeAAymUY5gu11WrrmFb3CVIp9Dar9hbo44yzzcQpzA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.32/go.mod h1:0ZXSqrty4FtQ7p8TEuRde/SZm9X05KT18LAUlR40Ln0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.39 h1:fc0ukRAiP1syoSGZYu+DaE+FulSYhTiJ8WpVu5jElU4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.39/go.mod h1:WLAW8PT7+JhjZfLSWe7WEJaJu0GNo0cKc2Zyo003RBs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.27.2 h1:HbEoy5QzXicnGgGWF4moCgsbio2xytgVQcs70xD3j3w=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.27.2/go.mod h1:Fc5ZJyxghsjGp1KqbLb2HTJjsJjSv6AXUikHUJYmCHM=
|
||||
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.0 h1:lSCNS+ZMztgQWoLz/I27HdYjKlUaKEMWApM0dVOR/y8=
|
||||
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.0/go.mod h1:AZv/T0/2rhNBLiY2k109TT6HJ7Z0P8Z+SYvs0jqVkXE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.32 h1:dGAseBFEYxth10V23b5e2mAS+tX7oVbfYHD6dnDdAsg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.32/go.mod h1:4jwAWKEkCR0anWk5+1RbfSg1R5Gzld7NLiuaq5bTR/Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.2 h1:A2RlEMo4SJSwbNoUUgkxTAEMduAy/8wG3eB2b2lP4gY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.13.2/go.mod h1:ju+nNXUunfIFamXUIZQiICjnO/TPlOmWcYhZcSy7xaE=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.2 h1:OJELEgyaT2kmaBGZ+myyZbTTLobfe3ox3FSh5eYK9Qs=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.2/go.mod h1:ubDBBaDFs1GHijSOTi8ljppML15GLG0HxhILtbjNNYQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.2 h1:ympg1+Lnq33XLhcK/xTG4yZHPs1Oyxu+6DEWbl7qOzA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.21.2/go.mod h1:FQ/DQcOfESELfJi5ED+IPPAjI5xC6nxtSolVVB773jM=
|
||||
github.com/aws/smithy-go v1.14.1 h1:EFKMUmH/iHMqLiwoEDx2rRjRQpI1YCn5jTysoaDujFs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 h1:tQoMg8i4nFAB70cJ4wiAYEiZRYo2P6uDmU2D6ys/igo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0/go.mod h1:jQhN5f4p3PALMNlUtfb/0wGIFlV7vGtJlPDVfxfNfPY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/smithy-go v1.14.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8=
|
||||
github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -165,7 +175,11 @@ github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEM
|
|||
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
|
@ -200,6 +214,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
|
|||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
|
@ -217,7 +232,7 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
|
|||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
|
||||
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
|
||||
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/getkin/kin-openapi v0.94.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac1BUPam4aw6Q=
|
||||
|
@ -339,6 +354,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
|
@ -367,14 +383,20 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
|
|||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
|
||||
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
|
||||
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
|
@ -387,8 +409,8 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
|
|||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregdel/pushover v1.1.0 h1:dwHyvrcpZCOS9V1fAnKPaGRRI5OC55cVaKhMybqNsKQ=
|
||||
github.com/gregdel/pushover v1.1.0/go.mod h1:EcaO66Nn1StkpEm1iKtBTV3d2A16SoMsVER1PthX7to=
|
||||
github.com/gregdel/pushover v1.2.1 h1:IPPJCdzXz60gMqnlzS0ZAW5z5aS1gI4nU+YM0Pe+ssA=
|
||||
github.com/gregdel/pushover v1.2.1/go.mod h1:EcaO66Nn1StkpEm1iKtBTV3d2A16SoMsVER1PthX7to=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
|
@ -622,8 +644,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
|||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
|
||||
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
|
||||
|
@ -653,8 +675,8 @@ github.com/spaceapegames/go-wavefront v1.8.1/go.mod h1:GtdIjtJ0URkfPmaKx0+7vMSDv
|
|||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
|
||||
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
|
||||
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
|
||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||
|
@ -680,6 +702,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
|
||||
|
@ -716,6 +739,8 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
|||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
|
||||
|
@ -734,12 +759,14 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP
|
|||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -825,16 +852,18 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
|||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
|
||||
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
|
||||
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
|
||||
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
|
||||
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -848,7 +877,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -919,16 +948,18 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -938,11 +969,13 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1030,6 +1063,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
|||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc=
|
||||
google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1069,12 +1104,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M=
|
||||
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
|
||||
google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8=
|
||||
google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
@ -1088,8 +1123,10 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
|
|||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
|
||||
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
|
|
|
@ -75,6 +75,7 @@ nav:
|
|||
- Overview: features/notifications.md
|
||||
- Services:
|
||||
- generated/notification-services/alertmanager.md
|
||||
- generated/notification-services/awssqs.md
|
||||
- generated/notification-services/email.md
|
||||
- generated/notification-services/github.md
|
||||
- generated/notification-services/googlechat.md
|
||||
|
|
|
@ -1380,28 +1380,87 @@ func (m *JobInfo) GetStartedAt() *v1.Time {
|
|||
return nil
|
||||
}
|
||||
|
||||
type AnalysisRunSpecAndStatus struct {
|
||||
Spec *v1alpha1.AnalysisRunSpec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"`
|
||||
Status *v1alpha1.AnalysisRunStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AnalysisRunSpecAndStatus) Reset() { *m = AnalysisRunSpecAndStatus{} }
|
||||
func (m *AnalysisRunSpecAndStatus) String() string { return proto.CompactTextString(m) }
|
||||
func (*AnalysisRunSpecAndStatus) ProtoMessage() {}
|
||||
func (*AnalysisRunSpecAndStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_99101d942e8912a7, []int{18}
|
||||
}
|
||||
func (m *AnalysisRunSpecAndStatus) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *AnalysisRunSpecAndStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_AnalysisRunSpecAndStatus.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *AnalysisRunSpecAndStatus) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AnalysisRunSpecAndStatus.Merge(m, src)
|
||||
}
|
||||
func (m *AnalysisRunSpecAndStatus) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *AnalysisRunSpecAndStatus) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AnalysisRunSpecAndStatus.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_AnalysisRunSpecAndStatus proto.InternalMessageInfo
|
||||
|
||||
func (m *AnalysisRunSpecAndStatus) GetSpec() *v1alpha1.AnalysisRunSpec {
|
||||
if m != nil {
|
||||
return m.Spec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *AnalysisRunSpecAndStatus) GetStatus() *v1alpha1.AnalysisRunStatus {
|
||||
if m != nil {
|
||||
return m.Status
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type AnalysisRunInfo struct {
|
||||
ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"`
|
||||
Icon string `protobuf:"bytes,2,opt,name=icon,proto3" json:"icon,omitempty"`
|
||||
Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
|
||||
Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
|
||||
Successful int32 `protobuf:"varint,5,opt,name=successful,proto3" json:"successful,omitempty"`
|
||||
Failed int32 `protobuf:"varint,6,opt,name=failed,proto3" json:"failed,omitempty"`
|
||||
Inconclusive int32 `protobuf:"varint,7,opt,name=inconclusive,proto3" json:"inconclusive,omitempty"`
|
||||
Error int32 `protobuf:"varint,8,opt,name=error,proto3" json:"error,omitempty"`
|
||||
Jobs []*JobInfo `protobuf:"bytes,9,rep,name=jobs,proto3" json:"jobs,omitempty"`
|
||||
NonJobInfo []*NonJobInfo `protobuf:"bytes,10,rep,name=nonJobInfo,proto3" json:"nonJobInfo,omitempty"`
|
||||
Metrics []*Metrics `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"`
|
||||
//
|
||||
//field type from 161 -170 will be deprecated in future.
|
||||
Icon string `protobuf:"bytes,2,opt,name=icon,proto3" json:"icon,omitempty"`
|
||||
Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
|
||||
Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
|
||||
Successful int32 `protobuf:"varint,5,opt,name=successful,proto3" json:"successful,omitempty"`
|
||||
Failed int32 `protobuf:"varint,6,opt,name=failed,proto3" json:"failed,omitempty"`
|
||||
Inconclusive int32 `protobuf:"varint,7,opt,name=inconclusive,proto3" json:"inconclusive,omitempty"`
|
||||
Error int32 `protobuf:"varint,8,opt,name=error,proto3" json:"error,omitempty"`
|
||||
Jobs []*JobInfo `protobuf:"bytes,9,rep,name=jobs,proto3" json:"jobs,omitempty"`
|
||||
NonJobInfo []*NonJobInfo `protobuf:"bytes,10,rep,name=nonJobInfo,proto3" json:"nonJobInfo,omitempty"`
|
||||
Metrics []*Metrics `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty"`
|
||||
// The new API changes should use SpecAndStatus field type.
|
||||
SpecAndStatus *AnalysisRunSpecAndStatus `protobuf:"bytes,12,opt,name=specAndStatus,proto3" json:"specAndStatus,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AnalysisRunInfo) Reset() { *m = AnalysisRunInfo{} }
|
||||
func (m *AnalysisRunInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*AnalysisRunInfo) ProtoMessage() {}
|
||||
func (*AnalysisRunInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_99101d942e8912a7, []int{18}
|
||||
return fileDescriptor_99101d942e8912a7, []int{19}
|
||||
}
|
||||
func (m *AnalysisRunInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1507,6 +1566,13 @@ func (m *AnalysisRunInfo) GetMetrics() []*Metrics {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *AnalysisRunInfo) GetSpecAndStatus() *AnalysisRunSpecAndStatus {
|
||||
if m != nil {
|
||||
return m.SpecAndStatus
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type NonJobInfo struct {
|
||||
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
|
||||
|
@ -1521,7 +1587,7 @@ func (m *NonJobInfo) Reset() { *m = NonJobInfo{} }
|
|||
func (m *NonJobInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*NonJobInfo) ProtoMessage() {}
|
||||
func (*NonJobInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_99101d942e8912a7, []int{19}
|
||||
return fileDescriptor_99101d942e8912a7, []int{20}
|
||||
}
|
||||
func (m *NonJobInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1593,7 +1659,7 @@ func (m *Metrics) Reset() { *m = Metrics{} }
|
|||
func (m *Metrics) String() string { return proto.CompactTextString(m) }
|
||||
func (*Metrics) ProtoMessage() {}
|
||||
func (*Metrics) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_99101d942e8912a7, []int{20}
|
||||
return fileDescriptor_99101d942e8912a7, []int{21}
|
||||
}
|
||||
func (m *Metrics) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
|
@ -1676,6 +1742,7 @@ func init() {
|
|||
proto.RegisterType((*PodInfo)(nil), "rollout.PodInfo")
|
||||
proto.RegisterType((*ContainerInfo)(nil), "rollout.ContainerInfo")
|
||||
proto.RegisterType((*JobInfo)(nil), "rollout.JobInfo")
|
||||
proto.RegisterType((*AnalysisRunSpecAndStatus)(nil), "rollout.AnalysisRunSpecAndStatus")
|
||||
proto.RegisterType((*AnalysisRunInfo)(nil), "rollout.AnalysisRunInfo")
|
||||
proto.RegisterType((*NonJobInfo)(nil), "rollout.NonJobInfo")
|
||||
proto.RegisterType((*Metrics)(nil), "rollout.Metrics")
|
||||
|
@ -1686,117 +1753,121 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_99101d942e8912a7 = []byte{
|
||||
// 1751 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x1c, 0x49,
|
||||
0x15, 0x57, 0x7b, 0x3c, 0xf6, 0xf8, 0x8d, 0x3f, 0xc6, 0xe5, 0x6c, 0xb6, 0x77, 0x36, 0x58, 0xa6,
|
||||
0x17, 0x09, 0xc7, 0x40, 0xb7, 0x93, 0x8d, 0xb2, 0x2c, 0x1f, 0x87, 0x90, 0x58, 0xde, 0xa0, 0xec,
|
||||
0x12, 0x3a, 0xc0, 0x0a, 0x24, 0x88, 0x6a, 0x7a, 0xca, 0xe3, 0x4e, 0x7a, 0xba, 0x9a, 0xae, 0xea,
|
||||
0x09, 0x23, 0x6b, 0x0e, 0xf0, 0x0f, 0x70, 0xe0, 0x5f, 0x58, 0x09, 0x71, 0x42, 0x48, 0x5c, 0x38,
|
||||
0x70, 0x45, 0x1c, 0x91, 0xf8, 0x07, 0x50, 0xc4, 0x85, 0x23, 0x17, 0xce, 0xa8, 0x5e, 0x57, 0x57,
|
||||
0x7f, 0x78, 0xec, 0x38, 0xb2, 0x21, 0x7b, 0x9a, 0x7a, 0xef, 0xd5, 0x7b, 0xef, 0x57, 0xf3, 0x3e,
|
||||
0xaa, 0xfa, 0xc1, 0x7b, 0xc9, 0xf3, 0x91, 0x47, 0x93, 0x30, 0x88, 0x42, 0x16, 0x4b, 0x2f, 0xe5,
|
||||
0x51, 0xc4, 0x33, 0xf3, 0xeb, 0x26, 0x29, 0x97, 0x9c, 0x2c, 0x6b, 0xb2, 0x7f, 0x63, 0xc4, 0xf9,
|
||||
0x28, 0x62, 0x4a, 0xc1, 0xa3, 0x71, 0xcc, 0x25, 0x95, 0x21, 0x8f, 0x45, 0xbe, 0xad, 0xff, 0x68,
|
||||
0x14, 0xca, 0xe3, 0x6c, 0xe0, 0x06, 0x7c, 0xec, 0xd1, 0x74, 0xc4, 0x93, 0x94, 0x3f, 0xc3, 0xc5,
|
||||
0xd7, 0xb4, 0xbe, 0xf0, 0xb4, 0x37, 0xe1, 0x19, 0xce, 0xe4, 0x16, 0x8d, 0x92, 0x63, 0x7a, 0xcb,
|
||||
0x1b, 0xb1, 0x98, 0xa5, 0x54, 0xb2, 0xa1, 0xb6, 0x76, 0xe7, 0xf9, 0xd7, 0x85, 0x1b, 0x72, 0xb5,
|
||||
0x7d, 0x4c, 0x83, 0xe3, 0x30, 0x66, 0xe9, 0xb4, 0xd4, 0x1f, 0x33, 0x49, 0xbd, 0xc9, 0x69, 0xad,
|
||||
0x77, 0x35, 0x42, 0xa4, 0x06, 0xd9, 0x91, 0xc7, 0xc6, 0x89, 0x9c, 0xe6, 0x42, 0xe7, 0x01, 0xf4,
|
||||
0xfc, 0xdc, 0xef, 0xc3, 0xf8, 0x88, 0x7f, 0x3f, 0x63, 0xe9, 0x94, 0x10, 0x58, 0x8c, 0xe9, 0x98,
|
||||
0xd9, 0xd6, 0x8e, 0xb5, 0xbb, 0xe2, 0xe3, 0x9a, 0xdc, 0x80, 0x15, 0xf5, 0x2b, 0x12, 0x1a, 0x30,
|
||||
0x7b, 0x01, 0x05, 0x25, 0xc3, 0xb9, 0x03, 0xd7, 0x2a, 0x56, 0x1e, 0x85, 0x42, 0xe6, 0x96, 0x6a,
|
||||
0x5a, 0x56, 0x53, 0xeb, 0xd7, 0x16, 0x6c, 0x3c, 0x61, 0xf2, 0xe1, 0x98, 0x8e, 0x98, 0xcf, 0x7e,
|
||||
0x9e, 0x31, 0x21, 0x89, 0x0d, 0xc5, 0x3f, 0xab, 0xf7, 0x17, 0xa4, 0xb2, 0x15, 0xf0, 0x58, 0x52,
|
||||
0x75, 0xea, 0x02, 0x81, 0x61, 0x90, 0x6b, 0xd0, 0x0e, 0x95, 0x1d, 0xbb, 0x85, 0x92, 0x9c, 0x20,
|
||||
0x3d, 0x68, 0x49, 0x3a, 0xb2, 0x17, 0x91, 0xa7, 0x96, 0x75, 0x44, 0xed, 0x26, 0xa2, 0x63, 0x20,
|
||||
0x3f, 0x8c, 0x87, 0x5c, 0x9f, 0xe5, 0xd5, 0x98, 0xfa, 0xd0, 0x49, 0xd9, 0x24, 0x14, 0x21, 0x8f,
|
||||
0x11, 0x52, 0xcb, 0x37, 0x74, 0xdd, 0x53, 0xab, 0xe9, 0xe9, 0x21, 0xbc, 0xe5, 0x33, 0x21, 0x69,
|
||||
0x2a, 0x1b, 0xce, 0x5e, 0xff, 0xcf, 0xff, 0x29, 0xbc, 0xf5, 0x38, 0xe5, 0x63, 0x2e, 0xd9, 0x65,
|
||||
0x4d, 0x29, 0x8d, 0xa3, 0x2c, 0x8a, 0x10, 0x6e, 0xc7, 0xc7, 0xb5, 0x73, 0x08, 0x5b, 0xf7, 0x06,
|
||||
0xfc, 0x0a, 0x70, 0x1e, 0xc2, 0x96, 0xcf, 0x64, 0x3a, 0xbd, 0xb4, 0xa1, 0xa7, 0xb0, 0xa9, 0x6d,
|
||||
0x7c, 0x4a, 0x65, 0x70, 0x7c, 0x30, 0x61, 0x31, 0x9a, 0x91, 0xd3, 0xc4, 0x98, 0x51, 0x6b, 0x72,
|
||||
0x17, 0xba, 0x69, 0x99, 0x96, 0x68, 0xa8, 0x7b, 0xfb, 0x9a, 0x5b, 0x54, 0x72, 0x25, 0x65, 0xfd,
|
||||
0xea, 0x46, 0xe7, 0x29, 0xac, 0x7d, 0x52, 0x78, 0x53, 0x8c, 0xf3, 0xf3, 0x98, 0xec, 0xc3, 0x16,
|
||||
0x9d, 0xd0, 0x30, 0xa2, 0x83, 0x88, 0x19, 0x3d, 0x61, 0x2f, 0xec, 0xb4, 0x76, 0x57, 0xfc, 0x79,
|
||||
0x22, 0xe7, 0x3e, 0x6c, 0x34, 0xea, 0x85, 0xec, 0x43, 0xa7, 0x68, 0x00, 0xb6, 0xb5, 0xd3, 0x3a,
|
||||
0x13, 0xa8, 0xd9, 0xe5, 0x7c, 0x00, 0xdd, 0x1f, 0xb1, 0x54, 0xe5, 0x1a, 0x62, 0xdc, 0x85, 0x8d,
|
||||
0x42, 0xa4, 0xd9, 0x1a, 0x69, 0x93, 0xed, 0xfc, 0x76, 0x09, 0xba, 0x15, 0x93, 0xe4, 0x31, 0x00,
|
||||
0x1f, 0x3c, 0x63, 0x81, 0xfc, 0x98, 0x49, 0x8a, 0x4a, 0xdd, 0xdb, 0xfb, 0x6e, 0xde, 0x6b, 0xdc,
|
||||
0x6a, 0xaf, 0x71, 0x93, 0xe7, 0x23, 0xc5, 0x10, 0xae, 0xea, 0x35, 0xee, 0xe4, 0x96, 0xfb, 0x3d,
|
||||
0xa3, 0xe7, 0x57, 0x6c, 0x90, 0xeb, 0xb0, 0x24, 0x24, 0x95, 0x99, 0xd0, 0xc1, 0xd3, 0x94, 0xaa,
|
||||
0xa4, 0x31, 0x13, 0xa2, 0xac, 0xd3, 0x82, 0x54, 0xe1, 0x0b, 0x03, 0x1e, 0xeb, 0x52, 0xc5, 0xb5,
|
||||
0xaa, 0x2e, 0x21, 0x55, 0x27, 0x1b, 0x4d, 0x75, 0xa9, 0x1a, 0x5a, 0xed, 0x17, 0x92, 0x25, 0xf6,
|
||||
0x52, 0xbe, 0x5f, 0xad, 0x55, 0x94, 0x04, 0x93, 0x9f, 0xb2, 0x70, 0x74, 0x2c, 0xed, 0xe5, 0x3c,
|
||||
0x4a, 0x86, 0x41, 0x1c, 0x58, 0xa5, 0x81, 0xcc, 0x68, 0xa4, 0x37, 0x74, 0x70, 0x43, 0x8d, 0xa7,
|
||||
0xba, 0x48, 0xca, 0xe8, 0x70, 0x6a, 0xaf, 0xec, 0x58, 0xbb, 0x6d, 0x3f, 0x27, 0x14, 0xea, 0x20,
|
||||
0x4b, 0x53, 0x16, 0x4b, 0x1b, 0x90, 0x5f, 0x90, 0x4a, 0x32, 0x64, 0x22, 0x4c, 0xd9, 0xd0, 0xee,
|
||||
0xe6, 0x12, 0x4d, 0x2a, 0x49, 0x96, 0x0c, 0x55, 0x17, 0xb6, 0x57, 0x73, 0x89, 0x26, 0x15, 0x4a,
|
||||
0x93, 0x12, 0xf6, 0x1a, 0xca, 0x4a, 0x06, 0xd9, 0x81, 0x6e, 0x9a, 0xf7, 0x05, 0x36, 0xbc, 0x27,
|
||||
0xed, 0x75, 0x04, 0x59, 0x65, 0x91, 0x6d, 0x00, 0xdd, 0xe1, 0x55, 0x88, 0x37, 0x70, 0x43, 0x85,
|
||||
0x43, 0x3e, 0x54, 0x16, 0x92, 0x28, 0x0c, 0xe8, 0x13, 0x26, 0x85, 0xdd, 0xc3, 0x5c, 0x7a, 0xbb,
|
||||
0xcc, 0x25, 0x23, 0xd3, 0x79, 0x5f, 0xee, 0x55, 0xaa, 0xec, 0x17, 0x09, 0x4b, 0xc3, 0x31, 0x8b,
|
||||
0xa5, 0xb0, 0x37, 0x1b, 0xaa, 0x07, 0x46, 0x96, 0xab, 0x56, 0xf6, 0x92, 0x6f, 0xc1, 0x2a, 0x8d,
|
||||
0x69, 0x34, 0x15, 0xa1, 0xf0, 0xb3, 0x58, 0xd8, 0x04, 0x75, 0x6d, 0xa3, 0x7b, 0xaf, 0x14, 0xa2,
|
||||
0x72, 0x6d, 0x37, 0xb9, 0x0b, 0x60, 0x5a, 0xb9, 0xb0, 0xb7, 0x50, 0xf7, 0xba, 0xd1, 0xbd, 0x5f,
|
||||
0x88, 0x50, 0xb3, 0xb2, 0x93, 0xfc, 0x0c, 0xda, 0x2a, 0xf2, 0xc2, 0xbe, 0x86, 0x2a, 0x1f, 0xb9,
|
||||
0xe5, 0x75, 0xeb, 0x16, 0xd7, 0x2d, 0x2e, 0x9e, 0x16, 0x35, 0x50, 0xa6, 0xb0, 0xe1, 0x14, 0xd7,
|
||||
0xad, 0x7b, 0x9f, 0xc6, 0x34, 0x9d, 0x3e, 0x91, 0x2c, 0xf1, 0x73, 0xb3, 0xce, 0x9f, 0x17, 0x60,
|
||||
0xbd, 0x7e, 0xea, 0xff, 0x41, 0xb1, 0x14, 0xa9, 0xbf, 0x50, 0x4f, 0x7d, 0x73, 0xb1, 0xb4, 0x1a,
|
||||
0x17, 0x4b, 0x59, 0x5c, 0x8b, 0x67, 0x15, 0x57, 0xbb, 0x5e, 0x5c, 0x8d, 0x94, 0x58, 0x7a, 0x8d,
|
||||
0x94, 0x68, 0xc6, 0x75, 0xf9, 0x75, 0xe2, 0xea, 0xfc, 0xa7, 0x05, 0xeb, 0x75, 0xeb, 0xff, 0xc7,
|
||||
0x66, 0x53, 0xfc, 0xaf, 0xad, 0x33, 0xfe, 0xd7, 0xc5, 0xb9, 0xff, 0xab, 0xaa, 0xca, 0x36, 0x5e,
|
||||
0x7f, 0x9a, 0x52, 0xfc, 0x00, 0x33, 0x03, 0x9b, 0x4d, 0xc7, 0xd7, 0x94, 0xe2, 0xd3, 0x40, 0x86,
|
||||
0x13, 0x86, 0xbd, 0xa6, 0xe3, 0x6b, 0x4a, 0xc5, 0x21, 0x51, 0x46, 0xd9, 0x0b, 0xec, 0x31, 0x1d,
|
||||
0xbf, 0x20, 0x73, 0xef, 0xf8, 0x6f, 0x08, 0xdd, 0x61, 0x0c, 0x5d, 0x6f, 0x0b, 0xd0, 0x6c, 0x0b,
|
||||
0x7d, 0xe8, 0x48, 0x36, 0x4e, 0x22, 0x2a, 0x19, 0x76, 0x9a, 0x15, 0xdf, 0xd0, 0xe4, 0xab, 0xb0,
|
||||
0x29, 0x02, 0x1a, 0xb1, 0x07, 0xfc, 0x45, 0xfc, 0x80, 0xd1, 0x61, 0x14, 0xc6, 0x0c, 0x9b, 0xce,
|
||||
0x8a, 0x7f, 0x5a, 0xa0, 0x50, 0xe3, 0xdb, 0x48, 0xd8, 0x6b, 0x78, 0x3f, 0x69, 0x8a, 0x7c, 0x09,
|
||||
0x16, 0x13, 0x3e, 0x14, 0xf6, 0x3a, 0x06, 0xb8, 0x67, 0x02, 0xfc, 0x98, 0x0f, 0x31, 0xb0, 0x28,
|
||||
0x55, 0xff, 0x69, 0x12, 0xc6, 0x23, 0x6c, 0x3b, 0x1d, 0x1f, 0xd7, 0xc8, 0xe3, 0xf1, 0xc8, 0xee,
|
||||
0x69, 0x1e, 0x8f, 0x47, 0xce, 0x9f, 0x2c, 0x58, 0xd6, 0x9a, 0x6f, 0x38, 0xe2, 0xa6, 0xa5, 0xe7,
|
||||
0xc5, 0xa2, 0x5b, 0x3a, 0x46, 0x02, 0x7b, 0xaa, 0xc0, 0x68, 0x63, 0x24, 0x72, 0xda, 0xf9, 0x10,
|
||||
0xd6, 0x6a, 0x1d, 0x67, 0xee, 0x0b, 0xc5, 0xbc, 0x37, 0x17, 0x2a, 0xef, 0x4d, 0xe7, 0xdf, 0x16,
|
||||
0x2c, 0x7f, 0x97, 0x0f, 0x3e, 0x07, 0xc7, 0xde, 0x06, 0x18, 0x33, 0x99, 0x86, 0x81, 0x7a, 0x75,
|
||||
0xe8, 0xb3, 0x57, 0x38, 0xe4, 0x23, 0x58, 0x29, 0x6f, 0x99, 0x36, 0x82, 0xdb, 0xbb, 0x18, 0xb8,
|
||||
0x1f, 0x84, 0x63, 0xe6, 0x97, 0xca, 0xce, 0x67, 0x2d, 0xd8, 0x68, 0x74, 0x81, 0xcf, 0x71, 0x93,
|
||||
0xdc, 0x06, 0x10, 0x59, 0x10, 0x30, 0x21, 0x8e, 0xb2, 0x48, 0x87, 0xbe, 0xc2, 0x51, 0x7a, 0x47,
|
||||
0x34, 0x8c, 0xd8, 0x10, 0x8b, 0xbd, 0xed, 0x6b, 0x4a, 0xbd, 0x1e, 0xc2, 0x38, 0xe0, 0x71, 0x10,
|
||||
0x65, 0xa2, 0x28, 0xf9, 0xb6, 0x5f, 0xe3, 0xa9, 0x9c, 0x60, 0x69, 0xca, 0x53, 0x2c, 0xfb, 0xb6,
|
||||
0x9f, 0x13, 0xaa, 0xb0, 0x9e, 0xf1, 0x81, 0x2a, 0xf8, 0x7a, 0x61, 0xe9, 0x3c, 0xf1, 0x51, 0x4a,
|
||||
0xde, 0x07, 0x88, 0x79, 0xac, 0x79, 0x36, 0xe0, 0xde, 0x2d, 0xb3, 0xf7, 0x13, 0x23, 0xf2, 0x2b,
|
||||
0xdb, 0xc8, 0x9e, 0xea, 0xf8, 0x2a, 0xa4, 0xc2, 0xee, 0x36, 0xac, 0x7f, 0x9c, 0xf3, 0xfd, 0x62,
|
||||
0x83, 0xf3, 0x99, 0x05, 0x50, 0x9a, 0x51, 0x58, 0x27, 0x34, 0xca, 0x8a, 0xa4, 0xce, 0x89, 0x33,
|
||||
0x33, 0xac, 0x9e, 0x4d, 0xad, 0xf3, 0xb3, 0x69, 0xf1, 0x32, 0xd9, 0xf4, 0x07, 0x0b, 0x96, 0x35,
|
||||
0xf6, 0xb9, 0x75, 0xb7, 0x07, 0x3d, 0x1d, 0xad, 0xfb, 0x3c, 0x1e, 0x86, 0x32, 0x34, 0x39, 0x71,
|
||||
0x8a, 0xaf, 0xce, 0x18, 0xf0, 0x2c, 0x96, 0x08, 0xb8, 0xed, 0xe7, 0x84, 0x6a, 0x97, 0xd5, 0xa8,
|
||||
0x3d, 0x0a, 0xc7, 0x61, 0x8e, 0xb9, 0xed, 0x9f, 0x16, 0xa8, 0xb8, 0xab, 0x0c, 0xc8, 0x52, 0xbd,
|
||||
0x31, 0xcf, 0x98, 0x1a, 0xef, 0xf6, 0xbf, 0xd6, 0x60, 0x5d, 0xbf, 0xa7, 0x9f, 0xb0, 0x74, 0x12,
|
||||
0x06, 0x8c, 0x08, 0x58, 0x3f, 0x64, 0xb2, 0xfa, 0xc8, 0x7e, 0x67, 0xde, 0x6b, 0x1e, 0xbf, 0x92,
|
||||
0xfb, 0x73, 0x1f, 0xfa, 0xce, 0xfe, 0xaf, 0xfe, 0xfe, 0xcf, 0xdf, 0x2c, 0xec, 0x91, 0x5d, 0x1c,
|
||||
0x2d, 0x4c, 0x6e, 0x95, 0xf3, 0x81, 0x13, 0xf3, 0xe9, 0x31, 0xcb, 0xd7, 0x33, 0x2f, 0x54, 0x2e,
|
||||
0x66, 0xd0, 0xc3, 0x0f, 0xa2, 0x4b, 0xb9, 0xbd, 0x8b, 0x6e, 0xf7, 0x89, 0x7b, 0x51, 0xb7, 0xde,
|
||||
0x0b, 0xe5, 0x73, 0xdf, 0x22, 0x13, 0xe8, 0xa9, 0x2f, 0x99, 0x8a, 0x31, 0x41, 0xbe, 0x30, 0xcf,
|
||||
0x87, 0x99, 0x0f, 0xf4, 0xed, 0xb3, 0xc4, 0xce, 0x4d, 0x84, 0xf1, 0x1e, 0xf9, 0xe2, 0xb9, 0x30,
|
||||
0xf0, 0xd8, 0xbf, 0xb4, 0x60, 0xb3, 0x79, 0xee, 0x57, 0x7a, 0xee, 0x37, 0xc5, 0xe5, 0xa7, 0xa4,
|
||||
0xe3, 0xa1, 0xef, 0x9b, 0xe4, 0xcb, 0xaf, 0xf4, 0x6d, 0xce, 0xfe, 0x63, 0x58, 0x3d, 0x64, 0xd2,
|
||||
0x7c, 0xe1, 0x91, 0xeb, 0x6e, 0x3e, 0x74, 0x71, 0x8b, 0xa1, 0x8b, 0x7b, 0x30, 0x4e, 0xe4, 0xb4,
|
||||
0x5f, 0x3e, 0x6a, 0x6b, 0x1f, 0x98, 0xce, 0x3b, 0xe8, 0x72, 0x8b, 0x6c, 0x16, 0x2e, 0xcb, 0xaf,
|
||||
0xcb, 0xdf, 0x5b, 0xea, 0x0d, 0x55, 0x1d, 0x15, 0x90, 0xed, 0xca, 0xd3, 0x6d, 0xce, 0x0c, 0xa1,
|
||||
0x7f, 0x70, 0xb9, 0x77, 0xb0, 0xb6, 0x56, 0xa4, 0x42, 0xff, 0x2b, 0x17, 0x49, 0x05, 0x7d, 0x7d,
|
||||
0x7e, 0xc3, 0xda, 0x43, 0xc4, 0xf5, 0x89, 0x44, 0x05, 0xf1, 0xdc, 0x51, 0xc5, 0x1b, 0x41, 0x9c,
|
||||
0xe4, 0x48, 0x14, 0xe2, 0xdf, 0x59, 0xb0, 0x5a, 0x1d, 0x72, 0x90, 0x1b, 0xe5, 0x03, 0xf7, 0xf4,
|
||||
0xec, 0xe3, 0xaa, 0xd0, 0xde, 0x41, 0xb4, 0x6e, 0xff, 0xe6, 0x45, 0xd0, 0x52, 0x85, 0x43, 0x61,
|
||||
0xfd, 0x4b, 0x3e, 0x35, 0x2b, 0xb2, 0x1a, 0xe7, 0x5c, 0x65, 0x1d, 0x35, 0xe6, 0x69, 0x57, 0x05,
|
||||
0xd5, 0x47, 0xa8, 0x8f, 0xfa, 0x87, 0xe7, 0x43, 0xd5, 0xdc, 0x99, 0x27, 0x98, 0xf4, 0x4e, 0xcc,
|
||||
0x87, 0xda, 0xcc, 0x3b, 0xc1, 0xf7, 0xd1, 0xb7, 0xf7, 0xf6, 0x66, 0xde, 0x89, 0xa4, 0xa3, 0x99,
|
||||
0x3a, 0xc8, 0x1f, 0x2d, 0xe8, 0x56, 0xa6, 0x6d, 0xe4, 0x5d, 0x73, 0x88, 0xd3, 0x33, 0xb8, 0xab,
|
||||
0x3a, 0xc7, 0x3d, 0x3c, 0xc7, 0x37, 0xfb, 0x77, 0x2f, 0x78, 0x8e, 0x2c, 0x1e, 0x72, 0xef, 0xa4,
|
||||
0x78, 0x55, 0xcc, 0x8a, 0x5c, 0xa9, 0xce, 0xb1, 0x2a, 0xb9, 0x32, 0x67, 0xbc, 0xf5, 0x46, 0x72,
|
||||
0x25, 0x55, 0x38, 0x14, 0xd6, 0xc7, 0xb0, 0xac, 0x87, 0x3e, 0x67, 0x76, 0xa4, 0xf2, 0x16, 0xa8,
|
||||
0x0c, 0x93, 0x9c, 0xb7, 0xd1, 0xdd, 0x26, 0xd9, 0x28, 0xdc, 0x4d, 0x72, 0xe1, 0x77, 0x0e, 0xfe,
|
||||
0xfa, 0x72, 0xdb, 0xfa, 0xdb, 0xcb, 0x6d, 0xeb, 0x1f, 0x2f, 0xb7, 0xad, 0x9f, 0x7c, 0x70, 0xe1,
|
||||
0xf1, 0x76, 0x7d, 0x98, 0x3e, 0x58, 0x42, 0x14, 0xef, 0xff, 0x37, 0x00, 0x00, 0xff, 0xff, 0xa1,
|
||||
0x08, 0xa7, 0x61, 0x6c, 0x17, 0x00, 0x00,
|
||||
// 1821 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1c, 0x49,
|
||||
0x15, 0x57, 0x7b, 0x3c, 0xf6, 0xf8, 0x8d, 0xff, 0x96, 0xb3, 0xd9, 0xde, 0xd9, 0x60, 0x79, 0x7b,
|
||||
0x91, 0x70, 0x0c, 0x74, 0x3b, 0xde, 0x28, 0xcb, 0xf2, 0xe7, 0x60, 0x12, 0xcb, 0x1b, 0x94, 0xec,
|
||||
0x86, 0x36, 0xb0, 0x02, 0x09, 0xa2, 0x72, 0x4f, 0x79, 0xdc, 0x49, 0x4f, 0x57, 0xd3, 0x55, 0x3d,
|
||||
0x61, 0x64, 0xcd, 0x01, 0xbe, 0x00, 0x07, 0xbe, 0x02, 0x12, 0xe2, 0x84, 0x90, 0xb8, 0x70, 0xe0,
|
||||
0x8a, 0x38, 0x22, 0xf1, 0x05, 0x50, 0x84, 0x90, 0x38, 0x70, 0xe0, 0xc2, 0x19, 0xd5, 0xeb, 0xea,
|
||||
0xea, 0x3f, 0x1e, 0x27, 0x8e, 0x6c, 0x36, 0x39, 0x4d, 0xbf, 0xf7, 0xea, 0xbd, 0xf7, 0xab, 0xaa,
|
||||
0xf7, 0x5e, 0x55, 0xbd, 0x81, 0xf7, 0x93, 0xa7, 0x03, 0x8f, 0x26, 0x61, 0x10, 0x85, 0x2c, 0x96,
|
||||
0x5e, 0xca, 0xa3, 0x88, 0x67, 0xe6, 0xd7, 0x4d, 0x52, 0x2e, 0x39, 0x99, 0xd7, 0x64, 0xef, 0xc6,
|
||||
0x80, 0xf3, 0x41, 0xc4, 0x94, 0x82, 0x47, 0xe3, 0x98, 0x4b, 0x2a, 0x43, 0x1e, 0x8b, 0x7c, 0x58,
|
||||
0xef, 0xc1, 0x20, 0x94, 0x27, 0xd9, 0x91, 0x1b, 0xf0, 0xa1, 0x47, 0xd3, 0x01, 0x4f, 0x52, 0xfe,
|
||||
0x04, 0x3f, 0xbe, 0xaa, 0xf5, 0x85, 0xa7, 0xbd, 0x09, 0xcf, 0x70, 0x46, 0xb7, 0x68, 0x94, 0x9c,
|
||||
0xd0, 0x5b, 0xde, 0x80, 0xc5, 0x2c, 0xa5, 0x92, 0xf5, 0xb5, 0xb5, 0xdb, 0x4f, 0xbf, 0x26, 0xdc,
|
||||
0x90, 0xab, 0xe1, 0x43, 0x1a, 0x9c, 0x84, 0x31, 0x4b, 0xc7, 0xa5, 0xfe, 0x90, 0x49, 0xea, 0x8d,
|
||||
0xce, 0x6a, 0xbd, 0xab, 0x11, 0x22, 0x75, 0x94, 0x1d, 0x7b, 0x6c, 0x98, 0xc8, 0x71, 0x2e, 0x74,
|
||||
0xee, 0xc1, 0xaa, 0x9f, 0xfb, 0xbd, 0x1f, 0x1f, 0xf3, 0xef, 0x66, 0x2c, 0x1d, 0x13, 0x02, 0xb3,
|
||||
0x31, 0x1d, 0x32, 0xdb, 0xda, 0xb4, 0xb6, 0x16, 0x7c, 0xfc, 0x26, 0x37, 0x60, 0x41, 0xfd, 0x8a,
|
||||
0x84, 0x06, 0xcc, 0x9e, 0x41, 0x41, 0xc9, 0x70, 0x6e, 0xc3, 0xb5, 0x8a, 0x95, 0x07, 0xa1, 0x90,
|
||||
0xb9, 0xa5, 0x9a, 0x96, 0xd5, 0xd4, 0xfa, 0xa5, 0x05, 0x2b, 0x87, 0x4c, 0xde, 0x1f, 0xd2, 0x01,
|
||||
0xf3, 0xd9, 0x4f, 0x33, 0x26, 0x24, 0xb1, 0xa1, 0x58, 0x59, 0x3d, 0xbe, 0x20, 0x95, 0xad, 0x80,
|
||||
0xc7, 0x92, 0xaa, 0x59, 0x17, 0x08, 0x0c, 0x83, 0x5c, 0x83, 0x76, 0xa8, 0xec, 0xd8, 0x2d, 0x94,
|
||||
0xe4, 0x04, 0x59, 0x85, 0x96, 0xa4, 0x03, 0x7b, 0x16, 0x79, 0xea, 0xb3, 0x8e, 0xa8, 0xdd, 0x44,
|
||||
0x74, 0x02, 0xe4, 0xfb, 0x71, 0x9f, 0xeb, 0xb9, 0xbc, 0x1c, 0x53, 0x0f, 0x3a, 0x29, 0x1b, 0x85,
|
||||
0x22, 0xe4, 0x31, 0x42, 0x6a, 0xf9, 0x86, 0xae, 0x7b, 0x6a, 0x35, 0x3d, 0xdd, 0x87, 0xb7, 0x7c,
|
||||
0x26, 0x24, 0x4d, 0x65, 0xc3, 0xd9, 0xab, 0x2f, 0xfe, 0x8f, 0xe1, 0xad, 0x47, 0x29, 0x1f, 0x72,
|
||||
0xc9, 0x2e, 0x6b, 0x4a, 0x69, 0x1c, 0x67, 0x51, 0x84, 0x70, 0x3b, 0x3e, 0x7e, 0x3b, 0x07, 0xb0,
|
||||
0xbe, 0x77, 0xc4, 0xaf, 0x00, 0xe7, 0x01, 0xac, 0xfb, 0x4c, 0xa6, 0xe3, 0x4b, 0x1b, 0x7a, 0x0c,
|
||||
0x6b, 0xda, 0xc6, 0x67, 0x54, 0x06, 0x27, 0xfb, 0x23, 0x16, 0xa3, 0x19, 0x39, 0x4e, 0x8c, 0x19,
|
||||
0xf5, 0x4d, 0xee, 0x40, 0x37, 0x2d, 0xc3, 0x12, 0x0d, 0x75, 0x77, 0xaf, 0xb9, 0x45, 0x26, 0x57,
|
||||
0x42, 0xd6, 0xaf, 0x0e, 0x74, 0x1e, 0xc3, 0xd2, 0x27, 0x85, 0x37, 0xc5, 0x78, 0x71, 0x1c, 0x93,
|
||||
0x1d, 0x58, 0xa7, 0x23, 0x1a, 0x46, 0xf4, 0x28, 0x62, 0x46, 0x4f, 0xd8, 0x33, 0x9b, 0xad, 0xad,
|
||||
0x05, 0x7f, 0x9a, 0xc8, 0xb9, 0x0b, 0x2b, 0x8d, 0x7c, 0x21, 0x3b, 0xd0, 0x29, 0x0a, 0x80, 0x6d,
|
||||
0x6d, 0xb6, 0xce, 0x05, 0x6a, 0x46, 0x39, 0x1f, 0x42, 0xf7, 0x07, 0x2c, 0x55, 0xb1, 0x86, 0x18,
|
||||
0xb7, 0x60, 0xa5, 0x10, 0x69, 0xb6, 0x46, 0xda, 0x64, 0x3b, 0xbf, 0x99, 0x83, 0x6e, 0xc5, 0x24,
|
||||
0x79, 0x04, 0xc0, 0x8f, 0x9e, 0xb0, 0x40, 0x3e, 0x64, 0x92, 0xa2, 0x52, 0x77, 0x77, 0xc7, 0xcd,
|
||||
0x6b, 0x8d, 0x5b, 0xad, 0x35, 0x6e, 0xf2, 0x74, 0xa0, 0x18, 0xc2, 0x55, 0xb5, 0xc6, 0x1d, 0xdd,
|
||||
0x72, 0x3f, 0x35, 0x7a, 0x7e, 0xc5, 0x06, 0xb9, 0x0e, 0x73, 0x42, 0x52, 0x99, 0x09, 0xbd, 0x79,
|
||||
0x9a, 0x52, 0x99, 0x34, 0x64, 0x42, 0x94, 0x79, 0x5a, 0x90, 0x6a, 0xfb, 0xc2, 0x80, 0xc7, 0x3a,
|
||||
0x55, 0xf1, 0x5b, 0x65, 0x97, 0x90, 0xaa, 0x92, 0x0d, 0xc6, 0x3a, 0x55, 0x0d, 0xad, 0xc6, 0x0b,
|
||||
0xc9, 0x12, 0x7b, 0x2e, 0x1f, 0xaf, 0xbe, 0xd5, 0x2e, 0x09, 0x26, 0x3f, 0x63, 0xe1, 0xe0, 0x44,
|
||||
0xda, 0xf3, 0xf9, 0x2e, 0x19, 0x06, 0x71, 0x60, 0x91, 0x06, 0x32, 0xa3, 0x91, 0x1e, 0xd0, 0xc1,
|
||||
0x01, 0x35, 0x9e, 0xaa, 0x22, 0x29, 0xa3, 0xfd, 0xb1, 0xbd, 0xb0, 0x69, 0x6d, 0xb5, 0xfd, 0x9c,
|
||||
0x50, 0xa8, 0x83, 0x2c, 0x4d, 0x59, 0x2c, 0x6d, 0x40, 0x7e, 0x41, 0x2a, 0x49, 0x9f, 0x89, 0x30,
|
||||
0x65, 0x7d, 0xbb, 0x9b, 0x4b, 0x34, 0xa9, 0x24, 0x59, 0xd2, 0x57, 0x55, 0xd8, 0x5e, 0xcc, 0x25,
|
||||
0x9a, 0x54, 0x28, 0x4d, 0x48, 0xd8, 0x4b, 0x28, 0x2b, 0x19, 0x64, 0x13, 0xba, 0x69, 0x5e, 0x17,
|
||||
0x58, 0x7f, 0x4f, 0xda, 0xcb, 0x08, 0xb2, 0xca, 0x22, 0x1b, 0x00, 0xba, 0xc2, 0xab, 0x2d, 0x5e,
|
||||
0xc1, 0x01, 0x15, 0x0e, 0xf9, 0x48, 0x59, 0x48, 0xa2, 0x30, 0xa0, 0x87, 0x4c, 0x0a, 0x7b, 0x15,
|
||||
0x63, 0xe9, 0xed, 0x32, 0x96, 0x8c, 0x4c, 0xc7, 0x7d, 0x39, 0x56, 0xa9, 0xb2, 0x9f, 0x25, 0x2c,
|
||||
0x0d, 0x87, 0x2c, 0x96, 0xc2, 0x5e, 0x6b, 0xa8, 0xee, 0x1b, 0x59, 0xae, 0x5a, 0x19, 0x4b, 0xbe,
|
||||
0x09, 0x8b, 0x34, 0xa6, 0xd1, 0x58, 0x84, 0xc2, 0xcf, 0x62, 0x61, 0x13, 0xd4, 0xb5, 0x8d, 0xee,
|
||||
0x5e, 0x29, 0x44, 0xe5, 0xda, 0x68, 0x72, 0x07, 0xc0, 0x94, 0x72, 0x61, 0xaf, 0xa3, 0xee, 0x75,
|
||||
0xa3, 0x7b, 0xb7, 0x10, 0xa1, 0x66, 0x65, 0x24, 0xf9, 0x09, 0xb4, 0xd5, 0xce, 0x0b, 0xfb, 0x1a,
|
||||
0xaa, 0x7c, 0xec, 0x96, 0xc7, 0xad, 0x5b, 0x1c, 0xb7, 0xf8, 0xf1, 0xb8, 0xc8, 0x81, 0x32, 0x84,
|
||||
0x0d, 0xa7, 0x38, 0x6e, 0xdd, 0xbb, 0x34, 0xa6, 0xe9, 0xf8, 0x50, 0xb2, 0xc4, 0xcf, 0xcd, 0x3a,
|
||||
0x7f, 0x9a, 0x81, 0xe5, 0xfa, 0xac, 0xff, 0x0f, 0xc9, 0x52, 0x84, 0xfe, 0x4c, 0x3d, 0xf4, 0xcd,
|
||||
0xc1, 0xd2, 0x6a, 0x1c, 0x2c, 0x65, 0x72, 0xcd, 0x9e, 0x97, 0x5c, 0xed, 0x7a, 0x72, 0x35, 0x42,
|
||||
0x62, 0xee, 0x15, 0x42, 0xa2, 0xb9, 0xaf, 0xf3, 0xaf, 0xb2, 0xaf, 0xce, 0x7f, 0x5b, 0xb0, 0x5c,
|
||||
0xb7, 0xfe, 0x39, 0x16, 0x9b, 0x62, 0x5d, 0x5b, 0xe7, 0xac, 0xeb, 0xec, 0xd4, 0x75, 0x55, 0x59,
|
||||
0xd9, 0xc6, 0xe3, 0x4f, 0x53, 0x8a, 0x1f, 0x60, 0x64, 0x60, 0xb1, 0xe9, 0xf8, 0x9a, 0x52, 0x7c,
|
||||
0x1a, 0xc8, 0x70, 0xc4, 0xb0, 0xd6, 0x74, 0x7c, 0x4d, 0xa9, 0x7d, 0x48, 0x94, 0x51, 0xf6, 0x0c,
|
||||
0x6b, 0x4c, 0xc7, 0x2f, 0xc8, 0xdc, 0x3b, 0xae, 0x86, 0xd0, 0x15, 0xc6, 0xd0, 0xf5, 0xb2, 0x00,
|
||||
0xcd, 0xb2, 0xd0, 0x83, 0x8e, 0x64, 0xc3, 0x24, 0xa2, 0x92, 0x61, 0xa5, 0x59, 0xf0, 0x0d, 0x4d,
|
||||
0xbe, 0x02, 0x6b, 0x22, 0xa0, 0x11, 0xbb, 0xc7, 0x9f, 0xc5, 0xf7, 0x18, 0xed, 0x47, 0x61, 0xcc,
|
||||
0xb0, 0xe8, 0x2c, 0xf8, 0x67, 0x05, 0x0a, 0x35, 0xde, 0x8d, 0x84, 0xbd, 0x84, 0xe7, 0x93, 0xa6,
|
||||
0xc8, 0x17, 0x61, 0x36, 0xe1, 0x7d, 0x61, 0x2f, 0xe3, 0x06, 0xaf, 0x9a, 0x0d, 0x7e, 0xc4, 0xfb,
|
||||
0xb8, 0xb1, 0x28, 0x55, 0x6b, 0x9a, 0x84, 0xf1, 0x00, 0xcb, 0x4e, 0xc7, 0xc7, 0x6f, 0xe4, 0xf1,
|
||||
0x78, 0x60, 0xaf, 0x6a, 0x1e, 0x8f, 0x07, 0xce, 0x1f, 0x2d, 0x98, 0xd7, 0x9a, 0xaf, 0x79, 0xc7,
|
||||
0x4d, 0x49, 0xcf, 0x93, 0x45, 0x97, 0x74, 0xdc, 0x09, 0xac, 0xa9, 0x02, 0x77, 0x1b, 0x77, 0x22,
|
||||
0xa7, 0x9d, 0x8f, 0x60, 0xa9, 0x56, 0x71, 0xa6, 0xde, 0x50, 0xcc, 0x7d, 0x73, 0xa6, 0x72, 0xdf,
|
||||
0x74, 0xfe, 0x63, 0xc1, 0xfc, 0x77, 0xf8, 0xd1, 0x1b, 0x30, 0xed, 0x0d, 0x80, 0x21, 0x93, 0x69,
|
||||
0x18, 0xa8, 0x5b, 0x87, 0x9e, 0x7b, 0x85, 0x43, 0x3e, 0x86, 0x85, 0xf2, 0x94, 0x69, 0x23, 0xb8,
|
||||
0xed, 0x8b, 0x81, 0xfb, 0x5e, 0x38, 0x64, 0x7e, 0xa9, 0xec, 0xfc, 0xd3, 0x02, 0xbb, 0x52, 0x05,
|
||||
0x0e, 0x13, 0x16, 0xec, 0xc5, 0xfd, 0xc3, 0x1c, 0x1a, 0x85, 0x59, 0x91, 0xb0, 0x40, 0x4f, 0xff,
|
||||
0xe1, 0xe5, 0xea, 0x73, 0xc3, 0x8b, 0x8f, 0xa6, 0xc9, 0xa0, 0xb6, 0x2a, 0xdd, 0xdd, 0x4f, 0xaf,
|
||||
0xce, 0x09, 0x9a, 0x2d, 0x96, 0xd9, 0xf9, 0x77, 0x0b, 0x56, 0x1a, 0xe5, 0xee, 0x0d, 0x3e, 0x0d,
|
||||
0x36, 0x00, 0x44, 0x16, 0x04, 0x4c, 0x88, 0xe3, 0x2c, 0xd2, 0x31, 0x5e, 0xe1, 0x28, 0xbd, 0x63,
|
||||
0x1a, 0x46, 0xac, 0x8f, 0x55, 0xad, 0xed, 0x6b, 0x4a, 0x5d, 0x93, 0xc2, 0x38, 0xe0, 0x71, 0x10,
|
||||
0x65, 0xa2, 0xa8, 0x6d, 0x6d, 0xbf, 0xc6, 0x53, 0xc1, 0xcf, 0xd2, 0x94, 0xa7, 0x58, 0xdf, 0xda,
|
||||
0x7e, 0x4e, 0xa8, 0x0a, 0xf2, 0x84, 0x1f, 0xa9, 0xca, 0x56, 0xaf, 0x20, 0x3a, 0x21, 0x7c, 0x94,
|
||||
0x92, 0x0f, 0x00, 0x62, 0x1e, 0x6b, 0x9e, 0x0d, 0x38, 0x76, 0xdd, 0x8c, 0xfd, 0xc4, 0x88, 0xfc,
|
||||
0xca, 0x30, 0xb2, 0xad, 0x8e, 0x36, 0x15, 0xbb, 0xc2, 0xee, 0x36, 0xac, 0x3f, 0xcc, 0xf9, 0x7e,
|
||||
0x31, 0x80, 0x1c, 0xc0, 0x92, 0xa8, 0xc6, 0x20, 0x96, 0xc2, 0xee, 0xee, 0x7b, 0xd3, 0x8e, 0xac,
|
||||
0x5a, 0xb0, 0xfa, 0x75, 0x3d, 0xe7, 0xd7, 0x16, 0x40, 0x89, 0x47, 0x4d, 0x7a, 0x44, 0xa3, 0xac,
|
||||
0x28, 0x03, 0x39, 0x71, 0x6e, 0x4e, 0xd6, 0xf3, 0xaf, 0xf5, 0xe2, 0xfc, 0x9b, 0xbd, 0x4c, 0xfe,
|
||||
0xfd, 0xde, 0x82, 0x79, 0xbd, 0x08, 0x53, 0x2b, 0xd5, 0x36, 0xac, 0xea, 0x6d, 0xbf, 0xcb, 0xe3,
|
||||
0x7e, 0x28, 0x43, 0x13, 0x5c, 0x67, 0xf8, 0x6a, 0x8e, 0x01, 0xcf, 0x62, 0x89, 0x80, 0xdb, 0x7e,
|
||||
0x4e, 0xa8, 0x03, 0xa6, 0xba, 0xfd, 0x0f, 0xc2, 0x61, 0x98, 0x63, 0x6e, 0xfb, 0x67, 0x05, 0x2a,
|
||||
0x80, 0x54, 0x28, 0x65, 0xa9, 0x1e, 0x98, 0x87, 0x5e, 0x8d, 0xb7, 0xfb, 0xaf, 0x25, 0x58, 0xd6,
|
||||
0x2f, 0x90, 0x43, 0x96, 0x8e, 0xc2, 0x80, 0x11, 0x01, 0xcb, 0x07, 0x4c, 0x56, 0x9f, 0x25, 0xef,
|
||||
0x4c, 0x7b, 0xff, 0x60, 0x5f, 0xa1, 0x37, 0xf5, 0x69, 0xe4, 0xec, 0xfc, 0xe2, 0x6f, 0xff, 0xf8,
|
||||
0xd5, 0xcc, 0x36, 0xd9, 0xc2, 0x66, 0xcc, 0xe8, 0x56, 0xd9, 0x51, 0x39, 0x35, 0x8f, 0xb5, 0x49,
|
||||
0xfe, 0x3d, 0xf1, 0x42, 0xe5, 0x62, 0x02, 0xab, 0xf8, 0x84, 0xbc, 0x94, 0xdb, 0x3b, 0xe8, 0x76,
|
||||
0x87, 0xb8, 0x17, 0x75, 0xeb, 0x3d, 0x53, 0x3e, 0x77, 0x2c, 0x32, 0x82, 0x55, 0xf5, 0xf6, 0xab,
|
||||
0x18, 0x13, 0xe4, 0x0b, 0xd3, 0x7c, 0x98, 0x8e, 0x4a, 0xcf, 0x3e, 0x4f, 0xec, 0xdc, 0x44, 0x18,
|
||||
0xef, 0x93, 0xf7, 0x5e, 0x08, 0x03, 0xa7, 0xfd, 0x73, 0x0b, 0xd6, 0x9a, 0xf3, 0x7e, 0xa9, 0xe7,
|
||||
0x5e, 0x53, 0x5c, 0x3e, 0xbe, 0x1d, 0x0f, 0x7d, 0xdf, 0x24, 0x5f, 0x7a, 0xa9, 0x6f, 0x33, 0xf7,
|
||||
0x1f, 0xc2, 0xe2, 0x01, 0x93, 0xe6, 0x4d, 0x4c, 0xae, 0xbb, 0x79, 0x9b, 0xca, 0x2d, 0xda, 0x54,
|
||||
0xee, 0xfe, 0x30, 0x91, 0xe3, 0x5e, 0xf9, 0x0c, 0xa8, 0x3d, 0xc9, 0x9d, 0x77, 0xd0, 0xe5, 0x3a,
|
||||
0x59, 0x2b, 0x5c, 0x96, 0xef, 0xf1, 0xdf, 0x59, 0xea, 0xd6, 0x59, 0x6d, 0xae, 0x90, 0x8d, 0xca,
|
||||
0x65, 0x77, 0x4a, 0xd7, 0xa5, 0xb7, 0x7f, 0xb9, 0x43, 0x43, 0x5b, 0x2b, 0x42, 0xa1, 0xf7, 0xe5,
|
||||
0x8b, 0x84, 0x82, 0xbe, 0x70, 0x7c, 0xdd, 0xda, 0x46, 0xc4, 0xf5, 0x1e, 0x4e, 0x05, 0xf1, 0xd4,
|
||||
0xe6, 0xce, 0x6b, 0x41, 0x9c, 0xe4, 0x48, 0x14, 0xe2, 0xdf, 0x5a, 0xb0, 0x58, 0x6d, 0x0b, 0x91,
|
||||
0x1b, 0x65, 0x7d, 0x3d, 0xdb, 0x2d, 0xba, 0x2a, 0xb4, 0xb7, 0x11, 0xad, 0xdb, 0xbb, 0x79, 0x11,
|
||||
0xb4, 0x54, 0xe1, 0x50, 0x58, 0xff, 0x9c, 0xf7, 0x19, 0x8b, 0xa8, 0xc6, 0xce, 0x60, 0x99, 0x47,
|
||||
0x8d, 0x0e, 0xe4, 0x55, 0x41, 0xf5, 0x11, 0xea, 0x83, 0xde, 0xc1, 0x8b, 0xa1, 0x6a, 0xee, 0xc4,
|
||||
0x13, 0x4c, 0x7a, 0xa7, 0xe6, 0x69, 0x3b, 0xf1, 0x4e, 0xf1, 0x46, 0xf9, 0xad, 0xed, 0xed, 0x89,
|
||||
0x77, 0x2a, 0xe9, 0x60, 0xa2, 0x26, 0xf2, 0x07, 0x0b, 0xba, 0x95, 0xfe, 0x24, 0x79, 0xd7, 0x4c,
|
||||
0xe2, 0x6c, 0xd7, 0xf2, 0xaa, 0xe6, 0xb1, 0x87, 0xf3, 0xf8, 0x46, 0xef, 0xce, 0x05, 0xe7, 0x91,
|
||||
0xc5, 0x7d, 0xee, 0x9d, 0x16, 0xd7, 0x93, 0x49, 0x11, 0x2b, 0xd5, 0xce, 0x5f, 0x25, 0x56, 0xa6,
|
||||
0x34, 0x04, 0x5f, 0x4b, 0xac, 0xa4, 0x0a, 0x87, 0xc2, 0xfa, 0x08, 0xe6, 0x75, 0x9b, 0xec, 0xdc,
|
||||
0x8a, 0x54, 0x9e, 0x02, 0x95, 0xf6, 0x9b, 0xf3, 0x36, 0xba, 0x5b, 0x23, 0x2b, 0x85, 0xbb, 0x51,
|
||||
0x2e, 0xfc, 0xf6, 0xfe, 0x5f, 0x9e, 0x6f, 0x58, 0x7f, 0x7d, 0xbe, 0x61, 0xfd, 0xfd, 0xf9, 0x86,
|
||||
0xf5, 0xa3, 0x0f, 0x2f, 0xfc, 0x87, 0x40, 0xfd, 0xef, 0x87, 0xa3, 0x39, 0x44, 0xf1, 0xc1, 0xff,
|
||||
0x02, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x35, 0xff, 0xe4, 0x9e, 0x18, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@ -3496,6 +3567,57 @@ func (m *JobInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *AnalysisRunSpecAndStatus) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *AnalysisRunSpecAndStatus) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *AnalysisRunSpecAndStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.Status != nil {
|
||||
{
|
||||
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintRollout(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if m.Spec != nil {
|
||||
{
|
||||
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintRollout(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *AnalysisRunInfo) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
|
@ -3520,6 +3642,18 @@ func (m *AnalysisRunInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.SpecAndStatus != nil {
|
||||
{
|
||||
size, err := m.SpecAndStatus.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintRollout(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x62
|
||||
}
|
||||
if len(m.Metrics) > 0 {
|
||||
for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
|
@ -4287,6 +4421,26 @@ func (m *JobInfo) Size() (n int) {
|
|||
return n
|
||||
}
|
||||
|
||||
func (m *AnalysisRunSpecAndStatus) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Spec != nil {
|
||||
l = m.Spec.Size()
|
||||
n += 1 + l + sovRollout(uint64(l))
|
||||
}
|
||||
if m.Status != nil {
|
||||
l = m.Status.Size()
|
||||
n += 1 + l + sovRollout(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *AnalysisRunInfo) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
|
@ -4338,6 +4492,10 @@ func (m *AnalysisRunInfo) Size() (n int) {
|
|||
n += 1 + l + sovRollout(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.SpecAndStatus != nil {
|
||||
l = m.SpecAndStatus.Size()
|
||||
n += 1 + l + sovRollout(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
|
@ -7736,6 +7894,129 @@ func (m *JobInfo) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
func (m *AnalysisRunSpecAndStatus) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRollout
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: AnalysisRunSpecAndStatus: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: AnalysisRunSpecAndStatus: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRollout
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthRollout
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRollout
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Spec == nil {
|
||||
m.Spec = &v1alpha1.AnalysisRunSpec{}
|
||||
}
|
||||
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRollout
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthRollout
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRollout
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Status == nil {
|
||||
m.Status = &v1alpha1.AnalysisRunStatus{}
|
||||
}
|
||||
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRollout(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthRollout
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *AnalysisRunInfo) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
@ -8062,6 +8343,42 @@ func (m *AnalysisRunInfo) Unmarshal(dAtA []byte) error {
|
|||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 12:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field SpecAndStatus", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRollout
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthRollout
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthRollout
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.SpecAndStatus == nil {
|
||||
m.SpecAndStatus = &AnalysisRunSpecAndStatus{}
|
||||
}
|
||||
if err := m.SpecAndStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRollout(dAtA[iNdEx:])
|
||||
|
|
|
@ -148,8 +148,16 @@ message JobInfo {
|
|||
k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5;
|
||||
}
|
||||
|
||||
message AnalysisRunSpecAndStatus {
|
||||
github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunSpec spec = 1;
|
||||
github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunStatus status = 2;
|
||||
}
|
||||
|
||||
message AnalysisRunInfo {
|
||||
k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta objectMeta = 1;
|
||||
/*
|
||||
field type from 161 -170 will be deprecated in future.
|
||||
*/
|
||||
string icon = 2;
|
||||
int64 revision = 3;
|
||||
string status = 4;
|
||||
|
@ -160,6 +168,8 @@ message AnalysisRunInfo {
|
|||
repeated JobInfo jobs = 9;
|
||||
repeated NonJobInfo nonJobInfo = 10;
|
||||
repeated Metrics metrics = 11;
|
||||
/* The new API changes should use SpecAndStatus field type. */
|
||||
AnalysisRunSpecAndStatus specAndStatus = 12;
|
||||
}
|
||||
|
||||
message NonJobInfo {
|
||||
|
|
|
@ -598,6 +598,77 @@
|
|||
},
|
||||
"title": "AnalysisRunMetadata extra labels to add to the AnalysisRun"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunSpec": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"metrics": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Metric"
|
||||
},
|
||||
"title": "Metrics contains the list of metrics to query as part of an analysis run\n+patchMergeKey=name\n+patchStrategy=merge"
|
||||
},
|
||||
"args": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Argument"
|
||||
},
|
||||
"title": "Args are the list of arguments used in this run\n+optional\n+patchMergeKey=name\n+patchStrategy=merge"
|
||||
},
|
||||
"terminate": {
|
||||
"type": "boolean",
|
||||
"title": "Terminate is used to prematurely stop the run (e.g. rollout completed and analysis is no longer desired)"
|
||||
},
|
||||
"dryRun": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun"
|
||||
},
|
||||
"title": "DryRun object contains the settings for running the analysis in Dry-Run mode\n+patchMergeKey=metricName\n+patchStrategy=merge\n+optional"
|
||||
},
|
||||
"measurementRetention": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MeasurementRetention"
|
||||
},
|
||||
"title": "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis\n+patchMergeKey=metricName\n+patchStrategy=merge\n+optional"
|
||||
}
|
||||
},
|
||||
"title": "AnalysisRunSpec is the spec for a AnalysisRun resource"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"phase": {
|
||||
"type": "string",
|
||||
"title": "Phase is the status of the analysis run"
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"title": "Message is a message explaining current status"
|
||||
},
|
||||
"metricResults": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MetricResult"
|
||||
},
|
||||
"title": "MetricResults contains the metrics collected during the run"
|
||||
},
|
||||
"startedAt": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time",
|
||||
"title": "StartedAt indicates when the analysisRun first started"
|
||||
},
|
||||
"runSummary": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RunSummary",
|
||||
"title": "RunSummary contains the final results from the metric executions"
|
||||
},
|
||||
"dryRunSummary": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RunSummary",
|
||||
"title": "DryRunSummary contains the final results from the metric executions in the dry-run mode"
|
||||
}
|
||||
},
|
||||
"title": "AnalysisRunStatus is the status for a AnalysisRun resource"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunStrategy": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -710,6 +781,24 @@
|
|||
},
|
||||
"title": "AppMeshVirtualService holds information on the virtual service the rollout needs to modify"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Argument": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name is the name of the argument"
|
||||
},
|
||||
"value": {
|
||||
"type": "string",
|
||||
"title": "Value is the value of the argument\n+optional"
|
||||
},
|
||||
"valueFrom": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ValueFrom",
|
||||
"title": "ValueFrom is a reference to where a secret is stored. This field is one of the fields with valueFrom\n+optional"
|
||||
}
|
||||
},
|
||||
"title": "Argument is an argument to an AnalysisRun"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ArgumentValueFrom": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -970,6 +1059,105 @@
|
|||
},
|
||||
"title": "CanaryStrategy defines parameters for a Replica Based Canary"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"interval": {
|
||||
"type": "string"
|
||||
},
|
||||
"metricDataQueries": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricDataQuery"
|
||||
}
|
||||
}
|
||||
},
|
||||
"title": "CloudWatchMetric defines the cloudwatch query to perform canary analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricDataQuery": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"expression": {
|
||||
"type": "string"
|
||||
},
|
||||
"label": {
|
||||
"type": "string"
|
||||
},
|
||||
"metricStat": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricStat"
|
||||
},
|
||||
"period": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString"
|
||||
},
|
||||
"returnData": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"title": "CloudWatchMetricDataQuery defines the cloudwatch query"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricStat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"metric": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricStatMetric"
|
||||
},
|
||||
"period": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString"
|
||||
},
|
||||
"stat": {
|
||||
"type": "string"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricStatMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"dimensions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricStatMetricDimension"
|
||||
}
|
||||
},
|
||||
"metricName": {
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetricStatMetricDimension": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DatadogMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"interval": {
|
||||
"type": "string"
|
||||
},
|
||||
"query": {
|
||||
"type": "string"
|
||||
},
|
||||
"apiVersion": {
|
||||
"type": "string",
|
||||
"description": "ApiVersion refers to the Datadog API version being used (default: v1). v1 will eventually be deprecated."
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -989,6 +1177,20 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.GraphiteMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"address": {
|
||||
"type": "string",
|
||||
"title": "Address is the HTTP address and port of the Graphite server"
|
||||
},
|
||||
"query": {
|
||||
"type": "string",
|
||||
"title": "Query is a raw Graphite query to perform"
|
||||
}
|
||||
},
|
||||
"title": "GraphiteMetric defines the Graphite query to perform canary analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.HeaderRoutingMatch": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1002,6 +1204,20 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.InfluxdbMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"profile": {
|
||||
"type": "string",
|
||||
"title": "Profile is the name of the secret holding InfluxDB account configuration"
|
||||
},
|
||||
"query": {
|
||||
"type": "string",
|
||||
"title": "Query is a raw InfluxDB flux query to perform"
|
||||
}
|
||||
},
|
||||
"title": "InfluxdbMetric defines the InfluxDB Flux query to perform canary analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.IstioDestinationRule": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1072,6 +1288,77 @@
|
|||
},
|
||||
"title": "IstioVirtualService holds information on the virtual service the rollout needs to modify"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.JobMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"metadata": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta"
|
||||
},
|
||||
"spec": {
|
||||
"$ref": "#/definitions/k8s.io.api.batch.v1.JobSpec"
|
||||
}
|
||||
},
|
||||
"title": "JobMetric defines a job to run which acts as a metric"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"address": {
|
||||
"type": "string"
|
||||
},
|
||||
"application": {
|
||||
"type": "string"
|
||||
},
|
||||
"canaryConfigName": {
|
||||
"type": "string"
|
||||
},
|
||||
"metricsAccountName": {
|
||||
"type": "string"
|
||||
},
|
||||
"configurationAccountName": {
|
||||
"type": "string"
|
||||
},
|
||||
"storageAccountName": {
|
||||
"type": "string"
|
||||
},
|
||||
"threshold": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaThreshold"
|
||||
},
|
||||
"scopes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaScope"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaScope": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"controlScope": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ScopeDetail"
|
||||
},
|
||||
"experimentScope": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ScopeDetail"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaThreshold": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"pass": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
},
|
||||
"marginal": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MangedRoutes": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1080,6 +1367,43 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Measurement": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"phase": {
|
||||
"type": "string",
|
||||
"title": "Phase is the status of this single measurement"
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"title": "Message contains a message describing current condition (e.g. error messages)"
|
||||
},
|
||||
"startedAt": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time",
|
||||
"title": "StartedAt is the timestamp in which this measurement started to be measured"
|
||||
},
|
||||
"finishedAt": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time",
|
||||
"title": "FinishedAt is the timestamp in which this measurement completed and value was collected"
|
||||
},
|
||||
"value": {
|
||||
"type": "string",
|
||||
"title": "Value is the measured value of the metric"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Metadata stores additional metadata about this metric result, used by the different providers\n(e.g. kayenta run ID, job name)"
|
||||
},
|
||||
"resumeAt": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time",
|
||||
"title": "ResumeAt is the timestamp when the analysisRun should try to resume the measurement"
|
||||
}
|
||||
},
|
||||
"title": "Measurement is a point in time result value of a single metric, and the time it was measured"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MeasurementRetention": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1095,6 +1419,190 @@
|
|||
},
|
||||
"description": "MeasurementRetention defines the settings for retaining the number of measurements during the analysis."
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Metric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name is the name of the metric"
|
||||
},
|
||||
"interval": {
|
||||
"type": "string",
|
||||
"title": "Interval defines an interval string (e.g. 30s, 5m, 1h) between each measurement.\nIf omitted, will perform a single measurement"
|
||||
},
|
||||
"initialDelay": {
|
||||
"type": "string",
|
||||
"title": "InitialDelay how long the AnalysisRun should wait before starting this metric"
|
||||
},
|
||||
"count": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString",
|
||||
"description": "Count is the number of times to run the measurement. If both interval and count are omitted,\nthe effective count is 1. If only interval is specified, metric runs indefinitely.\nIf count \u003e 1, interval must be specified."
|
||||
},
|
||||
"successCondition": {
|
||||
"type": "string",
|
||||
"title": "SuccessCondition is an expression which determines if a measurement is considered successful\nExpression is a goevaluate expression. The keyword `result` is a variable reference to the\nvalue of measurement. Results can be both structured data or primitive.\nExamples:\n result \u003e 10\n (result.requests_made * result.requests_succeeded / 100) \u003e= 90"
|
||||
},
|
||||
"failureCondition": {
|
||||
"type": "string",
|
||||
"title": "FailureCondition is an expression which determines if a measurement is considered failed\nIf both success and failure conditions are specified, and the measurement does not fall into\neither condition, the measurement is considered Inconclusive"
|
||||
},
|
||||
"failureLimit": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString",
|
||||
"title": "FailureLimit is the maximum number of times the measurement is allowed to fail, before the\nentire metric is considered Failed (default: 0)"
|
||||
},
|
||||
"inconclusiveLimit": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString",
|
||||
"title": "InconclusiveLimit is the maximum number of times the measurement is allowed to measure\nInconclusive, before the entire metric is considered Inconclusive (default: 0)"
|
||||
},
|
||||
"consecutiveErrorLimit": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString",
|
||||
"title": "ConsecutiveErrorLimit is the maximum number of times the measurement is allowed to error in\nsuccession, before the metric is considered error (default: 4)"
|
||||
},
|
||||
"provider": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MetricProvider",
|
||||
"title": "Provider configuration to the external system to use to verify the analysis"
|
||||
}
|
||||
},
|
||||
"title": "Metric defines a metric in which to perform analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MetricProvider": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"prometheus": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusMetric",
|
||||
"title": "Prometheus specifies the prometheus metric to query"
|
||||
},
|
||||
"kayenta": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.KayentaMetric",
|
||||
"title": "Kayenta specifies a Kayenta metric"
|
||||
},
|
||||
"web": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WebMetric",
|
||||
"title": "Web specifies a generic HTTP web metric"
|
||||
},
|
||||
"datadog": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DatadogMetric",
|
||||
"title": "Datadog specifies a datadog metric to query"
|
||||
},
|
||||
"wavefront": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WavefrontMetric",
|
||||
"title": "Wavefront specifies the wavefront metric to query"
|
||||
},
|
||||
"newRelic": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NewRelicMetric",
|
||||
"title": "NewRelic specifies the newrelic metric to query"
|
||||
},
|
||||
"job": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.JobMetric",
|
||||
"title": "Job specifies the job metric run"
|
||||
},
|
||||
"cloudWatch": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CloudWatchMetric",
|
||||
"title": "CloudWatch specifies the cloudWatch metric to query"
|
||||
},
|
||||
"graphite": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.GraphiteMetric",
|
||||
"title": "Graphite specifies the Graphite metric to query"
|
||||
},
|
||||
"influxdb": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.InfluxdbMetric",
|
||||
"title": "Influxdb specifies the influxdb metric to query"
|
||||
},
|
||||
"skywalking": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SkyWalkingMetric",
|
||||
"title": "SkyWalking specifies the skywalking metric to query"
|
||||
},
|
||||
"plugin": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string",
|
||||
"format": "byte"
|
||||
},
|
||||
"title": "+kubebuilder:validation:Schemaless\n+kubebuilder:pruning:PreserveUnknownFields\n+kubebuilder:validation:Type=object\nPlugin specifies the hashicorp go-plugin metric to query"
|
||||
}
|
||||
},
|
||||
"title": "MetricProvider which external system to use to verify the analysis\nOnly one of the fields in this struct should be non-nil"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MetricResult": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name is the name of the metric"
|
||||
},
|
||||
"phase": {
|
||||
"type": "string",
|
||||
"title": "Phase is the overall aggregate status of the metric"
|
||||
},
|
||||
"measurements": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Measurement"
|
||||
},
|
||||
"title": "Measurements holds the most recent measurements collected for the metric"
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"title": "Message contains a message describing current condition (e.g. error messages)"
|
||||
},
|
||||
"count": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Count is the number of times the metric was measured without Error\nThis is equal to the sum of Successful, Failed, Inconclusive"
|
||||
},
|
||||
"successful": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Successful is the number of times the metric was measured Successful"
|
||||
},
|
||||
"failed": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Failed is the number of times the metric was measured Failed"
|
||||
},
|
||||
"inconclusive": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Inconclusive is the number of times the metric was measured Inconclusive"
|
||||
},
|
||||
"error": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Error is the number of times an error was encountered during measurement"
|
||||
},
|
||||
"consecutiveError": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "ConsecutiveError is the number of times an error was encountered during measurement in succession\nResets to zero when non-errors are encountered"
|
||||
},
|
||||
"dryRun": {
|
||||
"type": "boolean",
|
||||
"title": "DryRun indicates whether this metric is running in a dry-run mode or not"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "Metadata stores additional metadata about this metric. It is used by different providers to store\nthe final state which gets used while taking measurements. For example, Prometheus uses this field\nto store the final resolved query after substituting the template arguments."
|
||||
}
|
||||
},
|
||||
"title": "MetricResult contain a list of the most recent measurements for a single metric along with\ncounters on how often the measurement"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NewRelicMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"profile": {
|
||||
"type": "string",
|
||||
"title": "Profile is the name of the secret holding NR account configuration"
|
||||
},
|
||||
"query": {
|
||||
"type": "string",
|
||||
"title": "Query is a raw newrelic NRQL query to perform"
|
||||
}
|
||||
},
|
||||
"title": "NewRelicMetric defines the newrelic query to perform canary analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NginxTrafficRouting": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1198,6 +1706,50 @@
|
|||
},
|
||||
"title": "PreferredDuringSchedulingIgnoredDuringExecution defines the weight of the anti-affinity injection"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusAuth": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"sigv4": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Sigv4Config",
|
||||
"title": "+optional"
|
||||
}
|
||||
},
|
||||
"title": "PrometheusMetric defines the prometheus query to perform canary analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"address": {
|
||||
"type": "string",
|
||||
"title": "Address is the HTTP address and port of the prometheus server"
|
||||
},
|
||||
"query": {
|
||||
"type": "string",
|
||||
"title": "Query is a raw prometheus query to perform"
|
||||
},
|
||||
"authentication": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusAuth",
|
||||
"title": "Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus\n+optional"
|
||||
},
|
||||
"timeout": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"title": "Timeout represents the duration within which a prometheus query should complete. It is expressed in seconds.\n+optional"
|
||||
},
|
||||
"insecure": {
|
||||
"type": "boolean",
|
||||
"title": "Insecure skips host TLS verification"
|
||||
},
|
||||
"headers": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WebMetricHeader"
|
||||
},
|
||||
"title": "Headers are optional HTTP headers to use in the request\n+optional\n+patchMergeKey=key\n+patchStrategy=merge"
|
||||
}
|
||||
},
|
||||
"title": "PrometheusMetric defines the prometheus query to perform canary analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RequiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"type": "object",
|
||||
"title": "RequiredDuringSchedulingIgnoredDuringExecution defines inter-pod scheduling rule to be RequiredDuringSchedulingIgnoredDuringExecution"
|
||||
|
@ -1708,6 +2260,37 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RunSummary": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"count": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "This is equal to the sum of Successful, Failed, Inconclusive"
|
||||
},
|
||||
"successful": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Successful is the number of times the metric was measured Successful"
|
||||
},
|
||||
"failed": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Failed is the number of times the metric was measured Failed"
|
||||
},
|
||||
"inconclusive": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Inconclusive is the number of times the metric was measured Inconclusive"
|
||||
},
|
||||
"error": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Error is the number of times an error was encountered during measurement"
|
||||
}
|
||||
},
|
||||
"title": "RunSummary contains the final results from the metric executions"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SMITrafficRouting": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1722,6 +2305,40 @@
|
|||
},
|
||||
"title": "SMITrafficRouting configuration for TrafficSplit Custom Resource to control traffic routing"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ScopeDetail": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"scope": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"type": "string"
|
||||
},
|
||||
"step": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
},
|
||||
"start": {
|
||||
"type": "string"
|
||||
},
|
||||
"end": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SecretKeyRef": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name is the name of the secret"
|
||||
},
|
||||
"key": {
|
||||
"type": "string",
|
||||
"description": "Key is the key of the secret to select from."
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SetCanaryScale": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1779,6 +2396,37 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Sigv4Config": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"address": {
|
||||
"type": "string",
|
||||
"title": "Region is the AWS Region to sign the SigV4 Request"
|
||||
},
|
||||
"profile": {
|
||||
"type": "string",
|
||||
"title": "Profile is the Credential Profile used to sign the SigV4 Request"
|
||||
},
|
||||
"roleArn": {
|
||||
"type": "string",
|
||||
"title": "RoleARN is the IAM role used to sign the SIgV4 Request"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SkyWalkingMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"address": {
|
||||
"type": "string"
|
||||
},
|
||||
"query": {
|
||||
"type": "string"
|
||||
},
|
||||
"interval": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StickinessConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1882,6 +2530,86 @@
|
|||
},
|
||||
"title": "TrafficWeights describes the current status of how traffic has been split"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ValueFrom": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"secretKeyRef": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.SecretKeyRef",
|
||||
"title": "Secret is a reference to where a secret is stored. This field is one of the fields with valueFrom\n+optional"
|
||||
},
|
||||
"fieldRef": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.FieldRef",
|
||||
"title": "FieldRef is a reference to the fields in metadata which we are referencing. This field is one of the fields with\nvalueFrom\n+optional"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WavefrontMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"address": {
|
||||
"type": "string",
|
||||
"title": "Address is the HTTP address and port of the wavefront server"
|
||||
},
|
||||
"query": {
|
||||
"type": "string",
|
||||
"title": "Query is a raw wavefront query to perform"
|
||||
}
|
||||
},
|
||||
"title": "WavefrontMetric defines the wavefront query to perform canary analysis"
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WebMetric": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"method": {
|
||||
"type": "string",
|
||||
"title": "Method is the method of the web metric (empty defaults to GET)"
|
||||
},
|
||||
"url": {
|
||||
"type": "string",
|
||||
"title": "URL is the address of the web metric"
|
||||
},
|
||||
"headers": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WebMetricHeader"
|
||||
},
|
||||
"title": "+patchMergeKey=key\n+patchStrategy=merge\nHeaders are optional HTTP headers to use in the request"
|
||||
},
|
||||
"body": {
|
||||
"type": "string",
|
||||
"title": "Body is the body of the web metric (must be POST/PUT)"
|
||||
},
|
||||
"timeoutSeconds": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"title": "TimeoutSeconds is the timeout for the request in seconds (default: 10)"
|
||||
},
|
||||
"jsonPath": {
|
||||
"type": "string",
|
||||
"title": "JSONPath is a JSON Path to use as the result variable (default: \"{$}\")"
|
||||
},
|
||||
"insecure": {
|
||||
"type": "boolean",
|
||||
"title": "Insecure skips host TLS verification"
|
||||
},
|
||||
"jsonBody": {
|
||||
"type": "string",
|
||||
"format": "byte",
|
||||
"title": "+kubebuilder:validation:Schemaless\n+kubebuilder:pruning:PreserveUnknownFields\n+kubebuilder:validation:Type=object\nJSONBody is the body of the web metric in a json format (method must be POST/PUT)"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WebMetricHeader": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"key": {
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.WeightDestination": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -1958,6 +2686,131 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"k8s.io.api.batch.v1.JobSpec": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"parallelism": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Specifies the maximum desired number of pods the job should\nrun at any given time. The actual number of pods running in steady state will\nbe less than this number when ((.spec.completions - .status.successful) \u003c .spec.parallelism),\ni.e. when the work left to do is less than max parallelism.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/\n+optional"
|
||||
},
|
||||
"completions": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Specifies the desired number of successfully finished pods the\njob should be run with. Setting to nil means that the success of any\npod signals the success of all pods, and allows parallelism to have any positive\nvalue. Setting to 1 means that parallelism is limited to 1 and the success of that\npod signals the success of the job.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/\n+optional"
|
||||
},
|
||||
"activeDeadlineSeconds": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"title": "Specifies the duration in seconds relative to the startTime that the job\nmay be continuously active before the system tries to terminate it; value\nmust be positive integer. If a Job is suspended (at creation or through an\nupdate), this timer will effectively be stopped and reset when the Job is\nresumed again.\n+optional"
|
||||
},
|
||||
"podFailurePolicy": {
|
||||
"$ref": "#/definitions/k8s.io.api.batch.v1.PodFailurePolicy",
|
||||
"description": "Specifies the policy of handling failed pods. In particular, it allows to\nspecify the set of actions and conditions which need to be\nsatisfied to take the associated action.\nIf empty, the default behaviour applies - the counter of failed pods,\nrepresented by the jobs's .status.failed field, is incremented and it is\nchecked against the backoffLimit. This field cannot be used in combination\nwith restartPolicy=OnFailure.\n\nThis field is alpha-level. To use this field, you must enable the\n`JobPodFailurePolicy` feature gate (disabled by default).\n+optional"
|
||||
},
|
||||
"backoffLimit": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "Specifies the number of retries before marking this job failed.\nDefaults to 6\n+optional"
|
||||
},
|
||||
"selector": {
|
||||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector",
|
||||
"title": "A label query over pods that should match the pod count.\nNormally, the system sets this field for you.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors\n+optional"
|
||||
},
|
||||
"manualSelector": {
|
||||
"type": "boolean",
|
||||
"title": "manualSelector controls generation of pod labels and pod selectors.\nLeave `manualSelector` unset unless you are certain what you are doing.\nWhen false or unset, the system pick labels unique to this job\nand appends those labels to the pod template. When true,\nthe user is responsible for picking unique labels and specifying\nthe selector. Failure to pick a unique label may cause this\nand other jobs to not function correctly. However, You may see\n`manualSelector=true` in jobs that were created with the old `extensions/v1beta1`\nAPI.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector\n+optional"
|
||||
},
|
||||
"template": {
|
||||
"$ref": "#/definitions/k8s.io.api.core.v1.PodTemplateSpec",
|
||||
"title": "Describes the pod that will be created when executing a job.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"
|
||||
},
|
||||
"ttlSecondsAfterFinished": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"title": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished\nexecution (either Complete or Failed). If this field is set,\nttlSecondsAfterFinished after the Job finishes, it is eligible to be\nautomatically deleted. When the Job is being deleted, its lifecycle\nguarantees (e.g. finalizers) will be honored. If this field is unset,\nthe Job won't be automatically deleted. If this field is set to zero,\nthe Job becomes eligible to be deleted immediately after it finishes.\n+optional"
|
||||
},
|
||||
"completionMode": {
|
||||
"type": "string",
|
||||
"description": "CompletionMode specifies how Pod completions are tracked. It can be\n`NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have\nbeen .spec.completions successfully completed Pods. Each Pod completion is\nhomologous to each other.\n\n`Indexed` means that the Pods of a\nJob get an associated completion index from 0 to (.spec.completions - 1),\navailable in the annotation batch.kubernetes.io/job-completion-index.\nThe Job is considered complete when there is one successfully completed Pod\nfor each index.\nWhen value is `Indexed`, .spec.completions must be specified and\n`.spec.parallelism` must be less than or equal to 10^5.\nIn addition, The Pod name takes the form\n`$(job-name)-$(index)-$(random-string)`,\nthe Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future.\nIf the Job controller observes a mode that it doesn't recognize, which\nis possible during upgrades due to version skew, the controller\nskips updates for the Job.\n+optional"
|
||||
},
|
||||
"suspend": {
|
||||
"type": "boolean",
|
||||
"description": "Suspend specifies whether the Job controller should create Pods or not. If\na Job is created with suspend set to true, no Pods are created by the Job\ncontroller. If a Job is suspended after creation (i.e. the flag goes from\nfalse to true), the Job controller will delete all active Pods associated\nwith this Job. Users must design their workload to gracefully handle this.\nSuspending a Job will reset the StartTime field of the Job, effectively\nresetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\n+optional"
|
||||
}
|
||||
},
|
||||
"description": "JobSpec describes how the job execution will look like."
|
||||
},
|
||||
"k8s.io.api.batch.v1.PodFailurePolicy": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"rules": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/k8s.io.api.batch.v1.PodFailurePolicyRule"
|
||||
},
|
||||
"title": "A list of pod failure policy rules. The rules are evaluated in order.\nOnce a rule matches a Pod failure, the remaining of the rules are ignored.\nWhen no rule matches the Pod failure, the default handling applies - the\ncounter of pod failures is incremented and it is checked against\nthe backoffLimit. At most 20 elements are allowed.\n+listType=atomic"
|
||||
}
|
||||
},
|
||||
"description": "PodFailurePolicy describes how failed pods influence the backoffLimit."
|
||||
},
|
||||
"k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"containerName": {
|
||||
"type": "string",
|
||||
"title": "Restricts the check for exit codes to the container with the\nspecified name. When null, the rule applies to all containers.\nWhen specified, it should match one the container or initContainer\nnames in the pod template.\n+optional"
|
||||
},
|
||||
"operator": {
|
||||
"type": "string",
|
||||
"description": "Represents the relationship between the container exit code(s) and the\nspecified values. Containers completed with success (exit code 0) are\nexcluded from the requirement check. Possible values are:\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown operator by assuming the requirement is not satisfied."
|
||||
},
|
||||
"values": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"title": "Specifies the set of values. Each returned container exit code (might be\nmultiple in case of multiple containers) is checked against this set of\nvalues with respect to the operator. The list of values must be ordered\nand must not contain duplicates. Value '0' cannot be used for the In operator.\nAt least one element is required. At most 255 elements are allowed.\n+listType=set"
|
||||
}
|
||||
},
|
||||
"description": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling\na failed pod based on its container exit codes. In particular, it lookups the\n.state.terminated.exitCode for each app container and init container status,\nrepresented by the .status.containerStatuses and .status.initContainerStatuses\nfields in the Pod status, respectively. Containers completed with success\n(exit code 0) are excluded from the requirement check."
|
||||
},
|
||||
"k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"description": "Specifies the required Pod condition type. To match a pod condition\nit is required that specified type equals the pod condition type."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Specifies the required Pod condition status. To match a pod condition\nit is required that the specified status equals the pod condition status.\nDefaults to True."
|
||||
}
|
||||
},
|
||||
"description": "PodFailurePolicyOnPodConditionsPattern describes a pattern for matching\nan actual pod condition type."
|
||||
},
|
||||
"k8s.io.api.batch.v1.PodFailurePolicyRule": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"type": "string",
|
||||
"description": "Specifies the action taken on a pod failure when the requirements are satisfied.\nPossible values are:\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown action by skipping the rule."
|
||||
},
|
||||
"onExitCodes": {
|
||||
"$ref": "#/definitions/k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement",
|
||||
"title": "Represents the requirement on the container exit codes.\n+optional"
|
||||
},
|
||||
"onPodConditions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern"
|
||||
},
|
||||
"title": "Represents the requirement on the pod conditions. The requirement is represented\nas a list of pod condition patterns. The requirement is satisfied if at\nleast one pattern matches an actual pod condition. At most 20 elements are allowed.\n+listType=atomic"
|
||||
}
|
||||
},
|
||||
"description": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.\nOne of OnExitCodes and onPodConditions, but not both, can be used in each rule."
|
||||
},
|
||||
"k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -4677,7 +5530,8 @@
|
|||
"$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta"
|
||||
},
|
||||
"icon": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "field type from 161 -170 will be deprecated in future."
|
||||
},
|
||||
"revision": {
|
||||
"type": "string",
|
||||
|
@ -4719,6 +5573,21 @@
|
|||
"items": {
|
||||
"$ref": "#/definitions/rollout.Metrics"
|
||||
}
|
||||
},
|
||||
"specAndStatus": {
|
||||
"$ref": "#/definitions/rollout.AnalysisRunSpecAndStatus",
|
||||
"description": "The new API changes should use SpecAndStatus field type."
|
||||
}
|
||||
}
|
||||
},
|
||||
"rollout.AnalysisRunSpecAndStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"spec": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunSpec"
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
|
|
@ -34,6 +34,7 @@ const (
|
|||
setCurrentStepIndex = `{"status":{"currentStepIndex":%d}}`
|
||||
unpausePatch = `{"spec":{"paused":false}}`
|
||||
clearPauseConditionsPatch = `{"status":{"pauseConditions":null}}`
|
||||
clearPauseConditionsAndControllerPausePatch = `{"status":{"pauseConditions":null, "controllerPause":false, "currentStepIndex":%d}}`
|
||||
unpauseAndClearPauseConditionsPatch = `{"spec":{"paused":false},"status":{"pauseConditions":null}}`
|
||||
promoteFullPatch = `{"status":{"promoteFull":true}}`
|
||||
clearPauseConditionsPatchWithStep = `{"status":{"pauseConditions":null, "currentStepIndex":%d}}`
|
||||
|
@ -133,6 +134,10 @@ func PromoteRollout(rolloutIf clientset.RolloutInterface, name string, skipCurre
|
|||
return ro, nil
|
||||
}
|
||||
|
||||
func isInconclusive(rollout *v1alpha1.Rollout) bool {
|
||||
return rollout.Spec.Strategy.Canary != nil && rollout.Status.Canary.CurrentStepAnalysisRunStatus != nil && rollout.Status.Canary.CurrentStepAnalysisRunStatus.Status == v1alpha1.AnalysisPhaseInconclusive
|
||||
}
|
||||
|
||||
func getPatches(rollout *v1alpha1.Rollout, skipCurrentStep, skipAllStep, full bool) ([]byte, []byte, []byte) {
|
||||
var specPatch, statusPatch, unifiedPatch []byte
|
||||
switch {
|
||||
|
@ -160,7 +165,18 @@ func getPatches(rollout *v1alpha1.Rollout, skipCurrentStep, skipAllStep, full bo
|
|||
if rollout.Spec.Paused {
|
||||
specPatch = []byte(unpausePatch)
|
||||
}
|
||||
if len(rollout.Status.PauseConditions) > 0 {
|
||||
// in case if canary rollout in inconclusive state, we want to unset controller pause , clean pause conditions and increment step index
|
||||
// so that rollout can proceed to next step
|
||||
// without such patch, rollout will be stuck in inconclusive state in case if next step is pause step
|
||||
if isInconclusive(rollout) && len(rollout.Status.PauseConditions) > 0 && rollout.Status.ControllerPause {
|
||||
_, index := replicasetutil.GetCurrentCanaryStep(rollout)
|
||||
if index != nil {
|
||||
if *index < int32(len(rollout.Spec.Strategy.Canary.Steps)) {
|
||||
*index++
|
||||
}
|
||||
statusPatch = []byte(fmt.Sprintf(clearPauseConditionsAndControllerPausePatch, *index))
|
||||
}
|
||||
} else if len(rollout.Status.PauseConditions) > 0 {
|
||||
statusPatch = []byte(clearPauseConditionsPatch)
|
||||
} else if rollout.Spec.Strategy.Canary != nil {
|
||||
// we only want to clear pause conditions, or increment step index (never both)
|
||||
|
|
|
@ -490,3 +490,69 @@ func TestPromoteCmdAlreadyFullyPromoted(t *testing.T) {
|
|||
assert.Equal(t, stdout, "rollout 'guestbook' fully promoted\n")
|
||||
assert.Empty(t, stderr)
|
||||
}
|
||||
|
||||
func TestPromoteInconclusiveStep(t *testing.T) {
|
||||
ro := v1alpha1.Rollout{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "guestbook",
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
},
|
||||
Spec: v1alpha1.RolloutSpec{
|
||||
Paused: true,
|
||||
Strategy: v1alpha1.RolloutStrategy{
|
||||
Canary: &v1alpha1.CanaryStrategy{
|
||||
Steps: []v1alpha1.CanaryStep{
|
||||
{
|
||||
SetWeight: pointer.Int32Ptr(1),
|
||||
},
|
||||
{
|
||||
SetWeight: pointer.Int32Ptr(2),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: v1alpha1.RolloutStatus{
|
||||
PauseConditions: []v1alpha1.PauseCondition{{
|
||||
Reason: v1alpha1.PauseReasonCanaryPauseStep,
|
||||
}},
|
||||
ControllerPause: true,
|
||||
Canary: v1alpha1.CanaryStatus{
|
||||
CurrentStepAnalysisRunStatus: &v1alpha1.RolloutAnalysisRunStatus{
|
||||
Status: v1alpha1.AnalysisPhaseInconclusive,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tf, o := options.NewFakeArgoRolloutsOptions(&ro)
|
||||
defer tf.Cleanup()
|
||||
fakeClient := o.RolloutsClient.(*fakeroclient.Clientset)
|
||||
fakeClient.PrependReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if patchAction, ok := action.(kubetesting.PatchAction); ok {
|
||||
patchRo := v1alpha1.Rollout{}
|
||||
err := json.Unmarshal(patchAction.GetPatch(), &patchRo)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ro.Status.CurrentStepIndex = patchRo.Status.CurrentStepIndex
|
||||
ro.Status.ControllerPause = patchRo.Status.ControllerPause
|
||||
ro.Status.PauseConditions = patchRo.Status.PauseConditions
|
||||
}
|
||||
return true, &ro, nil
|
||||
})
|
||||
|
||||
cmd := NewCmdPromote(o)
|
||||
cmd.PersistentPreRunE = o.PersistentPreRunE
|
||||
cmd.SetArgs([]string{"guestbook"})
|
||||
|
||||
err := cmd.Execute()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, false, ro.Status.ControllerPause)
|
||||
assert.Empty(t, ro.Status.PauseConditions)
|
||||
|
||||
stdout := o.Out.(*bytes.Buffer).String()
|
||||
stderr := o.ErrOut.(*bytes.Buffer).String()
|
||||
assert.Equal(t, stdout, "rollout 'guestbook' promoted\n")
|
||||
assert.Empty(t, stderr)
|
||||
}
|
||||
|
|
|
@ -26,6 +26,12 @@ func getAnalysisRunInfo(ownerUID types.UID, allAnalysisRuns []*v1alpha1.Analysis
|
|||
UID: run.UID,
|
||||
},
|
||||
}
|
||||
|
||||
arInfo.SpecAndStatus = &rollout.AnalysisRunSpecAndStatus{
|
||||
Spec: &run.Spec,
|
||||
Status: &run.Status,
|
||||
}
|
||||
|
||||
if run.Spec.Metrics != nil {
|
||||
for _, metric := range run.Spec.Metrics {
|
||||
|
||||
|
|
|
@ -180,7 +180,7 @@ func TestCreateBackgroundAnalysisRun(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
}
|
||||
|
||||
func TestCreateBackgroundAnalysisRunWithTemplates(t *testing.T) {
|
||||
|
@ -241,7 +241,7 @@ func TestCreateBackgroundAnalysisRunWithTemplates(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
}
|
||||
|
||||
func TestCreateBackgroundAnalysisRunWithClusterTemplates(t *testing.T) {
|
||||
|
@ -303,7 +303,7 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplates(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
}
|
||||
|
||||
func TestInvalidSpecMissingClusterTemplatesBackgroundAnalysis(t *testing.T) {
|
||||
|
@ -339,7 +339,7 @@ func TestInvalidSpecMissingClusterTemplatesBackgroundAnalysis(t *testing.T) {
|
|||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, progressingCond, string(invalidSpecBytes), strings.ReplaceAll(errmsg, "\"", "\\\""))
|
||||
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestCreateBackgroundAnalysisRunWithClusterTemplatesAndTemplate(t *testing.T) {
|
||||
|
@ -416,7 +416,7 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplatesAndTemplate(t *testing.T
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
}
|
||||
|
||||
// TestCreateAnalysisRunWithCollision ensures we will create an new analysis run with a new name
|
||||
|
@ -487,7 +487,7 @@ func TestCreateAnalysisRunWithCollision(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedAR.Name)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedAR.Name)), patch)
|
||||
}
|
||||
|
||||
// TestCreateAnalysisRunWithCollisionAndSemanticEquality will ensure we do not create an extra
|
||||
|
@ -550,7 +550,7 @@ func TestCreateAnalysisRunWithCollisionAndSemanticEquality(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ar.Name)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ar.Name)), patch)
|
||||
}
|
||||
|
||||
func TestCreateAnalysisRunOnAnalysisStep(t *testing.T) {
|
||||
|
@ -611,7 +611,7 @@ func TestCreateAnalysisRunOnAnalysisStep(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch)
|
||||
}
|
||||
|
||||
func TestFailCreateStepAnalysisRunIfInvalidTemplateRef(t *testing.T) {
|
||||
|
@ -653,7 +653,7 @@ func TestFailCreateStepAnalysisRunIfInvalidTemplateRef(t *testing.T) {
|
|||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, progressingCond, string(invalidSpecBytes), strings.ReplaceAll(errmsg, "\"", "\\\""))
|
||||
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestFailCreateBackgroundAnalysisRunIfInvalidTemplateRef(t *testing.T) {
|
||||
|
@ -698,7 +698,7 @@ func TestFailCreateBackgroundAnalysisRunIfInvalidTemplateRef(t *testing.T) {
|
|||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, progressingCond, string(invalidSpecBytes), strings.ReplaceAll(errmsg, "\"", "\\\""))
|
||||
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestFailCreateBackgroundAnalysisRunIfMetricRepeated(t *testing.T) {
|
||||
|
@ -745,7 +745,7 @@ func TestFailCreateBackgroundAnalysisRunIfMetricRepeated(t *testing.T) {
|
|||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, progressingCond, string(invalidSpecBytes), strings.ReplaceAll(errmsg, "\"", "\\\""))
|
||||
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestDoNothingWithAnalysisRunsWhileBackgroundAnalysisRunRunning(t *testing.T) {
|
||||
|
@ -798,7 +798,7 @@ func TestDoNothingWithAnalysisRunsWhileBackgroundAnalysisRunRunning(t *testing.T
|
|||
patchIndex := f.expectPatchRolloutAction(r2)
|
||||
f.run(getKey(r2, t))
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
}
|
||||
|
||||
func TestDoNothingWhileStepBasedAnalysisRunRunning(t *testing.T) {
|
||||
|
@ -847,7 +847,7 @@ func TestDoNothingWhileStepBasedAnalysisRunRunning(t *testing.T) {
|
|||
patchIndex := f.expectPatchRolloutAction(r2)
|
||||
f.run(getKey(r2, t))
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
}
|
||||
|
||||
func TestCancelOlderAnalysisRuns(t *testing.T) {
|
||||
|
@ -915,7 +915,7 @@ func TestCancelOlderAnalysisRuns(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestDeleteAnalysisRunsWithNoMatchingRS(t *testing.T) {
|
||||
|
@ -971,7 +971,7 @@ func TestDeleteAnalysisRunsWithNoMatchingRS(t *testing.T) {
|
|||
deletedAr := f.getDeletedAnalysisRun(deletedIndex)
|
||||
assert.Equal(t, deletedAr, arWithDiffPodHash.Name)
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
}
|
||||
|
||||
func TestDeleteAnalysisRunsAfterRSDelete(t *testing.T) {
|
||||
|
@ -1083,7 +1083,7 @@ func TestIncrementStepAfterSuccessfulAnalysisRun(t *testing.T) {
|
|||
}`
|
||||
condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", false)
|
||||
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition)), patch)
|
||||
}
|
||||
|
||||
func TestPausedOnInconclusiveBackgroundAnalysisRun(t *testing.T) {
|
||||
|
@ -1152,7 +1152,7 @@ func TestPausedOnInconclusiveBackgroundAnalysisRun(t *testing.T) {
|
|||
}`
|
||||
condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, v1alpha1.PauseReasonInconclusiveAnalysis, now, v1alpha1.PauseReasonInconclusiveAnalysis)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, v1alpha1.PauseReasonInconclusiveAnalysis, now, v1alpha1.PauseReasonInconclusiveAnalysis)), patch)
|
||||
}
|
||||
|
||||
func TestPausedStepAfterInconclusiveAnalysisRun(t *testing.T) {
|
||||
|
@ -1215,7 +1215,7 @@ func TestPausedStepAfterInconclusiveAnalysisRun(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
condition := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, v1alpha1.PauseReasonInconclusiveAnalysis, now, v1alpha1.PauseReasonInconclusiveAnalysis)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, v1alpha1.PauseReasonInconclusiveAnalysis, now, v1alpha1.PauseReasonInconclusiveAnalysis)), patch)
|
||||
}
|
||||
|
||||
func TestErrorConditionAfterErrorAnalysisRunStep(t *testing.T) {
|
||||
|
@ -1282,7 +1282,7 @@ func TestErrorConditionAfterErrorAnalysisRunStep(t *testing.T) {
|
|||
errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 2) + ": " + ar.Status.Message
|
||||
condition := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, errmsg, false)
|
||||
expectedPatch = fmt.Sprintf(expectedPatch, condition, now, errmsg)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestErrorConditionAfterErrorAnalysisRunBackground(t *testing.T) {
|
||||
|
@ -1358,7 +1358,7 @@ func TestErrorConditionAfterErrorAnalysisRunBackground(t *testing.T) {
|
|||
condition := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "", false)
|
||||
|
||||
now := timeutil.Now().UTC().Format(time.RFC3339)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, now, errmsg)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, condition, now, errmsg)), patch)
|
||||
}
|
||||
|
||||
func TestCancelAnalysisRunsWhenAborted(t *testing.T) {
|
||||
|
@ -1419,7 +1419,7 @@ func TestCancelAnalysisRunsWhenAborted(t *testing.T) {
|
|||
}`
|
||||
errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 2)
|
||||
now := timeutil.Now().UTC().Format(time.RFC3339)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, newConditions, now, errmsg)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, newConditions, now, errmsg)), patch)
|
||||
}
|
||||
|
||||
func TestCancelBackgroundAnalysisRunWhenRolloutIsCompleted(t *testing.T) {
|
||||
|
@ -1521,7 +1521,7 @@ func TestDoNotCreateBackgroundAnalysisRunAfterInconclusiveRun(t *testing.T) {
|
|||
f.run(getKey(r2, t))
|
||||
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
}
|
||||
|
||||
func TestDoNotCreateBackgroundAnalysisRunOnNewCanaryRollout(t *testing.T) {
|
||||
|
@ -1647,7 +1647,7 @@ func TestCreatePrePromotionAnalysisRun(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`, ar.Name)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
// TestDoNotCreatePrePromotionAnalysisProgressedRollout ensures a pre-promotion analysis is not created after a Rollout
|
||||
|
@ -1771,7 +1771,7 @@ func TestDoNotCreatePrePromotionAnalysisRunOnNotReadyReplicaSet(t *testing.T) {
|
|||
f.run(getKey(r2, t))
|
||||
|
||||
patch := f.getPatchedRollout(patchRolloutIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
}
|
||||
|
||||
func TestRolloutPrePromotionAnalysisBecomesInconclusive(t *testing.T) {
|
||||
|
@ -1841,7 +1841,7 @@ func TestRolloutPrePromotionAnalysisBecomesInconclusive(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`, now, now)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestRolloutPrePromotionAnalysisSwitchServiceAfterSuccess(t *testing.T) {
|
||||
|
@ -1905,7 +1905,7 @@ func TestRolloutPrePromotionAnalysisSwitchServiceAfterSuccess(t *testing.T) {
|
|||
"message": null
|
||||
}
|
||||
}`, rs2PodHash, rs2PodHash, rs2PodHash)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestRolloutPrePromotionAnalysisHonorAutoPromotionSeconds(t *testing.T) {
|
||||
|
@ -1971,7 +1971,7 @@ func TestRolloutPrePromotionAnalysisHonorAutoPromotionSeconds(t *testing.T) {
|
|||
"message": null
|
||||
}
|
||||
}`, rs2PodHash, rs2PodHash, rs2PodHash)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestRolloutPrePromotionAnalysisDoNothingOnInconclusiveAnalysis(t *testing.T) {
|
||||
|
@ -2096,7 +2096,7 @@ func TestAbortRolloutOnErrorPrePromotionAnalysis(t *testing.T) {
|
|||
now := timeutil.MetaNow().UTC().Format(time.RFC3339)
|
||||
progressingFalseAborted, _ := newProgressingCondition(conditions.RolloutAbortedReason, r2, "")
|
||||
newConditions := updateConditionsPatch(*r2, progressingFalseAborted)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, newConditions, conditions.RolloutAbortedReason, progressingFalseAborted.Message)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, newConditions, conditions.RolloutAbortedReason, progressingFalseAborted.Message)), patch)
|
||||
}
|
||||
|
||||
func TestCreatePostPromotionAnalysisRun(t *testing.T) {
|
||||
|
@ -2143,7 +2143,7 @@ func TestCreatePostPromotionAnalysisRun(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}`, ar.Name)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestRolloutPostPromotionAnalysisSuccess(t *testing.T) {
|
||||
|
@ -2199,7 +2199,7 @@ func TestRolloutPostPromotionAnalysisSuccess(t *testing.T) {
|
|||
"message": null
|
||||
}
|
||||
}`, rs2PodHash)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
// TestPostPromotionAnalysisRunHandleInconclusive ensures that the Rollout does not scale down a old ReplicaSet if
|
||||
|
@ -2264,7 +2264,7 @@ func TestPostPromotionAnalysisRunHandleInconclusive(t *testing.T) {
|
|||
"message": "InconclusiveAnalysisRun"
|
||||
}
|
||||
}`)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestAbortRolloutOnErrorPostPromotionAnalysis(t *testing.T) {
|
||||
|
@ -2334,7 +2334,7 @@ func TestAbortRolloutOnErrorPostPromotionAnalysis(t *testing.T) {
|
|||
now := timeutil.MetaNow().UTC().Format(time.RFC3339)
|
||||
progressingFalseAborted, _ := newProgressingCondition(conditions.RolloutAbortedReason, r2, "")
|
||||
newConditions := updateConditionsPatch(*r2, progressingFalseAborted)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, newConditions, conditions.RolloutAbortedReason, progressingFalseAborted.Message)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, newConditions, conditions.RolloutAbortedReason, progressingFalseAborted.Message)), patch)
|
||||
}
|
||||
|
||||
func TestCreateAnalysisRunWithCustomAnalysisRunMetadataAndROCopyLabels(t *testing.T) {
|
||||
|
|
|
@ -220,10 +220,9 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForBlueGreen(oldRSs []*appsv1.Re
|
|||
annotationedRSs := int32(0)
|
||||
rolloutReplicas := defaults.GetReplicasOrDefault(c.rollout.Spec.Replicas)
|
||||
for _, targetRS := range oldRSs {
|
||||
if replicasetutil.IsStillReferenced(c.rollout.Status, targetRS) {
|
||||
// We should technically never get here because we shouldn't be passing a replicaset list
|
||||
// which includes referenced ReplicaSets. But we check just in case
|
||||
c.log.Warnf("Prevented inadvertent scaleDown of RS '%s'", targetRS.Name)
|
||||
if c.isReplicaSetReferenced(targetRS) {
|
||||
// We might get here if user interrupted an an update in order to move back to stable.
|
||||
c.log.Infof("Skip scale down of older RS '%s': still referenced", targetRS.Name)
|
||||
continue
|
||||
}
|
||||
if *targetRS.Spec.Replicas == 0 {
|
||||
|
|
|
@ -290,7 +290,7 @@ func TestBlueGreenHandlePause(t *testing.T) {
|
|||
f.run(getKey(r2, t))
|
||||
|
||||
patch := f.getPatchedRollout(patchRolloutIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
})
|
||||
t.Run("AddPause", func(t *testing.T) {
|
||||
f := newFixture(t)
|
||||
|
@ -334,7 +334,7 @@ func TestBlueGreenHandlePause(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
now := timeutil.Now().UTC().Format(time.RFC3339)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, v1alpha1.PauseReasonBlueGreenPause, now)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, v1alpha1.PauseReasonBlueGreenPause, now)), patch)
|
||||
|
||||
})
|
||||
|
||||
|
@ -376,7 +376,7 @@ func TestBlueGreenHandlePause(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
addedConditions := generateConditionsPatchWithPause(true, conditions.RolloutPausedReason, rs2, true, "", true, false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, addedConditions)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, addedConditions)), patch)
|
||||
})
|
||||
|
||||
t.Run("NoActionsAfterPausing", func(t *testing.T) {
|
||||
|
@ -417,7 +417,7 @@ func TestBlueGreenHandlePause(t *testing.T) {
|
|||
patchIndex := f.expectPatchRolloutActionWithPatch(r2, OnlyObservedGenerationPatch)
|
||||
f.run(getKey(r2, t))
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
})
|
||||
|
||||
t.Run("NoActionsAfterPausedOnInconclusiveRun", func(t *testing.T) {
|
||||
|
@ -468,7 +468,7 @@ func TestBlueGreenHandlePause(t *testing.T) {
|
|||
patchIndex := f.expectPatchRolloutActionWithPatch(r2, OnlyObservedGenerationPatch)
|
||||
f.run(getKey(r2, t))
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
})
|
||||
|
||||
t.Run("NoAutoPromoteBeforeDelayTimePasses", func(t *testing.T) {
|
||||
|
@ -509,7 +509,7 @@ func TestBlueGreenHandlePause(t *testing.T) {
|
|||
patchIndex := f.expectPatchRolloutActionWithPatch(r2, OnlyObservedGenerationPatch)
|
||||
f.run(getKey(r2, t))
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
})
|
||||
|
||||
t.Run("AutoPromoteAfterDelayTimePasses", func(t *testing.T) {
|
||||
|
@ -813,7 +813,7 @@ func TestBlueGreenHandlePause(t *testing.T) {
|
|||
"conditions": %s
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedUnpausePatch, unpauseConditions)), unpausePatch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedUnpausePatch, unpauseConditions)), unpausePatch)
|
||||
|
||||
generatedConditions := generateConditionsPatchWithCompleted(true, conditions.ReplicaSetUpdatedReason, rs2, true, "", true)
|
||||
expected2ndPatchWithoutSubs := `{
|
||||
|
@ -1453,7 +1453,7 @@ func TestBlueGreenAbort(t *testing.T) {
|
|||
}
|
||||
}`, rs1PodHash, expectedConditions, rs1PodHash, conditions.RolloutAbortedReason, fmt.Sprintf(conditions.RolloutAbortedMessage, 2))
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestBlueGreenHandlePauseAutoPromoteWithConditions(t *testing.T) {
|
||||
|
|
|
@ -180,10 +180,9 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli
|
|||
|
||||
annotationedRSs := int32(0)
|
||||
for _, targetRS := range oldRSs {
|
||||
if replicasetutil.IsStillReferenced(c.rollout.Status, targetRS) {
|
||||
// We should technically never get here because we shouldn't be passing a replicaset list
|
||||
// which includes referenced ReplicaSets. But we check just in case
|
||||
c.log.Warnf("Prevented inadvertent scaleDown of RS '%s'", targetRS.Name)
|
||||
if c.isReplicaSetReferenced(targetRS) {
|
||||
// We might get here if user interrupted an an update in order to move back to stable.
|
||||
c.log.Infof("Skip scale down of older RS '%s': still referenced", targetRS.Name)
|
||||
continue
|
||||
}
|
||||
if maxScaleDown <= 0 {
|
||||
|
@ -220,15 +219,8 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli
|
|||
// and doesn't yet have scale down deadline. This happens when a user changes their
|
||||
// mind in the middle of an V1 -> V2 update, and then applies a V3. We are deciding
|
||||
// what to do with the defunct, intermediate V2 ReplicaSet right now.
|
||||
if !c.replicaSetReferencedByCanaryTraffic(targetRS) {
|
||||
// It is safe to scale the intermediate RS down, if no traffic is directed to it.
|
||||
c.log.Infof("scaling down intermediate RS '%s'", targetRS.Name)
|
||||
} else {
|
||||
c.log.Infof("Skip scaling down intermediate RS '%s': still referenced by service", targetRS.Name)
|
||||
// This ReplicaSet is still referenced by the service. It is not safe to scale
|
||||
// this down.
|
||||
continue
|
||||
}
|
||||
// It is safe to scale the intermediate RS down, since no traffic is directed to it.
|
||||
c.log.Infof("scaling down intermediate RS '%s'", targetRS.Name)
|
||||
}
|
||||
}
|
||||
if *targetRS.Spec.Replicas == desiredReplicaCount {
|
||||
|
@ -248,19 +240,26 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli
|
|||
return totalScaledDown, nil
|
||||
}
|
||||
|
||||
func (c *rolloutContext) replicaSetReferencedByCanaryTraffic(rs *appsv1.ReplicaSet) bool {
|
||||
rsPodHash := replicasetutil.GetPodTemplateHash(rs)
|
||||
ro := c.rollout
|
||||
|
||||
if ro.Status.Canary.Weights == nil {
|
||||
return false
|
||||
// isDynamicallyRollingBackToStable returns true if we were in the middle of an canary update with
|
||||
// dynamic stable scaling, but was interrupted and are now rolling back to stable RS. This is similar
|
||||
// to, but different than aborting. With abort, desired hash != stable hash and so we know the
|
||||
// two hashes to balance traffic against. But with dynamically rolling back to stable, the
|
||||
// desired hash == stable hash, and so we must use the *previous* desired hash and balance traffic
|
||||
// between previous desired vs. stable hash, in order to safely shift traffic back to stable.
|
||||
// This function also returns the previous desired hash (where we are weighted to)
|
||||
func isDynamicallyRollingBackToStable(ro *v1alpha1.Rollout, desiredRS *appsv1.ReplicaSet) (bool, string) {
|
||||
if rolloututil.IsFullyPromoted(ro) && ro.Spec.Strategy.Canary.TrafficRouting != nil && ro.Spec.Strategy.Canary.DynamicStableScale {
|
||||
if ro.Status.Canary.Weights != nil {
|
||||
currSelector := ro.Status.Canary.Weights.Canary.PodTemplateHash
|
||||
desiredSelector := replicasetutil.GetPodTemplateHash(desiredRS)
|
||||
if currSelector != desiredSelector {
|
||||
if desiredRS.Status.AvailableReplicas < *ro.Spec.Replicas {
|
||||
return true, currSelector
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ro.Status.Canary.Weights.Canary.PodTemplateHash == rsPodHash || ro.Status.Canary.Weights.Stable.PodTemplateHash == rsPodHash {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// canProceedWithScaleDownAnnotation returns whether or not it is safe to proceed with annotating
|
||||
|
@ -359,13 +358,18 @@ func (c *rolloutContext) syncRolloutStatusCanary() error {
|
|||
|
||||
if replicasetutil.PodTemplateOrStepsChanged(c.rollout, c.newRS) {
|
||||
c.resetRolloutStatus(&newStatus)
|
||||
if c.newRS != nil && c.rollout.Status.StableRS == replicasetutil.GetPodTemplateHash(c.newRS) {
|
||||
if stepCount > 0 {
|
||||
if c.newRS != nil && stepCount > 0 {
|
||||
if c.rollout.Status.StableRS == replicasetutil.GetPodTemplateHash(c.newRS) {
|
||||
// If we get here, we detected that we've moved back to the stable ReplicaSet
|
||||
c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: "SkipSteps"}, "Rollback to stable")
|
||||
c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: "SkipSteps"}, "Rollback to stable ReplicaSets")
|
||||
newStatus.CurrentStepIndex = &stepCount
|
||||
} else if c.isRollbackWithinWindow() && replicasetutil.IsActive(c.newRS) {
|
||||
// Else if we get here we detected that we are within the rollback window we can skip steps and move back to the active ReplicaSet
|
||||
c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: "SkipSteps"}, "Rollback to active ReplicaSets within RollbackWindow")
|
||||
newStatus.CurrentStepIndex = &stepCount
|
||||
}
|
||||
}
|
||||
|
||||
newStatus = c.calculateRolloutConditions(newStatus)
|
||||
return c.persistRolloutStatus(&newStatus)
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -182,7 +183,7 @@ func TestCanaryRolloutEnterPauseState(t *testing.T) {
|
|||
now := timeutil.MetaNow().UTC().Format(time.RFC3339)
|
||||
expectedPatchWithoutObservedGen := fmt.Sprintf(expectedPatchTemplate, v1alpha1.PauseReasonCanaryPauseStep, now, conditions, v1alpha1.PauseReasonCanaryPauseStep)
|
||||
expectedPatch := calculatePatch(r2, expectedPatchWithoutObservedGen)
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutNoProgressWhilePaused(t *testing.T) {
|
||||
|
@ -257,7 +258,7 @@ func TestCanaryRolloutUpdatePauseConditionWhilePaused(t *testing.T) {
|
|||
f.run(getKey(r2, t))
|
||||
|
||||
patch := f.getPatchedRollout(addPausedConditionPatch)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutResetProgressDeadlineOnRetry(t *testing.T) {
|
||||
|
@ -300,7 +301,7 @@ func TestCanaryRolloutResetProgressDeadlineOnRetry(t *testing.T) {
|
|||
"message": "more replicas need to be updated"
|
||||
}
|
||||
}`, retryCondition)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutIncrementStepAfterUnPaused(t *testing.T) {
|
||||
|
@ -342,7 +343,7 @@ func TestCanaryRolloutIncrementStepAfterUnPaused(t *testing.T) {
|
|||
}`
|
||||
generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", false)
|
||||
expectedPatch := calculatePatch(r2, fmt.Sprintf(expectedPatchTemplate, generatedConditions))
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutUpdateStatusWhenAtEndOfSteps(t *testing.T) {
|
||||
|
@ -383,7 +384,7 @@ func TestCanaryRolloutUpdateStatusWhenAtEndOfSteps(t *testing.T) {
|
|||
}`
|
||||
|
||||
expectedPatch := fmt.Sprintf(expectedPatchWithoutStableRS, expectedStableRS, generateConditionsPatchWithCompleted(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", true))
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestResetCurrentStepIndexOnStepChange(t *testing.T) {
|
||||
|
@ -426,7 +427,7 @@ func TestResetCurrentStepIndexOnStepChange(t *testing.T) {
|
|||
}`
|
||||
newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, expectedCurrentStepHash, newConditions)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestResetCurrentStepIndexOnPodSpecChange(t *testing.T) {
|
||||
|
@ -467,7 +468,7 @@ func TestResetCurrentStepIndexOnPodSpecChange(t *testing.T) {
|
|||
newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
|
||||
expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, newConditions)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutCreateFirstReplicasetNoSteps(t *testing.T) {
|
||||
|
@ -505,7 +506,7 @@ func TestCanaryRolloutCreateFirstReplicasetNoSteps(t *testing.T) {
|
|||
|
||||
newConditions := generateConditionsPatchWithCompleted(false, conditions.ReplicaSetUpdatedReason, rs, false, "", true)
|
||||
|
||||
assert.Equal(t, calculatePatch(r, fmt.Sprintf(expectedPatch, newConditions)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, fmt.Sprintf(expectedPatch, newConditions)), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutCreateFirstReplicasetWithSteps(t *testing.T) {
|
||||
|
@ -545,7 +546,7 @@ func TestCanaryRolloutCreateFirstReplicasetWithSteps(t *testing.T) {
|
|||
}`
|
||||
expectedPatch := fmt.Sprintf(expectedPatchWithSub, generateConditionsPatchWithCompleted(false, conditions.ReplicaSetUpdatedReason, rs, false, "", true))
|
||||
|
||||
assert.Equal(t, calculatePatch(r, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutCreateNewReplicaWithCorrectWeight(t *testing.T) {
|
||||
|
@ -822,9 +823,9 @@ func TestRollBackToStable(t *testing.T) {
|
|||
f.rolloutLister = append(f.rolloutLister, r2)
|
||||
f.objects = append(f.objects, r2)
|
||||
|
||||
updatedRSIndex := f.expectUpdateReplicaSetAction(rs1)
|
||||
f.expectUpdateReplicaSetAction(rs1)
|
||||
patchIndex := f.expectPatchRolloutAction(r2)
|
||||
updatedRSIndex := f.expectUpdateReplicaSetAction(rs1) // Bump replicaset revision from 1 to 3
|
||||
f.expectUpdateRolloutAction(r2) // Bump rollout revision from 1 to 3
|
||||
patchIndex := f.expectPatchRolloutAction(r2) // Patch rollout status
|
||||
f.run(getKey(r2, t))
|
||||
|
||||
expectedRS1 := rs1.DeepCopy()
|
||||
|
@ -843,7 +844,56 @@ func TestRollBackToStable(t *testing.T) {
|
|||
newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs1, false, "", true)
|
||||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, hash.ComputePodTemplateHash(&r2.Spec.Template, r2.Status.CollisionCount), newConditions)
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestRollBackToActiveReplicaSetWithinWindow(t *testing.T) {
|
||||
f := newFixture(t)
|
||||
defer f.Close()
|
||||
|
||||
steps := []v1alpha1.CanaryStep{{
|
||||
SetWeight: int32Ptr(10),
|
||||
}}
|
||||
|
||||
r1 := newCanaryRollout("foo", 1, nil, steps, int32Ptr(0), intstr.FromInt(1), intstr.FromInt(0))
|
||||
r2 := bumpVersion(r1)
|
||||
|
||||
// For the fast rollback to work, we need to:
|
||||
// 1. Have the previous revision be active (i.e. not scaled down)
|
||||
// 2. Be in rollback window (within window revisions and previous creation timestamp)
|
||||
rs1 := newReplicaSetWithStatus(r1, 1, 1)
|
||||
rs2 := newReplicaSetWithStatus(r2, 1, 1)
|
||||
r2.Spec.RollbackWindow = &v1alpha1.RollbackWindowSpec{Revisions: 1}
|
||||
rs1.CreationTimestamp = timeutil.MetaTime(time.Now().Add(-1 * time.Hour))
|
||||
rs2.CreationTimestamp = timeutil.MetaNow()
|
||||
|
||||
f.kubeobjects = append(f.kubeobjects, rs1, rs2)
|
||||
f.replicaSetLister = append(f.replicaSetLister, rs1, rs2)
|
||||
f.serviceLister = append(f.serviceLister)
|
||||
|
||||
// Switch back to version 1
|
||||
r2.Spec.Template = r1.Spec.Template
|
||||
|
||||
rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
|
||||
rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
|
||||
|
||||
// Since old replicaset is still active, expect twice the number of replicas
|
||||
r2 = updateCanaryRolloutStatus(r2, rs2PodHash, 2, 2, 2, false)
|
||||
|
||||
f.rolloutLister = append(f.rolloutLister, r2)
|
||||
f.objects = append(f.objects, r2)
|
||||
|
||||
f.expectUpdateReplicaSetAction(rs1) // Update replicaset revision from 1 to 3
|
||||
f.expectUpdateRolloutAction(r2) // Update rollout revision from 1 to 3
|
||||
rolloutPatchIndex := f.expectPatchRolloutAction(r2) // Patch rollout status
|
||||
f.run(getKey(r2, t))
|
||||
|
||||
expectedStepIndex := len(steps)
|
||||
patch := f.getPatchedRolloutWithoutConditions(rolloutPatchIndex)
|
||||
|
||||
// Assert current pod hash is the old replicaset and steps were skipped
|
||||
assert.Regexp(t, fmt.Sprintf(`"currentPodHash":"%s"`, rs1PodHash), patch)
|
||||
assert.Regexp(t, fmt.Sprintf(`"currentStepIndex":%d`, expectedStepIndex), patch)
|
||||
}
|
||||
|
||||
func TestGradualShiftToNewStable(t *testing.T) {
|
||||
|
@ -886,7 +936,7 @@ func TestGradualShiftToNewStable(t *testing.T) {
|
|||
newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, newConditions)
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestRollBackToStableAndStepChange(t *testing.T) {
|
||||
|
@ -913,7 +963,8 @@ func TestRollBackToStableAndStepChange(t *testing.T) {
|
|||
f.objects = append(f.objects, r2)
|
||||
|
||||
updatedRSIndex := f.expectUpdateReplicaSetAction(rs1)
|
||||
f.expectUpdateReplicaSetAction(rs1)
|
||||
//f.expectUpdateReplicaSetAction(rs1)
|
||||
f.expectUpdateRolloutAction(r2)
|
||||
patchIndex := f.expectPatchRolloutAction(r2)
|
||||
f.run(getKey(r2, t))
|
||||
|
||||
|
@ -934,7 +985,7 @@ func TestRollBackToStableAndStepChange(t *testing.T) {
|
|||
newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs1, false, "", true)
|
||||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, newPodHash, newStepHash, newConditions)
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutIncrementStepIfSetWeightsAreCorrect(t *testing.T) {
|
||||
|
@ -970,7 +1021,7 @@ func TestCanaryRolloutIncrementStepIfSetWeightsAreCorrect(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs3, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r3, fmt.Sprintf(expectedPatch, newConditions)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r3, fmt.Sprintf(expectedPatch, newConditions)), patch)
|
||||
}
|
||||
|
||||
func TestSyncRolloutWaitAddToQueue(t *testing.T) {
|
||||
|
@ -1122,7 +1173,7 @@ func TestSyncRolloutWaitIncrementStepIndex(t *testing.T) {
|
|||
"currentStepIndex":2
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutStatusHPAStatusFields(t *testing.T) {
|
||||
|
@ -1166,7 +1217,7 @@ func TestCanaryRolloutStatusHPAStatusFields(t *testing.T) {
|
|||
f.run(getKey(r2, t))
|
||||
|
||||
patch := f.getPatchedRolloutWithoutConditions(index)
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatchWithSub), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatchWithSub), patch)
|
||||
}
|
||||
|
||||
func TestCanaryRolloutWithCanaryService(t *testing.T) {
|
||||
|
@ -1607,7 +1658,7 @@ func TestCanaryRolloutScaleWhilePaused(t *testing.T) {
|
|||
|
||||
patch := f.getPatchedRolloutWithoutConditions(patchIndex)
|
||||
expectedPatch := calculatePatch(r2, OnlyObservedGenerationPatch)
|
||||
assert.Equal(t, expectedPatch, patch)
|
||||
assert.JSONEq(t, expectedPatch, patch)
|
||||
}
|
||||
|
||||
func TestResumeRolloutAfterPauseDuration(t *testing.T) {
|
||||
|
@ -1707,7 +1758,7 @@ func TestNoResumeAfterPauseDurationIfUserPaused(t *testing.T) {
|
|||
"message": "manually paused"
|
||||
}
|
||||
}`
|
||||
assert.Equal(t, calculatePatch(r2, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func TestHandleNilNewRSOnScaleAndImageChange(t *testing.T) {
|
||||
|
@ -1754,7 +1805,7 @@ func TestHandleNilNewRSOnScaleAndImageChange(t *testing.T) {
|
|||
patchIndex := f.expectPatchRolloutAction(r2)
|
||||
f.run(getKey(r2, t))
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
}
|
||||
|
||||
func TestHandleCanaryAbort(t *testing.T) {
|
||||
|
@ -1801,10 +1852,10 @@ func TestHandleCanaryAbort(t *testing.T) {
|
|||
}`
|
||||
errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 2)
|
||||
newConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, newConditions, conditions.RolloutAbortedReason, errmsg)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, newConditions, conditions.RolloutAbortedReason, errmsg)), patch)
|
||||
})
|
||||
|
||||
t.Run("Do not reset currentStepCount if newRS is stableRS", func(t *testing.T) {
|
||||
t.Run("Do not reset currentStepCount and reset abort if newRS is stableRS", func(t *testing.T) {
|
||||
f := newFixture(t)
|
||||
defer f.Close()
|
||||
|
||||
|
@ -1832,13 +1883,129 @@ func TestHandleCanaryAbort(t *testing.T) {
|
|||
patch := f.getPatchedRollout(patchIndex)
|
||||
expectedPatch := `{
|
||||
"status":{
|
||||
"conditions": %s,
|
||||
"phase": "Degraded",
|
||||
"message": "%s: %s"
|
||||
"abort": null,
|
||||
"abortedAt": null,
|
||||
"conditions": %s
|
||||
}
|
||||
}`
|
||||
errmsg := fmt.Sprintf(conditions.RolloutAbortedMessage, 1)
|
||||
newConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r1, false, "", true)
|
||||
assert.Equal(t, calculatePatch(r1, fmt.Sprintf(expectedPatch, newConditions, conditions.RolloutAbortedReason, errmsg)), patch)
|
||||
newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r1, false, "", true)
|
||||
assert.JSONEq(t, calculatePatch(r1, fmt.Sprintf(expectedPatch, newConditions)), patch)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDynamicallyRollingBackToStable(t *testing.T) {
|
||||
newRSWithHashAndReplicas := func(hash string, available int32) *appsv1.ReplicaSet {
|
||||
return &appsv1.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
v1alpha1.DefaultRolloutUniqueLabelKey: hash,
|
||||
},
|
||||
},
|
||||
Status: v1.ReplicaSetStatus{
|
||||
AvailableReplicas: available,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
status v1alpha1.RolloutStatus
|
||||
trafficRoutingDisabled bool
|
||||
dynamicStableScalingDisabled bool
|
||||
rsHash string
|
||||
rsAvailableReplicas *int32 // if nil, will set to rollout replicas
|
||||
trafficWeights *v1alpha1.TrafficWeights
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
name: "desired RS != stable RS",
|
||||
status: v1alpha1.RolloutStatus{CurrentPodHash: "abc123", StableRS: "def456"},
|
||||
rsHash: "",
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "not using traffic routing",
|
||||
trafficRoutingDisabled: true,
|
||||
status: v1alpha1.RolloutStatus{CurrentPodHash: "abc123", StableRS: "abc123"},
|
||||
rsHash: "",
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "not using dynamicStableScaling",
|
||||
dynamicStableScalingDisabled: true,
|
||||
status: v1alpha1.RolloutStatus{CurrentPodHash: "abc123", StableRS: "abc123"},
|
||||
rsHash: "",
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "weighted selector == desired RS",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
Canary: v1alpha1.CanaryStatus{
|
||||
Weights: &v1alpha1.TrafficWeights{
|
||||
Canary: v1alpha1.WeightDestination{
|
||||
PodTemplateHash: "abc123",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
rsHash: "abc123",
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "weighted selector != desired RS, desired not fully available",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
Canary: v1alpha1.CanaryStatus{
|
||||
Weights: &v1alpha1.TrafficWeights{
|
||||
Canary: v1alpha1.WeightDestination{
|
||||
PodTemplateHash: "def456",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
rsHash: "abc123",
|
||||
rsAvailableReplicas: pointer.Int32(1),
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "weighted selector != desired RS, desired RS is fully available",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
Canary: v1alpha1.CanaryStatus{
|
||||
Weights: &v1alpha1.TrafficWeights{
|
||||
Canary: v1alpha1.WeightDestination{
|
||||
PodTemplateHash: "def456",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
rsHash: "abc123",
|
||||
expectedResult: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ro := newCanaryRollout("test", 10, nil, nil, nil, intstr.FromInt(0), intstr.FromInt(1))
|
||||
if !tc.trafficRoutingDisabled {
|
||||
ro.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{}
|
||||
}
|
||||
if !tc.dynamicStableScalingDisabled {
|
||||
ro.Spec.Strategy.Canary.DynamicStableScale = true
|
||||
}
|
||||
ro.Status = tc.status
|
||||
|
||||
desiredRS := newRSWithHashAndReplicas(tc.rsHash, 1)
|
||||
if tc.rsAvailableReplicas != nil {
|
||||
desiredRS.Status.AvailableReplicas = *tc.rsAvailableReplicas
|
||||
}
|
||||
|
||||
rbToStable, _ := isDynamicallyRollingBackToStable(ro, desiredRS)
|
||||
|
||||
assert.Equal(t, tc.expectedResult, rbToStable)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -56,6 +56,7 @@ import (
|
|||
logutil "github.com/argoproj/argo-rollouts/utils/log"
|
||||
"github.com/argoproj/argo-rollouts/utils/record"
|
||||
replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset"
|
||||
rolloututil "github.com/argoproj/argo-rollouts/utils/rollout"
|
||||
serviceutil "github.com/argoproj/argo-rollouts/utils/service"
|
||||
timeutil "github.com/argoproj/argo-rollouts/utils/time"
|
||||
unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured"
|
||||
|
@ -131,6 +132,7 @@ type reconcilerBase struct {
|
|||
replicaSetSynced cache.InformerSynced
|
||||
rolloutsInformer cache.SharedIndexInformer
|
||||
rolloutsLister listers.RolloutLister
|
||||
replicaSetInformer cache.SharedIndexInformer
|
||||
rolloutsSynced cache.InformerSynced
|
||||
rolloutsIndexer cache.Indexer
|
||||
servicesLister v1.ServiceLister
|
||||
|
@ -175,7 +177,6 @@ func NewController(cfg ControllerConfig) *Controller {
|
|||
controllerutil.EnqueueAfter(obj, duration, cfg.RolloutWorkQueue)
|
||||
},
|
||||
}
|
||||
|
||||
base := reconcilerBase{
|
||||
kubeclientset: cfg.KubeClientSet,
|
||||
argoprojclientset: cfg.ArgoProjClientset,
|
||||
|
@ -184,6 +185,7 @@ func NewController(cfg ControllerConfig) *Controller {
|
|||
replicaSetLister: cfg.ReplicaSetInformer.Lister(),
|
||||
replicaSetSynced: cfg.ReplicaSetInformer.Informer().HasSynced,
|
||||
rolloutsInformer: cfg.RolloutsInformer.Informer(),
|
||||
replicaSetInformer: cfg.ReplicaSetInformer.Informer(),
|
||||
rolloutsIndexer: cfg.RolloutsInformer.Informer().GetIndexer(),
|
||||
rolloutsLister: cfg.RolloutsInformer.Lister(),
|
||||
rolloutsSynced: cfg.RolloutsInformer.Informer().HasSynced,
|
||||
|
@ -520,6 +522,10 @@ func (c *Controller) newRolloutContext(rollout *v1alpha1.Rollout) (*rolloutConte
|
|||
},
|
||||
reconcilerBase: c.reconcilerBase,
|
||||
}
|
||||
if rolloututil.IsFullyPromoted(rollout) && roCtx.pauseContext.IsAborted() {
|
||||
logCtx.Warnf("Removing abort condition from fully promoted rollout")
|
||||
roCtx.pauseContext.RemoveAbort()
|
||||
}
|
||||
// carry over existing recorded weights
|
||||
roCtx.newStatus.Canary.Weights = rollout.Status.Canary.Weights
|
||||
return &roCtx, nil
|
||||
|
|
|
@ -1346,7 +1346,7 @@ func TestSwitchInvalidSpecMessage(t *testing.T) {
|
|||
expectedPatch := fmt.Sprintf(expectedPatchWithoutSub, progressingCond, string(invalidSpecBytes), conditions.InvalidSpecReason, strings.ReplaceAll(errmsg, "\"", "\\\""))
|
||||
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, expectedPatch), patch)
|
||||
}
|
||||
|
||||
// TestPodTemplateHashEquivalence verifies the hash is computed consistently when there are slight
|
||||
|
@ -1549,7 +1549,7 @@ func TestSwitchBlueGreenToCanary(t *testing.T) {
|
|||
"selector": "foo=bar"
|
||||
}
|
||||
}`, addedConditions, conditions.ComputeStepHash(r))
|
||||
assert.Equal(t, calculatePatch(r, expectedPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, expectedPatch), patch)
|
||||
}
|
||||
|
||||
func newInvalidSpecCondition(reason string, resourceObj runtime.Object, optionalMessage string) (v1alpha1.RolloutCondition, string) {
|
||||
|
|
|
@ -69,7 +69,7 @@ func TestRolloutCreateExperiment(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch)
|
||||
}
|
||||
|
||||
func TestRolloutCreateClusterTemplateExperiment(t *testing.T) {
|
||||
|
@ -126,7 +126,7 @@ func TestRolloutCreateClusterTemplateExperiment(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch)
|
||||
}
|
||||
|
||||
func TestCreateExperimentWithCollision(t *testing.T) {
|
||||
|
@ -178,7 +178,7 @@ func TestCreateExperimentWithCollision(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, createdEx.Name, conds)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, createdEx.Name, conds)), patch)
|
||||
}
|
||||
|
||||
func TestCreateExperimentWithCollisionAndSemanticEquality(t *testing.T) {
|
||||
|
@ -229,7 +229,7 @@ func TestCreateExperimentWithCollisionAndSemanticEquality(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
conds := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, ex.Name, conds)), patch)
|
||||
}
|
||||
|
||||
func TestRolloutExperimentProcessingDoNothing(t *testing.T) {
|
||||
|
@ -267,7 +267,7 @@ func TestRolloutExperimentProcessingDoNothing(t *testing.T) {
|
|||
f.run(getKey(r2, t))
|
||||
|
||||
patch := f.getPatchedRollout(patchIndex)
|
||||
assert.Equal(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
|
||||
|
||||
}
|
||||
|
||||
|
@ -314,7 +314,7 @@ func TestAbortRolloutAfterFailedExperiment(t *testing.T) {
|
|||
}`
|
||||
now := timeutil.Now().UTC().Format(time.RFC3339)
|
||||
generatedConditions := generateConditionsPatch(true, conditions.RolloutAbortedReason, r2, false, "", false)
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, generatedConditions, conditions.RolloutAbortedReason, fmt.Sprintf(conditions.RolloutAbortedMessage, 2))), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, now, generatedConditions, conditions.RolloutAbortedReason, fmt.Sprintf(conditions.RolloutAbortedMessage, 2))), patch)
|
||||
}
|
||||
|
||||
func TestPauseRolloutAfterInconclusiveExperiment(t *testing.T) {
|
||||
|
@ -481,7 +481,7 @@ func TestRolloutExperimentFinishedIncrementStep(t *testing.T) {
|
|||
}`
|
||||
generatedConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, rs2, false, "", false)
|
||||
|
||||
assert.Equal(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, generatedConditions)), patch)
|
||||
assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, generatedConditions)), patch)
|
||||
}
|
||||
|
||||
func TestRolloutDoNotCreateExperimentWithoutStableRS(t *testing.T) {
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
patchtypes "k8s.io/apimachinery/pkg/types"
|
||||
|
@ -15,6 +16,7 @@ import (
|
|||
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
"github.com/argoproj/argo-rollouts/utils/defaults"
|
||||
replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset"
|
||||
serviceutil "github.com/argoproj/argo-rollouts/utils/service"
|
||||
timeutil "github.com/argoproj/argo-rollouts/utils/time"
|
||||
)
|
||||
|
||||
|
@ -32,9 +34,14 @@ func (c *rolloutContext) removeScaleDownDelay(rs *appsv1.ReplicaSet) error {
|
|||
return nil
|
||||
}
|
||||
patch := fmt.Sprintf(removeScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey)
|
||||
_, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
|
||||
if err == nil {
|
||||
c.log.Infof("Removed '%s' annotation from RS '%s'", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name)
|
||||
rs, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing scale-down-deadline annotation from RS '%s': %w", rs.Name, err)
|
||||
}
|
||||
c.log.Infof("Removed '%s' annotation from RS '%s'", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name)
|
||||
err = c.replicaSetInformer.GetIndexer().Update(rs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating replicaset informer in removeScaleDownDelay: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -56,9 +63,14 @@ func (c *rolloutContext) addScaleDownDelay(rs *appsv1.ReplicaSet, scaleDownDelay
|
|||
}
|
||||
deadline := timeutil.MetaNow().Add(scaleDownDelaySeconds).UTC().Format(time.RFC3339)
|
||||
patch := fmt.Sprintf(addScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, deadline)
|
||||
_, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
|
||||
if err == nil {
|
||||
c.log.Infof("Set '%s' annotation on '%s' to %s (%s)", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name, deadline, scaleDownDelaySeconds)
|
||||
rs, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding scale-down-deadline annotation to RS '%s': %w", rs.Name, err)
|
||||
}
|
||||
c.log.Infof("Set '%s' annotation on '%s' to %s (%s)", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name, deadline, scaleDownDelaySeconds)
|
||||
err = c.replicaSetInformer.GetIndexer().Update(rs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating replicaset informer in addScaleDownDelay: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -294,3 +306,56 @@ func (c *rolloutContext) scaleDownDelayHelper(rs *appsv1.ReplicaSet, annotatione
|
|||
|
||||
return annotationedRSs, desiredReplicaCount, nil
|
||||
}
|
||||
|
||||
// isReplicaSetReferenced returns if the given ReplicaSet is still being referenced by any of
|
||||
// the current, stable, blue-green services. Used to determine if the ReplicaSet can
|
||||
// safely be scaled to zero, or deleted.
|
||||
func (c *rolloutContext) isReplicaSetReferenced(rs *appsv1.ReplicaSet) bool {
|
||||
rsPodHash := replicasetutil.GetPodTemplateHash(rs)
|
||||
if rsPodHash == "" {
|
||||
return false
|
||||
}
|
||||
ro := c.rollout
|
||||
referencesToCheck := []string{
|
||||
ro.Status.StableRS,
|
||||
ro.Status.CurrentPodHash,
|
||||
ro.Status.BlueGreen.ActiveSelector,
|
||||
ro.Status.BlueGreen.PreviewSelector,
|
||||
}
|
||||
if ro.Status.Canary.Weights != nil {
|
||||
referencesToCheck = append(referencesToCheck, ro.Status.Canary.Weights.Canary.PodTemplateHash, ro.Status.Canary.Weights.Stable.PodTemplateHash)
|
||||
}
|
||||
for _, ref := range referencesToCheck {
|
||||
if ref == rsPodHash {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// The above are static, lightweight checks to see if the selectors we record in our status are
|
||||
// still referencing the ReplicaSet in question. Those checks aren't always enough. Next, we do
|
||||
// a deeper check to look up the actual service objects, and see if they are still referencing
|
||||
// the ReplicaSet. If so, we cannot scale it down.
|
||||
var servicesToCheck []string
|
||||
if ro.Spec.Strategy.Canary != nil {
|
||||
servicesToCheck = []string{ro.Spec.Strategy.Canary.CanaryService, ro.Spec.Strategy.Canary.StableService}
|
||||
} else {
|
||||
servicesToCheck = []string{ro.Spec.Strategy.BlueGreen.ActiveService, ro.Spec.Strategy.BlueGreen.PreviewService}
|
||||
}
|
||||
for _, svcName := range servicesToCheck {
|
||||
if svcName == "" {
|
||||
continue
|
||||
}
|
||||
svc, err := c.servicesLister.Services(c.rollout.Namespace).Get(svcName)
|
||||
if err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
// service doesn't exist
|
||||
continue
|
||||
}
|
||||
return true
|
||||
}
|
||||
if serviceutil.GetRolloutSelectorLabel(svc) == rsPodHash {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package rollout
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -8,6 +9,9 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
k8sinformers "k8s.io/client-go/informers"
|
||||
k8sfake "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/utils/pointer"
|
||||
|
@ -131,8 +135,9 @@ func TestReconcileNewReplicaSet(t *testing.T) {
|
|||
abortScaleDownAnnotated bool
|
||||
abortScaleDownDelayPassed bool
|
||||
expectedNewReplicas int
|
||||
failRSUpdate bool
|
||||
abort bool
|
||||
}{
|
||||
|
||||
{
|
||||
name: "New Replica Set matches rollout replica: No scale",
|
||||
rolloutReplicas: 10,
|
||||
|
@ -160,6 +165,7 @@ func TestReconcileNewReplicaSet(t *testing.T) {
|
|||
newReplicas: 10,
|
||||
// ScaleDownOnAbort: true,
|
||||
abortScaleDownDelaySeconds: 5,
|
||||
abort: true,
|
||||
abortScaleDownAnnotated: true,
|
||||
abortScaleDownDelayPassed: true,
|
||||
scaleExpected: true,
|
||||
|
@ -171,6 +177,7 @@ func TestReconcileNewReplicaSet(t *testing.T) {
|
|||
newReplicas: 8,
|
||||
// ScaleDownOnAbort: true,
|
||||
abortScaleDownDelaySeconds: 5,
|
||||
abort: true,
|
||||
abortScaleDownAnnotated: true,
|
||||
abortScaleDownDelayPassed: false,
|
||||
scaleExpected: false,
|
||||
|
@ -181,10 +188,20 @@ func TestReconcileNewReplicaSet(t *testing.T) {
|
|||
rolloutReplicas: 10,
|
||||
newReplicas: 10,
|
||||
abortScaleDownDelaySeconds: 5,
|
||||
abort: true,
|
||||
abortScaleDownAnnotated: false,
|
||||
scaleExpected: false,
|
||||
expectedNewReplicas: 0,
|
||||
},
|
||||
{
|
||||
name: "Fail to update RS: No scale and add default annotation",
|
||||
rolloutReplicas: 10,
|
||||
newReplicas: 10,
|
||||
scaleExpected: false,
|
||||
failRSUpdate: true,
|
||||
abort: true,
|
||||
abortScaleDownDelaySeconds: -1,
|
||||
},
|
||||
}
|
||||
for i := range tests {
|
||||
test := tests[i]
|
||||
|
@ -195,30 +212,56 @@ func TestReconcileNewReplicaSet(t *testing.T) {
|
|||
rollout := newBlueGreenRollout("foo", test.rolloutReplicas, nil, "", "")
|
||||
fake := fake.Clientset{}
|
||||
k8sfake := k8sfake.Clientset{}
|
||||
|
||||
if test.failRSUpdate {
|
||||
k8sfake.PrependReactor("patch", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, &appsv1.ReplicaSet{}, fmt.Errorf("should not patch replica set")
|
||||
})
|
||||
}
|
||||
|
||||
f := newFixture(t)
|
||||
defer f.Close()
|
||||
f.objects = append(f.objects, rollout)
|
||||
f.replicaSetLister = append(f.replicaSetLister, oldRS, newRS)
|
||||
f.kubeobjects = append(f.kubeobjects, oldRS, newRS)
|
||||
_, informers, k8sInformer := f.newController(noResyncPeriodFunc)
|
||||
stopCh := make(chan struct{})
|
||||
informers.Start(stopCh)
|
||||
informers.WaitForCacheSync(stopCh)
|
||||
close(stopCh)
|
||||
|
||||
roCtx := rolloutContext{
|
||||
log: logutil.WithRollout(rollout),
|
||||
rollout: rollout,
|
||||
newRS: newRS,
|
||||
stableRS: oldRS,
|
||||
reconcilerBase: reconcilerBase{
|
||||
argoprojclientset: &fake,
|
||||
kubeclientset: &k8sfake,
|
||||
recorder: record.NewFakeEventRecorder(),
|
||||
resyncPeriod: 30 * time.Second,
|
||||
argoprojclientset: &fake,
|
||||
kubeclientset: &k8sfake,
|
||||
recorder: record.NewFakeEventRecorder(),
|
||||
resyncPeriod: 30 * time.Second,
|
||||
replicaSetInformer: k8sInformer.Apps().V1().ReplicaSets().Informer(),
|
||||
},
|
||||
pauseContext: &pauseContext{
|
||||
rollout: rollout,
|
||||
},
|
||||
}
|
||||
roCtx.enqueueRolloutAfter = func(obj interface{}, duration time.Duration) {}
|
||||
roCtx.enqueueRolloutAfter = func(obj any, duration time.Duration) {}
|
||||
|
||||
rollout.Status.Abort = test.abort
|
||||
roCtx.stableRS.Status.AvailableReplicas = int32(test.rolloutReplicas)
|
||||
rollout.Spec.Strategy = v1alpha1.RolloutStrategy{
|
||||
BlueGreen: &v1alpha1.BlueGreenStrategy{
|
||||
AbortScaleDownDelaySeconds: &test.abortScaleDownDelaySeconds,
|
||||
},
|
||||
}
|
||||
|
||||
if test.abortScaleDownDelaySeconds > 0 {
|
||||
rollout.Status.Abort = true
|
||||
rollout.Spec.Strategy = v1alpha1.RolloutStrategy{
|
||||
BlueGreen: &v1alpha1.BlueGreenStrategy{
|
||||
AbortScaleDownDelaySeconds: &test.abortScaleDownDelaySeconds,
|
||||
},
|
||||
}
|
||||
|
||||
if test.abortScaleDownAnnotated {
|
||||
var deadline string
|
||||
if test.abortScaleDownDelayPassed {
|
||||
|
@ -230,7 +273,19 @@ func TestReconcileNewReplicaSet(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
if test.abortScaleDownDelaySeconds < 0 {
|
||||
rollout.Spec.Strategy = v1alpha1.RolloutStrategy{
|
||||
BlueGreen: &v1alpha1.BlueGreenStrategy{
|
||||
AbortScaleDownDelaySeconds: nil,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
scaled, err := roCtx.reconcileNewReplicaSet()
|
||||
if test.failRSUpdate {
|
||||
assert.Error(t, err)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
return
|
||||
|
@ -340,3 +395,182 @@ func TestReconcileOldReplicaSet(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsReplicaSetReferenced(t *testing.T) {
|
||||
newRSWithPodTemplateHash := func(hash string) *appsv1.ReplicaSet {
|
||||
return &appsv1.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
v1alpha1.DefaultRolloutUniqueLabelKey: hash,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
status v1alpha1.RolloutStatus
|
||||
canaryService string
|
||||
stableService string
|
||||
activeService string
|
||||
previewService string
|
||||
services []runtime.Object
|
||||
rsHash string
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
name: "empty hash",
|
||||
status: v1alpha1.RolloutStatus{StableRS: "abc123"},
|
||||
rsHash: "",
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "not referenced",
|
||||
status: v1alpha1.RolloutStatus{StableRS: "abc123"},
|
||||
rsHash: "def456",
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "stable rs referenced",
|
||||
status: v1alpha1.RolloutStatus{StableRS: "abc123"},
|
||||
rsHash: "abc123",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "current rs referenced",
|
||||
status: v1alpha1.RolloutStatus{CurrentPodHash: "abc123"},
|
||||
rsHash: "abc123",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "active referenced",
|
||||
status: v1alpha1.RolloutStatus{BlueGreen: v1alpha1.BlueGreenStatus{ActiveSelector: "abc123"}},
|
||||
rsHash: "abc123",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "active referenced",
|
||||
status: v1alpha1.RolloutStatus{BlueGreen: v1alpha1.BlueGreenStatus{PreviewSelector: "abc123"}},
|
||||
rsHash: "abc123",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "traffic routed canary current pod hash",
|
||||
status: v1alpha1.RolloutStatus{Canary: v1alpha1.CanaryStatus{Weights: &v1alpha1.TrafficWeights{
|
||||
Canary: v1alpha1.WeightDestination{
|
||||
PodTemplateHash: "abc123",
|
||||
},
|
||||
}}},
|
||||
rsHash: "abc123",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "traffic routed canary current pod hash",
|
||||
status: v1alpha1.RolloutStatus{Canary: v1alpha1.CanaryStatus{Weights: &v1alpha1.TrafficWeights{
|
||||
Stable: v1alpha1.WeightDestination{
|
||||
PodTemplateHash: "abc123",
|
||||
},
|
||||
}}},
|
||||
rsHash: "abc123",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "canary service still referenced",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
},
|
||||
canaryService: "mysvc",
|
||||
services: []runtime.Object{newService("mysvc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: "def456"}, nil)},
|
||||
rsHash: "def456",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "stable service still referenced",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
},
|
||||
stableService: "mysvc",
|
||||
services: []runtime.Object{newService("mysvc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: "def456"}, nil)},
|
||||
rsHash: "def456",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "active service still referenced",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
},
|
||||
activeService: "mysvc",
|
||||
services: []runtime.Object{newService("mysvc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: "def456"}, nil)},
|
||||
rsHash: "def456",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "preview service still referenced",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
},
|
||||
activeService: "mysvc",
|
||||
previewService: "mysvc2",
|
||||
services: []runtime.Object{newService("mysvc2", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: "def456"}, nil)},
|
||||
rsHash: "def456",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "service not found",
|
||||
status: v1alpha1.RolloutStatus{
|
||||
CurrentPodHash: "abc123",
|
||||
StableRS: "abc123",
|
||||
},
|
||||
activeService: "mysvc",
|
||||
previewService: "mysvc2",
|
||||
rsHash: "def456",
|
||||
expectedResult: false,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
fake := fake.Clientset{}
|
||||
k8sfake := k8sfake.NewSimpleClientset(tc.services...)
|
||||
informers := k8sinformers.NewSharedInformerFactory(k8sfake, 0)
|
||||
servicesLister := informers.Core().V1().Services().Lister()
|
||||
stopchan := make(chan struct{})
|
||||
defer close(stopchan)
|
||||
informers.Start(stopchan)
|
||||
informers.WaitForCacheSync(stopchan)
|
||||
|
||||
var r *v1alpha1.Rollout
|
||||
if tc.activeService != "" {
|
||||
r = newBlueGreenRollout("test", 1, nil, tc.activeService, tc.previewService)
|
||||
} else {
|
||||
r = newCanaryRollout("test", 1, nil, nil, nil, intstr.FromInt(0), intstr.FromInt(1))
|
||||
r.Spec.Strategy.Canary.CanaryService = tc.canaryService
|
||||
r.Spec.Strategy.Canary.StableService = tc.stableService
|
||||
}
|
||||
r.Status = tc.status
|
||||
|
||||
roCtx := &rolloutContext{
|
||||
rollout: r,
|
||||
log: logutil.WithRollout(r),
|
||||
reconcilerBase: reconcilerBase{
|
||||
servicesLister: servicesLister,
|
||||
argoprojclientset: &fake,
|
||||
kubeclientset: k8sfake,
|
||||
recorder: record.NewFakeEventRecorder(),
|
||||
},
|
||||
}
|
||||
rs := newRSWithPodTemplateHash(tc.rsHash)
|
||||
stillReferenced := roCtx.isReplicaSetReferenced(rs)
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
tc.expectedResult,
|
||||
stillReferenced,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -147,7 +147,7 @@ func (c *rolloutContext) awsVerifyTargetGroups(svc *corev1.Service) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
c.targetsVerified = pointer.BoolPtr(false)
|
||||
c.targetsVerified = pointer.Bool(false)
|
||||
|
||||
// get endpoints of service
|
||||
endpoints, err := c.kubeclientset.CoreV1().Endpoints(svc.Namespace).Get(ctx, svc.Name, metav1.GetOptions{})
|
||||
|
@ -177,7 +177,7 @@ func (c *rolloutContext) awsVerifyTargetGroups(svc *corev1.Service) error {
|
|||
}
|
||||
c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: conditions.TargetGroupVerifiedReason}, conditions.TargetGroupVerifiedRegistrationMessage, svc.Name, tgb.Spec.TargetGroupARN, verifyRes.EndpointsRegistered)
|
||||
}
|
||||
c.targetsVerified = pointer.BoolPtr(true)
|
||||
c.targetsVerified = pointer.Bool(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -266,6 +266,17 @@ func (c *rolloutContext) reconcileStableAndCanaryService() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
if dynamicallyRollingBackToStable, currSelector := isDynamicallyRollingBackToStable(c.rollout, c.newRS); dynamicallyRollingBackToStable {
|
||||
// User may have interrupted an update in order go back to stableRS, and is using dynamic
|
||||
// stable scaling. If that is the case, the stableRS might be undersized and if we blindly
|
||||
// switch service selector we could overwhelm stableRS pods.
|
||||
// If we get here, we detected that the canary service needs to be pointed back to
|
||||
// stable, but stable is not fully available. Skip the service switch for now.
|
||||
c.log.Infof("delaying fully promoted service switch of '%s' from %s to %s: ReplicaSet '%s' not fully available",
|
||||
c.rollout.Spec.Strategy.Canary.CanaryService, currSelector, replicasetutil.GetPodTemplateHash(c.newRS), c.newRS.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.newRS, true)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -144,7 +144,7 @@ func TestActiveServiceNotFound(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
_, pausedCondition := newInvalidSpecCondition(conditions.InvalidSpecReason, notUsedActiveSvc, errmsg)
|
||||
assert.Equal(t, calculatePatch(r, fmt.Sprintf(expectedPatch, pausedCondition, conditions.InvalidSpecReason, strings.ReplaceAll(errmsg, "\"", "\\\""))), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, fmt.Sprintf(expectedPatch, pausedCondition, conditions.InvalidSpecReason, strings.ReplaceAll(errmsg, "\"", "\\\""))), patch)
|
||||
}
|
||||
|
||||
func TestPreviewServiceNotFound(t *testing.T) {
|
||||
|
@ -173,7 +173,7 @@ func TestPreviewServiceNotFound(t *testing.T) {
|
|||
}
|
||||
}`
|
||||
_, pausedCondition := newInvalidSpecCondition(conditions.InvalidSpecReason, notUsedPreviewSvc, errmsg)
|
||||
assert.Equal(t, calculatePatch(r, fmt.Sprintf(expectedPatch, pausedCondition, conditions.InvalidSpecReason, strings.ReplaceAll(errmsg, "\"", "\\\""))), patch)
|
||||
assert.JSONEq(t, calculatePatch(r, fmt.Sprintf(expectedPatch, pausedCondition, conditions.InvalidSpecReason, strings.ReplaceAll(errmsg, "\"", "\\\""))), patch)
|
||||
|
||||
}
|
||||
|
||||
|
@ -437,26 +437,26 @@ func TestCanaryAWSVerifyTargetGroupsNotYetReady(t *testing.T) {
|
|||
TargetHealthDescriptions: []elbv2types.TargetHealthDescription{
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("1.2.3.4"),
|
||||
Port: pointer.Int32Ptr(80),
|
||||
Id: pointer.String("1.2.3.4"),
|
||||
Port: pointer.Int32(80),
|
||||
},
|
||||
},
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("5.6.7.8"),
|
||||
Port: pointer.Int32Ptr(80),
|
||||
Id: pointer.String("5.6.7.8"),
|
||||
Port: pointer.Int32(80),
|
||||
},
|
||||
},
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("2.4.6.8"), // irrelevant
|
||||
Port: pointer.Int32Ptr(81), // wrong port
|
||||
Id: pointer.String("2.4.6.8"), // irrelevant
|
||||
Port: pointer.Int32(81), // wrong port
|
||||
},
|
||||
},
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("9.8.7.6"), // irrelevant ip
|
||||
Port: pointer.Int32Ptr(80),
|
||||
Id: pointer.String("9.8.7.6"), // irrelevant ip
|
||||
Port: pointer.Int32(80),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -464,8 +464,8 @@ func TestCanaryAWSVerifyTargetGroupsNotYetReady(t *testing.T) {
|
|||
fakeELB.On("DescribeTargetHealth", mock.Anything, mock.Anything).Return(&thOut, nil)
|
||||
|
||||
r1 := newCanaryRollout("foo", 3, nil, []v1alpha1.CanaryStep{{
|
||||
SetWeight: pointer.Int32Ptr(10),
|
||||
}}, pointer.Int32Ptr(0), intstr.FromString("25%"), intstr.FromString("25%"))
|
||||
SetWeight: pointer.Int32(10),
|
||||
}}, pointer.Int32(0), intstr.FromString("25%"), intstr.FromString("25%"))
|
||||
|
||||
r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{
|
||||
ALB: &v1alpha1.ALBTrafficRouting{
|
||||
|
@ -491,6 +491,7 @@ func TestCanaryAWSVerifyTargetGroupsNotYetReady(t *testing.T) {
|
|||
r2.Status.Message = ""
|
||||
r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation))
|
||||
r2.Status.StableRS = rs2PodHash
|
||||
r2.Status.CurrentStepIndex = pointer.Int32(1)
|
||||
availableCondition, _ := newAvailableCondition(true)
|
||||
conditions.SetRolloutCondition(&r2.Status, availableCondition)
|
||||
healthyCondition, _ := newHealthyCondition(false)
|
||||
|
@ -536,26 +537,26 @@ func TestCanaryAWSVerifyTargetGroupsReady(t *testing.T) {
|
|||
TargetHealthDescriptions: []elbv2types.TargetHealthDescription{
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("1.2.3.4"),
|
||||
Port: pointer.Int32Ptr(80),
|
||||
Id: pointer.String("1.2.3.4"),
|
||||
Port: pointer.Int32(80),
|
||||
},
|
||||
},
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("5.6.7.8"),
|
||||
Port: pointer.Int32Ptr(80),
|
||||
Id: pointer.String("5.6.7.8"),
|
||||
Port: pointer.Int32(80),
|
||||
},
|
||||
},
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("2.4.6.8"), // irrelevant
|
||||
Port: pointer.Int32Ptr(80), // wrong port
|
||||
Id: pointer.String("2.4.6.8"), // irrelevant
|
||||
Port: pointer.Int32(80), // wrong port
|
||||
},
|
||||
},
|
||||
{
|
||||
Target: &elbv2types.TargetDescription{
|
||||
Id: pointer.StringPtr("9.8.7.6"), // irrelevant ip
|
||||
Port: pointer.Int32Ptr(80),
|
||||
Id: pointer.String("9.8.7.6"), // irrelevant ip
|
||||
Port: pointer.Int32(80),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -563,8 +564,8 @@ func TestCanaryAWSVerifyTargetGroupsReady(t *testing.T) {
|
|||
fakeELB.On("DescribeTargetHealth", mock.Anything, mock.Anything).Return(&thOut, nil)
|
||||
|
||||
r1 := newCanaryRollout("foo", 3, nil, []v1alpha1.CanaryStep{{
|
||||
SetWeight: pointer.Int32Ptr(10),
|
||||
}}, pointer.Int32Ptr(0), intstr.FromString("25%"), intstr.FromString("25%"))
|
||||
SetWeight: pointer.Int32(10),
|
||||
}}, pointer.Int32(0), intstr.FromString("25%"), intstr.FromString("25%"))
|
||||
r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{
|
||||
ALB: &v1alpha1.ALBTrafficRouting{
|
||||
Ingress: "ingress",
|
||||
|
@ -589,6 +590,7 @@ func TestCanaryAWSVerifyTargetGroupsReady(t *testing.T) {
|
|||
r2.Status.Message = ""
|
||||
r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation))
|
||||
r2.Status.StableRS = rs2PodHash
|
||||
r2.Status.CurrentStepIndex = pointer.Int32(1)
|
||||
availableCondition, _ := newAvailableCondition(true)
|
||||
conditions.SetRolloutCondition(&r2.Status, availableCondition)
|
||||
healthyCondition, _ := newHealthyCondition(false)
|
||||
|
@ -624,8 +626,8 @@ func TestCanaryAWSVerifyTargetGroupsSkip(t *testing.T) {
|
|||
defer f.Close()
|
||||
|
||||
r1 := newCanaryRollout("foo", 3, nil, []v1alpha1.CanaryStep{{
|
||||
SetWeight: pointer.Int32Ptr(10),
|
||||
}}, pointer.Int32Ptr(0), intstr.FromString("25%"), intstr.FromString("25%"))
|
||||
SetWeight: pointer.Int32(10),
|
||||
}}, pointer.Int32(0), intstr.FromString("25%"), intstr.FromString("25%"))
|
||||
r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{
|
||||
ALB: &v1alpha1.ALBTrafficRouting{
|
||||
Ingress: "ingress",
|
||||
|
@ -652,6 +654,7 @@ func TestCanaryAWSVerifyTargetGroupsSkip(t *testing.T) {
|
|||
r2.Status.Message = ""
|
||||
r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation))
|
||||
r2.Status.StableRS = rs2PodHash
|
||||
r2.Status.CurrentStepIndex = pointer.Int32(1)
|
||||
availableCondition, _ := newAvailableCondition(true)
|
||||
conditions.SetRolloutCondition(&r2.Status, availableCondition)
|
||||
healthyCondition, _ := newHealthyCondition(false)
|
||||
|
|
|
@ -85,7 +85,17 @@ func (c *rolloutContext) syncReplicaSetRevision() (*appsv1.ReplicaSet, error) {
|
|||
if annotationsUpdated || minReadySecondsNeedsUpdate || affinityNeedsUpdate {
|
||||
rsCopy.Spec.MinReadySeconds = c.rollout.Spec.MinReadySeconds
|
||||
rsCopy.Spec.Template.Spec.Affinity = replicasetutil.GenerateReplicaSetAffinity(*c.rollout)
|
||||
return c.kubeclientset.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
|
||||
rs, err := c.kubeclientset.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
c.log.WithError(err).Error("Error: updating replicaset revision")
|
||||
return nil, fmt.Errorf("error updating replicaset revision: %v", err)
|
||||
}
|
||||
c.log.Infof("Synced revision on ReplicaSet '%s' to '%s'", rs.Name, newRevision)
|
||||
err = c.replicaSetInformer.GetIndexer().Update(rs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error updating replicaset informer in syncReplicaSetRevision: %w", err)
|
||||
}
|
||||
return rs, nil
|
||||
}
|
||||
|
||||
// Should use the revision in existingNewRS's annotation, since it set by before
|
||||
|
@ -270,14 +280,16 @@ func (c *rolloutContext) createDesiredReplicaSet() (*appsv1.ReplicaSet, error) {
|
|||
// syncReplicasOnly is responsible for reconciling rollouts on scaling events.
|
||||
func (c *rolloutContext) syncReplicasOnly() error {
|
||||
c.log.Infof("Syncing replicas only due to scaling event")
|
||||
_, err := c.getAllReplicaSetsAndSyncRevision(false)
|
||||
var err error
|
||||
c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newStatus := c.rollout.Status.DeepCopy()
|
||||
|
||||
// NOTE: it is possible for newRS to be nil (e.g. when template and replicas changed at same time)
|
||||
if c.rollout.Spec.Strategy.BlueGreen != nil {
|
||||
previewSvc, activeSvc, err := c.getPreviewAndActiveServices()
|
||||
_, activeSvc, err := c.getPreviewAndActiveServices()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -286,7 +298,15 @@ func (c *rolloutContext) syncReplicasOnly() error {
|
|||
// so we can abort this resync
|
||||
return err
|
||||
}
|
||||
return c.syncRolloutStatusBlueGreen(previewSvc, activeSvc)
|
||||
activeRS, _ := replicasetutil.GetReplicaSetByTemplateHash(c.allRSs, newStatus.BlueGreen.ActiveSelector)
|
||||
if activeRS != nil {
|
||||
newStatus.HPAReplicas = activeRS.Status.Replicas
|
||||
newStatus.AvailableReplicas = activeRS.Status.AvailableReplicas
|
||||
} else {
|
||||
// when we do not have an active replicaset, accounting is done on the default rollout selector
|
||||
newStatus.HPAReplicas = replicasetutil.GetActualReplicaCountForReplicaSets(c.allRSs)
|
||||
newStatus.AvailableReplicas = replicasetutil.GetAvailableReplicaCountForReplicaSets(c.allRSs)
|
||||
}
|
||||
}
|
||||
// The controller wants to use the rolloutCanary method to reconcile the rollout if the rollout is not paused.
|
||||
// If there are no scaling events, the rollout should only sync its status
|
||||
|
@ -296,9 +316,10 @@ func (c *rolloutContext) syncReplicasOnly() error {
|
|||
// so we can abort this resync
|
||||
return err
|
||||
}
|
||||
return c.syncRolloutStatusCanary()
|
||||
newStatus.AvailableReplicas = replicasetutil.GetAvailableReplicaCountForReplicaSets(c.allRSs)
|
||||
newStatus.HPAReplicas = replicasetutil.GetActualReplicaCountForReplicaSets(c.allRSs)
|
||||
}
|
||||
return fmt.Errorf("no rollout strategy provided")
|
||||
return c.persistRolloutStatus(newStatus)
|
||||
}
|
||||
|
||||
// isScalingEvent checks whether the provided rollout has been updated with a scaling event
|
||||
|
@ -306,7 +327,8 @@ func (c *rolloutContext) syncReplicasOnly() error {
|
|||
//
|
||||
// rsList should come from getReplicaSetsForRollout(r).
|
||||
func (c *rolloutContext) isScalingEvent() (bool, error) {
|
||||
_, err := c.getAllReplicaSetsAndSyncRevision(false)
|
||||
var err error
|
||||
c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -355,8 +377,18 @@ func (c *rolloutContext) scaleReplicaSet(rs *appsv1.ReplicaSet, newScale int32,
|
|||
if fullScaleDown && !c.shouldDelayScaleDownOnAbort() {
|
||||
delete(rsCopy.Annotations, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey)
|
||||
}
|
||||
|
||||
rs, err = c.kubeclientset.AppsV1().ReplicaSets(rsCopy.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
|
||||
if err == nil && sizeNeedsUpdate {
|
||||
if err != nil {
|
||||
return scaled, rs, fmt.Errorf("error updating replicaset %s: %w", rs.Name, err)
|
||||
}
|
||||
err = c.replicaSetInformer.GetIndexer().Update(rs)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error updating replicaset informer in scaleReplicaSet: %w", err)
|
||||
return scaled, rs, err
|
||||
}
|
||||
|
||||
if sizeNeedsUpdate {
|
||||
scaled = true
|
||||
revision, _ := replicasetutil.Revision(rs)
|
||||
c.recorder.Eventf(rollout, record.EventOptions{EventReason: conditions.ScalingReplicaSetReason}, conditions.ScalingReplicaSetMessage, scalingOperation, rs.Name, revision, oldScale, newScale)
|
||||
|
|
|
@ -163,25 +163,20 @@ func (c *rolloutContext) reconcileTrafficRouting() error {
|
|||
canaryHash = c.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
|
||||
}
|
||||
|
||||
if rolloututil.IsFullyPromoted(c.rollout) {
|
||||
// when we are fully promoted. desired canary weight should be 0
|
||||
if dynamicallyRollingBackToStable, prevDesiredHash := isDynamicallyRollingBackToStable(c.rollout, c.newRS); dynamicallyRollingBackToStable {
|
||||
desiredWeight = c.calculateDesiredWeightOnAbortOrStableRollback()
|
||||
// Since stableRS == desiredRS, we must balance traffic between the
|
||||
// *previous desired* vs. stable (as opposed to current desired vs. stable).
|
||||
// The previous desired is remembered in Status.Canary.Weights.Canary.PodTemplateHash.
|
||||
// See: https://github.com/argoproj/argo-rollouts/issues/3020
|
||||
canaryHash = prevDesiredHash
|
||||
} else if rolloututil.IsFullyPromoted(c.rollout) {
|
||||
err := reconciler.RemoveManagedRoutes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if c.pauseContext.IsAborted() {
|
||||
// when aborted, desired canary weight should immediately be 0 (100% to stable), *unless*
|
||||
// we are using dynamic stable scaling. In that case, we are dynamically decreasing the
|
||||
// weight to the canary according to the availability of the stable (whatever it can support).
|
||||
if c.rollout.Spec.Strategy.Canary.DynamicStableScale {
|
||||
desiredWeight = 100 - ((100 * c.stableRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas)
|
||||
if c.rollout.Status.Canary.Weights != nil {
|
||||
// This ensures that if we are already at a lower weight, then we will not
|
||||
// increase the weight because stable availability is flapping (e.g. pod restarts)
|
||||
desiredWeight = minInt(desiredWeight, c.rollout.Status.Canary.Weights.Canary.Weight)
|
||||
}
|
||||
}
|
||||
|
||||
desiredWeight = c.calculateDesiredWeightOnAbortOrStableRollback()
|
||||
if (c.rollout.Spec.Strategy.Canary.DynamicStableScale && desiredWeight == 0) || !c.rollout.Spec.Strategy.Canary.DynamicStableScale {
|
||||
// If we are using dynamic stable scale we need to also make sure that desiredWeight=0 aka we are completely
|
||||
// done with aborting before resetting the canary service selectors back to stable
|
||||
|
@ -295,6 +290,26 @@ func (c *rolloutContext) reconcileTrafficRouting() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// calculateDesiredWeightOnAbortOrStableRollback returns the desired weight to use when we are either
|
||||
// aborting, or rolling back to stable RS.
|
||||
func (c *rolloutContext) calculateDesiredWeightOnAbortOrStableRollback() int32 {
|
||||
if !c.rollout.Spec.Strategy.Canary.DynamicStableScale {
|
||||
// When aborting or rolling back to stable RS and dynamicStableScaling is disabled,
|
||||
// then desired canary weight should immediately be 0 (100% to stable) since we can trust
|
||||
// that it is fully scaled up
|
||||
return 0
|
||||
}
|
||||
// When using dynamic stable scaling, we must dynamically decreasing the weight to the canary
|
||||
// according to the availability of the stable (whatever it can support).
|
||||
desiredWeight := 100 - ((100 * c.stableRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas)
|
||||
if c.rollout.Status.Canary.Weights != nil {
|
||||
// This ensures that if we are already at a lower weight, then we will not
|
||||
// increase the weight because stable availability is flapping (e.g. pod restarts)
|
||||
desiredWeight = minInt(desiredWeight, c.rollout.Status.Canary.Weights.Canary.Weight)
|
||||
}
|
||||
return desiredWeight
|
||||
}
|
||||
|
||||
// trafficWeightUpdatedMessage returns a message we emit for the kubernetes event whenever we adjust traffic weights
|
||||
func trafficWeightUpdatedMessage(prev, new *v1alpha1.TrafficWeights) string {
|
||||
var details []string
|
||||
|
|
|
@ -2,6 +2,7 @@ package rollout
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -752,8 +753,8 @@ func TestCanaryWithTrafficRoutingAddScaleDownDelay(t *testing.T) {
|
|||
defer f.Close()
|
||||
|
||||
r1 := newCanaryRollout("foo", 1, nil, []v1alpha1.CanaryStep{{
|
||||
SetWeight: pointer.Int32Ptr(10),
|
||||
}}, pointer.Int32Ptr(0), intstr.FromInt(1), intstr.FromInt(1))
|
||||
SetWeight: pointer.Int32(10),
|
||||
}}, pointer.Int32(0), intstr.FromInt(1), intstr.FromInt(1))
|
||||
r1.Spec.Strategy.Canary.CanaryService = "canary"
|
||||
r1.Spec.Strategy.Canary.StableService = "stable"
|
||||
r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{
|
||||
|
@ -765,6 +766,7 @@ func TestCanaryWithTrafficRoutingAddScaleDownDelay(t *testing.T) {
|
|||
rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
|
||||
r2 = updateCanaryRolloutStatus(r2, rs2PodHash, 2, 1, 2, false)
|
||||
r2.Status.ObservedGeneration = strconv.Itoa(int(r2.Generation))
|
||||
r2.Status.CurrentStepIndex = pointer.Int32(1)
|
||||
availableCondition, _ := newAvailableCondition(true)
|
||||
conditions.SetRolloutCondition(&r2.Status, availableCondition)
|
||||
completedCondition, _ := newCompletedCondition(true)
|
||||
|
@ -1153,3 +1155,94 @@ func TestRolloutReplicaIsAvailableAndGenerationNotBeModifiedShouldModifyVirtualS
|
|||
}).Once().Return(nil)
|
||||
f.run(getKey(r1, t))
|
||||
}
|
||||
|
||||
// This makes sure we don't set weight to zero if we are rolling back to stable with DynamicStableScale
|
||||
func TestDontWeightToZeroWhenDynamicallyRollingBackToStable(t *testing.T) {
|
||||
f := newFixture(t)
|
||||
defer f.Close()
|
||||
|
||||
steps := []v1alpha1.CanaryStep{
|
||||
{
|
||||
SetWeight: pointer.Int32(90),
|
||||
},
|
||||
{
|
||||
Pause: &v1alpha1.RolloutPause{},
|
||||
},
|
||||
}
|
||||
r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32(1), intstr.FromInt(1), intstr.FromInt(1))
|
||||
r1.Spec.Strategy.Canary.DynamicStableScale = true
|
||||
r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{
|
||||
SMI: &v1alpha1.SMITrafficRouting{},
|
||||
}
|
||||
r1.Spec.Strategy.Canary.CanaryService = "canary"
|
||||
r1.Spec.Strategy.Canary.StableService = "stable"
|
||||
r1.Status.ReadyReplicas = 10
|
||||
r1.Status.AvailableReplicas = 10
|
||||
r2 := bumpVersion(r1)
|
||||
|
||||
rs1 := newReplicaSetWithStatus(r1, 1, 1)
|
||||
rs2 := newReplicaSetWithStatus(r2, 9, 9)
|
||||
|
||||
rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
|
||||
rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
|
||||
canarySelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash}
|
||||
stableSelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash}
|
||||
canarySvc := newService("canary", 80, canarySelector, r1)
|
||||
stableSvc := newService("stable", 80, stableSelector, r1)
|
||||
|
||||
// simulate rollback to stable
|
||||
r2.Spec = r1.Spec
|
||||
r2.Status.StableRS = rs1PodHash
|
||||
r2.Status.CurrentPodHash = rs1PodHash // will cause IsFullyPromoted() to be true
|
||||
r2.Status.Canary.Weights = &v1alpha1.TrafficWeights{
|
||||
Canary: v1alpha1.WeightDestination{
|
||||
Weight: 10,
|
||||
ServiceName: "canary",
|
||||
PodTemplateHash: rs2PodHash,
|
||||
},
|
||||
Stable: v1alpha1.WeightDestination{
|
||||
Weight: 90,
|
||||
ServiceName: "stable",
|
||||
PodTemplateHash: rs1PodHash,
|
||||
},
|
||||
}
|
||||
|
||||
f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc)
|
||||
f.replicaSetLister = append(f.replicaSetLister, rs1, rs2)
|
||||
|
||||
f.rolloutLister = append(f.rolloutLister, r2)
|
||||
f.objects = append(f.objects, r2)
|
||||
|
||||
f.expectUpdateReplicaSetAction(rs1) // Updates the revision annotation from 1 to 3 from func isScalingEvent
|
||||
f.expectUpdateRolloutAction(r2) // Update the rollout revision from 1 to 3
|
||||
scaleUpIndex := f.expectUpdateReplicaSetAction(rs1) // Scale The replicaset from 1 to 10 from func scaleReplicaSet
|
||||
f.expectPatchRolloutAction(r2) // Updates the rollout status from the scaling to 10 action
|
||||
|
||||
f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler()
|
||||
f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(func(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error {
|
||||
// make sure UpdateHash was called with previous desired hash (not current pod hash)
|
||||
if canaryHash != rs2PodHash {
|
||||
return fmt.Errorf("UpdateHash was called with canary hash: %s. Expected: %s", canaryHash, rs2PodHash)
|
||||
}
|
||||
if stableHash != rs1PodHash {
|
||||
return fmt.Errorf("UpdateHash was called with stable hash: %s. Expected: %s", canaryHash, rs1PodHash)
|
||||
}
|
||||
return nil
|
||||
|
||||
})
|
||||
f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error {
|
||||
// make sure SetWeight was not changed
|
||||
if desiredWeight != 10 {
|
||||
return fmt.Errorf("SetWeight was called with unexpected weight: %d. Expected: 10", desiredWeight)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil)
|
||||
f.fakeTrafficRouting.On("RemoveManagedRoutes", mock.Anything, mock.Anything).Return(nil)
|
||||
f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(true), nil)
|
||||
f.run(getKey(r1, t))
|
||||
|
||||
// Make sure we scale up stable ReplicaSet to 10
|
||||
rs1Updated := f.getUpdatedReplicaSet(scaleUpIndex)
|
||||
assert.Equal(t, int32(10), *rs1Updated.Spec.Replicas)
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
rov1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
"github.com/argoproj/argo-rollouts/test/fixtures"
|
||||
)
|
||||
|
||||
|
@ -620,3 +621,62 @@ func (s *CanarySuite) TestCanaryDynamicStableScale() {
|
|||
ExpectServiceSelector("dynamic-stable-scale-canary", map[string]string{"app": "dynamic-stable-scale", "rollouts-pod-template-hash": "868d98995b"}, false).
|
||||
ExpectRevisionPodCount("1", 4)
|
||||
}
|
||||
|
||||
// TestCanaryDynamicStableScaleRollbackToStable verifies when we rollback to stable with
|
||||
// DynamicStableScale enabled, we do so in a safe manner without shifting traffic back to stable
|
||||
// before it can handle it
|
||||
func (s *CanarySuite) TestCanaryDynamicStableScaleRollbackToStable() {
|
||||
s.Given().
|
||||
RolloutObjects(`@functional/canary-dynamic-stable-scale.yaml`).
|
||||
When().
|
||||
ApplyManifests().
|
||||
MarkPodsReady("1", 4). // mark all 4 pods ready
|
||||
WaitForRolloutStatus("Healthy").
|
||||
UpdateSpec().
|
||||
MarkPodsReady("2", 1). // mark 1 of 1 canary pods ready
|
||||
WaitForRolloutStatus("Paused").
|
||||
Sleep(2*time.Second).
|
||||
Then().
|
||||
ExpectRevisionPodCount("1", 3).
|
||||
ExpectRevisionPodCount("2", 1).
|
||||
When().
|
||||
UndoRollout(1). // Rollback to stable (revision 1)
|
||||
Sleep(2*time.Second).
|
||||
Then().
|
||||
ExpectRevisionPodCount("3", 4). // Ensure we fully scale up the stable (now revision 3)
|
||||
ExpectRevisionPodCount("2", 1). // And do not scale down the previous desired (revision 2)
|
||||
Assert(func(t *fixtures.Then) {
|
||||
// Make sure canary service is still pointing to the previous desired (revision 2)
|
||||
rs3 := t.GetReplicaSetByRevision("3")
|
||||
rs2 := t.GetReplicaSetByRevision("2")
|
||||
canarySvc, stableSvc := t.GetServices()
|
||||
assert.Equal(s.T(), rs2.Labels[rov1.DefaultRolloutUniqueLabelKey], canarySvc.Spec.Selector["rollouts-pod-template-hash"])
|
||||
assert.Equal(s.T(), rs3.Labels[rov1.DefaultRolloutUniqueLabelKey], stableSvc.Spec.Selector["rollouts-pod-template-hash"])
|
||||
|
||||
// Ensure we did not touch the weights even though we are "fully promoted"
|
||||
ro := t.GetRollout()
|
||||
assert.Equal(s.T(), rs2.Labels[rov1.DefaultRolloutUniqueLabelKey], ro.Status.Canary.Weights.Canary.PodTemplateHash)
|
||||
assert.Equal(s.T(), int32(25), ro.Status.Canary.Weights.Canary.Weight)
|
||||
assert.Equal(s.T(), rs3.Labels[rov1.DefaultRolloutUniqueLabelKey], ro.Status.Canary.Weights.Stable.PodTemplateHash)
|
||||
assert.Equal(s.T(), int32(75), ro.Status.Canary.Weights.Stable.Weight)
|
||||
}).
|
||||
When().
|
||||
MarkPodsReady("3", 1). // marks the 4th pod of stableRS/newRS (revision 3) ready
|
||||
WaitForRevisionPodCount("2", 0). // make sure we scale down the previous desired (revision 2)
|
||||
Then().
|
||||
Assert(func(t *fixtures.Then) {
|
||||
// Make sure canary/stable service is updated to point to revision 3
|
||||
rs3 := t.GetReplicaSetByRevision("3")
|
||||
canarySvc, stableSvc := t.GetServices()
|
||||
assert.Equal(s.T(), rs3.Labels[rov1.DefaultRolloutUniqueLabelKey], canarySvc.Spec.Selector["rollouts-pod-template-hash"])
|
||||
assert.Equal(s.T(), rs3.Labels[rov1.DefaultRolloutUniqueLabelKey], stableSvc.Spec.Selector["rollouts-pod-template-hash"])
|
||||
|
||||
// Ensure we are 100% back to stable
|
||||
ro := t.GetRollout()
|
||||
assert.Equal(s.T(), rs3.Labels[rov1.DefaultRolloutUniqueLabelKey], ro.Status.Canary.Weights.Canary.PodTemplateHash)
|
||||
assert.Equal(s.T(), int32(0), ro.Status.Canary.Weights.Canary.Weight)
|
||||
assert.Equal(s.T(), rs3.Labels[rov1.DefaultRolloutUniqueLabelKey], ro.Status.Canary.Weights.Stable.PodTemplateHash)
|
||||
assert.Equal(s.T(), int32(100), ro.Status.Canary.Weights.Stable.Weight)
|
||||
|
||||
})
|
||||
}
|
||||
|
|
|
@ -303,7 +303,7 @@ func (s *IstioSuite) TestIstioAbortUpdate() {
|
|||
Then().
|
||||
When().
|
||||
AbortRollout().
|
||||
WaitForRolloutStatus("Degraded").
|
||||
WaitForRolloutStatus("Healthy").
|
||||
Then().
|
||||
ExpectRevisionPodCount("1", 1).
|
||||
When().
|
||||
|
@ -316,7 +316,7 @@ func (s *IstioSuite) TestIstioAbortUpdate() {
|
|||
Then().
|
||||
When().
|
||||
AbortRollout().
|
||||
WaitForRolloutStatus("Degraded").
|
||||
WaitForRolloutStatus("Healthy").
|
||||
Then().
|
||||
ExpectRevisionPodCount("2", 1)
|
||||
}
|
||||
|
|
|
@ -71,6 +71,7 @@ func (c *Common) CheckError(err error) {
|
|||
}
|
||||
}
|
||||
|
||||
// Rollout returns the original rollout manifest used in the test
|
||||
func (c *Common) Rollout() *rov1.Rollout {
|
||||
var ro rov1.Rollout
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(c.rollout.Object, &ro)
|
||||
|
@ -78,6 +79,13 @@ func (c *Common) Rollout() *rov1.Rollout {
|
|||
return &ro
|
||||
}
|
||||
|
||||
// GetRollout returns the live rollout object in the cluster
|
||||
func (c *Common) GetRollout() *rov1.Rollout {
|
||||
ro, err := c.rolloutClient.ArgoprojV1alpha1().Rollouts(c.namespace).Get(context.TODO(), c.Rollout().GetName(), metav1.GetOptions{})
|
||||
c.CheckError(err)
|
||||
return ro
|
||||
}
|
||||
|
||||
func (c *Common) PrintRollout(name string) {
|
||||
streams := genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}
|
||||
o := options.NewArgoRolloutsOptions(streams)
|
||||
|
|
|
@ -25,12 +25,14 @@ import (
|
|||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-rollouts/pkg/apiclient/rollout"
|
||||
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
rov1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/abort"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/promote"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/restart"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/retry"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/status"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/cmd/undo"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/options"
|
||||
"github.com/argoproj/argo-rollouts/pkg/kubectl-argo-rollouts/viewcontroller"
|
||||
rolloututil "github.com/argoproj/argo-rollouts/utils/rollout"
|
||||
|
@ -185,6 +187,16 @@ func (w *When) RetryRollout() *When {
|
|||
return w
|
||||
}
|
||||
|
||||
func (w *When) UndoRollout(toRevision int64) *When {
|
||||
if w.rollout == nil {
|
||||
w.t.Fatal("Rollout not set")
|
||||
}
|
||||
_, err := undo.RunUndoRollout(w.dynamicClient.Resource(v1alpha1.RolloutGVR).Namespace(w.namespace), w.kubeClient, w.rollout.GetName(), toRevision)
|
||||
w.CheckError(err)
|
||||
w.log.Infof("Undo rollout to %d", toRevision)
|
||||
return w
|
||||
}
|
||||
|
||||
func (w *When) RestartRollout() *When {
|
||||
if w.rollout == nil {
|
||||
w.t.Fatal("Rollout not set")
|
||||
|
|
|
@ -71,6 +71,7 @@
|
|||
"webpack-merge": "^5.7.3"
|
||||
},
|
||||
"resolutions": {
|
||||
"@types/react": "16.9.3"
|
||||
"@types/react": "16.9.3",
|
||||
"moment": "2.29.4"
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -7973,12 +7973,7 @@ moment-timezone@^0.5.33:
|
|||
dependencies:
|
||||
moment ">= 2.9.0"
|
||||
|
||||
"moment@>= 2.9.0", moment@^2.20.1:
|
||||
version "2.29.1"
|
||||
resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.1.tgz#b2be769fa31940be9eeea6469c075e35006fa3d3"
|
||||
integrity sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==
|
||||
|
||||
moment@^2.29.4:
|
||||
moment@2.29.4, "moment@>= 2.9.0", moment@^2.20.1, moment@^2.29.4:
|
||||
version "2.29.4"
|
||||
resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.4.tgz#3dbe052889fe7c1b2ed966fcb3a77328964ef108"
|
||||
integrity sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==
|
||||
|
|
|
@ -218,9 +218,7 @@ func (e *EventRecorderAdapter) defaultEventf(object runtime.Object, warn bool, o
|
|||
err := e.sendNotifications(api, object, opts)
|
||||
if err != nil {
|
||||
logCtx.Errorf("Notifications failed to send for eventReason %s with error: %s", opts.EventReason, err)
|
||||
e.NotificationFailedCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc()
|
||||
}
|
||||
e.NotificationSuccessCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -248,7 +246,7 @@ func NewAPIFactorySettings() api.Settings {
|
|||
}
|
||||
|
||||
// Send notifications for triggered event if user is subscribed
|
||||
func (e *EventRecorderAdapter) sendNotifications(notificationsAPI api.API, object runtime.Object, opts EventOptions) error {
|
||||
func (e *EventRecorderAdapter) sendNotifications(notificationsAPI api.API, object runtime.Object, opts EventOptions) []error {
|
||||
logCtx := logutil.WithObject(object)
|
||||
_, namespace, name := logutil.KindNamespaceName(logCtx)
|
||||
startTime := timeutil.Now()
|
||||
|
@ -259,7 +257,7 @@ func (e *EventRecorderAdapter) sendNotifications(notificationsAPI api.API, objec
|
|||
}()
|
||||
|
||||
if notificationsAPI == nil {
|
||||
return fmt.Errorf("notificationsAPI is nil")
|
||||
return []error{fmt.Errorf("NotificationsAPI is nil")}
|
||||
}
|
||||
|
||||
cfg := notificationsAPI.GetConfig()
|
||||
|
@ -274,39 +272,53 @@ func (e *EventRecorderAdapter) sendNotifications(notificationsAPI api.API, objec
|
|||
|
||||
objMap, err := toObjectMap(object)
|
||||
if err != nil {
|
||||
return err
|
||||
return []error{err}
|
||||
}
|
||||
|
||||
emptyCondition := hash("")
|
||||
|
||||
// We should not return in these loops because we want other configured notifications to still send if they can.
|
||||
errors := []error{}
|
||||
for _, destination := range destinations {
|
||||
res, err := notificationsAPI.RunTrigger(trigger, objMap)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to execute condition of trigger %s: %v", trigger, err)
|
||||
return err
|
||||
log.Errorf("Failed to run trigger, trigger: %s, destination: %s, namespace config: %s : %v",
|
||||
trigger, destination, notificationsAPI.GetConfig().Namespace, err)
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
log.Infof("Trigger %s result: %v", trigger, res)
|
||||
|
||||
for _, c := range res {
|
||||
log.Infof("Res When Condition hash: %s, Templates: %s", c.Key, c.Templates)
|
||||
log.Infof("Result when condition hash: %s, templates: %s", c.Key, c.Templates)
|
||||
s := strings.Split(c.Key, ".")[1]
|
||||
if s != emptyCondition && c.Triggered == true {
|
||||
err = notificationsAPI.Send(objMap, c.Templates, destination)
|
||||
if err != nil {
|
||||
log.Errorf("notification error: %s", err.Error())
|
||||
return err
|
||||
log.Errorf("Failed to execute the sending of notification on not empty condition, trigger: %s, destination: %s, namespace config: %s : %v",
|
||||
trigger, destination, notificationsAPI.GetConfig().Namespace, err)
|
||||
e.NotificationFailedCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc()
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
e.NotificationSuccessCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc()
|
||||
} else if s == emptyCondition {
|
||||
err = notificationsAPI.Send(objMap, c.Templates, destination)
|
||||
if err != nil {
|
||||
log.Errorf("notification error: %s", err.Error())
|
||||
return err
|
||||
log.Errorf("Failed to execute the sending of notification on empty condition, trigger: %s, destination: %s, namespace config: %s : %v",
|
||||
trigger, destination, notificationsAPI.GetConfig().Namespace, err)
|
||||
e.NotificationFailedCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc()
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
e.NotificationSuccessCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
if len(errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
return errors
|
||||
}
|
||||
|
||||
// This function is copied over from notification engine to make sure we honour emptyCondition
|
||||
|
|
|
@ -113,7 +113,7 @@ func TestSendNotifications(t *testing.T) {
|
|||
rec.EventRecorderAdapter.apiFactory = apiFactory
|
||||
//ch := make(chan prometheus.HistogramVec, 1)
|
||||
err := rec.sendNotifications(mockAPI, &r, EventOptions{EventReason: "FooReason"})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSendNotificationsWhenCondition(t *testing.T) {
|
||||
|
@ -140,7 +140,7 @@ func TestSendNotificationsWhenCondition(t *testing.T) {
|
|||
rec.EventRecorderAdapter.apiFactory = apiFactory
|
||||
//ch := make(chan prometheus.HistogramVec, 1)
|
||||
err := rec.sendNotifications(mockAPI, &r, EventOptions{EventReason: "FooReason"})
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSendNotificationsWhenConditionTime(t *testing.T) {
|
||||
|
@ -340,7 +340,7 @@ func TestSendNotificationsFails(t *testing.T) {
|
|||
rec.EventRecorderAdapter.apiFactory = apiFactory
|
||||
|
||||
err := rec.sendNotifications(mockAPI, &r, EventOptions{EventReason: "FooReason"})
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, err, 1)
|
||||
})
|
||||
|
||||
t.Run("GetAPIError", func(t *testing.T) {
|
||||
|
@ -349,7 +349,7 @@ func TestSendNotificationsFails(t *testing.T) {
|
|||
rec.EventRecorderAdapter.apiFactory = apiFactory
|
||||
|
||||
err := rec.sendNotifications(nil, &r, EventOptions{EventReason: "FooReason"})
|
||||
assert.Error(t, err)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
}
|
||||
|
@ -380,7 +380,7 @@ func TestSendNotificationsFailsWithRunTriggerError(t *testing.T) {
|
|||
rec.EventRecorderAdapter.apiFactory = apiFactory
|
||||
|
||||
err := rec.sendNotifications(mockAPI, &r, EventOptions{EventReason: "FooReason"})
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, err, 1)
|
||||
})
|
||||
|
||||
t.Run("GetAPIError", func(t *testing.T) {
|
||||
|
@ -389,7 +389,7 @@ func TestSendNotificationsFailsWithRunTriggerError(t *testing.T) {
|
|||
rec.EventRecorderAdapter.apiFactory = apiFactory
|
||||
|
||||
err := rec.sendNotifications(nil, &r, EventOptions{EventReason: "FooReason"})
|
||||
assert.Error(t, err)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
}
|
||||
|
@ -419,7 +419,7 @@ func TestSendNotificationsNoTrigger(t *testing.T) {
|
|||
rec.EventRecorderAdapter.apiFactory = apiFactory
|
||||
|
||||
err := rec.sendNotifications(mockAPI, &r, EventOptions{EventReason: "MissingReason"})
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, err, 1)
|
||||
}
|
||||
|
||||
func TestNewAPIFactorySettings(t *testing.T) {
|
||||
|
|
|
@ -41,8 +41,13 @@ func AtDesiredReplicaCountsForCanary(ro *v1alpha1.Rollout, newRS, stableRS *apps
|
|||
return false
|
||||
}
|
||||
}
|
||||
if GetAvailableReplicaCountForReplicaSets(olderRSs) != int32(0) {
|
||||
return false
|
||||
if ro.Spec.Strategy.Canary.TrafficRouting == nil {
|
||||
// For basic canary, all older ReplicaSets must be scaled to zero since they serve traffic.
|
||||
// For traffic weighted canary, it's okay if they are still scaled up, since the traffic
|
||||
// router will prevent them from serving traffic
|
||||
if GetAvailableReplicaCountForReplicaSets(olderRSs) != int32(0) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -341,6 +341,15 @@ func FindActiveOrLatest(newRS *appsv1.ReplicaSet, oldRSs []*appsv1.ReplicaSet) *
|
|||
}
|
||||
}
|
||||
|
||||
// IsActive returns if replica set is active (has, or at least ought to have pods).
|
||||
func IsActive(rs *appsv1.ReplicaSet) bool {
|
||||
if rs == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return len(controller.FilterActiveReplicaSets([]*appsv1.ReplicaSet{rs})) > 0
|
||||
}
|
||||
|
||||
// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
|
||||
func GetReplicaCountForReplicaSets(replicaSets []*appsv1.ReplicaSet) int32 {
|
||||
totalReplicas := int32(0)
|
||||
|
@ -583,17 +592,6 @@ func (o ReplicaSetsByRevisionNumber) Less(i, j int) bool {
|
|||
return iRevision < jRevision
|
||||
}
|
||||
|
||||
// IsStillReferenced returns if the given ReplicaSet is still being referenced by any of
|
||||
// the current, stable, blue-green active references. Used to determine if the ReplicaSet can
|
||||
// safely be scaled to zero, or deleted.
|
||||
func IsStillReferenced(status v1alpha1.RolloutStatus, rs *appsv1.ReplicaSet) bool {
|
||||
hash := GetPodTemplateHash(rs)
|
||||
if hash != "" && (hash == status.StableRS || hash == status.CurrentPodHash || hash == status.BlueGreen.ActiveSelector) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HasScaleDownDeadline returns whether or not the given ReplicaSet is annotated with a scale-down delay
|
||||
func HasScaleDownDeadline(rs *appsv1.ReplicaSet) bool {
|
||||
if rs == nil || rs.Annotations == nil {
|
||||
|
|
|
@ -153,6 +153,18 @@ func TestFindOldReplicaSets(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIsActive(t *testing.T) {
|
||||
rs1 := generateRS(generateRollout("foo"))
|
||||
*(rs1.Spec.Replicas) = 1
|
||||
|
||||
rs2 := generateRS(generateRollout("foo"))
|
||||
*(rs2.Spec.Replicas) = 0
|
||||
|
||||
assert.False(t, IsActive(nil))
|
||||
assert.True(t, IsActive(&rs1))
|
||||
assert.False(t, IsActive(&rs2))
|
||||
}
|
||||
|
||||
func TestGetReplicaCountForReplicaSets(t *testing.T) {
|
||||
rs1 := generateRS(generateRollout("foo"))
|
||||
*(rs1.Spec.Replicas) = 1
|
||||
|
@ -1066,48 +1078,6 @@ func TestNeedsRestart(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestIsStillReferenced(t *testing.T) {
|
||||
newRSWithPodTemplateHash := func(hash string) *appsv1.ReplicaSet {
|
||||
return &appsv1.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
v1alpha1.DefaultRolloutUniqueLabelKey: hash,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
{
|
||||
status := v1alpha1.RolloutStatus{StableRS: "abc123"}
|
||||
rs := &appsv1.ReplicaSet{}
|
||||
assert.False(t, IsStillReferenced(status, rs))
|
||||
}
|
||||
{
|
||||
status := v1alpha1.RolloutStatus{StableRS: "abc123"}
|
||||
rs := newRSWithPodTemplateHash("")
|
||||
assert.False(t, IsStillReferenced(status, rs))
|
||||
}
|
||||
{
|
||||
status := v1alpha1.RolloutStatus{StableRS: "abc123"}
|
||||
rs := newRSWithPodTemplateHash("abc123")
|
||||
assert.True(t, IsStillReferenced(status, rs))
|
||||
}
|
||||
{
|
||||
status := v1alpha1.RolloutStatus{CurrentPodHash: "abc123"}
|
||||
rs := newRSWithPodTemplateHash("abc123")
|
||||
assert.True(t, IsStillReferenced(status, rs))
|
||||
}
|
||||
{
|
||||
status := v1alpha1.RolloutStatus{BlueGreen: v1alpha1.BlueGreenStatus{ActiveSelector: "abc123"}}
|
||||
rs := newRSWithPodTemplateHash("abc123")
|
||||
assert.True(t, IsStillReferenced(status, rs))
|
||||
}
|
||||
{
|
||||
status := v1alpha1.RolloutStatus{StableRS: "abc123"}
|
||||
rs := newRSWithPodTemplateHash("def456")
|
||||
assert.False(t, IsStillReferenced(status, rs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasScaleDownDeadline(t *testing.T) {
|
||||
{
|
||||
assert.False(t, HasScaleDownDeadline(nil))
|
||||
|
|
|
@ -13,3 +13,8 @@ var Now = time.Now
|
|||
var MetaNow = func() metav1.Time {
|
||||
return metav1.Time{Time: Now()}
|
||||
}
|
||||
|
||||
// MetaTime is a wrapper around metav1.Time and used to override behavior in tests.
|
||||
var MetaTime = func(time time.Time) metav1.Time {
|
||||
return metav1.Time{Time: time}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue