From 4ecbdf0d498326a6ee3babdba7b012009f21e264 Mon Sep 17 00:00:00 2001 From: veophi Date: Sun, 30 Jan 2022 14:33:08 +0800 Subject: [PATCH] merge batchrelease controller Signed-off-by: veophi --- api/v1alpha1/rollout_types.go | 14 +- api/v1alpha1/zz_generated.deepcopy.go | 4 + .../rollouts.kruise.io_batchreleases.yaml | 19 + .../bases/rollouts.kruise.io_rollouts.yaml | 104 ++ config/manager/kustomization.yaml | 12 + config/rbac/role.yaml | 16 + go.mod | 3 + go.sum | 21 +- main.go | 14 +- .../batchrelease/batchrelease_controller.go | 215 +++ .../batchrelease_event_handler.go | 281 ++++ .../batchrelease_plan_controller.go | 272 ++++ .../batchrelease_special_cases_handler.go | 190 +++ .../batchrelease/batchrelease_util.go | 100 ++ .../workloads/cloneset_control_plan.go | 284 ++++ .../workloads/cloneset_controller.go | 152 ++ .../batchrelease/workloads/controller.go | 81 + .../deployment_double_control_plan.go | 451 ++++++ .../workloads/deployment_double_controller.go | 197 +++ .../batchrelease/workloads/workloads_utils.go | 116 ++ test/e2e/batchrelease_test.go | 1333 +++++++++++++++++ test/e2e/test_data/workload/cloneset.yaml | 25 + test/e2e/test_data/workload/deployment.yaml | 25 + vendor/github.com/nxadm/tail/.gitignore | 6 +- vendor/github.com/nxadm/tail/.travis.yml | 16 + vendor/github.com/nxadm/tail/CHANGES.md | 4 + vendor/github.com/nxadm/tail/README.md | 36 +- vendor/github.com/nxadm/tail/appveyor.yml | 11 + vendor/github.com/nxadm/tail/go.mod | 5 + vendor/github.com/nxadm/tail/go.sum | 7 + vendor/github.com/nxadm/tail/tail.go | 72 + vendor/github.com/nxadm/tail/tail_posix.go | 6 + vendor/github.com/nxadm/tail/tail_windows.go | 10 + vendor/github.com/nxadm/tail/util/util.go | 3 + .../nxadm/tail/watch/filechanges.go | 3 + vendor/github.com/nxadm/tail/watch/inotify.go | 3 + .../nxadm/tail/watch/inotify_tracker.go | 3 + vendor/github.com/nxadm/tail/watch/polling.go | 3 + vendor/github.com/nxadm/tail/watch/watch.go | 3 + .../github.com/nxadm/tail/winfile/winfile.go | 3 + vendor/github.com/onsi/ginkgo/.travis.yml | 16 + vendor/github.com/onsi/ginkgo/CHANGELOG.md | 3 + vendor/github.com/onsi/ginkgo/README.md | 12 + vendor/github.com/onsi/ginkgo/RELEASING.md | 9 +- .../github.com/onsi/ginkgo/config/config.go | 29 + vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 95 ++ vendor/github.com/onsi/ginkgo/go.mod | 12 + vendor/github.com/onsi/ginkgo/go.sum | 52 + .../remote/output_interceptor_darwin.go | 11 + .../remote/output_interceptor_dragonfly.go | 11 + .../remote/output_interceptor_freebsd.go | 11 + .../remote/output_interceptor_linux.go | 12 + .../output_interceptor_linux_mips64le.go | 12 + .../remote/output_interceptor_netbsd.go | 11 + .../remote/output_interceptor_openbsd.go | 11 + .../remote/output_interceptor_solaris.go | 11 + .../remote/output_interceptor_unix.go | 8 + .../onsi/ginkgo/internal/spec/specs.go | 22 + .../onsi/ginkgo/internal/suite/suite.go | 4 + .../internal/testingtproxy/testing_t_proxy.go | 42 + .../onsi/ginkgo/reporters/junit_reporter.go | 17 + vendor/github.com/onsi/gomega/.travis.yml | 18 + vendor/github.com/onsi/gomega/CHANGELOG.md | 3 + vendor/github.com/onsi/gomega/Makefile | 9 + vendor/github.com/onsi/gomega/README.md | 4 + .../github.com/onsi/gomega/format/format.go | 90 ++ vendor/github.com/onsi/gomega/go.mod | 9 + vendor/github.com/onsi/gomega/go.sum | 55 + vendor/github.com/onsi/gomega/gomega_dsl.go | 387 +++++ .../gomega/internal/assertion/assertion.go | 109 ++ .../asyncassertion/async_assertion.go | 198 +++ .../internal/oraclematcher/oracle_matcher.go | 25 + 
.../testingtsupport/testing_t_support.go | 60 + vendor/github.com/onsi/gomega/matchers.go | 20 + vendor/github.com/onsi/gomega/matchers/and.go | 12 + .../gomega/matchers/be_element_of_matcher.go | 28 + .../gomega/matchers/be_numerically_matcher.go | 4 + .../onsi/gomega/matchers/consist_of.go | 33 + .../matchers/contain_elements_matcher.go | 8 + .../matchers/have_http_status_matcher.go | 28 + .../gomega/matchers/match_error_matcher.go | 11 + vendor/github.com/onsi/gomega/matchers/not.go | 8 + vendor/github.com/onsi/gomega/matchers/or.go | 12 + .../onsi/gomega/matchers/with_transform.go | 31 + vendor/github.com/onsi/gomega/types/types.go | 22 + .../encoding/simplifiedchinese/hzgb2312.go | 4 + .../x/text/internal/language/language.go | 78 + .../x/text/internal/language/parse.go | 40 + vendor/golang.org/x/text/language/go1_1.go | 3 + vendor/golang.org/x/text/language/go1_2.go | 3 + vendor/golang.org/x/text/language/language.go | 3 + vendor/golang.org/x/text/language/tables.go | 13 + vendor/golang.org/x/xerrors/LICENSE | 27 + vendor/golang.org/x/xerrors/PATENTS | 22 + vendor/golang.org/x/xerrors/README | 2 + vendor/golang.org/x/xerrors/adaptor.go | 193 +++ vendor/golang.org/x/xerrors/codereview.cfg | 1 + vendor/golang.org/x/xerrors/doc.go | 22 + vendor/golang.org/x/xerrors/errors.go | 33 + vendor/golang.org/x/xerrors/fmt.go | 187 +++ vendor/golang.org/x/xerrors/format.go | 34 + vendor/golang.org/x/xerrors/frame.go | 56 + vendor/golang.org/x/xerrors/go.mod | 3 + .../golang.org/x/xerrors/internal/internal.go | 8 + vendor/golang.org/x/xerrors/wrap.go | 106 ++ vendor/modules.txt | 36 + 106 files changed, 6570 insertions(+), 22 deletions(-) create mode 100644 pkg/controller/batchrelease/batchrelease_controller.go create mode 100644 pkg/controller/batchrelease/batchrelease_event_handler.go create mode 100644 pkg/controller/batchrelease/batchrelease_plan_controller.go create mode 100644 pkg/controller/batchrelease/batchrelease_special_cases_handler.go create mode 100644 pkg/controller/batchrelease/batchrelease_util.go create mode 100644 pkg/controller/batchrelease/workloads/cloneset_control_plan.go create mode 100644 pkg/controller/batchrelease/workloads/cloneset_controller.go create mode 100644 pkg/controller/batchrelease/workloads/controller.go create mode 100644 pkg/controller/batchrelease/workloads/deployment_double_control_plan.go create mode 100644 pkg/controller/batchrelease/workloads/deployment_double_controller.go create mode 100644 pkg/controller/batchrelease/workloads/workloads_utils.go create mode 100644 test/e2e/batchrelease_test.go create mode 100644 test/e2e/test_data/workload/cloneset.yaml create mode 100644 test/e2e/test_data/workload/deployment.yaml create mode 100644 vendor/github.com/nxadm/tail/.travis.yml create mode 100644 vendor/github.com/nxadm/tail/appveyor.yml create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go create mode 100644 
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion.go create mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go create mode 100644 vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go create mode 100644 vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go create mode 100644 vendor/golang.org/x/xerrors/LICENSE create mode 100644 vendor/golang.org/x/xerrors/PATENTS create mode 100644 vendor/golang.org/x/xerrors/README create mode 100644 vendor/golang.org/x/xerrors/adaptor.go create mode 100644 vendor/golang.org/x/xerrors/codereview.cfg create mode 100644 vendor/golang.org/x/xerrors/doc.go create mode 100644 vendor/golang.org/x/xerrors/errors.go create mode 100644 vendor/golang.org/x/xerrors/fmt.go create mode 100644 vendor/golang.org/x/xerrors/format.go create mode 100644 vendor/golang.org/x/xerrors/frame.go create mode 100644 vendor/golang.org/x/xerrors/go.mod create mode 100644 vendor/golang.org/x/xerrors/internal/internal.go create mode 100644 vendor/golang.org/x/xerrors/wrap.go diff --git a/api/v1alpha1/rollout_types.go b/api/v1alpha1/rollout_types.go index 0c3a726..eccf21b 100644 --- a/api/v1alpha1/rollout_types.go +++ b/api/v1alpha1/rollout_types.go @@ -1,5 +1,5 @@ /* -Copyright 2022 Kruise Authors. +Copyright 2022 The Kruise Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -261,12 +261,16 @@ const ( RolloutPhasePreparing RolloutPhase = "Preparing" // RolloutPhaseProgressing indicates a rollout is not yet healthy but still making progress towards a healthy state RolloutPhaseProgressing RolloutPhase = "Progressing" - // RolloutPhaseFinalizing indicates a rollout is finalizing - RolloutPhaseFinalizing RolloutPhase = "Finalizing" - // RolloutPhaseTerminating indicates a rollout is terminated - RolloutPhaseTerminating RolloutPhase = "Terminating" // RolloutPhaseRollback indicates rollback RolloutPhaseRollback RolloutPhase = "Rollback" + // RolloutPhasePaused indicates a rollout is not yet healthy and will not make progress until unpaused + RolloutPhasePaused RolloutPhase = "Paused" + // RolloutPhaseFinalizing indicates a rollout is finalizing + RolloutPhaseFinalizing RolloutPhase = "Finalizing" + // RolloutPhaseRollingBack indicates a rollout is rolling back + RolloutPhaseRollingBack RolloutPhase = "RollingBack" + // RolloutPhaseTerminating indicates a rollout is terminated + RolloutPhaseTerminating RolloutPhase = "Terminating" // RolloutPhaseCompleted indicates a rollout is completed RolloutPhaseCompleted RolloutPhase = "Completed" // RolloutPhaseCancelled indicates a rollout is cancelled diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index bffc973..1e51315 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -104,7 +104,11 @@ func (in *BatchReleaseList) DeepCopyObject() runtime.Object { func (in *BatchReleaseSpec) DeepCopyInto(out *BatchReleaseSpec) { *out = *in out.Strategy = in.Strategy +<<<<<<< HEAD in.TargetRef.DeepCopyInto(&out.TargetRef) +======= + out.TargetRef = in.TargetRef +>>>>>>> 33cbc1d (add batchrelease controller) in.ReleasePlan.DeepCopyInto(&out.ReleasePlan) } diff --git a/config/crd/bases/rollouts.kruise.io_batchreleases.yaml b/config/crd/bases/rollouts.kruise.io_batchreleases.yaml index 5649f60..85c6ac5 100644 --- 
a/config/crd/bases/rollouts.kruise.io_batchreleases.yaml +++ b/config/crd/bases/rollouts.kruise.io_batchreleases.yaml @@ -109,6 +109,7 @@ spec: description: TargetRevisionName contains the name of the componentRevisionName that we need to upgrade to. properties: +<<<<<<< HEAD type: description: workloadRef, revisionRef default is workloadRef type: string @@ -132,6 +133,21 @@ spec: type: object required: - type +======= + apiVersion: + description: API Version of the referent + type: string + kind: + description: Kind of the referent + type: string + name: + description: Name of the referent + type: string + required: + - apiVersion + - kind + - name +>>>>>>> 33cbc1d (add batchrelease controller) type: object required: - releasePlan @@ -145,8 +161,11 @@ spec: description: Canary describes the state of the canary rollout properties: batchState: +<<<<<<< HEAD description: ReleasingBatchState indicates the state of the current batch. +======= +>>>>>>> 33cbc1d (add batchrelease controller) type: string currentBatch: description: The current batch the rollout is working on/blocked, diff --git a/config/crd/bases/rollouts.kruise.io_rollouts.yaml b/config/crd/bases/rollouts.kruise.io_rollouts.yaml index ba77652..ad0d220 100644 --- a/config/crd/bases/rollouts.kruise.io_rollouts.yaml +++ b/config/crd/bases/rollouts.kruise.io_rollouts.yaml @@ -36,6 +36,7 @@ spec: spec: description: RolloutSpec defines the desired state of Rollout properties: +<<<<<<< HEAD objectRef: properties: type: @@ -62,20 +63,38 @@ spec: required: - type type: object +======= +>>>>>>> 33cbc1d (add batchrelease controller) strategy: description: The deployment strategy to use to replace existing pods with new ones. properties: +<<<<<<< HEAD canaryPlan: description: CanaryStrategy defines parameters for a Replica Based Canary properties: +======= + canary: + description: BlueGreen *BlueGreenStrategy `json:"blueGreen,omitempty" + protobuf:"bytes,1,opt,name=blueGreen"` + properties: + stableService: + description: CanaryService holds the name of a service which + selects pods with canary version and don't select any pods + with stable version. CanaryService string `json:"canaryService,omitempty"` + StableService holds the name of a service which selects + pods with stable version and don't select any pods with + canary version. + type: string +>>>>>>> 33cbc1d (add batchrelease controller) steps: description: Steps define the order of phases to execute the canary deployment items: description: CanaryStep defines a step of a canary workload. properties: +<<<<<<< HEAD canaryReplicas: anyOf: - type: integer @@ -85,6 +104,8 @@ spec: 5) or a percentage of total pods. it is mutually exclusive with the PodList field' x-kubernetes-int-or-string: true +======= +>>>>>>> 33cbc1d (add batchrelease controller) pause: description: Pause freezes the rollout by setting spec.Paused to true. 
A Rollout will resume when spec.Paused is @@ -96,7 +117,11 @@ spec: format: int32 type: integer type: object +<<<<<<< HEAD weight: +======= + setWeight: +>>>>>>> 33cbc1d (add batchrelease controller) description: SetWeight sets what percentage of the canary pods should receive format: int32 @@ -107,6 +132,32 @@ spec: description: TrafficRouting hosts all the supported service meshes supported to enable more fine-grained traffic routing properties: +<<<<<<< HEAD +======= + alb: + description: AlbTrafficRouting configuration for Nginx + ingress controller to control traffic routing + properties: + ingress: + description: Ingress refers to the name of an `Ingress` + resource in the same namespace as the `Rollout` + type: string + tickets: + description: A/B Testing + properties: + cookie: + additionalProperties: + type: string + type: object + header: + additionalProperties: + type: string + type: object + type: object + required: + - ingress + type: object +>>>>>>> 33cbc1d (add batchrelease controller) nginx: description: Nginx holds Nginx Ingress specific configuration to route traffic @@ -115,6 +166,7 @@ spec: description: Ingress refers to the name of an `Ingress` resource in the same namespace as the `Rollout` type: string +<<<<<<< HEAD required: - ingress type: object @@ -143,13 +195,59 @@ spec: required: - objectRef - strategy +======= + tickets: + description: A/B Testing + properties: + cookie: + additionalProperties: + type: string + type: object + header: + additionalProperties: + type: string + type: object + type: object + required: + - ingress + type: object + type: + type: string + type: object + type: object + type: object + targetRef: + description: TargetRef contains enough information to let you identify + a workload for Rollout + properties: + apiVersion: + description: API Version of the referent + type: string + kind: + description: Kind of the referent + type: string + name: + description: Name of the referent + type: string + required: + - apiVersion + - kind + - name + type: object + required: + - strategy + - targetRef +>>>>>>> 33cbc1d (add batchrelease controller) type: object status: description: RolloutStatus defines the observed state of Rollout properties: +<<<<<<< HEAD canaryRevision: description: CanaryRevision the hash of the canary pod template type: string +======= +>>>>>>> 33cbc1d (add batchrelease controller) canaryStatus: description: Canary describes the state of the canary rollout properties: @@ -241,6 +339,12 @@ spec: description: StableRevision indicates the revision pods that has successfully rolled out type: string +<<<<<<< HEAD +======= + updateRevision: + description: UpdateRevision the hash of the current pod template + type: string +>>>>>>> 33cbc1d (add batchrelease controller) type: object type: object served: true diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 2bcd3ee..cde1722 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -5,6 +5,18 @@ generatorOptions: disableNameSuffixHash: true configMapGenerator: +<<<<<<< Updated upstream - name: manager-config files: - controller_manager_config.yaml +======= +- files: + - controller_manager_config.yaml + name: manager-config +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: minchou/kruiserollout + newTag: br-2 +>>>>>>> Stashed changes diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d2fa4fe..45058a5 100644 --- a/config/rbac/role.yaml +++ 
b/config/rbac/role.yaml @@ -7,6 +7,7 @@ metadata: name: manager-role rules: - apiGroups: +<<<<<<< HEAD - admissionregistration.k8s.io resources: - mutatingwebhookconfigurations @@ -36,6 +37,15 @@ rules: - patch - update - watch +======= + - '*' + resources: + - events + verbs: + - create + - patch + - update +>>>>>>> 33cbc1d (add batchrelease controller) - apiGroups: - apps resources: @@ -97,6 +107,7 @@ rules: - patch - update - apiGroups: +<<<<<<< HEAD - "" resources: - pods @@ -175,6 +186,8 @@ rules: - patch - update - apiGroups: +======= +>>>>>>> 33cbc1d (add batchrelease controller) - rollouts.kruise.io resources: - batchreleases @@ -194,6 +207,7 @@ rules: - get - patch - update +<<<<<<< HEAD - apiGroups: - rollouts.kruise.io resources: @@ -220,3 +234,5 @@ rules: - get - patch - update +======= +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/go.mod b/go.mod index fbf851b..fe3fde3 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,9 @@ require ( github.com/onsi/gomega v1.17.0 github.com/openkruise/kruise-api v1.0.0 gopkg.in/yaml.v2 v2.4.0 + github.com/onsi/ginkgo v1.14.1 + github.com/onsi/gomega v1.10.2 + gopkg.in/yaml.v2 v2.3.0 k8s.io/api v0.20.10 k8s.io/apiextensions-apiserver v0.20.1 k8s.io/apimachinery v0.20.10 diff --git a/go.sum b/go.sum index bc2406a..4068196 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -246,11 +247,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -301,8 +300,11 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +<<<<<<< HEAD github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +======= +>>>>>>> 33cbc1d (add batchrelease controller) github.com/openkruise/kruise-api v1.0.0 h1:ScA0LxRRNBsgbcyLhTzR9B+KpGNWsIMptzzmjTqfYQo= github.com/openkruise/kruise-api v1.0.0/go.mod h1:kxV/UA/vrf/hz3z+kL21c0NOawC6K1ZjaKcJFgiOwsE= github.com/pascaldekloe/goe 
v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -312,7 +314,6 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= @@ -371,7 +372,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -392,12 +392,10 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -434,7 +432,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ 
-443,7 +440,6 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -473,6 +469,10 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +<<<<<<< HEAD +======= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +>>>>>>> 33cbc1d (add batchrelease controller) golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= @@ -586,8 +586,11 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +<<<<<<< HEAD golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +======= +>>>>>>> 33cbc1d (add batchrelease controller) golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -656,7 +659,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -688,7 +690,6 @@ honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= diff --git a/main.go b/main.go index 50d921a..42a7928 100644 --- a/main.go +++ b/main.go @@ -18,23 +18,25 @@ package main import ( "flag" - "github.com/openkruise/rollouts/pkg/util" "os" kruisev1aplphal "github.com/openkruise/kruise-api/apps/v1alpha1" rolloutsv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1" "github.com/openkruise/rollouts/controllers/rollout" + br "github.com/openkruise/rollouts/pkg/controller/batchrelease" + "github.com/openkruise/rollouts/pkg/util" "github.com/openkruise/rollouts/webhook" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - // to ensure that exec-entrypoint and run can make use of them. - _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" //+kubebuilder:scaffold:imports ) @@ -82,6 +84,10 @@ func main() { Finder: util.NewControllerFinder(mgr.GetClient()), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Rollout") + } + + if err = br.Add(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BatchRelease") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/pkg/controller/batchrelease/batchrelease_controller.go b/pkg/controller/batchrelease/batchrelease_controller.go new file mode 100644 index 0000000..fdacce7 --- /dev/null +++ b/pkg/controller/batchrelease/batchrelease_controller.go @@ -0,0 +1,215 @@ +package batchrelease + +import ( + "context" + "flag" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/workloads" + "reflect" + "time" + + apps "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + "github.com/openkruise/rollouts/api/v1alpha1" +) + +var ( + concurrentReconciles = 3 +) + +const ReleaseFinalizer = "rollouts.kruise.io/batch-release-finalizer" + +func init() { + flag.IntVar(&concurrentReconciles, "batchrelease-workers", concurrentReconciles, "Max concurrent workers for BatchRelease controller.") +} + +// Add creates a new Rollout Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
+func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + recorder := mgr.GetEventRecorderFor("batchrelease-controller") + cli := mgr.GetClient() + return &BatchReleaseReconciler{ + Client: cli, + Scheme: mgr.GetScheme(), + recorder: recorder, + executor: NewReleasePlanExecutor(cli, recorder), + } +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("batchrelease-controller", mgr, controller.Options{ + Reconciler: r, MaxConcurrentReconciles: concurrentReconciles}) + if err != nil { + return err + } + + // Watch for changes to BatchRelease + err = c.Watch(&source.Kind{Type: &v1alpha1.BatchRelease{}}, &handler.EnqueueRequestForObject{}, predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + old := e.ObjectOld.(*v1alpha1.BatchRelease) + new := e.ObjectNew.(*v1alpha1.BatchRelease) + if old.Generation != new.Generation { + klog.V(3).Infof("Observed updated Spec for BatchRelease: %s/%s", new.Namespace, new.Name) + return true + } + return false + }, + }) + if err != nil { + return err + } + + // Watch for changes to CloneSet + err = c.Watch(&source.Kind{Type: &kruiseappsv1alpha1.CloneSet{}}, &workloadEventHandler{Reader: mgr.GetCache()}) + if err != nil { + return err + } + + // Watch for changes to Deployment + err = c.Watch(&source.Kind{Type: &apps.Deployment{}}, &workloadEventHandler{Reader: mgr.GetCache()}) + if err != nil { + return err + } + return nil +} + +var _ reconcile.Reconciler = &BatchReleaseReconciler{} + +// BatchReleaseReconciler reconciles a BatchRelease object +type BatchReleaseReconciler struct { + client.Client + Scheme *runtime.Scheme + recorder record.EventRecorder + executor *Executor +} + +// +kubebuilder:rbac:groups="*",resources="events",verbs=create;update;patch +// +kubebuilder:rbac:groups=rollouts.kruise.io,resources=batchreleases,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=rollouts.kruise.io,resources=batchreleases/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps.kruise.io,resources=clonesets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps.kruise.io,resources=clonesets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=replicasets/status,verbs=get;update;patch + +// Reconcile reads the state of the cluster for a BatchRelease object and makes changes based on the state read +// and what is in the BatchRelease.Spec +func (r *BatchReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + release := new(v1alpha1.BatchRelease) + err := r.Get(context.TODO(), req.NamespacedName, release) + if err != nil { + if errors.IsNotFound(err) { + // Object not found, return. Created objects are automatically garbage collected. + // For additional cleanup logic use finalizers. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request.
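+ // controller-runtime retries such transient read errors automatically with exponential backoff.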
+ return ctrl.Result{}, err + } + + klog.V(3).Infof("begin to reconcile batch-release(%v/%v), release-phase: %v", release.Namespace, release.Name, release.Status.Phase) + + // the finalizer will block the deletion of the BatchRelease + // until all canary resources and settings are cleaned up. + reconcileDone, err := r.handleFinalizer(release) + if reconcileDone { + return reconcile.Result{}, err + } + + // set the release info for the executor before executing. + r.executor.SetReleaseInfo(release) + + // the executor starts to execute the batch release plan. + startTimestamp := time.Now() + result, currentStatus := r.executor.Do() + + defer func() { + klog.V(3).InfoS("Finished one round of reconciling release plan", "release-phase", currentStatus.Phase, + "batch-state", currentStatus.CanaryStatus.ReleasingBatchState, "current-batch", currentStatus.CanaryStatus.CurrentBatch, + "reconcile-result", result, "time-cost", time.Since(startTimestamp)) + }() + + return result, r.updateStatus(ctx, release, currentStatus) +} + +func (r *BatchReleaseReconciler) updateStatus(ctx context.Context, release *v1alpha1.BatchRelease, newStatus *v1alpha1.BatchReleaseStatus) (err error) { + // observe and record the latest changes for generation and release plan + newStatus.ObservedGeneration = release.Generation + newStatus.ObservedReleasePlanHash = hashReleasePlanBatches(&release.Spec.ReleasePlan) + + key := types.NamespacedName{Namespace: release.Namespace, Name: release.Name} + if !reflect.DeepEqual(release.Status, *newStatus) { + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + fetchedRelease := &v1alpha1.BatchRelease{} + getErr := r.Client.Get(context.TODO(), key, fetchedRelease) + if getErr != nil { + return getErr + } + fetchedRelease.Status = *newStatus + return r.Status().Update(ctx, fetchedRelease) + }) + if err != nil { + klog.Errorf("Failed to update status for BatchRelease(%v/%v), error: %v", release.Namespace, release.Name, err) + } + } + return +} + +func (r *BatchReleaseReconciler) handleFinalizer(release *v1alpha1.BatchRelease) (bool, error) { + var err error + defer func() { + if err != nil { + klog.Errorf("Failed to patch finalizer to BatchRelease (%v/%v)", release.Namespace, release.Name) + } + }() + + // remove the release finalizer if needed + if !release.DeletionTimestamp.IsZero() && + HasTerminatingCondition(release.Status) && + controllerutil.ContainsFinalizer(release, ReleaseFinalizer) { + finalizers := sets.NewString(release.Finalizers...).Delete(ReleaseFinalizer).List() + err = workloads.PatchFinalizer(r.Client, release, finalizers) + if client.IgnoreNotFound(err) != nil { + return true, err + } + return true, nil + } + + // add the release finalizer if needed + if !controllerutil.ContainsFinalizer(release, ReleaseFinalizer) { + finalizers := append(release.Finalizers, ReleaseFinalizer) + err = workloads.PatchFinalizer(r.Client, release, finalizers) + if client.IgnoreNotFound(err) != nil { + return true, err + } else if errors.IsNotFound(err) { + return true, nil + } + } + + return false, nil +} diff --git a/pkg/controller/batchrelease/batchrelease_event_handler.go b/pkg/controller/batchrelease/batchrelease_event_handler.go new file mode 100644 index 0000000..e78667e --- /dev/null +++ b/pkg/controller/batchrelease/batchrelease_event_handler.go @@ -0,0 +1,281 @@ +/* +Copyright 2021 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package batchrelease + +import ( + "context" + "encoding/json" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + "github.com/openkruise/rollouts/api/v1alpha1" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/workloads" +) + +type EventAction string + +const ( + CreateEventAction EventAction = "Create" + DeleteEventAction EventAction = "Delete" +) + +var ( + controllerKruiseKindCS = kruiseappsv1alpha1.SchemeGroupVersion.WithKind("CloneSet") + controllerKindDep = appsv1.SchemeGroupVersion.WithKind("Deployment") +) + +var _ handler.EventHandler = &workloadEventHandler{} + +type workloadEventHandler struct { + client.Reader +} + +func (w workloadEventHandler) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + w.handleWorkload(q, evt.Object, CreateEventAction) +} + +func (w workloadEventHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + var oldAccessor, newAccessor *workloads.Accessor + var gvk schema.GroupVersionKind + + switch evt.ObjectNew.(type) { + case *kruiseappsv1alpha1.CloneSet: + gvk = controllerKruiseKindCS + oldClone := evt.ObjectOld.(*kruiseappsv1alpha1.CloneSet) + newClone := evt.ObjectNew.(*kruiseappsv1alpha1.CloneSet) + + var oldReplicas, newReplicas int32 + if oldClone.Spec.Replicas != nil { + oldReplicas = *oldClone.Spec.Replicas + } + if newClone.Spec.Replicas != nil { + newReplicas = *newClone.Spec.Replicas + } + + oldAccessor = &workloads.Accessor{ + Replicas: &oldReplicas, + Paused: oldClone.Spec.UpdateStrategy.Paused, + Status: &workloads.Status{ + Replicas: oldClone.Status.Replicas, + ReadyReplicas: oldClone.Status.ReadyReplicas, + UpdatedReplicas: oldClone.Status.UpdatedReplicas, + UpdatedReadyReplicas: oldClone.Status.UpdatedReadyReplicas, + ObservedGeneration: oldClone.Status.ObservedGeneration, + }, + Metadata: &oldClone.ObjectMeta, + } + + newAccessor = &workloads.Accessor{ + Replicas: &newReplicas, + Paused: newClone.Spec.UpdateStrategy.Paused, + Status: &workloads.Status{ + Replicas: newClone.Status.Replicas, + ReadyReplicas: newClone.Status.ReadyReplicas, + UpdatedReplicas: newClone.Status.UpdatedReplicas, + UpdatedReadyReplicas: newClone.Status.UpdatedReadyReplicas, + ObservedGeneration: newClone.Status.ObservedGeneration, + }, + Metadata: &newClone.ObjectMeta, + } + + case *appsv1.Deployment: + gvk = controllerKindDep + oldDeploy := evt.ObjectOld.(*appsv1.Deployment) + newDeploy := evt.ObjectNew.(*appsv1.Deployment) + + var oldReplicas, newReplicas int32 + if oldDeploy.Spec.Replicas != nil { + oldReplicas = *oldDeploy.Spec.Replicas + } + if newDeploy.Spec.Replicas != nil { + newReplicas = *newDeploy.Spec.Replicas + } + + oldAccessor = &workloads.Accessor{ + Replicas: &oldReplicas, + 
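+ // apps/v1 Deployments expose no updatedReadyReplicas in their status, so the accessor below maps status.availableReplicas to ReadyReplicas and leaves UpdatedReadyReplicas unset.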
Paused: oldDeploy.Spec.Paused, + Status: &workloads.Status{ + Replicas: oldDeploy.Status.Replicas, + ReadyReplicas: oldDeploy.Status.AvailableReplicas, + UpdatedReplicas: oldDeploy.Status.UpdatedReplicas, + ObservedGeneration: oldDeploy.Status.ObservedGeneration, + }, + Metadata: &oldDeploy.ObjectMeta, + } + + newAccessor = &workloads.Accessor{ + Replicas: &newReplicas, + Paused: newDeploy.Spec.Paused, + Status: &workloads.Status{ + Replicas: newDeploy.Status.Replicas, + ReadyReplicas: newDeploy.Status.AvailableReplicas, + UpdatedReplicas: newDeploy.Status.UpdatedReplicas, + ObservedGeneration: newDeploy.Status.ObservedGeneration, + }, + Metadata: &newDeploy.ObjectMeta, + } + + default: + return + } + + if observeGenerationChanged(newAccessor, oldAccessor) || + observeLatestGeneration(newAccessor, oldAccessor) || + observeScaleEventDone(newAccessor, oldAccessor) || + observeReplicasChanged(newAccessor, oldAccessor) { + + workloadNsn := types.NamespacedName{ + Namespace: newAccessor.Metadata.Namespace, + Name: newAccessor.Metadata.Name, + } + + controllerInfo, controlled := newAccessor.Metadata.Annotations[workloads.BatchReleaseControlAnnotation] + if controlled && len(controllerInfo) > 0 { + br := &metav1.OwnerReference{} + if err := json.Unmarshal([]byte(controllerInfo), br); err == nil { + klog.V(3).Infof("%s (%v) is managed by BatchRelease (%s), append queue", gvk.Kind, workloadNsn, br.Name) + nsn := types.NamespacedName{Namespace: workloadNsn.Namespace, Name: br.Name} + q.Add(reconcile.Request{NamespacedName: nsn}) + return + } + } + + br, err := w.getBatchRelease(workloadNsn, gvk) + if err != nil { + klog.Errorf("unable to get BatchRelease related with %s (%s/%s), err: %v", + gvk.Kind, workloadNsn.Namespace, workloadNsn.Name, err) + return + } + + if br != nil { + klog.V(3).Infof("%s (%s/%s) changed generation from %d to %d managed by BatchRelease (%s/%s)", + gvk.Kind, workloadNsn.Namespace, workloadNsn.Name, oldAccessor.Metadata.Generation, newAccessor.Metadata.Generation, br.GetNamespace(), br.GetName()) + nsn := types.NamespacedName{Namespace: br.GetNamespace(), Name: br.GetName()} + q.Add(reconcile.Request{NamespacedName: nsn}) + } + } +} + +func (w workloadEventHandler) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + w.handleWorkload(q, evt.Object, DeleteEventAction) +} + +func (w workloadEventHandler) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +} + +func (w *workloadEventHandler) handleWorkload(q workqueue.RateLimitingInterface, + obj client.Object, action EventAction) { + var gvk schema.GroupVersionKind + switch obj.(type) { + case *kruiseappsv1alpha1.CloneSet: + gvk = controllerKruiseKindCS + case *appsv1.Deployment: + gvk = controllerKindDep + default: + return + } + + workloadNsn := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + } + ws, err := w.getBatchRelease(workloadNsn, gvk) + if err != nil { + klog.Errorf("unable to get BatchRelease related with %s (%s/%s), err: %v", + gvk.Kind, workloadNsn.Namespace, workloadNsn.Name, err) + return + } + if ws != nil { + klog.V(5).Infof("%s %s (%s/%s) and reconcile BatchRelease (%s/%s)", + action, gvk.Kind, workloadNsn.Namespace, workloadNsn.Name, ws.Namespace, ws.Name) + nsn := types.NamespacedName{Namespace: ws.GetNamespace(), Name: ws.GetName()} + q.Add(reconcile.Request{NamespacedName: nsn}) + } +} + +func (w *workloadEventHandler) getBatchRelease(workloadNamespaceName types.NamespacedName, gvk schema.GroupVersionKind) (*v1alpha1.BatchRelease,
error) { + bsList := &v1alpha1.BatchReleaseList{} + listOptions := &client.ListOptions{Namespace: workloadNamespaceName.Namespace} + if err := w.List(context.TODO(), bsList, listOptions); err != nil { + klog.Errorf("List BatchRelease failed: %s", err.Error()) + return nil, err + } + + for _, bs := range bsList.Items { + if bs.DeletionTimestamp != nil { + continue + } + + targetRef := bs.Spec.TargetRef + targetGV, err := schema.ParseGroupVersion(targetRef.APIVersion) + if err != nil { + klog.Errorf("failed to parse targetRef's group version: %s", targetRef.APIVersion) + continue + } + + if targetRef.Kind == gvk.Kind && targetGV.Group == gvk.Group && targetRef.Name == workloadNamespaceName.Name { + return &bs, nil + } + } + + return nil, nil +} + +func observeGenerationChanged(newOne, oldOne *workloads.Accessor) bool { + return newOne.Metadata.Generation != oldOne.Metadata.Generation +} + +func observeLatestGeneration(newOne, oldOne *workloads.Accessor) bool { + oldNot := oldOne.Metadata.Generation != oldOne.Status.ObservedGeneration + newDid := newOne.Metadata.Generation == newOne.Status.ObservedGeneration + return oldNot && newDid +} + +func observeScaleEventDone(newOne, oldOne *workloads.Accessor) bool { + _, controlled := newOne.Metadata.Annotations[workloads.BatchReleaseControlAnnotation] + if !controlled { + return false + } + + oldScaling := *oldOne.Replicas != *newOne.Replicas || + *oldOne.Replicas != oldOne.Status.Replicas + newDone := newOne.Metadata.Generation == newOne.Status.ObservedGeneration && + *newOne.Replicas == newOne.Status.Replicas + return oldScaling && newDone +} + +func observeReplicasChanged(newOne, oldOne *workloads.Accessor) bool { + _, controlled := newOne.Metadata.Annotations[workloads.BatchReleaseControlAnnotation] + if !controlled { + return false + } + + return oldOne.Status.Replicas != newOne.Status.Replicas || + oldOne.Status.ReadyReplicas != newOne.Status.ReadyReplicas || + oldOne.Status.UpdatedReplicas != newOne.Status.UpdatedReplicas || + oldOne.Status.UpdatedReadyReplicas != newOne.Status.UpdatedReadyReplicas +} diff --git a/pkg/controller/batchrelease/batchrelease_plan_controller.go b/pkg/controller/batchrelease/batchrelease_plan_controller.go new file mode 100644 index 0000000..dba4a7f --- /dev/null +++ b/pkg/controller/batchrelease/batchrelease_plan_controller.go @@ -0,0 +1,272 @@ +package batchrelease + +import ( + "fmt" + "reflect" + "time" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + "github.com/openkruise/rollouts/api/v1alpha1" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/workloads" +) + +const ( + DefaultLongDuration = 5 * time.Second + DefaultShortDuration = 50 * time.Millisecond +) + +// Executor is the controller that executes the release plan of a BatchRelease resource +type Executor struct { + client client.Client + recorder record.EventRecorder + + release *v1alpha1.BatchRelease + releasePlan *v1alpha1.ReleasePlan + releaseStatus *v1alpha1.BatchReleaseStatus +} + +// NewReleasePlanExecutor creates a new release plan Executor +func NewReleasePlanExecutor(client client.Client, recorder record.EventRecorder) *Executor { + return &Executor{ + client: client, + recorder: recorder, + } +} + +func (r *Executor) SetReleaseInfo(release
*v1alpha1.BatchRelease) { + r.release = release + r.releaseStatus = release.Status.DeepCopy() + r.releasePlan = release.Spec.ReleasePlan.DeepCopy() + initializeStatusIfNeeds(r.releaseStatus) +} + +// Do executes the release plan +func (r *Executor) Do() (reconcile.Result, *v1alpha1.BatchReleaseStatus) { + klog.V(3).InfoS("Reconcile the release plan", + "target-workload", r.release.Spec.TargetRef.Name) + + klog.V(3).InfoS("release-status:", + "release-phase", r.releaseStatus.Phase, + "batch-rolling-state", r.releaseStatus.CanaryStatus.ReleasingBatchState, + "current-batch", r.releaseStatus.CanaryStatus.CurrentBatch) + + workloadController, err := r.GetWorkloadController() + if err != nil { + return reconcile.Result{}, r.releaseStatus + } + + shouldStopThisRound, retryDuration := r.handleSpecialCases(workloadController) + if shouldStopThisRound { + return retryDuration, r.releaseStatus + } + + return r.executeBatchReleasePlan(workloadController) +} + +func (r *Executor) executeBatchReleasePlan(workloadController workloads.WorkloadController) (reconcile.Result, *v1alpha1.BatchReleaseStatus) { + status := r.releaseStatus + retryDuration := reconcile.Result{} + + switch status.Phase { + case v1alpha1.RolloutPhaseHealthy: + r.releaseStatus.Phase = v1alpha1.RolloutPhaseVerify + fallthrough + + case v1alpha1.RolloutPhaseVerify: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseVerify) + // verify whether the workload is ready to execute the release plan in this state. + verified, err := workloadController.VerifySpec() + switch { + case err != nil: + setCondition(r.releaseStatus, "VerifyWorkloadError", err.Error(), v1.ConditionFalse) + case verified: + setCondition(r.releaseStatus, "VerifyWorkloadSuccessfully", "", v1.ConditionTrue) + status.Phase = v1alpha1.RolloutPhaseInitial + fallthrough + default: + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + } + + case v1alpha1.RolloutPhaseInitial: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseInitial) + r.releaseStatus.Phase = v1alpha1.RolloutPhasePreparing + fallthrough + + case v1alpha1.RolloutPhasePreparing: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhasePreparing) + // prepare and initialize something before progressing in this state. + initialized, err := workloadController.Initialize() + switch { + case err != nil: + setCondition(r.releaseStatus, "InitializeError", err.Error(), v1.ConditionFalse) + case initialized: + setCondition(r.releaseStatus, "InitializeSuccessfully", "", v1.ConditionTrue) + status.Phase = v1alpha1.RolloutPhaseProgressing + fallthrough + default: + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + } + + case v1alpha1.RolloutPhaseProgressing: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseProgressing) + // progress the release plan in this state.
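+ // progressBatches below drives the per-batch state machine (InitializeBatch -> DoCanaryBatch -> VerifyBatch -> ReadyBatch), one batch at a time.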
+ var progressDone bool + progressDone, retryDuration = r.progressBatches(workloadController) + if progressDone { + setCondition(r.releaseStatus, "ProgressSuccessfully", "", v1.ConditionTrue) + status.Phase = v1alpha1.RolloutPhaseFinalizing + } + + case v1alpha1.RolloutPhaseFinalizing: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseFinalizing) + // restore the workload in this state + if succeed := workloadController.Finalize(false, false); succeed { + cleanupConditions(status) + status.Phase = v1alpha1.RolloutPhaseCompleted + } + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + + case v1alpha1.RolloutPhaseRollingBack: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseRollingBack) + // restore the workload in this state + cleanup := metav1.GetControllerOf(r.release) == nil + if succeed := workloadController.Finalize(false, cleanup); succeed { + cleanupConditions(status) + status.Phase = v1alpha1.RolloutPhaseCancelled + } + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + + case v1alpha1.RolloutPhaseCompleted: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseCompleted) + // this state indicates that the plan is executed successfully, should do nothing in this state. + + case v1alpha1.RolloutPhaseTerminating: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseTerminating) + if succeed := workloadController.Finalize(true, true); succeed { + if r.release.DeletionTimestamp != nil { + setCondition(status, v1alpha1.TerminatingReasonInTerminating, "Release plan was cancelled or deleted", v1.ConditionTrue) + } else { + status.Phase = v1alpha1.RolloutPhaseCancelled + } + } + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + + case v1alpha1.RolloutPhaseCancelled: + klog.V(3).Infof("ReleasePlan State Machine into %s state", v1alpha1.RolloutPhaseCancelled) + // this state indicates that the plan is cancelled successfully, should do nothing in this state. + + default: + klog.V(3).Infof("ReleasePlan State Machine into %s state", "Unknown") + panic(fmt.Sprintf("illegal release status %+v", status)) + } + + return retryDuration, status +} + +// reconcile logic when we are in the middle of release, we have to go through finalizing state before succeed or fail +func (r *Executor) progressBatches(workloadController workloads.WorkloadController) (bool, reconcile.Result) { + progressDone := false + retryDuration := reconcile.Result{} + + switch r.releaseStatus.CanaryStatus.ReleasingBatchState { + case "", v1alpha1.InitializeBatchState: + klog.V(3).Infof("ReleaseBatch State Machine into %s state", v1alpha1.InitializeBatchState) + // prepare something before do canary to modify workload, such as calculating suitable batch index. + r.releaseStatus.CanaryStatus.ReleasingBatchState = v1alpha1.DoCanaryBatchState + fallthrough + + case v1alpha1.DoCanaryBatchState: + klog.V(3).Infof("ReleaseBatch State Machine into %s state", v1alpha1.DoCanaryBatchState) + // modify workload replicas/partition based on release plan in this state. 
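+ // for a CloneSet this is done by adjusting its partition; for a Deployment, by scaling a canary deployment (see the controllers in the workloads package).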
+ upgradeDone, err := workloadController.RolloutOneBatchPods() + switch { + case err != nil: + setCondition(r.releaseStatus, "DoCanaryError", err.Error(), v1.ConditionFalse) + case upgradeDone: + r.releaseStatus.CanaryStatus.ReleasingBatchState = v1alpha1.VerifyBatchState + fallthrough + default: + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + } + + case v1alpha1.VerifyBatchState: + klog.V(3).Infof("ReleaseBatch State Machine into %s state", v1alpha1.VerifyBatchState) + // TODO: metrics analysis + // replicas/partition has been modified, so wait for the pods to be ready in this state. + verified, err := workloadController.CheckOneBatchPods() + switch { + case err != nil: + setCondition(r.releaseStatus, "VerifyBatchReadyError", err.Error(), v1.ConditionFalse) + case verified: + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + r.releaseStatus.CanaryStatus.LastBatchReadyTime = metav1.Now() + r.releaseStatus.CanaryStatus.ReleasingBatchState = v1alpha1.ReadyBatchState + default: + r.releaseStatus.CanaryStatus.ReleasingBatchState = v1alpha1.InitializeBatchState + } + + case v1alpha1.ReadyBatchState: + klog.V(3).Infof("ReleaseBatch State Machine into %s state", v1alpha1.ReadyBatchState) + // all the pods in the batch are upgraded and their states are ready + // wait to move to the next batch if any remain + progressDone = r.moveToNextBatch() + retryDuration = reconcile.Result{RequeueAfter: DefaultShortDuration} + + default: + klog.V(3).Infof("ReleaseBatch State Machine into %s state", "Unknown") + panic(fmt.Sprintf("illegal status %+v", r.releaseStatus)) + } + + return progressDone, retryDuration +} + +// GetWorkloadController picks the right workload controller to work on the workload +func (r *Executor) GetWorkloadController() (workloads.WorkloadController, error) { + targetRef := r.release.Spec.TargetRef + targetKey := types.NamespacedName{ + Namespace: r.release.Namespace, + Name: targetRef.Name, + } + + switch targetRef.APIVersion { + case kruiseappsv1alpha1.GroupVersion.String(): + if targetRef.Kind == reflect.TypeOf(kruiseappsv1alpha1.CloneSet{}).Name() { + klog.InfoS("using cloneset batch release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) + return workloads.NewCloneSetRolloutController(r.client, r.recorder, r.release, r.releasePlan, r.releaseStatus, targetKey), nil + } + + case apps.SchemeGroupVersion.String(): + if targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() { + klog.InfoS("using deployment batch release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) + return workloads.NewDeploymentRolloutController(r.client, r.recorder, r.release, r.releasePlan, r.releaseStatus, targetKey), nil + } + } + message := fmt.Sprintf("the workload `%v/%v` is not supported", targetRef.APIVersion, targetRef.Kind) + r.recorder.Event(r.release, v1.EventTypeWarning, "UnsupportedWorkload", message) + return nil, fmt.Errorf("%s", message) +} + +func (r *Executor) moveToNextBatch() bool { + currentBatch := int(r.releaseStatus.CanaryStatus.CurrentBatch) + if currentBatch >= len(r.releasePlan.Batches)-1 { + klog.V(3).InfoS("Finished all batch release", "current batch", r.releaseStatus.CanaryStatus.CurrentBatch) + return true + } else { + if r.releasePlan.BatchPartition == nil || + *r.releasePlan.BatchPartition > r.releaseStatus.CanaryStatus.CurrentBatch { + r.releaseStatus.CanaryStatus.CurrentBatch++ + } + r.releaseStatus.CanaryStatus.ReleasingBatchState =
+		klog.V(3).InfoS("Finished one batch release", "current batch", r.releaseStatus.CanaryStatus.CurrentBatch)
+		return false
+	}
+}
diff --git a/pkg/controller/batchrelease/batchrelease_special_cases_handler.go b/pkg/controller/batchrelease/batchrelease_special_cases_handler.go
new file mode 100644
index 0000000..9b900b6
--- /dev/null
+++ b/pkg/controller/batchrelease/batchrelease_special_cases_handler.go
@@ -0,0 +1,190 @@
+package batchrelease
+
+import (
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/openkruise/rollouts/api/v1alpha1"
+	"github.com/openkruise/rollouts/pkg/controller/batchrelease/workloads"
+)
+
+const (
+	Keep        = "Keep"
+	Restart     = "Restart"
+	RollingBack = "RollingBack"
+	Terminating = "Terminating"
+	Recalculate = "Recalculate"
+)
+
+func (r *Executor) handleSpecialCases(controller workloads.WorkloadController) (needStopThisRound bool, result reconcile.Result) {
+	var reason string
+	var message string
+	var action string
+
+	// watch the event of workload change
+	workloadEvent, workloadInfo, err := controller.WatchWorkload()
+
+	// Note: must keep the order of the following cases
+	switch {
+	case r.releasePlanTerminating():
+		reason = "PlanCancelled"
+		message = "release plan is cancelled, clean up and stop reconcile"
+		needStopThisRound = false
+		action = Terminating
+
+	case r.workloadHasGone(err):
+		reason = "WorkloadGone"
+		message = "target workload has gone, clean up and stop reconcile"
+		needStopThisRound = false
+		action = Terminating
+
+	case client.IgnoreNotFound(err) != nil:
+		reason = "GetWorkloadError"
+		message = err.Error()
+		needStopThisRound = true
+		action = Keep
+		result = reconcile.Result{RequeueAfter: DefaultShortDuration}
+
+	case r.releasePlanPaused():
+		reason = "PlanPaused"
+		message = "release plan is paused, no need to reconcile"
+		needStopThisRound = true
+		action = Keep
+
+	case r.releasePlanUnhealthy():
+		reason = "PlanStatusUnhealthy"
+		message = "release plan status is unhealthy, try to restart release plan"
+		needStopThisRound = false
+		action = Restart
+
+	case r.releasePlanChanged():
+		reason = "PlanChanged"
+		message = "release plan was changed, try to recalculate canary status"
+		needStopThisRound = false
+		action = Recalculate
+
+	case workloadEvent == workloads.WorkloadStableOrRollback:
+		reason = "StableOrRollback"
+		message = "workload is stable or rolling back, stop the release plan"
+		needStopThisRound = false
+		action = RollingBack
+
+	case workloadEvent == workloads.WorkloadReplicasChanged:
+		reason = "ReplicasChanged"
+		message = "workload is scaling, pause and wait for it to be done"
+		needStopThisRound = true
+		action = Recalculate
+
+	case workloadEvent == workloads.WorkloadPodTemplateChanged:
+		reason = "RevisionChanged"
+		message = "workload revision was changed, try to restart release plan"
+		needStopThisRound = false
+		action = Restart
+
+	case workloadEvent == workloads.WorkloadUnHealthy:
+		reason = "WorkloadUnHealthy"
+		message = "workload is unhealthy, should stop the release plan"
+		needStopThisRound = true
+		action = Keep
+
+	case workloadEvent == workloads.WorkloadStillReconciling:
+		if r.releaseStatus.Phase != v1alpha1.RolloutPhaseCompleted {
+			reason = "WorkloadNotStable"
+			message = "workload status is not stable, wait for it to be stable"
+		}
+		needStopThisRound = true
+		action = Keep
+
+	default:
+		// check canary batch pause seconds
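+		// Illustrative example (not from the original patch): if the current batch declares
+		// pauseSeconds: 30 and the batch became ready 10 seconds ago, the check below sets
+		// needStopThisRound and requeues the reconcile after the remaining ~20 seconds
+		// instead of polling.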
+		if r.releaseStatus.Phase == v1alpha1.RolloutPhaseProgressing &&
+			r.releaseStatus.CanaryStatus.ReleasingBatchState == v1alpha1.ReadyBatchState &&
+			int(r.releaseStatus.CanaryStatus.CurrentBatch) < len(r.releasePlan.Batches) {
+			currentTimestamp := time.Now()
+			currentBatch := r.releasePlan.Batches[r.releaseStatus.CanaryStatus.CurrentBatch]
+			waitDuration := time.Duration(currentBatch.PauseSeconds) * time.Second
+			if waitDuration > 0 && r.releaseStatus.CanaryStatus.LastBatchReadyTime.Time.Add(waitDuration).After(currentTimestamp) {
+				needStopThisRound = true
+				restDuration := r.releaseStatus.CanaryStatus.LastBatchReadyTime.Time.Add(waitDuration).Sub(currentTimestamp)
+				result = reconcile.Result{RequeueAfter: restDuration}
+				klog.V(3).Infof("BatchRelease %v/%v paused and will continue to reconcile after %v", r.release.Namespace, r.release.Name, restDuration)
+			}
+		}
+	}
+
+	if len(message) > 0 {
+		klog.Warning(message)
+		setCondition(r.releaseStatus, reason, message, v1.ConditionFalse)
+		r.recorder.Eventf(r.release, v1.EventTypeWarning, reason, message)
+	}
+
+	// refresh workload info
+	if workloadInfo != nil {
+		if workloadInfo.Replicas != nil {
+			r.releaseStatus.ObservedWorkloadReplicas = *workloadInfo.Replicas
+		}
+		if workloadInfo.UpdateRevision != nil {
+			r.releaseStatus.UpdateRevision = *workloadInfo.UpdateRevision
+		}
+		if workloadInfo.Status != nil {
+			r.releaseStatus.CanaryStatus.UpdatedReplicas = workloadInfo.Status.UpdatedReplicas
+			r.releaseStatus.CanaryStatus.UpdatedReadyReplicas = workloadInfo.Status.UpdatedReadyReplicas
+		}
+	}
+
+	switch action {
+	case Keep:
+		// keep current status, do nothing
+	case Restart:
+		signalRestart(r.releaseStatus)
+	case Recalculate:
+		signalRecalculate(r.releaseStatus)
+	case RollingBack:
+		signalRollingBack(r.releaseStatus)
+	case Terminating:
+		signalTerminating(r.releaseStatus)
+	}
+
+	return needStopThisRound, result
+}
+
+func (r *Executor) releasePlanTerminating() bool {
+	return r.isTerminating()
+}
+
+func (r *Executor) releasePlanUnhealthy() bool {
+	return r.isProgressing() && int(r.release.Status.CanaryStatus.CurrentBatch) >= len(r.releasePlan.Batches)
+}
+
+func (r *Executor) releasePlanChanged() bool {
+	return r.isProgressing() && r.releaseStatus.ObservedReleasePlanHash != hashReleasePlanBatches(r.releasePlan)
+}
+
+func (r *Executor) workloadHasGone(err error) bool {
+	return !r.isTerminating() && errors.IsNotFound(err)
+}
+
+func (r *Executor) releasePlanPaused() bool {
+	partitioned := r.releasePlan.BatchPartition != nil &&
+		r.releaseStatus.Phase == v1alpha1.RolloutPhaseProgressing &&
+		r.releaseStatus.CanaryStatus.ReleasingBatchState == v1alpha1.ReadyBatchState &&
+		r.releaseStatus.CanaryStatus.CurrentBatch >= *r.releasePlan.BatchPartition
+	return !r.isTerminating() && (r.releasePlan.Paused || partitioned)
+}
+
+func (r *Executor) isTerminating() bool {
+	return r.release.Spec.Cancelled ||
+		r.release.DeletionTimestamp != nil ||
+		r.release.Status.Phase == v1alpha1.RolloutPhaseTerminating
+}
+
+func (r *Executor) isProgressing() bool {
+	return !r.release.Spec.Cancelled &&
+		r.release.DeletionTimestamp == nil &&
+		r.releaseStatus.Phase == v1alpha1.RolloutPhaseProgressing
+}
diff --git a/pkg/controller/batchrelease/batchrelease_util.go b/pkg/controller/batchrelease/batchrelease_util.go
new file mode 100644
index 0000000..e2f2ef5
--- /dev/null
+++ b/pkg/controller/batchrelease/batchrelease_util.go
@@ -0,0 +1,100 @@
+package batchrelease
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+
+ v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openkruise/rollouts/api/v1alpha1" +) + +func HasTerminatingCondition(status v1alpha1.BatchReleaseStatus) bool { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Reason == v1alpha1.TerminatingReasonInTerminating { + return true + } + } + return false +} + +func hashReleasePlanBatches(releasePlan *v1alpha1.ReleasePlan) string { + by, _ := json.Marshal(releasePlan.Batches) + md5Hash := sha256.Sum256(by) + return hex.EncodeToString(md5Hash[:]) +} + +func initializeStatusIfNeeds(status *v1alpha1.BatchReleaseStatus) { + if len(status.Phase) == 0 { + resetStatus(status) + } +} + +func signalRestart(status *v1alpha1.BatchReleaseStatus) { + resetStatus(status) +} + +func signalRecalculate(status *v1alpha1.BatchReleaseStatus) { + status.CanaryStatus.ReleasingBatchState = v1alpha1.InitializeBatchState +} + +func signalTerminating(status *v1alpha1.BatchReleaseStatus) { + status.Phase = v1alpha1.RolloutPhaseTerminating +} + +func signalRollingBack(status *v1alpha1.BatchReleaseStatus) { + status.Phase = v1alpha1.RolloutPhaseRollingBack +} + +func resetStatus(status *v1alpha1.BatchReleaseStatus) { + status.Phase = v1alpha1.RolloutPhaseHealthy + status.StableRevision = "" + status.UpdateRevision = "" + status.ObservedReleasePlanHash = "" + status.ObservedWorkloadReplicas = -1 + status.CanaryStatus = v1alpha1.BatchReleaseCanaryStatus{} +} + +func setCondition(status *v1alpha1.BatchReleaseStatus, reason, message string, conditionStatusType v1.ConditionStatus) { + if status == nil { + return + } + + var suitableCondition *v1alpha1.RolloutCondition + for i := range status.Conditions { + condition := &status.Conditions[i] + if condition.Type == getConditionType(status.Phase) { + suitableCondition = condition + } + } + + if suitableCondition == nil { + status.Conditions = append(status.Conditions, v1alpha1.RolloutCondition{ + Type: getConditionType(status.Phase), + Status: conditionStatusType, + Reason: reason, + Message: message, + LastUpdateTime: metav1.Now(), + }) + } else { + suitableCondition.Reason = reason + suitableCondition.Message = message + suitableCondition.LastUpdateTime = metav1.Now() + if suitableCondition.Status != conditionStatusType { + suitableCondition.LastTransitionTime = metav1.Now() + } + suitableCondition.Status = conditionStatusType + } +} + +func cleanupConditions(status *v1alpha1.BatchReleaseStatus) { + status.Conditions = nil +} + +func getConditionType(phase v1alpha1.RolloutPhase) v1alpha1.RolloutConditionType { + return v1alpha1.RolloutConditionType(fmt.Sprintf("%sPhaseCompleted", phase)) +} diff --git a/pkg/controller/batchrelease/workloads/cloneset_control_plan.go b/pkg/controller/batchrelease/workloads/cloneset_control_plan.go new file mode 100644 index 0000000..13cf7c2 --- /dev/null +++ b/pkg/controller/batchrelease/workloads/cloneset_control_plan.go @@ -0,0 +1,284 @@ +package workloads + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + "github.com/openkruise/rollouts/api/v1alpha1" +) + +// CloneSetRolloutController is responsible for handling rollout CloneSet type of workloads +type CloneSetRolloutController struct { + cloneSetController + clone 
*kruiseappsv1alpha1.CloneSet +} + +//TODO: scale during releasing: workload replicas changed -> Finalising CloneSet with Paused=true + +// NewCloneSetRolloutController creates a new CloneSet rollout controller +func NewCloneSetRolloutController(client client.Client, recorder record.EventRecorder, release *v1alpha1.BatchRelease, plan *v1alpha1.ReleasePlan, status *v1alpha1.BatchReleaseStatus, targetNamespacedName types.NamespacedName) *CloneSetRolloutController { + return &CloneSetRolloutController{ + cloneSetController: cloneSetController{ + workloadController: workloadController{ + client: client, + recorder: recorder, + parentController: release, + releasePlan: plan, + releaseStatus: status, + }, + targetNamespacedName: targetNamespacedName, + }, + } +} + +// VerifySpec verifies that the rollout resource is consistent with the rollout spec +func (c *CloneSetRolloutController) VerifySpec() (bool, error) { + var verifyErr error + defer func() { + if verifyErr != nil { + klog.Warningf(verifyErr.Error()) + c.recorder.Event(c.parentController, v1.EventTypeWarning, "VerifyFailed", verifyErr.Error()) + } + }() + + if err := c.fetchCloneSet(); err != nil { + //c.releaseStatus.RolloutRetry(err.Error()) + return false, nil + } + + if c.clone.Status.ObservedGeneration != c.clone.Generation { + klog.Warningf("CloneSet is still reconciling, wait for it to be done") + return false, nil + } + + if c.clone.Status.UpdatedReplicas == *c.clone.Spec.Replicas { + verifyErr = fmt.Errorf("update revision has been promoted, no need to reconcile") + return false, verifyErr + } + + if !c.clone.Spec.UpdateStrategy.Paused && !IsControlledBy(c.clone, c.parentController) { + verifyErr = fmt.Errorf("cloneSet should be paused before execute the release plan") + return false, verifyErr + } + + c.recordCloneSetRevisionAndReplicas() + klog.V(3).Infof("Verified Successfully, Status %+v", c.releaseStatus) + c.recorder.Event(c.parentController, v1.EventTypeNormal, "VerifiedSuccessfully", "ReleasePlan and the CloneSet resource are verified") + return true, nil +} + +// Initialize makes sure that the source and target CloneSet is under our control +func (c *CloneSetRolloutController) Initialize() (bool, error) { + if err := c.fetchCloneSet(); err != nil { + //c.releaseStatus.RolloutRetry(err.Error()) + return false, nil + } + + if _, err := c.claimCloneSet(c.clone); err != nil { + return false, nil + } + + c.recorder.Event(c.parentController, v1.EventTypeNormal, "InitializedSuccessfully", "Rollout resource are initialized") + return true, nil +} + +// RolloutOneBatchPods calculates the number of pods we can upgrade once according to the rollout spec +// and then set the partition accordingly +func (c *CloneSetRolloutController) RolloutOneBatchPods() (bool, error) { + if err := c.fetchCloneSet(); err != nil { + return false, nil + } + + updateSize := c.calculateCurrentTarget(c.releaseStatus.ObservedWorkloadReplicas) + stableSize := c.calculateCurrentSource(c.releaseStatus.ObservedWorkloadReplicas) + workloadPartition, _ := intstr.GetValueFromIntOrPercent(c.clone.Spec.UpdateStrategy.Partition, + int(c.releaseStatus.ObservedWorkloadReplicas), true) + + if c.clone.Status.UpdatedReplicas >= updateSize && int32(workloadPartition) <= stableSize { + klog.V(3).InfoS("upgraded one batch, but no need to update partition of cloneset", "current batch", + c.releaseStatus.CanaryStatus.CurrentBatch, "real updateRevision replicas", c.clone.Status.UpdatedReplicas) + return true, nil + } + + if err := c.patchCloneSetPartition(c.clone, 
stableSize); err != nil {
+		return false, nil
+	}
+
+	klog.V(3).InfoS("upgraded one batch", "current batch", c.releaseStatus.CanaryStatus.CurrentBatch, "updateRevision size", updateSize)
+	c.recorder.Eventf(c.parentController, v1.EventTypeNormal, "SetBatchDone", "Finished submitting all upgrade requests for batch %d", c.releaseStatus.CanaryStatus.CurrentBatch)
+	return true, nil
+}
+
+// CheckOneBatchPods checks to see if the pods are all available according to the rollout plan
+func (c *CloneSetRolloutController) CheckOneBatchPods() (bool, error) {
+	if err := c.fetchCloneSet(); err != nil {
+		return false, nil
+	}
+
+	// wait for the cloneSet controller to watch the update event
+	if c.clone.Status.ObservedGeneration != c.clone.Generation {
+		return false, nil
+	}
+
+	updatePodCount := c.clone.Status.UpdatedReplicas
+	stablePodCount := c.clone.Status.Replicas - updatePodCount
+	readyUpdatePodCount := c.clone.Status.UpdatedReadyReplicas
+	updateGoal := c.calculateCurrentTarget(c.releaseStatus.ObservedWorkloadReplicas)
+	stableGoal := c.calculateCurrentSource(c.releaseStatus.ObservedWorkloadReplicas)
+
+	c.releaseStatus.CanaryStatus.UpdatedReplicas = updatePodCount
+	c.releaseStatus.CanaryStatus.UpdatedReadyReplicas = readyUpdatePodCount
+
+	maxUnavailable := 0
+	if c.clone.Spec.UpdateStrategy.MaxUnavailable != nil {
+		maxUnavailable, _ = intstr.GetValueFromIntOrPercent(c.clone.Spec.UpdateStrategy.MaxUnavailable, int(c.releaseStatus.ObservedWorkloadReplicas), true)
+	}
+
+	klog.InfoS("checking the batch releasing progress", "current-batch", c.releaseStatus.CanaryStatus.CurrentBatch,
+		"target-pod-ready-count", readyUpdatePodCount, "source-pod-count", stablePodCount,
+		"max-unavailable-pod-allowed", maxUnavailable, "target-goal", updateGoal, "source-goal", stableGoal)
+
+	if c.clone.Status.Replicas != c.releaseStatus.ObservedWorkloadReplicas {
+		err := fmt.Errorf("CloneSet replicas don't match ObservedWorkloadReplicas, sourceTarget = %d, targetTarget = %d, "+
+			"rolloutTargetSize = %d", stablePodCount, updatePodCount, c.releaseStatus.ObservedWorkloadReplicas)
+		klog.ErrorS(err, "the batch is not valid", "current-batch", c.releaseStatus.CanaryStatus.CurrentBatch)
+		return false, nil
+	}
+
+	if updateGoal > updatePodCount || stableGoal < stablePodCount || readyUpdatePodCount+int32(maxUnavailable) < updateGoal {
+		klog.InfoS("the batch is not ready yet", "current-batch", c.releaseStatus.CanaryStatus.CurrentBatch)
+		return false, nil
+	}
+
+	klog.InfoS("All pods in current batch are ready", "current-batch", c.releaseStatus.CanaryStatus.CurrentBatch)
+	c.recorder.Eventf(c.parentController, v1.EventTypeNormal, "BatchAvailable", "Batch %d is available", c.releaseStatus.CanaryStatus.CurrentBatch)
+	return true, nil
+}
+
+// FinalizeOneBatch makes sure that the rollout status is updated correctly
+func (c *CloneSetRolloutController) FinalizeOneBatch() (bool, error) {
+	return true, nil
+}
+
+// Finalize makes sure the CloneSet is all upgraded
+func (c *CloneSetRolloutController) Finalize(pause, cleanup bool) bool {
+	if err := c.fetchCloneSet(); client.IgnoreNotFound(err) != nil {
+		return false
+	}
+
+	if _, err := c.releaseCloneSet(c.clone, pause, cleanup); err != nil {
+		return false
+	}
+
+	c.recorder.Eventf(c.parentController, v1.EventTypeNormal, "FinalizedSuccessfully", "Rollout resources are finalized, "+
+		"pause = %v, cleanup = %v", pause, cleanup)
+	return true
+}
+
+// WatchWorkload returns the change type if the workload was changed during the release
+func (c *CloneSetRolloutController) WatchWorkload() 
(WorkloadChangeEventType, *Accessor, error) { + if c.parentController.Spec.Cancelled || + c.parentController.DeletionTimestamp != nil || + c.releaseStatus.Phase == v1alpha1.RolloutPhaseFinalizing || + c.releaseStatus.Phase == v1alpha1.RolloutPhaseRollingBack || + c.releaseStatus.Phase == v1alpha1.RolloutPhaseTerminating { + return IgnoreWorkloadEvent, nil, nil + } + + workloadInfo := &Accessor{} + err := c.fetchCloneSet() + if client.IgnoreNotFound(err) != nil { + return "", nil, err + } else if apierrors.IsNotFound(err) { + workloadInfo.Status = &Status{} + return "", workloadInfo, err + } + + if c.clone.Status.ObservedGeneration != c.clone.Generation { + klog.Warningf("CloneSet is still reconciling, waiting for it to complete, generation: %v, observed: %v", + c.clone.Generation, c.clone.Status.ObservedGeneration) + return WorkloadStillReconciling, nil, nil + } + + workloadInfo.Status = &Status{ + UpdatedReplicas: c.clone.Status.UpdatedReplicas, + UpdatedReadyReplicas: c.clone.Status.UpdatedReadyReplicas, + } + + if !c.clone.Spec.UpdateStrategy.Paused && c.clone.Status.UpdatedReplicas == c.clone.Status.Replicas { + return IgnoreWorkloadEvent, workloadInfo, nil + } + + switch c.releaseStatus.Phase { + case v1alpha1.RolloutPhaseHealthy, v1alpha1.RolloutPhaseVerify: + return IgnoreWorkloadEvent, workloadInfo, nil + + default: + if c.clone.Status.CurrentRevision == c.clone.Status.UpdateRevision && + c.parentController.Status.UpdateRevision != c.clone.Status.UpdateRevision { + workloadInfo.UpdateRevision = &c.clone.Status.UpdateRevision + klog.Warning("CloneSet is stable or is rolling back, release plan should stop") + return WorkloadStableOrRollback, workloadInfo, nil + } + if *c.clone.Spec.Replicas != c.releaseStatus.ObservedWorkloadReplicas { + workloadInfo.Replicas = c.clone.Spec.Replicas + klog.Warningf("CloneSet replicas changed during releasing, should pause and wait for it to complete, replicas from: %v -> %v", + c.releaseStatus.ObservedWorkloadReplicas, *c.clone.Spec.Replicas) + return WorkloadReplicasChanged, workloadInfo, nil + } + fallthrough + + case v1alpha1.RolloutPhaseCompleted, v1alpha1.RolloutPhaseCancelled: + if c.clone.Status.UpdateRevision != c.releaseStatus.UpdateRevision { + workloadInfo.UpdateRevision = &c.clone.Status.UpdateRevision + klog.Warningf("CloneSet updateRevision changed during releasing, should try to restart the release plan, updateRevision from: %v -> %v", + c.releaseStatus.UpdateRevision, c.clone.Status.UpdateRevision) + return WorkloadPodTemplateChanged, workloadInfo, nil + } + } + + return IgnoreWorkloadEvent, workloadInfo, nil +} + +func (c *CloneSetRolloutController) fetchCloneSet() error { + clone := &kruiseappsv1alpha1.CloneSet{} + if err := c.client.Get(context.TODO(), c.targetNamespacedName, clone); err != nil { + if !apierrors.IsNotFound(err) { + c.recorder.Event(c.parentController, v1.EventTypeWarning, "GetCloneSetFailed", err.Error()) + } + return err + } + c.clone = clone + return nil +} + +// the target workload size for the current batch +func (c *CloneSetRolloutController) calculateCurrentTarget(totalSize int32) int32 { + targetSize := int32(calculateNewBatchTarget(c.releasePlan, int(totalSize), int(c.releaseStatus.CanaryStatus.CurrentBatch))) + klog.InfoS("Calculated the number of pods in the target CloneSet after current batch", + "current batch", c.releaseStatus.CanaryStatus.CurrentBatch, "workload updateRevision size", targetSize) + return targetSize +} + +// the source workload size for the current batch +func (c 
*CloneSetRolloutController) calculateCurrentSource(totalSize int32) int32 {
+	sourceSize := totalSize - c.calculateCurrentTarget(totalSize)
+	klog.InfoS("Calculated the number of pods in the source CloneSet after current batch",
+		"current batch", c.releaseStatus.CanaryStatus.CurrentBatch, "workload stableRevision size", sourceSize)
+	return sourceSize
+}
+
+func (c *CloneSetRolloutController) recordCloneSetRevisionAndReplicas() {
+	c.releaseStatus.ObservedWorkloadReplicas = *c.clone.Spec.Replicas
+	c.releaseStatus.StableRevision = c.clone.Status.CurrentRevision
+	c.releaseStatus.UpdateRevision = c.clone.Status.UpdateRevision
+}
diff --git a/pkg/controller/batchrelease/workloads/cloneset_controller.go b/pkg/controller/batchrelease/workloads/cloneset_controller.go
new file mode 100644
index 0000000..6772aff
--- /dev/null
+++ b/pkg/controller/batchrelease/workloads/cloneset_controller.go
@@ -0,0 +1,152 @@
+package workloads
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
+)
+
+const BatchReleaseControlAnnotation = "batchrelease.rollouts.kruise.io/control-info"
+const StashCloneSetPartition = "batchrelease.rollouts.kruise.io/stash-partition"
+
+// cloneSetController is the place to hold fields needed to handle CloneSet workloads
+type cloneSetController struct {
+	workloadController
+	targetNamespacedName types.NamespacedName
+}
+
+// claimCloneSet records the parent controller as the owner of the CloneSet in its annotations,
+// unpauses it, and initializes the partition before kicking off the update,
+// starting from every pod in the old version
+func (c *cloneSetController) claimCloneSet(clone *kruiseappsv1alpha1.CloneSet) (bool, error) {
+	var controlled bool
+	if controlInfo, ok := clone.Annotations[BatchReleaseControlAnnotation]; ok && controlInfo != "" {
+		ref := &metav1.OwnerReference{}
+		err := json.Unmarshal([]byte(controlInfo), ref)
+		if err == nil && ref.UID == c.parentController.UID {
+			klog.V(3).Info("CloneSet has been controlled by this BatchRelease, no need to claim again")
+			controlled = true
+		} else {
+			klog.Errorf("Failed to parse controller info from cloneset annotation, error: %v, controller info: %+v", err, *ref)
+		}
+	}
+
+	patch := map[string]interface{}{}
+	switch {
+	case controlled:
+		patch = map[string]interface{}{
+			"spec": map[string]interface{}{
+				"updateStrategy": map[string]interface{}{
+					"paused": false,
+				},
+			},
+		}
+
+	default:
+		patch = map[string]interface{}{
+			"spec": map[string]interface{}{
+				"updateStrategy": map[string]interface{}{
+					"partition": &intstr.IntOrString{Type: intstr.String, StrVal: "100%"},
+					"paused":    false,
+				},
+			},
+		}
+
+		controlInfo := metav1.NewControllerRef(c.parentController, c.parentController.GetObjectKind().GroupVersionKind())
+		controlByte, _ := json.Marshal(controlInfo)
+		patch["metadata"] = map[string]interface{}{
+			"annotations": map[string]string{
+				BatchReleaseControlAnnotation: string(controlByte),
+			},
+		}
+
+		if clone.Spec.UpdateStrategy.Partition != nil {
+			partitionByte, _ := json.Marshal(clone.Spec.UpdateStrategy.Partition)
+			metadata := patch["metadata"].(map[string]interface{})
+			annotations := metadata["annotations"].(map[string]string)
+			annotations[StashCloneSetPartition] = string(partitionByte)
+			annotations[BatchReleaseControlAnnotation] = string(controlByte)
+		}
+	}
+
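+	// Sketch of the merge patch built above for a first-time claim (illustrative,
+	// not from the original patch):
+	//   {"metadata":{"annotations":{"batchrelease.rollouts.kruise.io/control-info":"<ownerRef JSON>"}},
+	//    "spec":{"updateStrategy":{"partition":"100%","paused":false}}}
+	// The partition starts at 100% so that no pod is updated until the first batch is rolled.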
+	patchByte, _ := json.Marshal(patch)
+	if err := c.client.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte)); err != nil {
+		c.recorder.Event(c.parentController, v1.EventTypeWarning, "ClaimCloneSetFailed", err.Error())
+		return false, err
+	}
+
+	klog.V(3).Info("Claim CloneSet Successfully")
+	return false, nil
+}
+
+// releaseCloneSet removes the parent controller's control info from the CloneSet's
+// annotations and restores its update strategy
+func (c *cloneSetController) releaseCloneSet(clone *kruiseappsv1alpha1.CloneSet, pause, cleanup bool) (bool, error) {
+	if clone == nil {
+		return true, nil
+	}
+
+	var found bool
+	var refByte string
+	if refByte, found = clone.Annotations[BatchReleaseControlAnnotation]; found && refByte != "" {
+		ref := &metav1.OwnerReference{}
+		if err := json.Unmarshal([]byte(refByte), ref); err != nil {
+			found = false
+			klog.Error("failed to decode controller annotations of BatchRelease")
+		} else if ref.UID != c.parentController.UID {
+			found = false
+		}
+	}
+
+	if !found {
+		klog.V(3).InfoS("the cloneset is already released", "CloneSet", clone.Name)
+		return true, nil
+	}
+
+	patchSpec := map[string]interface{}{
+		"updateStrategy": map[string]interface{}{
+			"partition": nil,
+			"paused":    pause,
+		},
+	}
+	patchSpecByte, _ := json.Marshal(patchSpec)
+	patchByte := fmt.Sprintf(`{"metadata":{"annotations":{"%s":null, "%s":null}},"spec":%s}`,
+		BatchReleaseControlAnnotation, StashCloneSetPartition, string(patchSpecByte))
+
+	if err := c.client.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(patchByte))); err != nil {
+		c.recorder.Event(c.parentController, v1.EventTypeWarning, "ReleaseCloneSetFailed", err.Error())
+		return false, err
+	}
+
+	klog.V(3).Info("Release CloneSet Successfully")
+	return false, nil
+}
+
+// patchCloneSetPartition patches the partition of the CloneSet's update strategy
+func (c *cloneSetController) patchCloneSetPartition(clone *kruiseappsv1alpha1.CloneSet, partition int32) error {
+	patch := map[string]interface{}{
+		"spec": map[string]interface{}{
+			"updateStrategy": map[string]interface{}{
+				"partition": &intstr.IntOrString{Type: intstr.Int, IntVal: partition},
+			},
+		},
+	}
+
+	patchByte, _ := json.Marshal(patch)
+	if err := c.client.Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, patchByte)); err != nil {
+		c.recorder.Eventf(c.parentController, v1.EventTypeWarning, "PatchPartitionFailed", "Failed to update the CloneSet to the correct target partition %d, error: %v", partition, err)
+		//c.releaseStatus.RolloutRetry(err.Error())
+		return err
+	}
+
+	klog.InfoS("Submitted modified partition request for cloneset", "CloneSet",
+		clone.GetName(), "target partition size", partition, "batch", c.releaseStatus.CanaryStatus.CurrentBatch)
+	return nil
+}
diff --git a/pkg/controller/batchrelease/workloads/controller.go b/pkg/controller/batchrelease/workloads/controller.go
new file mode 100644
index 0000000..09b76b7
--- /dev/null
+++ b/pkg/controller/batchrelease/workloads/controller.go
@@ -0,0 +1,81 @@
+package workloads
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/record"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/openkruise/rollouts/api/v1alpha1"
+)
+
+type WorkloadChangeEventType string
+
+const (
+	IgnoreWorkloadEvent        WorkloadChangeEventType = "workload-not-cared"
+	WorkloadStableOrRollback   WorkloadChangeEventType = "workload-rollback"
+	WorkloadPodTemplateChanged WorkloadChangeEventType = "workload-pod-template-changed"
+	WorkloadReplicasChanged    WorkloadChangeEventType = "workload-replicas-changed"
+	WorkloadStillReconciling   WorkloadChangeEventType = "workload-is-reconciling"
+	WorkloadUnHealthy          WorkloadChangeEventType = "workload-is-unhealthy"
+)
+
+// WorkloadController is the interface that every type of workload controller implements
+type WorkloadController interface {
+	// VerifySpec makes sure that the resources can be upgraded according to the rollout plan
+	// it returns whether the verification succeeded/failed or should be retried
+	VerifySpec() (bool, error)
+
+	// Initialize makes sure that the resource is ready to be upgraded
+	// this function is tasked to do any initialization work on the resources
+	// it returns whether the initialization succeeded/failed or should be retried
+	Initialize() (bool, error)
+
+	// RolloutOneBatchPods tries to upgrade pods in the resources following the rollout plan
+	// it upgrades as many pods at once as the rollout plan allows
+	// it returns whether the upgrade actionable items succeeded/failed or should continue
+	RolloutOneBatchPods() (bool, error)
+
+	// CheckOneBatchPods checks how many pods are ready to serve requests in the current batch
+	// it returns whether the number of pods upgraded in this round satisfies the rollout plan
+	CheckOneBatchPods() (bool, error)
+
+	// FinalizeOneBatch makes sure that the rollout can start the next batch
+	// it returns whether the finalization of this batch succeeded/failed or should be retried
+	FinalizeOneBatch() (bool, error)
+
+	// Finalize makes sure the resources are in a good final state.
+	// It might depend on if the rollout succeeded or not.
+	// For example, we may remove the source object to prevent scaler traits from ever working,
+	// and the finalize rollout webhooks will be called after this call succeeds
+	Finalize(pause bool, cleanup bool) bool
+
+	// WatchWorkload will observe and compare the status recorded in release.status and the real-time
+	// workload status. If the workload status is inconsistent with that recorded in release.status,
+	// it will return the corresponding WorkloadChangeEventType and info.
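+	// Illustrative mapping as handled by handleSpecialCases (a note, not part of the
+	// interface contract): WorkloadReplicasChanged -> recalculate canary status,
+	// WorkloadPodTemplateChanged -> restart the release plan, WorkloadStableOrRollback ->
+	// switch to RollingBack, WorkloadStillReconciling -> keep waiting and requeue.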
+ WatchWorkload() (WorkloadChangeEventType, *Accessor, error) +} + +type Status struct { + Replicas int32 + ReadyReplicas int32 + UpdatedReplicas int32 + UpdatedReadyReplicas int32 + ObservedGeneration int64 +} + +type Accessor struct { + Replicas *int32 + Paused bool + Status *Status + UpdateRevision *string + Metadata *metav1.ObjectMeta +} + +type workloadController struct { + client client.Client + recorder record.EventRecorder + parentController *v1alpha1.BatchRelease + + releasePlan *v1alpha1.ReleasePlan + releaseStatus *v1alpha1.BatchReleaseStatus +} diff --git a/pkg/controller/batchrelease/workloads/deployment_double_control_plan.go b/pkg/controller/batchrelease/workloads/deployment_double_control_plan.go new file mode 100644 index 0000000..d825f62 --- /dev/null +++ b/pkg/controller/batchrelease/workloads/deployment_double_control_plan.go @@ -0,0 +1,451 @@ +package workloads + +import ( + "context" + "fmt" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sort" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openkruise/rollouts/api/v1alpha1" +) + +// DeploymentsRolloutController is responsible for handling rollout Deployment type of workloads +type DeploymentsRolloutController struct { + deploymentController + stable *apps.Deployment + canary *apps.Deployment +} + +//TODO: scale during releasing: workload replicas changed -> Finalising Deployment with Paused=true + +// NewDeploymentRolloutController creates a new Deployment rollout controller +func NewDeploymentRolloutController(client client.Client, recorder record.EventRecorder, release *v1alpha1.BatchRelease, plan *v1alpha1.ReleasePlan, status *v1alpha1.BatchReleaseStatus, stableNamespacedName types.NamespacedName) *DeploymentsRolloutController { + canaryNamespacedName := types.NamespacedName{ + Namespace: stableNamespacedName.Namespace, + Name: fmt.Sprintf("%v-canary", stableNamespacedName.Name), + } + return &DeploymentsRolloutController{ + deploymentController: deploymentController{ + workloadController: workloadController{ + client: client, + recorder: recorder, + parentController: release, + releasePlan: plan, + releaseStatus: status, + }, + stableNamespacedName: stableNamespacedName, + canaryNamespacedName: canaryNamespacedName, + }, + } +} + +// VerifySpec verifies that the rollout resource is consistent with the rollout spec +func (c *DeploymentsRolloutController) VerifySpec() (bool, error) { + var verifyErr error + + defer func() { + if verifyErr != nil { + klog.Error(verifyErr) + c.recorder.Event(c.parentController, v1.EventTypeWarning, "VerifyFailed", verifyErr.Error()) + } + }() + + if err := c.fetchStableDeployment(); err != nil { + return false, nil + } + + if c.stable.Status.ObservedGeneration != c.stable.Generation { + klog.Warningf("Deployment is still reconciling, wait for it to be done") + return false, nil + } + + if c.stable.Status.UpdatedReplicas == *c.stable.Spec.Replicas { + verifyErr = fmt.Errorf("update revision has been promoted, no need to reconcile") + return false, verifyErr + } + + if !c.stable.Spec.Paused { + verifyErr = fmt.Errorf("deployment should be paused before execute the release plan") + return false, verifyErr + } + + if err := 
c.recordDeploymentRevisionAndReplicas(); err != nil {
+		klog.Warningf("Failed to record deployment revision and replicas info, error: %v", err)
+		return false, nil
+	}
+
+	klog.V(3).Infof("Verified Successfully, Status %+v", c.releaseStatus)
+	c.recorder.Event(c.parentController, v1.EventTypeNormal, "RolloutVerified", "ReleasePlan and the Deployment resource are verified")
+	return true, nil
+}
+
+// Initialize makes sure that the source and target Deployments are under our control
+func (c *DeploymentsRolloutController) Initialize() (bool, error) {
+	if err := c.fetchStableDeployment(); err != nil {
+		//c.releaseStatus.RolloutRetry(err.Error())
+		return false, nil
+	}
+
+	if err := c.fetchCanaryDeployment(); client.IgnoreNotFound(err) != nil {
+		//c.releaseStatus.RolloutRetry(err.Error())
+		return false, nil
+	}
+
+	if _, err := c.claimDeployment(c.stable, c.canary); err != nil {
+		return false, nil
+	}
+
+	c.recorder.Event(c.parentController, v1.EventTypeNormal, "RolloutInitialized", "Rollout resources are initialized")
+	return true, nil
+}
+
+// RolloutOneBatchPods calculates the number of pods we can upgrade once according to the rollout spec
+// and then set the partition accordingly
+func (c *DeploymentsRolloutController) RolloutOneBatchPods() (bool, error) {
+	if err := c.fetchStableDeployment(); err != nil {
+		return false, nil
+	}
+
+	if err := c.fetchCanaryDeployment(); err != nil {
+		return false, nil
+	}
+
+	canaryGoal := c.calculateCurrentTarget(c.releaseStatus.ObservedWorkloadReplicas)
+	canaryReplicas := *c.canary.Spec.Replicas
+	if canaryReplicas >= canaryGoal {
+		klog.V(3).InfoS("upgraded one batch, but no need to update replicas of canary Deployment", "current batch",
+			c.releaseStatus.CanaryStatus.CurrentBatch, "goal canary replicas", canaryGoal, "real canary replicas", canaryReplicas, "real canary pod count", c.canary.Status.UpdatedReplicas)
+		return true, nil
+	}
+
+	if err := c.patchCanaryReplicas(c.canary, canaryGoal); err != nil {
+		return false, nil
+	}
+
+	klog.V(3).InfoS("upgraded one batch", "current batch", c.releaseStatus.CanaryStatus.CurrentBatch, "canary goal size", canaryGoal)
+	c.recorder.Eventf(c.parentController, v1.EventTypeNormal, "BatchRollout", "Finished submitting all upgrade requests for batch %d", c.releaseStatus.CanaryStatus.CurrentBatch)
+	return true, nil
+}
+
+// CheckOneBatchPods checks to see if the pods are all available according to the rollout plan
+func (c *DeploymentsRolloutController) CheckOneBatchPods() (bool, error) {
+	if err := c.fetchStableDeployment(); err != nil {
+		//c.releaseStatus.RolloutRetry(err.Error())
+		return false, nil
+	}
+
+	if err := c.fetchCanaryDeployment(); err != nil {
+		return false, nil
+	}
+
+	if c.canary.Status.ObservedGeneration != c.canary.Generation {
+		return false, nil
+	}
+
+	canaryPodCount := c.canary.Status.Replicas
+	availableCanaryPodCount := c.canary.Status.AvailableReplicas
+	canaryGoal := c.calculateCurrentTarget(c.releaseStatus.ObservedWorkloadReplicas)
+
+	c.releaseStatus.CanaryStatus.UpdatedReplicas = canaryPodCount
+	c.releaseStatus.CanaryStatus.UpdatedReadyReplicas = availableCanaryPodCount
+
+	maxUnavailable := 0
+	if c.canary.Spec.Strategy.RollingUpdate != nil &&
+		c.canary.Spec.Strategy.RollingUpdate.MaxUnavailable != nil {
+		maxUnavailable, _ = intstr.GetValueFromIntOrPercent(c.canary.Spec.Strategy.RollingUpdate.MaxUnavailable, int(c.releaseStatus.ObservedWorkloadReplicas), true)
+	}
+
+	klog.InfoS("checking the batch releasing progress", "current batch", 
c.releaseStatus.CanaryStatus.CurrentBatch, + "canary pod available count", availableCanaryPodCount, "stable pod count", c.stable.Status.Replicas, + "max unavailable pod allowed", maxUnavailable, "canary goal", canaryGoal) + + if canaryGoal > canaryPodCount || availableCanaryPodCount+int32(maxUnavailable) < canaryGoal { + klog.InfoS("the batch is not ready yet", "current batch", c.releaseStatus.CanaryStatus.CurrentBatch) + return false, nil + } + + klog.InfoS("all pods in current batch are ready", "current batch", c.releaseStatus.CanaryStatus.CurrentBatch) + c.recorder.Eventf(c.parentController, v1.EventTypeNormal, "Batch Available", "Batch %d is available", c.releaseStatus.CanaryStatus.CurrentBatch) + return true, nil +} + +// FinalizeOneBatch makes sure that the rollout status are updated correctly +func (c *DeploymentsRolloutController) FinalizeOneBatch() (bool, error) { + return true, nil +} + +// Finalize makes sure the Deployment is all upgraded +func (c *DeploymentsRolloutController) Finalize(pause, cleanup bool) bool { + if err := c.fetchStableDeployment(); client.IgnoreNotFound(err) != nil { + return false + } + + if _, err := c.releaseDeployment(c.stable, pause, cleanup); err != nil { + return false + } + + c.recorder.Eventf(c.parentController, v1.EventTypeNormal, "Finalized", "Finalized: "+ + "paused=%v, cleanup=%v", pause, cleanup) + return true +} + +// WatchWorkload return change type if workload was changed during release +func (c *DeploymentsRolloutController) WatchWorkload() (WorkloadChangeEventType, *Accessor, error) { + if c.parentController.Spec.Cancelled || + c.parentController.DeletionTimestamp != nil || + c.releaseStatus.Phase == v1alpha1.RolloutPhaseFinalizing || + c.releaseStatus.Phase == v1alpha1.RolloutPhaseRollingBack || + c.releaseStatus.Phase == v1alpha1.RolloutPhaseTerminating { + return IgnoreWorkloadEvent, nil, nil + } + + var err error + workloadInfo := &Accessor{} + err = c.fetchStableDeployment() + if err != nil { + return "", nil, err + } + + err = c.fetchCanaryDeployment() + switch { + case client.IgnoreNotFound(err) != nil: + return "", nil, err + case apierrors.IsNotFound(err): + workloadInfo.Status = &Status{} + default: + workloadInfo.Status = &Status{ + UpdatedReplicas: c.canary.Status.Replicas, + UpdatedReadyReplicas: c.canary.Status.AvailableReplicas, + } + } + + if c.canary != nil && c.canary.DeletionTimestamp != nil && + controllerutil.ContainsFinalizer(c.canary, CanaryDeploymentFinalizer) { + return WorkloadUnHealthy, workloadInfo, nil + } + + if c.stable.Status.ObservedGeneration != c.stable.Generation { + klog.Warningf("Deployment is still reconciling, waiting for it to complete, generation: %v, observed: %v", + c.stable.Generation, c.stable.Status.ObservedGeneration) + return WorkloadStillReconciling, workloadInfo, nil + } + + if !c.stable.Spec.Paused && c.stable.Status.UpdatedReplicas == c.stable.Status.Replicas { + return IgnoreWorkloadEvent, workloadInfo, nil + } + + var updateRevision string + switch c.releaseStatus.Phase { + case v1alpha1.RolloutPhaseHealthy, v1alpha1.RolloutPhaseVerify: + return IgnoreWorkloadEvent, workloadInfo, nil + + default: + if isRollingBack, err := c.isRollingBack(); err != nil { + return "", workloadInfo, err + } else if isRollingBack { + workloadInfo.UpdateRevision = &updateRevision + return WorkloadStableOrRollback, workloadInfo, nil + } + if *c.stable.Spec.Replicas != c.releaseStatus.ObservedWorkloadReplicas { + workloadInfo.Replicas = c.stable.Spec.Replicas + klog.Warningf("Deployment replicas changed 
during releasing, should pause and wait for it to complete, replicas from: %v -> %v", + c.releaseStatus.ObservedWorkloadReplicas, *c.stable.Spec.Replicas) + return WorkloadReplicasChanged, workloadInfo, nil + } + fallthrough + + case v1alpha1.RolloutPhaseCompleted, v1alpha1.RolloutPhaseCancelled: + realStableRevision, err := c.GetPodTemplateHash(c.stable, "stable") + if err != nil { + return "", workloadInfo, err + } + if (c.canary == nil || !EqualIgnoreHash(&c.stable.Spec.Template, &c.canary.Spec.Template)) && + c.releaseStatus.UpdateRevision != realStableRevision { + workloadInfo.UpdateRevision = &updateRevision + klog.Warning("Deployment updateRevision changed during releasing, should try to restart the release plan") + return WorkloadPodTemplateChanged, workloadInfo, nil + } + } + + return IgnoreWorkloadEvent, workloadInfo, nil +} + +func (c *DeploymentsRolloutController) isRollingBack() (bool, error) { + rss, err := c.listReplicaSetsFor(c.stable) + if err != nil { + return false, err + } + for _, rs := range rss { + if c.releaseStatus.StableRevision != "" && *rs.Spec.Replicas > 0 && + c.releaseStatus.StableRevision == rs.Labels[apps.DefaultDeploymentUniqueLabelKey] && + EqualIgnoreHash(&rs.Spec.Template, &c.stable.Spec.Template) { + return true, nil + } + } + return false, nil +} + +func (c *DeploymentsRolloutController) fetchStableDeployment() error { + if c.stable != nil { + return nil + } + + stable := &apps.Deployment{} + if err := c.client.Get(context.TODO(), c.stableNamespacedName, stable); err != nil { + if !apierrors.IsNotFound(err) { + c.recorder.Event(c.parentController, v1.EventTypeWarning, "GetStableDeploymentFailed", err.Error()) + } + return err + } + c.stable = stable + return nil +} + +func (c *DeploymentsRolloutController) fetchCanaryDeployment() error { + err := c.fetchStableDeployment() + if err != nil { + return err + } + + ds, err := c.listCanaryDeployment(client.InNamespace(c.stable.Namespace), + client.MatchingLabels(map[string]string{CanaryDeploymentLabelKey: string(c.stable.UID)})) + if err != nil { + return err + } + + sort.Slice(ds, func(i, j int) bool { + return ds[j].CreationTimestamp.Before(&ds[i].CreationTimestamp) + }) + + if len(ds) == 0 || !EqualIgnoreHash(&ds[0].Spec.Template, &c.stable.Spec.Template) { + err := apierrors.NewNotFound(schema.GroupResource{ + Group: apps.SchemeGroupVersion.Group, + Resource: c.stable.Kind, + }, c.canaryNamespacedName.Name) + c.recorder.Event(c.parentController, v1.EventTypeWarning, "GetCanaryDeploymentFailed", err.Error()) + return err + } + + c.canary = ds[0] + return nil +} + +// the target workload size for the current batch +func (c *DeploymentsRolloutController) calculateCurrentTarget(totalSize int32) int32 { + targetSize := int32(calculateNewBatchTarget(c.releasePlan, int(totalSize), int(c.releaseStatus.CanaryStatus.CurrentBatch))) + klog.InfoS("Calculated the number of pods in the canary Deployment after current batch", + "current batch", c.releaseStatus.CanaryStatus.CurrentBatch, "workload updateRevision size", targetSize) + return targetSize +} + +// the source workload size for the current batch +func (c *DeploymentsRolloutController) calculateCurrentSource(totalSize int32) int32 { + sourceSize := totalSize - c.calculateCurrentTarget(totalSize) + klog.InfoS("Calculated the number of pods in the stable Deployment after current batch", + "current batch", c.releaseStatus.CanaryStatus.CurrentBatch, "workload stableRevision size", sourceSize) + return sourceSize +} + +func (c *DeploymentsRolloutController) 
recordDeploymentRevisionAndReplicas() error { + err := c.fetchStableDeployment() + if err != nil { + return err + } + + err = c.fetchCanaryDeployment() + if client.IgnoreNotFound(err) != nil { + return err + } + + var claimErr error + c.canary, claimErr = c.claimDeployment(c.stable, c.canary) + if claimErr != nil { + return claimErr + } + + c.releaseStatus.StableRevision, err = c.GetPodTemplateHash(c.stable, "stable") + if err != nil { + return err + } + c.releaseStatus.UpdateRevision, err = c.GetPodTemplateHash(c.canary, "canary") + if err != nil { + return err + } + c.releaseStatus.ObservedWorkloadReplicas = *c.stable.Spec.Replicas + return nil +} + +func (c *DeploymentsRolloutController) GetPodTemplateHash(deploy *apps.Deployment, kind string) (string, error) { + switch kind { + case "stable", "canary": + if deploy == nil { + return "", fmt.Errorf("workload cannot be found, may be deleted or not be created yet") + } + default: + panic("wrong kind type, must be 'stable' or 'canary'") + } + + rss, err := c.listReplicaSetsFor(deploy) + if err != nil { + return "", err + } + + sort.Slice(rss, func(i, j int) bool { + return rss[i].CreationTimestamp.Before(&rss[j].CreationTimestamp) + }) + + for _, rs := range rss { + switch kind { + case "stable": + if rs.Spec.Replicas != nil && *rs.Spec.Replicas > 0 { + return rs.Labels[apps.DefaultDeploymentUniqueLabelKey], nil + } + case "canary": + if EqualIgnoreHash(&deploy.Spec.Template, &rs.Spec.Template) { + return rs.Labels[apps.DefaultDeploymentUniqueLabelKey], nil + } + } + } + + return "", fmt.Errorf("cannot find the suitable replicaset pod template hash, kind: %v", kind) +} + +func (c *DeploymentsRolloutController) listReplicaSetsFor(deploy *apps.Deployment) ([]*apps.ReplicaSet, error) { + deploySelector, err := metav1.LabelSelectorAsSelector(deploy.Spec.Selector) + if err != nil { + return nil, err + } + + rsList := &apps.ReplicaSetList{} + err = c.client.List(context.TODO(), rsList, &client.ListOptions{ + Namespace: deploy.Namespace, + LabelSelector: deploySelector, + }) + if err != nil { + return nil, err + } + + var rss []*apps.ReplicaSet + for i := range rsList.Items { + rs := &rsList.Items[i] + if rs.DeletionTimestamp != nil { + continue + } + if owner := metav1.GetControllerOf(rs); owner == nil || owner.UID != deploy.UID { + continue + } + rss = append(rss, rs) + } + return rss, nil +} diff --git a/pkg/controller/batchrelease/workloads/deployment_double_controller.go b/pkg/controller/batchrelease/workloads/deployment_double_controller.go new file mode 100644 index 0000000..14cde57 --- /dev/null +++ b/pkg/controller/batchrelease/workloads/deployment_double_controller.go @@ -0,0 +1,197 @@ +package workloads + +import ( + "context" + "encoding/json" + "fmt" + "k8s.io/apimachinery/pkg/util/sets" + "sort" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + CanaryDeploymentLabelKey = "rollouts.kruise.io/canary-deployment" + CanaryDeploymentFinalizer = "finalizer.rollouts.kruise.io/canary-deployment" +) + +// deploymentController is the place to hold fields needed for handle Deployment type of workloads +type deploymentController struct { + workloadController + stableNamespacedName types.NamespacedName + canaryNamespacedName types.NamespacedName +} + +// add the parent controller to the 
owner of the deployment, unpause it, and initialize the size
+// before kicking off the update, starting from every pod in the old version
+func (c *deploymentController) claimDeployment(stableDeploy, canaryDeploy *apps.Deployment) (*apps.Deployment, error) {
+	if canaryDeploy == nil || !EqualIgnoreHash(&stableDeploy.Spec.Template, &canaryDeploy.Spec.Template) {
+		var err error
+		var collisionCount int32
+		if c.releaseStatus.CollisionCount != nil {
+			collisionCount = *c.releaseStatus.CollisionCount
+		}
+
+		for {
+			canaryDeploy, err = c.createCanaryDeployment(stableDeploy, &collisionCount)
+			if errors.IsAlreadyExists(err) {
+				collisionCount++
+				continue
+			} else if err != nil {
+				return nil, err
+			}
+			break
+		}
+
+		// store the (possibly incremented) collision count back with a fresh pointer;
+		// the status may not carry a CollisionCount yet, so do not dereference it blindly
+		c.releaseStatus.CollisionCount = &collisionCount
+	}
+	return canaryDeploy, nil
+}
+
+func (c *deploymentController) createCanaryDeployment(stableDeploy *apps.Deployment, collisionCount *int32) (*apps.Deployment, error) {
+	suffix := ComputeHash(&stableDeploy.Spec.Template, collisionCount)
+	canaryDeploy := &apps.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        fmt.Sprintf("%v-%v", c.canaryNamespacedName.Name, suffix),
+			Namespace:   c.stableNamespacedName.Namespace,
+			Labels:      map[string]string{},
+			Annotations: map[string]string{},
+		},
+	}
+	for k, v := range stableDeploy.Labels {
+		canaryDeploy.Labels[k] = v
+	}
+	for k, v := range stableDeploy.Annotations {
+		canaryDeploy.Annotations[k] = v
+	}
+	for _, f := range stableDeploy.Finalizers {
+		canaryDeploy.Finalizers = append(canaryDeploy.Finalizers, f)
+	}
+	for _, o := range stableDeploy.OwnerReferences {
+		canaryDeploy.OwnerReferences = append(canaryDeploy.OwnerReferences, *o.DeepCopy())
+	}
+
+	canaryDeploy.Finalizers = append(canaryDeploy.Finalizers, CanaryDeploymentFinalizer)
+	canaryDeploy.OwnerReferences = append(canaryDeploy.OwnerReferences, *metav1.NewControllerRef(
+		stableDeploy, stableDeploy.GroupVersionKind()))
+
+	// set labels & annotations
+	canaryDeploy.Labels[CanaryDeploymentLabelKey] = string(stableDeploy.UID)
+	owner := metav1.NewControllerRef(c.parentController, c.parentController.GroupVersionKind())
+	if owner != nil {
+		ownerInfo, _ := json.Marshal(owner)
+		canaryDeploy.Annotations[BatchReleaseControlAnnotation] = string(ownerInfo)
+	}
+
+	// copy spec
+	canaryDeploy.Spec = *stableDeploy.Spec.DeepCopy()
+	canaryDeploy.Spec.Replicas = pointer.Int32Ptr(0)
+	canaryDeploy.Spec.Paused = false
+
+	// create canary Deployment
+	err := c.client.Create(context.TODO(), canaryDeploy)
+	if err != nil {
+		klog.Errorf("Failed to create canary Deployment(%v), error: %v", c.canaryNamespacedName, err)
+		return nil, err
+	}
+
+	// fetch the canary Deployment back by the suffixed name it was actually created with
+	var fetchedCanary *apps.Deployment
+	canaryKey := types.NamespacedName{Namespace: canaryDeploy.Namespace, Name: canaryDeploy.Name}
+	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		fetchedCanary = &apps.Deployment{}
+		return c.client.Get(context.TODO(), canaryKey, fetchedCanary)
+	})
+
+	return fetchedCanary, err
+}
+
+func (c *deploymentController) releaseDeployment(stableDeploy *apps.Deployment, pause, cleanup bool) (bool, error) {
+	if stableDeploy == nil {
+		return true, nil
+	}
+
+	var patchErr, deleteErr error
+	if cleanup {
+		ds, err := c.listCanaryDeployment(client.InNamespace(stableDeploy.Namespace),
+			client.MatchingLabels(map[string]string{CanaryDeploymentLabelKey: string(stableDeploy.UID)}))
+		if err != nil {
+			return false, err
+		}
+
+		// must delete the older ones first
+		sort.Slice(ds, func(i, j int) bool {
+			return ds[i].CreationTimestamp.Before(&ds[j].CreationTimestamp)
+		})
+
+		for _, d := range ds {
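+			// Illustrative note (not from the original patch): the finalizer must be removed
+			// before the Delete below, e.g. via the merge patch {"metadata":{"finalizers":[...]}}
+			// that PatchFinalizer sends; otherwise the canary Deployment would be stuck in
+			// Terminating with finalizer.rollouts.kruise.io/canary-deployment still set.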
+			finalizers := sets.NewString(d.Finalizers...).Delete(CanaryDeploymentFinalizer).List()
+			patchErr = PatchFinalizer(c.client, d, finalizers)
+			if patchErr != nil && !errors.IsNotFound(patchErr) {
+				klog.Errorf("Error occurred when patching Deployment, error: %v", patchErr)
+				return false, patchErr
+			}
+			deleteErr = c.client.Delete(context.TODO(), d)
+			if deleteErr != nil && !errors.IsNotFound(deleteErr) {
+				klog.Errorf("Error occurred when deleting Deployment, error: %v", deleteErr)
+				return false, deleteErr
+			}
+		}
+	}
+
+	if stableDeploy.Spec.Paused != pause {
+		patchByte := []byte(fmt.Sprintf(`{"spec":{"paused":%v}}`, pause))
+		patchErr = c.client.Patch(context.TODO(), stableDeploy, client.RawPatch(types.StrategicMergePatchType, patchByte))
+		if patchErr != nil {
+			klog.Errorf("Error occurred when patching Deployment, error: %v", patchErr)
+			return false, patchErr
+		}
+	}
+
+	return true, nil
+}
+
+// patchCanaryReplicas patches the replicas of the canary Deployment
+func (c *deploymentController) patchCanaryReplicas(canaryDeploy *apps.Deployment, replicas int32) error {
+	patch := map[string]interface{}{
+		"spec": map[string]interface{}{
+			"replicas": pointer.Int32Ptr(replicas),
+		},
+	}
+
+	patchByte, _ := json.Marshal(patch)
+	if err := c.client.Patch(context.TODO(), canaryDeploy, client.RawPatch(types.MergePatchType, patchByte)); err != nil {
+		c.recorder.Eventf(c.parentController, v1.EventTypeWarning, "PatchPartitionFailed",
+			"Failed to update the canary Deployment to the correct canary replicas %d, error: %v", replicas, err)
+		return err
+	}
+
+	klog.InfoS("Submitted modified replicas request for canary Deployment", "Deployment",
+		canaryDeploy.GetName(), "target canary replicas size", replicas, "batch", c.releaseStatus.CanaryStatus.CurrentBatch)
+	return nil
+}
+
+func (c *deploymentController) listCanaryDeployment(options ...client.ListOption) ([]*apps.Deployment, error) {
+	dList := &apps.DeploymentList{}
+	if err := c.client.List(context.TODO(), dList, options...); err != nil {
+		return nil, err
+	}
+
+	var ds []*apps.Deployment
+	for i := range dList.Items {
+		d := &dList.Items[i]
+		if d.DeletionTimestamp != nil {
+			continue
+		}
+		ds = append(ds, d)
+	}
+
+	return ds, nil
+}
diff --git a/pkg/controller/batchrelease/workloads/workloads_utils.go b/pkg/controller/batchrelease/workloads/workloads_utils.go
new file mode 100644
index 0000000..1ef27d7
--- /dev/null
+++ b/pkg/controller/batchrelease/workloads/workloads_utils.go
@@ -0,0 +1,116 @@
+package workloads
+
+import (
+	"context"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"hash/fnv"
+
+	"github.com/davecgh/go-spew/spew"
+	apps "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/openkruise/rollouts/api/v1alpha1"
+)
+
+const (
+	// We omit vowels from the set of available characters to reduce the chances
+	// of "bad words" being formed.
+	alphanums = "bcdfghjklmnpqrstvwxz2456789"
+)
+
+// DeepHashObject writes the specified object to the hash using the spew library,
+// which follows pointers and prints the actual values of nested objects,
+// ensuring the hash does not change when a pointer changes.
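+// For example (an illustrative note, not from the original patch): hashing two distinct
+// pointers to equal PodTemplateSpec values yields the same result, while hashing the
+// pointer values themselves would not; ComputeHash below relies on this to derive a
+// stable name suffix for the canary Deployment.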
+func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
+	hasher.Reset()
+	printer := spew.ConfigState{
+		Indent:         " ",
+		SortKeys:       true,
+		DisableMethods: true,
+		SpewKeys:       true,
+	}
+	printer.Fprintf(hasher, "%#v", objectToWrite)
+}
+
+// ComputeHash returns a hash value calculated from pod template and
+// a collisionCount to avoid hash collision. The hash will be safe encoded to
+// avoid bad words.
+func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string {
+	podTemplateSpecHasher := fnv.New32a()
+	DeepHashObject(podTemplateSpecHasher, *template)
+
+	// Add collisionCount in the hash if it exists.
+	if collisionCount != nil {
+		collisionCountBytes := make([]byte, 8)
+		binary.LittleEndian.PutUint32(collisionCountBytes, uint32(*collisionCount))
+		podTemplateSpecHasher.Write(collisionCountBytes)
+	}
+
+	return SafeEncodeString(fmt.Sprint(podTemplateSpecHasher.Sum32()))
+}
+
+// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and
+// ensures that strings generated from hash functions appear consistent throughout the API.
+func SafeEncodeString(s string) string {
+	r := make([]byte, len(s))
+	for i, b := range []rune(s) {
+		r[i] = alphanums[(int(b) % len(alphanums))]
+	}
+	return string(r)
+}
+
+func IsControlledBy(object, owner metav1.Object) bool {
+	controlInfo, controlled := object.GetAnnotations()[BatchReleaseControlAnnotation]
+	if !controlled {
+		return false
+	}
+
+	o := &metav1.OwnerReference{}
+	if err := json.Unmarshal([]byte(controlInfo), o); err != nil {
+		return false
+	}
+
+	return o.UID == owner.GetUID()
+}
+
+func calculateNewBatchTarget(rolloutSpec *v1alpha1.ReleasePlan, workloadReplicas, currentBatch int) int {
+	batchSize, _ := intstr.GetValueFromIntOrPercent(&rolloutSpec.Batches[currentBatch].CanaryReplicas, workloadReplicas, true)
+	if batchSize > workloadReplicas {
+		klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is more than workload.replicas %v", currentBatch, batchSize, workloadReplicas)
+		batchSize = workloadReplicas
+	} else if batchSize < 0 {
+		klog.Warningf("releasePlan has wrong batch replicas, batches[%d].replicas %v is less than 0", currentBatch, batchSize)
+		batchSize = 0
+	}
+
+	klog.V(3).InfoS("calculated the number of new pod size", "current batch", currentBatch,
+		"new pod target", batchSize)
+	return batchSize
+}
+
+func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
+	t1Copy := template1.DeepCopy()
+	t2Copy := template2.DeepCopy()
+	// Remove hash labels from template.Labels before comparing
+	delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
+	delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
+	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
+}
+
+func PatchFinalizer(c client.Client, object client.Object, finalizers []string) error {
+	patchByte, _ := json.Marshal(map[string]interface{}{
+		"metadata": map[string]interface{}{
+			"finalizers": finalizers,
+		},
+	})
+	return c.Patch(context.TODO(), object, client.RawPatch(types.MergePatchType, patchByte))
+}
diff --git a/test/e2e/batchrelease_test.go b/test/e2e/batchrelease_test.go
new file mode 100644
index 0000000..199d8fd
--- /dev/null
+++ b/test/e2e/batchrelease_test.go
@@ -0,0 +1,1333 @@
+package e2e
+
+import (
+	"context"
+	"fmt"
+	kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
+	rolloutsv1alpha1 "github.com/openkruise/rollouts/api/v1alpha1"
+	
"github.com/openkruise/rollouts/pkg/controller/batchrelease/workloads" + "github.com/openkruise/rollouts/test/images" + apps "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/util/retry" + "k8s.io/utils/integer" + "k8s.io/utils/pointer" + "sort" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = SIGDescribe("Test BatchRelease Controller", func() { + var namespace string + + CreateObject := func(object client.Object, options ...client.CreateOption) { + object.SetNamespace(namespace) + Expect(k8sClient.Create(context.TODO(), object)).NotTo(HaveOccurred()) + } + + GetObject := func(namespace, name string, object client.Object) error { + key := types.NamespacedName{Namespace: namespace, Name: name} + return k8sClient.Get(context.TODO(), key, object) + } + + DeleteObject := func(object client.Object, options ...client.DeleteOption) { + Expect(k8sClient.Delete(context.TODO(), object)).NotTo(HaveOccurred()) + } + + UpdateCloneSet := func(object *kruiseappsv1alpha1.CloneSet) *kruiseappsv1alpha1.CloneSet { + var clone *kruiseappsv1alpha1.CloneSet + Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + clone = &kruiseappsv1alpha1.CloneSet{} + err := GetObject(object.Namespace, object.Name, clone) + if err != nil { + return err + } + clone.Spec = *object.Spec.DeepCopy() + return k8sClient.Update(context.TODO(), clone) + })).NotTo(HaveOccurred()) + + return clone + } + + WaitCloneSetAllPodsReady := func(cloneset *kruiseappsv1alpha1.CloneSet) { + Eventually(func() bool { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.ObservedGeneration == clone.Generation && clone.Status.Replicas == clone.Status.ReadyReplicas + }, 20*time.Minute, time.Second).Should(BeTrue()) + } + + GetUpdateRevision := func(cloneset *kruiseappsv1alpha1.CloneSet) string { + var revision string + Eventually(func() bool { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + revision = clone.Status.UpdateRevision + return clone.Status.ObservedGeneration == clone.Generation + }, 20*time.Minute, time.Second).Should(BeTrue()) + return revision + } + + UpdateDeployment := func(object *apps.Deployment) *apps.Deployment { + var clone *apps.Deployment + Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + clone = &apps.Deployment{} + err := GetObject(object.Namespace, object.Name, clone) + if err != nil { + return err + } + clone.Spec = *object.Spec.DeepCopy() + return k8sClient.Update(context.TODO(), clone) + })).NotTo(HaveOccurred()) + + return clone + } + + GetCanaryDeployment := func(deployment *apps.Deployment) *apps.Deployment { + var dList *apps.DeploymentList + fetchedDeployment := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, fetchedDeployment)).NotTo(HaveOccurred()) + Eventually(func() int { + dList = &apps.DeploymentList{} + Expect(k8sClient.List( + context.TODO(), dList, + client.InNamespace(deployment.Namespace), + client.MatchingLabels(map[string]string{workloads.CanaryDeploymentLabelKey: string(fetchedDeployment.UID)}))).NotTo(HaveOccurred()) + return len(dList.Items) + }, 5*time.Minute, time.Second).Should(BeNumerically(">", 0)) + + var ds 
[]*apps.Deployment + for i := range dList.Items { + d := &dList.Items[i] + if d.DeletionTimestamp != nil { + continue + } + ds = append(ds, d) + } + + sort.Slice(ds, func(i, j int) bool { + return ds[j].CreationTimestamp.Before(&ds[i].CreationTimestamp) + }) + + return ds[0] + } + + WaitDeploymentAllPodsReady := func(deployment *apps.Deployment) { + Eventually(func() bool { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.ObservedGeneration == clone.Generation && clone.Status.Replicas == clone.Status.ReadyReplicas + }, 20*time.Minute, time.Second).Should(BeTrue()) + } + + BeforeEach(func() { + namespace = randomNamespaceName("batchrelease") + ns := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + Expect(k8sClient.Create(context.TODO(), &ns)).Should(SatisfyAny(BeNil())) + }) + + AfterEach(func() { + By("[TEST] Clean up resources after an integration test") + k8sClient.DeleteAllOf(context.TODO(), &apps.Deployment{}, client.InNamespace(namespace)) + k8sClient.DeleteAllOf(context.TODO(), &kruiseappsv1alpha1.CloneSet{}, client.InNamespace(namespace)) + k8sClient.DeleteAllOf(context.TODO(), &rolloutsv1alpha1.BatchRelease{}, client.InNamespace(namespace)) + Expect(k8sClient.Delete(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed()) + }) + + KruiseDescribe("CloneSet BatchRelease Checker", func() { + + It("V1->V2: Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + + // record stable revision --> v1 + stableRevision := GetUpdateRevision(cloneset) + + cloneset.Spec.UpdateStrategy.Paused = true + cloneset.Spec.Replicas = pointer.Int32Ptr(5) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevision := GetUpdateRevision(cloneset) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneset.Spec.Replicas), true) + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, time.Minute, 
time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + }) + + It("V1->V2: Percentage, 50%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_percentage_50.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + + // record stable revision --> v1 + stableRevision := GetUpdateRevision(cloneset) + + cloneset.Spec.UpdateStrategy.Paused = true + cloneset.Spec.Replicas = pointer.Int32Ptr(5) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevision := GetUpdateRevision(cloneset) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneset.Spec.Replicas), true) + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(Equal(*cloneset.Spec.Replicas)) + }) + + It("V1->V2(Completed)->V3: Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease....") + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating CloneSet and waiting for all pods ready....") + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + stableRevisionV1 := GetUpdateRevision(cloneset) + + /************************************************************************************* + Start to release V1->V2 + *************************************************************************************/ + By("Start to release V1->V2....") + cloneset.Spec.UpdateStrategy.Paused = true + 
cloneset.Spec.Replicas = pointer.Int32Ptr(5) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevisionV2 := GetUpdateRevision(cloneset) + Expect(canaryRevisionV2).ShouldNot(Equal(stableRevisionV1)) + + By("V1->V2: Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneset.Spec.Replicas), true) + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("V1->V2: Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + /************************************************************************************* + V1->V2 Succeeded, Start to release V2->V3 + *************************************************************************************/ + By("Start to release V2->V3....") + cloneset.Spec.UpdateStrategy.Paused = true + cloneset.Spec.Replicas = pointer.Int32Ptr(5) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV3) + UpdateCloneSet(cloneset) + + // record canary revision --> v3 + canaryRevisionV3 := GetUpdateRevision(cloneset) + Expect(canaryRevisionV3).ShouldNot(Equal(stableRevisionV1)) + Expect(canaryRevisionV3).ShouldNot(Equal(canaryRevisionV2)) + + By("V2->V3: Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneset.Spec.Replicas), true) + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 20*time.Second, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("V2->V3: Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + }) + + It("V1->V2(UnCompleted)->V3: Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease....") + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating CloneSet and waiting for all pods ready....") + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + 
Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred())
+			cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1)
+			CreateObject(cloneset)
+			WaitCloneSetAllPodsReady(cloneset)
+			stableRevisionV1 := GetUpdateRevision(cloneset)
+
+			/*************************************************************************************
+				Start to release V1->V2
+			*************************************************************************************/
+			By("Start to release V1->V2....")
+			cloneset.Spec.UpdateStrategy.Paused = true
+			cloneset.Spec.Replicas = pointer.Int32Ptr(5)
+			cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2)
+			UpdateCloneSet(cloneset)
+
+			// record canary revision --> v2
+			canaryRevisionV2 := GetUpdateRevision(cloneset)
+			Expect(canaryRevisionV2).ShouldNot(Equal(stableRevisionV1))
+
+			By("V1->V2: Checking CloneSet updated replicas...")
+			for i := 0; i < len(release.Spec.ReleasePlan.Batches)-2; i++ {
+				By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i))
+				batch := &release.Spec.ReleasePlan.Batches[i]
+				expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneset.Spec.Replicas), true)
+				Eventually(func() int32 {
+					clone := &kruiseappsv1alpha1.CloneSet{}
+					Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred())
+					return clone.Status.UpdatedReplicas
+				}, time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
+				time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
+			}
+
+			By("V1->V2: Checking BatchRelease status...")
+			clone := &rolloutsv1alpha1.BatchRelease{}
+			Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred())
+			Expect(clone.Status.Phase).ShouldNot(Equal(rolloutsv1alpha1.RolloutPhaseCompleted))
+
+			/*************************************************************************************
+				V1->V2 Not Completed, Start to release V2->V3
+			*************************************************************************************/
+			By("Start to release V2->V3....")
+			cloneset.Spec.UpdateStrategy.Paused = true
+			cloneset.Spec.Replicas = pointer.Int32Ptr(5)
+			cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV3)
+			UpdateCloneSet(cloneset)
+
+			// record canary revision --> v3
+			canaryRevisionV3 := GetUpdateRevision(cloneset)
+			Expect(canaryRevisionV3).ShouldNot(Equal(stableRevisionV1))
+			Expect(canaryRevisionV3).ShouldNot(Equal(canaryRevisionV2))
+
+			By("V2->V3: Checking CloneSet updated replicas...")
+			for i := range release.Spec.ReleasePlan.Batches {
+				By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i))
+				batch := &release.Spec.ReleasePlan.Batches[i]
+				expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneset.Spec.Replicas), true)
+				Eventually(func() int32 {
+					clone := &kruiseappsv1alpha1.CloneSet{}
+					Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred())
+					return clone.Status.UpdatedReplicas
+				}, time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas)))
+				time.Sleep(time.Duration(batch.PauseSeconds) * time.Second)
+			}
+
+			By("V2->V3: Checking BatchRelease status...")
+			Eventually(func() rolloutsv1alpha1.RolloutPhase {
+				clone := &rolloutsv1alpha1.BatchRelease{}
+				Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred())
+				return clone.Status.Phase
+			}, time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted))
+		
}) + + It("V1->V2: ScalingUp, Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + + // record stable revision --> v1 + stableRevision := GetUpdateRevision(cloneset) + + cloneset.Spec.UpdateStrategy.Paused = true + cloneset.Spec.Replicas = pointer.Int32Ptr(5) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevision := GetUpdateRevision(cloneset) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneCopy.Spec.Replicas), true) + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + By("\tScaling up from 5 to 10...") + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + cloneCopy.Spec.Replicas = pointer.Int32Ptr(10) + UpdateCloneSet(cloneCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() bool { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + }) + + It("V1->V2: ScalingDown, Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + + // record stable revision --> v1 + stableRevision := GetUpdateRevision(cloneset) + + cloneset.Spec.UpdateStrategy.Paused = true 
+ cloneset.Spec.Replicas = pointer.Int32Ptr(10) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevision := GetUpdateRevision(cloneset) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneCopy.Spec.Replicas), true) + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + By("\tScaling down from 10 to 2...") + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + cloneCopy.Spec.Replicas = pointer.Int32Ptr(2) + UpdateCloneSet(cloneCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() bool { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + }) + + It("V1->V2: ScalingUp, Number, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_number_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + + // record stable revision --> v1 + stableRevision := GetUpdateRevision(cloneset) + + cloneset.Spec.UpdateStrategy.Paused = true + cloneset.Spec.Replicas = pointer.Int32Ptr(5) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevision := GetUpdateRevision(cloneset) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneCopy.Spec.Replicas), true) + 
Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + By("\tScaling up from 5 to 10...") + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + cloneCopy.Spec.Replicas = pointer.Int32Ptr(10) + UpdateCloneSet(cloneCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() bool { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + }) + + It("V1->V2: ScalingDown, Number, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_number_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + + // record stable revision --> v1 + stableRevision := GetUpdateRevision(cloneset) + + cloneset.Spec.UpdateStrategy.Paused = true + cloneset.Spec.Replicas = pointer.Int32Ptr(10) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevision := GetUpdateRevision(cloneset) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking CloneSet updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*cloneCopy.Spec.Replicas), true) + expectedUpdatedReplicas = integer.IntMin(expectedUpdatedReplicas, int(*cloneCopy.Spec.Replicas)) + Eventually(func() int32 { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + By("\tScaling down from 10 to 2...") + cloneCopy := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, cloneCopy)).NotTo(HaveOccurred()) + cloneCopy.Spec.Replicas = pointer.Int32Ptr(2) + UpdateCloneSet(cloneCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := 
&rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() bool { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + }) + + It("Rollback V1->V2->V1: Percentage, 100%, Succeeded", func() { + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/cloneset_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + cloneset := &kruiseappsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/workload/cloneset.yaml", cloneset)).ToNot(HaveOccurred()) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + cloneset.Spec.Template.Spec.Containers[0].ImagePullPolicy = v1.PullIfNotPresent + CreateObject(cloneset) + WaitCloneSetAllPodsReady(cloneset) + + // record stable revision --> v1 + stableRevision := GetUpdateRevision(cloneset) + + cloneset.Spec.UpdateStrategy.Paused = true + cloneset.Spec.Replicas = pointer.Int32Ptr(10) + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.FailedImage) + UpdateCloneSet(cloneset) + + // record canary revision --> v2 + canaryRevision := GetUpdateRevision(cloneset) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Waiting a minute and checking failed revision...") + time.Sleep(time.Minute) + for i := 0; i < 30; i++ { + fetchedRelease := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, fetchedRelease)).NotTo(HaveOccurred()) + Expect(fetchedRelease.Status.CanaryStatus.CurrentBatch).Should(Equal(int32(1))) + time.Sleep(time.Second) + } + + By("Updating cloneset to V1...") + cloneset.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + UpdateCloneSet(cloneset) + + By("Checking all pod were updated when release completed...") + Eventually(func() bool { + clone := &kruiseappsv1alpha1.CloneSet{} + Expect(GetObject(cloneset.Namespace, cloneset.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + + By("Checking BatchRelease completed status phase...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCancelled)) + }) + }) + + KruiseDescribe("Deployment BatchRelease Checker", func() { + + It("V1->V2: Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + 
CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + + // record stable revision --> v1 + stableRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*deployment.Spec.Replicas), true) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.Replicas + }, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + }) + + It("V1->V2: Percentage, 50%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_50.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + + // record stable revision --> v1 + stableRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*deployment.Spec.Replicas), true) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return 
clone.Status.Phase + }, 5*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() int32 { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 10*time.Minute, time.Second).Should(Equal(*deployment.Spec.Replicas)) + }) + + It("V1->V2(Completed)->V3: Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease....") + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating Deployment and waiting for all pods ready....") + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + stableRevisionV1 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + /************************************************************************************* + Start to release V1->V2 + *************************************************************************************/ + By("Start to release V1->V2....") + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevisionV2 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevisionV2).ShouldNot(Equal(stableRevisionV1)) + + By("V1->V2: Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*deployment.Spec.Replicas), true) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("V1->V2: Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + /************************************************************************************* + V1->V2 Succeeded, Start to release V2->V3 + *************************************************************************************/ + By("Start to release V2->V3....") + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV3) + UpdateDeployment(deployment) + + // record canary revision --> v3 + canaryRevisionV3 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevisionV3).ShouldNot(Equal(stableRevisionV1)) + 
Expect(canaryRevisionV3).ShouldNot(Equal(canaryRevisionV2)) + + By("V2->V3: Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*deployment.Spec.Replicas), true) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("V2->V3: Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + }) + + It("V1->V2(UnCompleted)->V3: Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease....") + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating Deployment and waiting for all pods ready....") + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + stableRevisionV1 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + /************************************************************************************* + Start to release V1->V2 + *************************************************************************************/ + By("Start to release V1->V2....") + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevisionV2 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevisionV2).ShouldNot(Equal(stableRevisionV1)) + + By("V1->V2: Checking Deployment updated replicas...") + for i := 0; i < len(release.Spec.ReleasePlan.Batches)-1; i++ { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*deployment.Spec.Replicas), true) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("V1->V2: Checking BatchRelease status...") + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + Expect(clone.Status.Phase).ShouldNot(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + /************************************************************************************* + V1->V2 Not Completed, Start to release V1,V2->V3 + 
*************************************************************************************/ + By("Start to release V1,V2->V3...") + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV3) + UpdateDeployment(deployment) + + // record canary revision --> v3 + canaryRevisionV3 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevisionV3).ShouldNot(Equal(stableRevisionV1)) + Expect(canaryRevisionV3).ShouldNot(Equal(canaryRevisionV2)) + + By("V2->V3: Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*deployment.Spec.Replicas), true) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(Equal(int32(expectedUpdatedReplicas))) + time.Sleep(time.Duration(batch.PauseSeconds) * time.Second) + } + + By("V2->V3: Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + }) + + It("V1->V2: ScalingUp, Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + + // record stable revision --> v1 + stableRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + fetchedDeployment := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, fetchedDeployment)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*fetchedDeployment.Spec.Replicas), true) + expectedUpdatedReplicas = integer.IntMin(expectedUpdatedReplicas, int(*fetchedDeployment.Spec.Replicas)) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + 
By("\tScaling up from 5 to 10....") + deployCopy := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, deployCopy)).NotTo(HaveOccurred()) + deployCopy.Spec.Replicas = pointer.Int32Ptr(10) + UpdateDeployment(deployCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() int32 { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas + }, 10*time.Minute, time.Second).Should(Equal(*deployment.Spec.Replicas)) + }) + + It("V1->V2: ScalingDown, Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + + // record stable revision --> v1 + stableRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(10) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + fetchedDeployment := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, fetchedDeployment)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*fetchedDeployment.Spec.Replicas), true) + expectedUpdatedReplicas = integer.IntMin(expectedUpdatedReplicas, int(*fetchedDeployment.Spec.Replicas)) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + By("\tScaling down from 10 to 2....") + deployCopy := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, deployCopy)).NotTo(HaveOccurred()) + deployCopy.Spec.Replicas = pointer.Int32Ptr(2) + UpdateDeployment(deployCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated 
when release completed...") + Eventually(func() bool { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + }) + + It("V1->V2: ScalingUp, Number, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_number_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + + // record stable revision --> v1 + stableRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + batch := &release.Spec.ReleasePlan.Batches[i] + fetchedDeployment := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, fetchedDeployment)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*fetchedDeployment.Spec.Replicas), true) + expectedUpdatedReplicas = integer.IntMin(expectedUpdatedReplicas, int(*fetchedDeployment.Spec.Replicas)) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + By("\tScaling up from 5 to 10....") + deployCopy := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, deployCopy)).NotTo(HaveOccurred()) + deployCopy.Spec.Replicas = pointer.Int32Ptr(10) + UpdateDeployment(deployCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() bool { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + }) + + It("V1->V2: ScalingDown, Number, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_number_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + 
Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + + // record stable revision --> v1 + stableRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(5) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV2) + UpdateDeployment(deployment) + + // record canary revision --> v2 + canaryRevision := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + Expect(canaryRevision).ShouldNot(Equal(stableRevision)) + + By("Checking Deployment updated replicas...") + for i := range release.Spec.ReleasePlan.Batches { + By(fmt.Sprintf("\tWaiting for batch[%v] completed...", i)) + batch := &release.Spec.ReleasePlan.Batches[i] + fetchedDeployment := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, fetchedDeployment)).NotTo(HaveOccurred()) + expectedUpdatedReplicas, _ := intstr.GetScaledValueFromIntOrPercent(&batch.CanaryReplicas, int(*fetchedDeployment.Spec.Replicas), true) + expectedUpdatedReplicas = integer.IntMin(expectedUpdatedReplicas, int(*fetchedDeployment.Spec.Replicas)) + Eventually(func() int32 { + clone := GetCanaryDeployment(deployment) + return clone.Status.UpdatedReplicas + }, 5*time.Minute, time.Second).Should(BeNumerically(">=", int32(expectedUpdatedReplicas))) + if i == 1 { + By("\tScaling down from 10 to 2....") + deployCopy := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, deployCopy)).NotTo(HaveOccurred()) + deployCopy.Spec.Replicas = pointer.Int32Ptr(2) + UpdateDeployment(deployCopy) + } + } + + By("Checking BatchRelease status...") + Eventually(func() rolloutsv1alpha1.RolloutPhase { + clone := &rolloutsv1alpha1.BatchRelease{} + Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.Phase + }, 10*time.Minute, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCompleted)) + + By("Checking all pod were updated when release completed...") + Eventually(func() bool { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.UpdatedReplicas == *clone.Spec.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + }) + + It("Rollback V1->V2->V1: Percentage, 100%, Succeeded", func() { + By("Creating BatchRelease...") + release := &rolloutsv1alpha1.BatchRelease{} + Expect(ReadYamlToObject("./test_data/batchrelease/deployment_percentage_100.yaml", release)).ToNot(HaveOccurred()) + CreateObject(release) + + By("Creating workload and waiting for all pods ready...") + deployment := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/workload/deployment.yaml", deployment)).ToNot(HaveOccurred()) + deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1) + deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy = v1.PullIfNotPresent + CreateObject(deployment) + WaitDeploymentAllPodsReady(deployment) + + // record stable revision --> v1 + stableRevisionV1 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount) + + deployment.Spec.Paused = true + deployment.Spec.Replicas = pointer.Int32Ptr(10) + deployment.Spec.Template.Spec.Containers[0].Image = 
images.GetE2EImage(images.FailedImage)
+			UpdateDeployment(deployment)
+
+			// record canary revision --> v2
+			canaryRevisionV2 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount)
+			Expect(canaryRevisionV2).ShouldNot(Equal(stableRevisionV1))
+
+			By("Waiting a minute and checking failed revision...")
+			time.Sleep(time.Minute)
+			for i := 0; i < 30; i++ {
+				fetchedRelease := &rolloutsv1alpha1.BatchRelease{}
+				Expect(GetObject(release.Namespace, release.Name, fetchedRelease)).NotTo(HaveOccurred())
+				Expect(fetchedRelease.Status.CanaryStatus.CurrentBatch).Should(Equal(int32(1)))
+				time.Sleep(time.Second)
+			}
+
+			By("Updating deployment to V1...")
+			deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1)
+			UpdateDeployment(deployment)
+			// record rollback revision --> v1
+			canaryRevisionV3 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount)
+			Expect(canaryRevisionV3).Should(Equal(stableRevisionV1))
+
+			By("Checking all pods were updated when release completed...")
+			Eventually(func() int32 {
+				clone := &apps.Deployment{}
+				Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred())
+				return clone.Status.UpdatedReplicas
+			}, 100*time.Second, time.Second).Should(Equal(*deployment.Spec.Replicas))
+
+			By("Checking BatchRelease completed status phase...")
+			Eventually(func() rolloutsv1alpha1.RolloutPhase {
+				clone := &rolloutsv1alpha1.BatchRelease{}
+				Expect(GetObject(release.Namespace, release.Name, clone)).NotTo(HaveOccurred())
+				return clone.Status.Phase
+			}, 100*time.Second, time.Second).Should(Equal(rolloutsv1alpha1.RolloutPhaseCancelled))
+		})
+
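+		// The spec below covers a second recovery path for a failed canary
+		// release: instead of only rolling the image back, the BatchRelease is
+		// deleted mid-release and the Deployment is then reverted to the stable
+		// revision by hand. It asserts only that the BatchRelease object
+		// disappears and that the rolled-back Deployment still converges to
+		// fully updated replicas.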
+				return errors.IsNotFound(err)
+			}, time.Minute, time.Second).Should(BeTrue())
+
+			By("Updating deployment to V1...")
+			deployment.Spec.Template.Spec.Containers[0].Image = images.GetE2EImage(images.BusyBoxV1)
+			UpdateDeployment(deployment)
+			// record rollback revision --> v3, expected to equal the stable revision v1
+			canaryRevisionV3 := workloads.ComputeHash(&deployment.Spec.Template, deployment.Status.CollisionCount)
+			Expect(canaryRevisionV3).Should(Equal(stableRevisionV1))
+
+			By("Checking all pods were updated when release completed...")
+			Eventually(func() int32 {
+				clone := &apps.Deployment{}
+				Expect(GetObject(deployment.Namespace, deployment.Name, clone)).NotTo(HaveOccurred())
+				return clone.Status.UpdatedReplicas
+			}, 100*time.Second, time.Second).Should(Equal(*deployment.Spec.Replicas))
+		})
+	})
+})
diff --git a/test/e2e/test_data/workload/cloneset.yaml b/test/e2e/test_data/workload/cloneset.yaml
new file mode 100644
index 0000000..db2b664
--- /dev/null
+++ b/test/e2e/test_data/workload/cloneset.yaml
@@ -0,0 +1,25 @@
+apiVersion: apps.kruise.io/v1alpha1
+kind: CloneSet
+metadata:
+  labels:
+    app: busybox
+  name: sample
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: busybox
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - name: busybox
+        image: busybox:1.32
+        imagePullPolicy: IfNotPresent
+        command: ["/bin/sh", "-c", "sleep 10000000"]
+        resources:
+          limits:
+            memory: "10Mi"
+            cpu: "50m"
diff --git a/test/e2e/test_data/workload/deployment.yaml b/test/e2e/test_data/workload/deployment.yaml
new file mode 100644
index 0000000..13787d9
--- /dev/null
+++ b/test/e2e/test_data/workload/deployment.yaml
@@ -0,0 +1,25 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: sample
+  labels:
+    app: busybox
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: busybox
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - name: busybox
+        image: busybox:1.32
+        imagePullPolicy: IfNotPresent
+        command: ["/bin/sh", "-c", "sleep 10000"]
+        resources:
+          limits:
+            memory: "10Mi"
+            cpu: "50m"
diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore
index 35d9351..299a669 100644
--- a/vendor/github.com/nxadm/tail/.gitignore
+++ b/vendor/github.com/nxadm/tail/.gitignore
@@ -1,3 +1,7 @@
 .idea/
+<<<<<<< HEAD
 .test/
-examples/_*
\ No newline at end of file
+examples/_*
+=======
+.test/
+>>>>>>> 33cbc1d (add batchrelease controller)
diff --git a/vendor/github.com/nxadm/tail/.travis.yml b/vendor/github.com/nxadm/tail/.travis.yml
new file mode 100644
index 0000000..95dd3bd
--- /dev/null
+++ b/vendor/github.com/nxadm/tail/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+script:
+  - go test -race -v ./...
+
+go:
+  - "1.9"
+  - "1.10"
+  - "1.11"
+  - "1.12"
+  - "1.13"
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md
index 224e54b..51d1034 100644
--- a/vendor/github.com/nxadm/tail/CHANGES.md
+++ b/vendor/github.com/nxadm/tail/CHANGES.md
@@ -1,3 +1,4 @@
+<<<<<<< HEAD
 # Version v1.4.7-v1.4.8
 * Documentation updates.
 * Small linter cleanups.
@@ -9,6 +10,9 @@
 * Add example directories with example and tests for issues.
 
 # Version v1.4.4-v1.4.5
+=======
+# Version v1.4.4
+>>>>>>> 33cbc1d (add batchrelease controller)
 
 * Fix of checksum problem because of forced tag. No changes to the code.
 
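Note for reviewers: the e2e specs above lean on suite-level helpers (`GetObject`, `CreateObject`, `UpdateDeployment`, `DeleteObject`, `WaitDeploymentAllPodsReady`) defined elsewhere in the e2e suite. As a rough sketch of the contract the call sites assume, `GetObject` can be read as a thin wrapper over the controller-runtime client; the `k8sClient` variable and the implementation body here are hypothetical illustrations, not part of this patch:

```go
package e2e

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// k8sClient is assumed to be initialized in BeforeSuite
// (e.g. from envtest or a kubeconfig pointing at a test cluster).
var k8sClient client.Client

// GetObject reads the object identified by namespace/name into obj.
// Returning the raw error lets callers assert errors.IsNotFound,
// as the BatchRelease deletion spec above does.
func GetObject(namespace, name string, obj client.Object) error {
	return k8sClient.Get(context.TODO(), types.NamespacedName{
		Namespace: namespace,
		Name:      name,
	}, obj)
}
```

Keeping the raw error (instead of asserting inside the helper) is what allows the same helper to serve both the happy-path polls and the not-found check after deletion.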
diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md index f47939c..897b947 100644 --- a/vendor/github.com/nxadm/tail/README.md +++ b/vendor/github.com/nxadm/tail/README.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD ![ci](https://github.com/nxadm/tail/workflows/ci/badge.svg)[![Go Reference](https://pkg.go.dev/badge/github.com/nxadm/tail.svg)](https://pkg.go.dev/github.com/nxadm/tail) # tail functionality in Go @@ -14,22 +15,50 @@ A simple example: // Create a tail t, err := tail.TailFile( "/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true}) +======= +[![Build Status](https://travis-ci.org/nxadm/tail.svg?branch=master)](https://travis-ci.org/nxadm/tail) + +This is repo is forked from the dormant upstream repo at +[hpcloud](https://github.com/hpcloud/tail). This fork adds support for go +modules, updates the dependencies, adds features and fixes bugs. Go 1.9 is +the oldest compiler release supported. + +# Go package for tail-ing files + +A Go package striving to emulate the features of the BSD `tail` program. + +```Go +t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) +>>>>>>> 33cbc1d (add batchrelease controller) if err != nil { panic(err) } +<<<<<<< HEAD // Print the text of each received line +======= +>>>>>>> 33cbc1d (add batchrelease controller) for line := range t.Lines { fmt.Println(line.Text) } ``` +<<<<<<< HEAD See [API documentation](https://pkg.go.dev/github.com/nxadm/tail). +======= +See [API documentation](http://godoc.org/github.com/nxadm/tail). + +## Log rotation + +Tail comes with full support for truncation/move detection as it is +designed to work with log rotation tools. +>>>>>>> 33cbc1d (add batchrelease controller) ## Installing go get github.com/nxadm/tail/... +<<<<<<< HEAD ## History This project is an active, drop-in replacement for the @@ -41,4 +70,9 @@ nxadm/tail continues the development by keeping up to date with the Go toolchain and fixing bugs. ## Examples -Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples). \ No newline at end of file +Examples, e.g. used to debug an issue, are kept in the [examples directory](/examples). +======= +## Windows support + +This package [needs assistance](https://github.com/nxadm/tail/labels/Windows) for full Windows support. +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/nxadm/tail/appveyor.yml b/vendor/github.com/nxadm/tail/appveyor.yml new file mode 100644 index 0000000..e149bc6 --- /dev/null +++ b/vendor/github.com/nxadm/tail/appveyor.yml @@ -0,0 +1,11 @@ +version: 0.{build} +skip_tags: true +cache: C:\Users\appveyor\AppData\Local\NuGet\Cache +build_script: +- SET GOPATH=c:\workspace +- go test -v -race ./... 
+test: off +clone_folder: c:\workspace\src\github.com\nxadm\tail +branches: + only: + - master diff --git a/vendor/github.com/nxadm/tail/go.mod b/vendor/github.com/nxadm/tail/go.mod index 5de9a60..825b265 100644 --- a/vendor/github.com/nxadm/tail/go.mod +++ b/vendor/github.com/nxadm/tail/go.mod @@ -3,6 +3,11 @@ module github.com/nxadm/tail go 1.13 require ( +<<<<<<< HEAD github.com/fsnotify/fsnotify v1.4.9 +======= + github.com/fsnotify/fsnotify v1.4.7 + golang.org/x/sys v0.0.0-20190904154756-749cb33beabd // indirect +>>>>>>> 33cbc1d (add batchrelease controller) gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 ) diff --git a/vendor/github.com/nxadm/tail/go.sum b/vendor/github.com/nxadm/tail/go.sum index 3485dae..33b8fb4 100644 --- a/vendor/github.com/nxadm/tail/go.sum +++ b/vendor/github.com/nxadm/tail/go.sum @@ -1,6 +1,13 @@ +<<<<<<< HEAD github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +======= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +>>>>>>> 33cbc1d (add batchrelease controller) gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= diff --git a/vendor/github.com/nxadm/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go index 37ea441..a623cd0 100644 --- a/vendor/github.com/nxadm/tail/tail.go +++ b/vendor/github.com/nxadm/tail/tail.go @@ -1,3 +1,4 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. @@ -7,6 +8,11 @@ //it is designed to work with log rotation tools. The library works on all //operating systems supported by Go, including POSIX systems like Linux and //*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported. +======= +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +>>>>>>> 33cbc1d (add batchrelease controller) package tail import ( @@ -28,11 +34,15 @@ import ( ) var ( +<<<<<<< HEAD // ErrStop is returned when the tail of a file has been marked to be stopped. +======= +>>>>>>> 33cbc1d (add batchrelease controller) ErrStop = errors.New("tail should now stop") ) type Line struct { +<<<<<<< HEAD Text string // The contents of the file Num int // The line number SeekInfo SeekInfo // SeekInfo @@ -45,14 +55,31 @@ type Line struct { // release. // // NewLine returns a * pointer to a Line struct. +======= + Text string + Num int + SeekInfo SeekInfo + Time time.Time + Err error // Error from tail +} + +// NewLine returns a Line with present time. 
+>>>>>>> 33cbc1d (add batchrelease controller) func NewLine(text string, lineNum int) *Line { return &Line{text, lineNum, SeekInfo{}, time.Now(), nil} } +<<<<<<< HEAD // SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek type SeekInfo struct { Offset int64 Whence int +======= +// SeekInfo represents arguments to `io.Seek` +type SeekInfo struct { + Offset int64 + Whence int // io.Seek* +>>>>>>> 33cbc1d (add batchrelease controller) } type logger interface { @@ -70,28 +97,48 @@ type logger interface { // Config is used to specify how a file must be tailed. type Config struct { // File-specifc +<<<<<<< HEAD Location *SeekInfo // Tail from this location. If nil, start at the beginning of the file ReOpen bool // Reopen recreated files (tail -F) MustExist bool // Fail early if the file does not exist Poll bool // Poll for file changes instead of using the default inotify Pipe bool // The file is a named pipe (mkfifo) +======= + Location *SeekInfo // Seek to this location before tailing + ReOpen bool // Reopen recreated files (tail -F) + MustExist bool // Fail early if the file does not exist + Poll bool // Poll for file changes instead of using inotify + Pipe bool // Is a named pipe (mkfifo) + RateLimiter *ratelimiter.LeakyBucket +>>>>>>> 33cbc1d (add batchrelease controller) // Generic IO Follow bool // Continue looking for new lines (tail -f) MaxLineSize int // If non-zero, split longer lines into multiple lines +<<<<<<< HEAD // Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function) RateLimiter *ratelimiter.LeakyBucket // Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger. // To disable logging, set it to tail.DiscardingLogger +======= + // Logger, when nil, is set to tail.DefaultLogger + // To disable logging: set field to tail.DiscardingLogger +>>>>>>> 33cbc1d (add batchrelease controller) Logger logger } type Tail struct { +<<<<<<< HEAD Filename string // The filename Lines chan *Line // A consumable channel of *Line Config // Tail.Configuration +======= + Filename string + Lines chan *Line + Config +>>>>>>> 33cbc1d (add batchrelease controller) file *os.File reader *bufio.Reader @@ -106,17 +153,28 @@ type Tail struct { } var ( +<<<<<<< HEAD // DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil +======= + // DefaultLogger is used when Config.Logger == nil +>>>>>>> 33cbc1d (add batchrelease controller) DefaultLogger = log.New(os.Stderr, "", log.LstdFlags) // DiscardingLogger can be used to disable logging output DiscardingLogger = log.New(ioutil.Discard, "", 0) ) +<<<<<<< HEAD // TailFile begins tailing the file. And returns a pointer to a Tail struct // and an error. An output stream is made available via the Tail.Lines // channel (e.g. to be looped and printed). To handle errors during tailing, // after finishing reading from the Lines channel, invoke the `Wait` or `Err` // method on the returned *Tail. +======= +// TailFile begins tailing the file. Output stream is made available +// via the `Tail.Lines` channel. To handle errors during tailing, +// invoke the `Wait` or `Err` method after finishing reading from the +// `Lines` channel. 
+>>>>>>> 33cbc1d (add batchrelease controller) func TailFile(filename string, config Config) (*Tail, error) { if config.ReOpen && !config.Follow { util.Fatal("cannot set ReOpen without Follow.") @@ -152,9 +210,16 @@ func TailFile(filename string, config Config) (*Tail, error) { return t, nil } +<<<<<<< HEAD // Tell returns the file's current position, like stdio's ftell() and an error. // Beware that this value may not be completely accurate because one line from // the chan(tail.Lines) may have been read already. +======= +// Tell returns the file's current position, like stdio's ftell(). +// But this value is not very accurate. +// One line from the chan(tail.Lines) may have been read, +// so it may have lost one line. +>>>>>>> 33cbc1d (add batchrelease controller) func (tail *Tail) Tell() (offset int64, err error) { if tail.file == nil { return @@ -180,8 +245,12 @@ func (tail *Tail) Stop() error { return tail.Wait() } +<<<<<<< HEAD // StopAtEOF stops tailing as soon as the end of the file is reached. The function // returns an error, +======= +// StopAtEOF stops tailing as soon as the end of the file is reached. +>>>>>>> 33cbc1d (add batchrelease controller) func (tail *Tail) StopAtEOF() error { tail.Kill(errStopAtEOF) return tail.Wait() @@ -449,7 +518,10 @@ func (tail *Tail) sendLine(line string) bool { // Cleanup removes inotify watches added by the tail package. This function is // meant to be invoked from a process's exit handler. Linux kernel may not // automatically remove inotify watches after the process exits. +<<<<<<< HEAD // If you plan to re-read a file, don't call Cleanup in between. +======= +>>>>>>> 33cbc1d (add batchrelease controller) func (tail *Tail) Cleanup() { watch.Cleanup(tail.Filename) } diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go index 23e071d..57c2bfd 100644 --- a/vendor/github.com/nxadm/tail/tail_posix.go +++ b/vendor/github.com/nxadm/tail/tail_posix.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // +build !windows package tail @@ -7,11 +10,14 @@ import ( "os" ) +<<<<<<< HEAD // Deprecated: this function is only useful internally and, as such, // it will be removed from the API in a future major release. // // OpenFile proxies a os.Open call for a file so it can be correctly tailed // on POSIX and non-POSIX OSes like MS Windows. +======= +>>>>>>> 33cbc1d (add batchrelease controller) func OpenFile(name string) (file *os.File, err error) { return os.Open(name) } diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go index da0d2f3..ab76fb3 100644 --- a/vendor/github.com/nxadm/tail/tail_windows.go +++ b/vendor/github.com/nxadm/tail/tail_windows.go @@ -1,9 +1,13 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // +build windows package tail import ( +<<<<<<< HEAD "os" "github.com/nxadm/tail/winfile" @@ -14,6 +18,12 @@ import ( // // OpenFile proxies a os.Open call for a file so it can be correctly tailed // on POSIX and non-POSIX OSes like MS Windows. 
+======= + "github.com/nxadm/tail/winfile" + "os" +) + +>>>>>>> 33cbc1d (add batchrelease controller) func OpenFile(name string) (file *os.File, err error) { return winfile.OpenFile(name, os.O_RDONLY, 0) } diff --git a/vendor/github.com/nxadm/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go index b64caa2..f3e20df 100644 --- a/vendor/github.com/nxadm/tail/util/util.go +++ b/vendor/github.com/nxadm/tail/util/util.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. diff --git a/vendor/github.com/nxadm/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go index 5b65f42..f591b9a 100644 --- a/vendor/github.com/nxadm/tail/watch/filechanges.go +++ b/vendor/github.com/nxadm/tail/watch/filechanges.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) package watch type FileChanges struct { diff --git a/vendor/github.com/nxadm/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go index cbd11ad..a31c820 100644 --- a/vendor/github.com/nxadm/tail/watch/inotify.go +++ b/vendor/github.com/nxadm/tail/watch/inotify.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. diff --git a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go index cb9572a..0470e38 100644 --- a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go +++ b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. diff --git a/vendor/github.com/nxadm/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go index 74e10aa..8099ce9 100644 --- a/vendor/github.com/nxadm/tail/watch/polling.go +++ b/vendor/github.com/nxadm/tail/watch/polling.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. diff --git a/vendor/github.com/nxadm/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go index 2b51128..febd7ef 100644 --- a/vendor/github.com/nxadm/tail/watch/watch.go +++ b/vendor/github.com/nxadm/tail/watch/watch.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
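The nxadm/tail hunks above carry both sides of an unresolved merge, but the package's basic API is the same on either side. A minimal, self-contained usage example in the spirit of the vendored README (the log path is illustrative):

```go
package main

import (
	"fmt"

	"github.com/nxadm/tail"
)

func main() {
	// Follow the file like `tail -F`: keep reading appended lines and
	// reopen the file if it is rotated or recreated.
	t, err := tail.TailFile("/var/log/nginx.log",
		tail.Config{Follow: true, ReOpen: true})
	if err != nil {
		panic(err)
	}
	// Print the text of each received line as it arrives.
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}
```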
diff --git a/vendor/github.com/nxadm/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go index 4562ac7..26f54f7 100644 --- a/vendor/github.com/nxadm/tail/winfile/winfile.go +++ b/vendor/github.com/nxadm/tail/winfile/winfile.go @@ -1,4 +1,7 @@ +<<<<<<< HEAD // Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +======= +>>>>>>> 33cbc1d (add batchrelease controller) // +build windows package winfile diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml index ea0966d..a20b48e 100644 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -1,8 +1,14 @@ language: go go: +<<<<<<< HEAD - tip - 1.16.x - 1.15.x +======= + - 1.13.x + - 1.14.x + - tip +>>>>>>> 33cbc1d (add batchrelease controller) cache: directories: @@ -16,9 +22,19 @@ install: - GO111MODULE="off" go get golang.org/x/tools/cmd/cover - GO111MODULE="off" go get github.com/onsi/gomega - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo +<<<<<<< HEAD - export PATH=$GOPATH/bin:$PATH script: - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum - go vet - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace +======= + - export PATH=$PATH:$HOME/gopath/bin + +script: + - GO111MODULE="on" go mod tidy + - diff -u <(echo -n) <(git diff go.mod) + - diff -u <(echo -n) <(git diff go.sum) + - $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index a26bc53..d9f2f32 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD ## 1.16.5 Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC. @@ -65,6 +66,8 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme - correct handling windows backslash in import path (#721) [97f3d51] - Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d] +======= +>>>>>>> 33cbc1d (add batchrelease controller) ## 1.14.1 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md index a25ca5e..31a4b6a 100644 --- a/vendor/github.com/onsi/ginkgo/README.md +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -1,5 +1,6 @@ ![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png) +<<<<<<< HEAD [![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! @@ -14,6 +15,14 @@ As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MI Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide. +======= +[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo) + +Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! 
+ +If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). + +>>>>>>> 33cbc1d (add batchrelease controller) ## TLDR Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests. It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library. @@ -69,8 +78,11 @@ Describe("the strings package", func() { - [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. +<<<<<<< HEAD - [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`. +======= +>>>>>>> 33cbc1d (add batchrelease controller) - Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details. - A modular architecture that lets you easily: diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/RELEASING.md index db3d234..581d633 100644 --- a/vendor/github.com/onsi/ginkgo/RELEASING.md +++ b/vendor/github.com/onsi/ginkgo/RELEASING.md @@ -8,10 +8,17 @@ A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: - Fixes (fix version) - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) 1. Update `VERSION` in `config/config.go` +<<<<<<< HEAD 1. Commit, push, and release: ``` git commit -m "vM.m.p" git push gh release create "vM.m.p" git fetch --tags origin master - ``` \ No newline at end of file + ``` +======= +1. Create a commit with the version number as the commit message (e.g. `v1.3.0`) +1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`) +1. Push the commit and tag to GitHub +1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. 
+>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index 3130c77..c1a50e2 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,14 +20,23 @@ import ( "fmt" ) +<<<<<<< HEAD const VERSION = "1.16.5" +======= +const VERSION = "1.14.1" +>>>>>>> 33cbc1d (add batchrelease controller) type GinkgoConfigType struct { RandomSeed int64 RandomizeAllSpecs bool RegexScansFilePath bool +<<<<<<< HEAD FocusStrings []string SkipStrings []string +======= + FocusString string + SkipString string +>>>>>>> 33cbc1d (add batchrelease controller) SkipMeasurements bool FailOnPending bool FailFast bool @@ -65,11 +74,14 @@ func processPrefix(prefix string) string { return prefix } +<<<<<<< HEAD type flagFunc func(string) func (f flagFunc) String() string { return "" } func (f flagFunc) Set(s string) error { f(s); return nil } +======= +>>>>>>> 33cbc1d (add batchrelease controller) func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { prefix = processPrefix(prefix) flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") @@ -80,8 +92,13 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") +<<<<<<< HEAD flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.") flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.") +======= + flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") + flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") +>>>>>>> 33cbc1d (add batchrelease controller) flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") @@ -138,12 +155,21 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%sdryRun", prefix)) } +<<<<<<< HEAD for _, s := range ginkgo.FocusStrings { result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s)) } for _, s := range ginkgo.SkipStrings { result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s)) +======= + if ginkgo.FocusString != "" { + result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) + } + + if ginkgo.SkipString != "" { + result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) +>>>>>>> 33cbc1d (add batchrelease controller) } if ginkgo.FlakeAttempts > 1 { @@ -216,6 +242,7 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor return result } +<<<<<<< HEAD // flagFocus implements the -focus flag. 
func flagFocus(arg string) { @@ -230,3 +257,5 @@ func flagSkip(arg string) { GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg) } } +======= +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index ccd7685..ccdaa61 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -17,7 +17,10 @@ import ( "io" "net/http" "os" +<<<<<<< HEAD "reflect" +======= +>>>>>>> 33cbc1d (add batchrelease controller) "strings" "time" @@ -33,8 +36,11 @@ import ( "github.com/onsi/ginkgo/types" ) +<<<<<<< HEAD var deprecationTracker = types.NewDeprecationTracker() +======= +>>>>>>> 33cbc1d (add batchrelease controller) const GINKGO_VERSION = config.VERSION const GINKGO_PANIC = ` Your test failed. @@ -73,6 +79,7 @@ func GinkgoRandomSeed() int64 { return config.GinkgoConfig.RandomSeed } +<<<<<<< HEAD //GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead func GinkgoParallelNode() int { deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1)) @@ -82,6 +89,11 @@ func GinkgoParallelNode() int { //GinkgoParallelProcess returns the parallel process number for the current ginkgo process //The process number is 1-indexed func GinkgoParallelProcess() int { +======= +//GinkgoParallelNode returns the parallel node number for the current ginkgo process +//The node number is 1-indexed +func GinkgoParallelNode() int { +>>>>>>> 33cbc1d (add batchrelease controller) return config.GinkgoConfig.ParallelNode } @@ -102,6 +114,7 @@ func GinkgoT(optionalOffset ...int) GinkgoTInterface { if len(optionalOffset) > 0 { offset = optionalOffset[0] } +<<<<<<< HEAD failedFunc := func() bool { return CurrentGinkgoTestDescription().Failed } @@ -109,11 +122,15 @@ func GinkgoT(optionalOffset ...int) GinkgoTInterface { return CurrentGinkgoTestDescription().FullTestText } return testingtproxy.New(GinkgoWriter, Fail, Skip, failedFunc, nameFunc, offset) +======= + return testingtproxy.New(GinkgoWriter, Fail, offset) +>>>>>>> 33cbc1d (add batchrelease controller) } //The interface returned by GinkgoT(). This covers most of the methods //in the testing package's T. type GinkgoTInterface interface { +<<<<<<< HEAD Cleanup(func()) Setenv(key, value string) Error(args ...interface{}) @@ -133,6 +150,22 @@ type GinkgoTInterface interface { Skipf(format string, args ...interface{}) Skipped() bool TempDir() string +======= + Fail() + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Failed() bool + Parallel() + Skip(args ...interface{}) + Skipf(format string, args ...interface{}) + SkipNow() + Skipped() bool +>>>>>>> 33cbc1d (add batchrelease controller) } //Custom Ginkgo test reporters must implement the Reporter interface. 
@@ -215,27 +248,41 @@ func RunSpecs(t GinkgoTestingT, description string) bool { if config.DefaultReporterConfig.ReportFile != "" { reportFile := config.DefaultReporterConfig.ReportFile specReporters[0] = reporters.NewJUnitReporter(reportFile) +<<<<<<< HEAD specReporters = append(specReporters, buildDefaultReporter()) } return runSpecsWithCustomReporters(t, description, specReporters) +======= + return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters) + } + return RunSpecsWithCustomReporters(t, description, specReporters) +>>>>>>> 33cbc1d (add batchrelease controller) } //To run your tests with Ginkgo's default reporter and your custom reporter(s), replace //RunSpecs() with this method. func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { +<<<<<<< HEAD deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) specReporters = append(specReporters, buildDefaultReporter()) return runSpecsWithCustomReporters(t, description, specReporters) +======= + specReporters = append(specReporters, buildDefaultReporter()) + return RunSpecsWithCustomReporters(t, description, specReporters) +>>>>>>> 33cbc1d (add batchrelease controller) } //To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace //RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { +<<<<<<< HEAD deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter()) return runSpecsWithCustomReporters(t, description, specReporters) } func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { +======= +>>>>>>> 33cbc1d (add batchrelease controller) writer := GinkgoWriter.(*writer.Writer) writer.SetStream(config.DefaultReporterConfig.Verbose) reporters := make([]reporters.Reporter, len(specReporters)) @@ -243,11 +290,14 @@ func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor reporters[i] = reporter } passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig) +<<<<<<< HEAD if deprecationTracker.DidTrackDeprecations() { fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport()) } +======= +>>>>>>> 33cbc1d (add batchrelease controller) if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { fmt.Println("PASS | FOCUSED") os.Exit(types.GINKGO_FOCUS_EXIT_CODE) @@ -401,14 +451,20 @@ func XWhen(text string, body func()) bool { //Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a //function that accepts a Done channel. When you do this, you can also provide an optional timeout. 
func It(text string, body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) return true } //You can focus individual Its using FIt func FIt(text string, body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -429,14 +485,20 @@ func XIt(text string, _ ...interface{}) bool { //which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks //which apply to It blocks. func Specify(text string, body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) return true } //You can focus individual Specifys using FSpecify func FSpecify(text string, body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -480,28 +542,40 @@ func By(text string, callbacks ...func()) { //The body function must have the signature: // func(b Benchmarker) func Measure(text string, body interface{}, samples int) bool { +<<<<<<< HEAD deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) return true } //You can focus individual Measures using FMeasure func FMeasure(text string, body interface{}, samples int) bool { +<<<<<<< HEAD deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) return true } //You can mark Measurements as pending using PMeasure func PMeasure(text string, _ ...interface{}) bool { +<<<<<<< HEAD deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) return true } //You can mark Measurements as pending using XMeasure func XMeasure(text string, _ ...interface{}) bool { +<<<<<<< HEAD deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) return true } @@ -513,7 +587,10 @@ func XMeasure(text string, _ ...interface{}) bool { // //You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. 
func BeforeSuite(body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -527,7 +604,10 @@ func BeforeSuite(body interface{}, timeout ...float64) bool { // //You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. func AfterSuite(body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -615,7 +695,10 @@ func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, tim //Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func BeforeEach(body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -626,7 +709,10 @@ func BeforeEach(body interface{}, timeout ...float64) bool { //Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func JustBeforeEach(body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -637,7 +723,10 @@ func JustBeforeEach(body interface{}, timeout ...float64) bool { //Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func JustAfterEach(body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -648,11 +737,15 @@ func JustAfterEach(body interface{}, timeout ...float64) bool { //Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func AfterEach(body interface{}, timeout ...float64) bool { +<<<<<<< HEAD validateBodyFunc(body, codelocation.New(1)) +======= +>>>>>>> 33cbc1d (add batchrelease controller) global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } +<<<<<<< HEAD func validateBodyFunc(body interface{}, cl types.CodeLocation) { t := reflect.TypeOf(body) if t.Kind() != reflect.Func { @@ -672,6 +765,8 @@ func validateBodyFunc(body interface{}, cl types.CodeLocation) { } } +======= +>>>>>>> 33cbc1d (add batchrelease controller) func parseTimeout(timeout ...float64) time.Duration { if len(timeout) == 0 { return global.DefaultTimeout diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod index 1711443..27fbcae 100644 --- a/vendor/github.com/onsi/ginkgo/go.mod +++ b/vendor/github.com/onsi/ginkgo/go.mod @@ -1,5 +1,6 @@ module github.com/onsi/ginkgo +<<<<<<< HEAD go 1.16 require ( @@ -11,3 +12,14 @@ require ( ) retract v1.16.3 // git tag accidentally associated with incorrect git commit +======= +require ( + github.com/fsnotify/fsnotify v1.4.9 // indirect + 
github.com/nxadm/tail v1.4.4 + github.com/onsi/gomega v1.10.1 + golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 + golang.org/x/text v0.3.2 // indirect +) + +go 1.13 +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum index 5c5c3c5..e77f63b 100644 --- a/vendor/github.com/onsi/ginkgo/go.sum +++ b/vendor/github.com/onsi/ginkgo/go.sum @@ -1,3 +1,4 @@ +<<<<<<< HEAD github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -6,6 +7,13 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +======= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +>>>>>>> 33cbc1d (add batchrelease controller) github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -18,6 +26,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +<<<<<<< HEAD github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -68,6 +77,41 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +======= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v1.7.1 
h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +>>>>>>> 33cbc1d (add batchrelease controller) google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -77,10 +121,18 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +<<<<<<< HEAD gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +======= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +>>>>>>> 33cbc1d (add batchrelease controller) gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go new file mode 100644 index 0000000..e3d09ea --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go @@ -0,0 +1,11 @@ +// +build darwin + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go new file mode 100644 index 0000000..72d3868 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go @@ -0,0 +1,11 @@ +// +build dragonfly + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go new file mode 100644 index 0000000..497d548 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go @@ -0,0 +1,11 @@ +// +build freebsd + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go new file mode 100644 index 0000000..29add0d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go @@ -0,0 +1,12 @@ +// +build linux +// +build !mips64le + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go new file mode 100644 index 0000000..09bd062 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go @@ -0,0 +1,12 @@ +// +build linux +// +build mips64le + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup3(oldfd, newfd, 0) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go 
b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go new file mode 100644 index 0000000..16ad6ae --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go @@ -0,0 +1,11 @@ +// +build netbsd + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go new file mode 100644 index 0000000..4275f84 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go @@ -0,0 +1,11 @@ +// +build openbsd + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go new file mode 100644 index 0000000..882a38a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go @@ -0,0 +1,11 @@ +// +build solaris + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go index 774967d..b43a2a0 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -8,7 +8,10 @@ import ( "os" "github.com/nxadm/tail" +<<<<<<< HEAD "golang.org/x/sys/unix" +======= +>>>>>>> 33cbc1d (add batchrelease controller) ) func NewOutputInterceptor() OutputInterceptor { @@ -36,10 +39,15 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error { return err } +<<<<<<< HEAD // This might call Dup3 if the dup2 syscall is not available, e.g. 
on // linux/arm64 or linux/riscv64 unix.Dup2(int(interceptor.redirectFile.Fd()), 1) unix.Dup2(int(interceptor.redirectFile.Fd()), 2) +======= + interceptorDupx(int(interceptor.redirectFile.Fd()), 1) + interceptorDupx(int(interceptor.redirectFile.Fd()), 2) +>>>>>>> 33cbc1d (add batchrelease controller) if interceptor.streamTarget != nil { interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go index 0a24139..92218e2 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -4,7 +4,10 @@ import ( "math/rand" "regexp" "sort" +<<<<<<< HEAD "strings" +======= +>>>>>>> 33cbc1d (add batchrelease controller) ) type Specs struct { @@ -47,11 +50,19 @@ func (e *Specs) Shuffle(r *rand.Rand) { e.names = names } +<<<<<<< HEAD func (e *Specs) ApplyFocus(description string, focus, skip []string) { if len(focus)+len(skip) == 0 { e.applyProgrammaticFocus() } else { e.applyRegExpFocusAndSkip(description, focus, skip) +======= +func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { + if focusString == "" && skipString == "" { + e.applyProgrammaticFocus() + } else { + e.applyRegExpFocusAndSkip(description, focusString, skipString) +>>>>>>> 33cbc1d (add batchrelease controller) } } @@ -91,6 +102,7 @@ func (e *Specs) toMatch(description string, i int) []byte { } } +<<<<<<< HEAD func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) { var focusFilter, skipFilter *regexp.Regexp if len(focus) > 0 { @@ -98,6 +110,16 @@ func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string } if len(skip) > 0 { skipFilter = regexp.MustCompile(strings.Join(skip, "|")) +======= +func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) { + var focusFilter *regexp.Regexp + if focusString != "" { + focusFilter = regexp.MustCompile(focusString) + } + var skipFilter *regexp.Regexp + if skipString != "" { + skipFilter = regexp.MustCompile(skipString) +>>>>>>> 33cbc1d (add batchrelease controller) } for i, spec := range e.specs { diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go index b4a83c4..7c83d9e 100644 --- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -97,7 +97,11 @@ func (suite *Suite) generateSpecsIterator(description string, config config.Gink specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) } +<<<<<<< HEAD specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings) +======= + specs.ApplyFocus(description, config.FocusString, config.SkipString) +>>>>>>> 33cbc1d (add batchrelease controller) if config.SkipMeasurements { specs.SkipMeasurements() diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go index 4dcfaf4..bf0d304 100644 --- a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go @@ -6,30 +6,42 @@ import ( ) type failFunc func(message string, callerSkip ...int) +<<<<<<< HEAD type skipFunc func(message string, callerSkip ...int) type failedFunc func() bool type nameFunc func() string func New(writer io.Writer, fail 
failFunc, skip skipFunc, failed failedFunc, name nameFunc, offset int) *ginkgoTestingTProxy { +======= + +func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { +>>>>>>> 33cbc1d (add batchrelease controller) return &ginkgoTestingTProxy{ fail: fail, offset: offset, writer: writer, +<<<<<<< HEAD skip: skip, failed: failed, name: name, +======= +>>>>>>> 33cbc1d (add batchrelease controller) } } type ginkgoTestingTProxy struct { fail failFunc +<<<<<<< HEAD skip skipFunc failed failedFunc name nameFunc +======= +>>>>>>> 33cbc1d (add batchrelease controller) offset int writer io.Writer } +<<<<<<< HEAD func (t *ginkgoTestingTProxy) Cleanup(func()) { // No-op } @@ -39,6 +51,8 @@ func (t *ginkgoTestingTProxy) Setenv(kev, value string) { // No-op until Cleanup is implemented } +======= +>>>>>>> 33cbc1d (add batchrelease controller) func (t *ginkgoTestingTProxy) Error(args ...interface{}) { t.fail(fmt.Sprintln(args...), t.offset) } @@ -55,10 +69,13 @@ func (t *ginkgoTestingTProxy) FailNow() { t.fail("failed", t.offset) } +<<<<<<< HEAD func (t *ginkgoTestingTProxy) Failed() bool { return t.failed() } +======= +>>>>>>> 33cbc1d (add batchrelease controller) func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { t.fail(fmt.Sprintln(args...), t.offset) } @@ -67,10 +84,13 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { t.fail(fmt.Sprintf(format, args...), t.offset) } +<<<<<<< HEAD func (t *ginkgoTestingTProxy) Helper() { // No-op } +======= +>>>>>>> 33cbc1d (add batchrelease controller) func (t *ginkgoTestingTProxy) Log(args ...interface{}) { fmt.Fprintln(t.writer, args...) } @@ -79,6 +99,7 @@ func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { t.Log(fmt.Sprintf(format, args...)) } +<<<<<<< HEAD func (t *ginkgoTestingTProxy) Name() string { return t.name() } @@ -97,13 +118,34 @@ func (t *ginkgoTestingTProxy) SkipNow() { func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { t.skip(fmt.Sprintf(format, args...), t.offset) +======= +func (t *ginkgoTestingTProxy) Failed() bool { + return false +} + +func (t *ginkgoTestingTProxy) Parallel() { +} + +func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { + fmt.Println(args...) 
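	// (Editorial aside, not part of the vendored file: in this 33cbc1d
	// variant, Skip only prints its arguments and SkipNow below has an empty
	// body, so specs are not actually skipped; the HEAD variant instead
	// routes Skip/Skipf through the registered skip handler.)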
+} + +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + t.Skip(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) SkipNow() { +>>>>>>> 33cbc1d (add batchrelease controller) } func (t *ginkgoTestingTProxy) Skipped() bool { return false } +<<<<<<< HEAD func (t *ginkgoTestingTProxy) TempDir() string { // No-op return "" } +======= +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index 01ddca6..126066f 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -33,12 +33,23 @@ type JUnitTestSuite struct { type JUnitTestCase struct { Name string `xml:"name,attr"` ClassName string `xml:"classname,attr"` +<<<<<<< HEAD +======= + PassedMessage *JUnitPassedMessage `xml:"passed,omitempty"` +>>>>>>> 33cbc1d (add batchrelease controller) FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` Skipped *JUnitSkipped `xml:"skipped,omitempty"` Time float64 `xml:"time,attr"` SystemOut string `xml:"system-out,omitempty"` } +<<<<<<< HEAD +======= +type JUnitPassedMessage struct { + Message string `xml:",chardata"` +} + +>>>>>>> 33cbc1d (add batchrelease controller) type JUnitFailureMessage struct { Type string `xml:"type,attr"` Message string `xml:",chardata"` @@ -109,7 +120,13 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { ClassName: reporter.testSuiteName, } if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { +<<<<<<< HEAD testCase.SystemOut = specSummary.CapturedOutput +======= + testCase.PassedMessage = &JUnitPassedMessage{ + Message: specSummary.CapturedOutput, + } +>>>>>>> 33cbc1d (add batchrelease controller) } if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { testCase.FailureMessage = &JUnitFailureMessage{ diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index 6543dc5..8f85f97 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -1,4 +1,5 @@ language: go +<<<<<<< HEAD arch: - amd64 - ppc64le @@ -7,12 +8,29 @@ go: - gotip - 1.16.x - 1.15.x +======= + +go: + - 1.13.x + - 1.14.x + - gotip +>>>>>>> 33cbc1d (add batchrelease controller) env: - GO111MODULE=on +<<<<<<< HEAD install: skip script: - go mod tidy && git diff --exit-code go.mod go.sum - make test +======= +install: + - go get -v ./... + - go build ./... + - go get github.com/onsi/ginkgo + - go install github.com/onsi/ginkgo/ginkgo + +script: make test +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 65c6c1d..2c9b287 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD ## 1.17.0 ### Features @@ -96,6 +97,8 @@ In addition, 1.15.0 cleans up some of Gomega's internals. 
Most users shouldn't ### Fixes - updates golang/x/net to fix vulnerability detected by snyk (#394) [c479356] +======= +>>>>>>> 33cbc1d (add batchrelease controller) ## 1.10.2 ### Fixes diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile index 1c6d107..f658320 100644 --- a/vendor/github.com/onsi/gomega/Makefile +++ b/vendor/github.com/onsi/gomega/Makefile @@ -1,3 +1,4 @@ +<<<<<<< HEAD ###### Help ################################################################### .DEFAULT_GOAL = help @@ -31,3 +32,11 @@ docker_test: ## Run tests in a container via docker-compose version: ## Display the version of Go @@go version +======= +test: + [ -z "`gofmt -s -w -l -e .`" ] + go vet + ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race + +.PHONY: test +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md index d45a8c4..97d1cc7 100644 --- a/vendor/github.com/onsi/gomega/README.md +++ b/vendor/github.com/onsi/gomega/README.md @@ -1,6 +1,10 @@ ![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) +<<<<<<< HEAD [![test](https://github.com/onsi/gomega/actions/workflows/test.yml/badge.svg)](https://github.com/onsi/gomega/actions/workflows/test.yml) +======= +[![Build Status](https://travis-ci.org/onsi/gomega.svg?branch=master)](https://travis-ci.org/onsi/gomega) +>>>>>>> 33cbc1d (add batchrelease controller) Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers). diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 6e78c39..c621c02 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -7,7 +7,10 @@ Gomega's format package pretty-prints objects. It explores input objects recurs package format import ( +<<<<<<< HEAD "context" +======= +>>>>>>> 33cbc1d (add batchrelease controller) "fmt" "reflect" "strconv" @@ -18,10 +21,13 @@ import ( // Use MaxDepth to set the maximum recursion depth when printing deeply nested objects var MaxDepth = uint(10) +<<<<<<< HEAD // MaxLength of the string representation of an object. // If MaxLength is set to 0, the Object will not be truncated. var MaxLength = 4000 +======= +>>>>>>> 33cbc1d (add batchrelease controller) /* By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. @@ -49,7 +55,20 @@ var TruncateThreshold uint = 50 // after the first diff location in a truncated string assertion error message. var CharactersAroundMismatchToInclude uint = 5 +<<<<<<< HEAD var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() +======= +// Ctx interface defined here to keep backwards compatibility with go < 1.7 +// It matches the context.Context interface +type Ctx interface { + Deadline() (deadline time.Time, ok bool) + Done() <-chan struct{} + Err() error + Value(key interface{}) interface{} +} + +var contextType = reflect.TypeOf((*Ctx)(nil)).Elem() +>>>>>>> 33cbc1d (add batchrelease controller) var timeType = reflect.TypeOf(time.Time{}) //The default indentation string emitted by the format package @@ -57,6 +76,7 @@ var Indent = " " var longFormThreshold = 20 +<<<<<<< HEAD // GomegaStringer allows for custom formating of objects for gomega. 
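// For orientation (an illustrative sketch, not part of the vendored file):
// both sides of the format.go conflict above detect context.Context values
// via reflection so that Object() can avoid dumping a context's internals;
// HEAD uses the real interface type, while 33cbc1d re-declares it as Ctx for
// pre-Go-1.7 compatibility. A minimal standalone version of that detection,
// with the isContext helper being my own name for illustration:
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"reflect"
//	)
//
//	var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
//
//	func isContext(v interface{}) bool {
//		t := reflect.TypeOf(v)
//		return t != nil && t.Implements(contextType)
//	}
//
//	func main() {
//		fmt.Println(isContext(context.Background())) // true
//		fmt.Println(isContext(42))                   // false
//	}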
type GomegaStringer interface { // GomegaString will be used to custom format an object. @@ -65,6 +85,8 @@ type GomegaStringer interface { GomegaString() string } +======= +>>>>>>> 33cbc1d (add batchrelease controller) /* Generates a formatted matcher success/failure message of the form: @@ -109,6 +131,7 @@ func MessageWithDiff(actual, message, expected string) string { tabLength := 4 spaceFromMessageToActual := tabLength + len(": ") - len(message) +<<<<<<< HEAD paddingCount := spaceFromMessageToActual + spacesBeforeFormattedMismatch if paddingCount < 0 { @@ -116,6 +139,9 @@ func MessageWithDiff(actual, message, expected string) string { } padding := strings.Repeat(" ", paddingCount) + "|" +======= + padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" +>>>>>>> 33cbc1d (add batchrelease controller) return Message(formattedActual, message+padding, formattedExpected) } @@ -171,6 +197,7 @@ func findFirstMismatch(a, b string) int { return 0 } +<<<<<<< HEAD const truncateHelpText = ` Gomega truncated this representation as it exceeds 'format.MaxLength'. Consider having the object provide a custom 'GomegaStringer' representation @@ -198,6 +225,8 @@ func truncateLongStrings(s string) string { return s } +======= +>>>>>>> 33cbc1d (add batchrelease controller) /* Pretty prints the passed in object at the passed in indentation level. @@ -212,7 +241,11 @@ Set PrintContextObjects to true to print the content of objects implementing con func Object(object interface{}, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) +<<<<<<< HEAD return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation)) +======= + return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) +>>>>>>> 33cbc1d (add batchrelease controller) } /* @@ -232,6 +265,7 @@ func IndentString(s string, indentation uint) string { return result } +<<<<<<< HEAD func formatType(v reflect.Value) string { switch v.Kind() { case reflect.Invalid: @@ -246,6 +280,27 @@ func formatType(v reflect.Value) string { return fmt.Sprintf("%s | len:%d", v.Type(), v.Len()) default: return fmt.Sprintf("%s", v.Type()) +======= +func formatType(object interface{}) string { + t := reflect.TypeOf(object) + if t == nil { + return "nil" + } + switch t.Kind() { + case reflect.Chan: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Ptr: + return fmt.Sprintf("%T | %p", object, object) + case reflect.Slice: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Map: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d", object, v.Len()) + default: + return fmt.Sprintf("%T", object) +>>>>>>> 33cbc1d (add batchrelease controller) } } @@ -258,6 +313,7 @@ func formatValue(value reflect.Value, indentation uint) string { return "nil" } +<<<<<<< HEAD if value.CanInterface() { obj := value.Interface() @@ -273,6 +329,16 @@ func formatValue(value reflect.Value, indentation uint) string { return truncateLongStrings(x.GoString()) case fmt.Stringer: return truncateLongStrings(x.String()) +======= + if UseStringerRepresentation { + if value.CanInterface() { + obj := value.Interface() + switch x := obj.(type) { + case fmt.GoStringer: + return x.GoString() + case fmt.Stringer: + return x.String() +>>>>>>> 33cbc1d (add batchrelease controller) } } } @@ -303,6 +369,7 @@ func formatValue(value 
reflect.Value, indentation uint) string { case reflect.Ptr: return formatValue(value.Elem(), indentation) case reflect.Slice: +<<<<<<< HEAD return truncateLongStrings(formatSlice(value, indentation)) case reflect.String: return truncateLongStrings(formatString(value.String(), indentation)) @@ -310,11 +377,21 @@ func formatValue(value reflect.Value, indentation uint) string { return truncateLongStrings(formatSlice(value, indentation)) case reflect.Map: return truncateLongStrings(formatMap(value, indentation)) +======= + return formatSlice(value, indentation) + case reflect.String: + return formatString(value.String(), indentation) + case reflect.Array: + return formatSlice(value, indentation) + case reflect.Map: + return formatMap(value, indentation) +>>>>>>> 33cbc1d (add batchrelease controller) case reflect.Struct: if value.Type() == timeType && value.CanInterface() { t, _ := value.Interface().(time.Time) return t.Format(time.RFC3339Nano) } +<<<<<<< HEAD return truncateLongStrings(formatStruct(value, indentation)) case reflect.Interface: return formatInterface(value, indentation) @@ -323,6 +400,16 @@ func formatValue(value reflect.Value, indentation uint) string { return truncateLongStrings(fmt.Sprintf("%#v", value.Interface())) } return truncateLongStrings(fmt.Sprintf("%#v", value)) +======= + return formatStruct(value, indentation) + case reflect.Interface: + return formatValue(value.Elem(), indentation) + default: + if value.CanInterface() { + return fmt.Sprintf("%#v", value.Interface()) + } + return fmt.Sprintf("%#v", value) +>>>>>>> 33cbc1d (add batchrelease controller) } } @@ -412,10 +499,13 @@ func formatStruct(v reflect.Value, indentation uint) string { return fmt.Sprintf("{%s}", strings.Join(result, ", ")) } +<<<<<<< HEAD func formatInterface(v reflect.Value, indentation uint) string { return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation)) } +======= +>>>>>>> 33cbc1d (add batchrelease controller) func isNilValue(a reflect.Value) bool { switch a.Kind() { case reflect.Invalid: diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod index 7fea4ac..c3b346a 100644 --- a/vendor/github.com/onsi/gomega/go.mod +++ b/vendor/github.com/onsi/gomega/go.mod @@ -1,5 +1,6 @@ module github.com/onsi/gomega +<<<<<<< HEAD go 1.16 require ( @@ -7,4 +8,12 @@ require ( github.com/onsi/ginkgo v1.16.4 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 gopkg.in/yaml.v2 v2.4.0 +======= +require ( + github.com/golang/protobuf v1.4.2 + github.com/onsi/ginkgo v1.12.1 + golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + gopkg.in/yaml.v2 v2.3.0 +>>>>>>> 33cbc1d (add batchrelease controller) ) diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum index 56f1b44..8357dc2 100644 --- a/vendor/github.com/onsi/gomega/go.sum +++ b/vendor/github.com/onsi/gomega/go.sum @@ -1,3 +1,4 @@ +<<<<<<< HEAD github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -6,12 +7,18 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= 
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +======= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +>>>>>>> 33cbc1d (add batchrelease controller) github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +<<<<<<< HEAD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= @@ -84,23 +91,71 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +======= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +>>>>>>> 33cbc1d (add batchrelease controller) google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +<<<<<<< HEAD google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +======= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +>>>>>>> 33cbc1d (add batchrelease controller) gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +<<<<<<< HEAD gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 
v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +======= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index f5156fd..20934ee 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -14,6 +14,7 @@ Gomega is MIT-Licensed package gomega import ( +<<<<<<< HEAD "errors" "fmt" "time" @@ -25,11 +26,27 @@ import ( const GOMEGA_VERSION = "1.17.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. +======= + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/asyncassertion" + "github.com/onsi/gomega/internal/testingtsupport" + "github.com/onsi/gomega/types" +) + +const GOMEGA_VERSION = "1.10.2" + +const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. +>>>>>>> 33cbc1d (add batchrelease controller) If you're using Ginkgo then you probably forgot to put your assertion in an It(). Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT(). Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghhtp, gexec,...) from different locations. ` +<<<<<<< HEAD // Gomega describes the essential Gomega DSL. This interface allows libraries // to abstract between the standard package-level function implementations // and alternatives like *WithT. @@ -102,16 +119,81 @@ func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandl // Testing.T tests. It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test. func RegisterTestingT(t types.GomegaTestingT) { Default.(*internal.Gomega).ConfigureWithT(t) +======= +var globalFailWrapper *types.GomegaFailWrapper + +var defaultEventuallyTimeout = time.Second +var defaultEventuallyPollingInterval = 10 * time.Millisecond +var defaultConsistentlyDuration = 100 * time.Millisecond +var defaultConsistentlyPollingInterval = 10 * time.Millisecond + +// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails +// the fail handler passed into RegisterFailHandler is called. +func RegisterFailHandler(handler types.GomegaFailHandler) { + RegisterFailHandlerWithT(testingtsupport.EmptyTWithHelper{}, handler) +} + +// RegisterFailHandlerWithT ensures that the given types.TWithHelper and fail handler +// are used globally. +func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) { + if handler == nil { + globalFailWrapper = nil + return + } + + globalFailWrapper = &types.GomegaFailWrapper{ + Fail: handler, + TWithHelper: t, + } +} + +// RegisterTestingT connects Gomega to Golang's XUnit style +// Testing.T tests. It is now deprecated and you should use NewWithT() instead. 
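// An aside for reviewers (illustrative, not part of the vendored file): the
// fail-handler plumbing in this hunk is what the repo's new e2e suites hook
// into. The canonical wiring looks like this; the test and suite names here
// are hypothetical:
//
//	package e2e_test
//
//	import (
//		"testing"
//
//		"github.com/onsi/ginkgo"
//		"github.com/onsi/gomega"
//	)
//
//	func TestE2E(t *testing.T) {
//		gomega.RegisterFailHandler(ginkgo.Fail)
//		ginkgo.RunSpecs(t, "BatchRelease E2E Suite")
//	}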
+// +// Legacy Documentation: +// +// You'll need to call this at the top of each XUnit style test: +// +// func TestFarmHasCow(t *testing.T) { +// RegisterTestingT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +// +// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to +// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests +// in parallel as the global fail handler cannot point to more than one testing.T at a time. +// +// NewWithT() does not have this limitation +// +// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). +func RegisterTestingT(t types.GomegaTestingT) { + tWithHelper, hasHelper := t.(types.TWithHelper) + if !hasHelper { + RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) + return + } + RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) +>>>>>>> 33cbc1d (add batchrelease controller) } // InterceptGomegaFailures runs a given callback and returns an array of // failure messages generated by any Gomega assertions within the callback. +<<<<<<< HEAD // Exeuction continues after the first failure allowing users to collect all failures // in the callback. +======= +// +// This is accomplished by temporarily replacing the *global* fail handler +// with a fail handler that simply annotates failures. The original fail handler +// is reset when InterceptGomegaFailures returns. +>>>>>>> 33cbc1d (add batchrelease controller) // // This is most useful when testing custom matchers, but can also be used to check // on a value using a Gomega assertion without causing a test failure. func InterceptGomegaFailures(f func()) []string { +<<<<<<< HEAD originalHandler := Default.(*internal.Gomega).Fail failures := []string{} Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) { @@ -156,6 +238,18 @@ func ensureDefaultGomegaIsConfigured() { } } +======= + originalHandler := globalFailWrapper.Fail + failures := []string{} + RegisterFailHandler(func(message string, callerSkip ...int) { + failures = append(failures, message) + }) + f() + RegisterFailHandler(originalHandler) + return failures +} + +>>>>>>> 33cbc1d (add batchrelease controller) // Ω wraps an actual value allowing assertions to be made on it: // Ω("foo").Should(Equal("foo")) // @@ -174,8 +268,12 @@ func ensureDefaultGomegaIsConfigured() { // // Ω and Expect are identical func Ω(actual interface{}, extra ...interface{}) Assertion { +<<<<<<< HEAD ensureDefaultGomegaIsConfigured() return Default.Ω(actual, extra...) +======= + return ExpectWithOffset(0, actual, extra...) +>>>>>>> 33cbc1d (add batchrelease controller) } // Expect wraps an actual value allowing assertions to be made on it: @@ -196,21 +294,30 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { // // Expect and Ω are identical func Expect(actual interface{}, extra ...interface{}) Assertion { +<<<<<<< HEAD ensureDefaultGomegaIsConfigured() return Default.Expect(actual, extra...) +======= + return ExpectWithOffset(0, actual, extra...) +>>>>>>> 33cbc1d (add batchrelease controller) } // ExpectWithOffset wraps an actual value allowing assertions to be made on it: // ExpectWithOffset(1, "foo").To(Equal("foo")) // // Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument +<<<<<<< HEAD // that is used to modify the call-stack offset when computing line numbers. 
It is // the same as `Expect(...).WithOffset`. +======= +// that is used to modify the call-stack offset when computing line numbers. +>>>>>>> 33cbc1d (add batchrelease controller) // // This is most useful in helper functions that make assertions. If you want Gomega's // error message to refer to the calling line in the test (as opposed to the line in the helper function) // set the first argument of `ExpectWithOffset` appropriately. func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { +<<<<<<< HEAD ensureDefaultGomegaIsConfigured() return Default.ExpectWithOffset(offset, actual, extra...) } @@ -308,11 +415,58 @@ the same as `Eventually(...).WithTimeout` or `Eventually(...).WithTimeout(...).W func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.Eventually(actual, intervals...) +======= + if globalFailWrapper == nil { + panic(nilFailHandlerPanic) + } + return assertion.New(actual, globalFailWrapper, offset, extra...) +} + +// Eventually wraps an actual value allowing assertions to be made on it. +// The assertion is tried periodically until it passes or a timeout occurs. +// +// Both the timeout and polling interval are configurable as optional arguments: +// The first optional argument is the timeout +// The second optional argument is the polling interval +// +// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +// last case they are interpreted as seconds. +// +// If Eventually is passed an actual that is a function taking no arguments and returning at least one value, +// then Eventually will call the function periodically and try the matcher against the function's first return value. +// +// Example: +// +// Eventually(func() int { +// return thingImPolling.Count() +// }).Should(BeNumerically(">=", 17)) +// +// Note that this example could be rewritten: +// +// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) +// +// If the function returns more than one value, then Eventually will pass the first value to the matcher and +// assert that all other values are nil/zero. +// This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. +// +// For example, consider a method that returns a value and an error: +// func FetchFromDB() (string, error) +// +// Then +// Eventually(FetchFromDB).Should(Equal("hasselhoff")) +// +// Will pass only if the the returned error is nil and the returned string passes the matcher. +// +// Eventually's default timeout is 1 second, and its default polling interval is 10ms +func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { + return EventuallyWithOffset(0, actual, intervals...) +>>>>>>> 33cbc1d (add batchrelease controller) } // EventuallyWithOffset operates like Eventually but takes an additional // initial argument to indicate an offset in the call stack. This is useful when building helper // functions that contain matchers. To learn more, read about `ExpectWithOffset`. +<<<<<<< HEAD // // `EventuallyWithOffset` is the same as `Eventually(...).WithOffset`. // @@ -342,37 +496,111 @@ This will block for 200 milliseconds and repeatedly check the channel and ensure func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.Consistently(actual, intervals...) 
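// Reviewer sketch (illustrative, not part of the vendored file): the offset
// forms documented above exist so that helper functions can attribute
// failures to their caller's line. A hypothetical helper:
//
//	func expectHealthy(resp *http.Response) {
//		// offset 1 attributes failures to expectHealthy's caller
//		gomega.ExpectWithOffset(1, resp.StatusCode).To(gomega.Equal(http.StatusOK))
//	}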
+======= +func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { + if globalFailWrapper == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) +} + +// Consistently wraps an actual value allowing assertions to be made on it. +// The assertion is tried periodically and is required to pass for a period of time. +// +// Both the total time and polling interval are configurable as optional arguments: +// The first optional argument is the duration that Consistently will run for +// The second optional argument is the polling interval +// +// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +// last case they are interpreted as seconds. +// +// If Consistently is passed an actual that is a function taking no arguments and returning at least one value, +// then Consistently will call the function periodically and try the matcher against the function's first return value. +// +// If the function returns more than one value, then Consistently will pass the first value to the matcher and +// assert that all other values are nil/zero. +// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. +// +// Consistently is useful in cases where you want to assert that something *does not happen* over a period of time. +// For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: +// +// Consistently(channel).ShouldNot(Receive()) +// +// Consistently's default duration is 100ms, and its default polling interval is 10ms +func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { + return ConsistentlyWithOffset(0, actual, intervals...) +>>>>>>> 33cbc1d (add batchrelease controller) } // ConsistentlyWithOffset operates like Consistently but takes an additional // initial argument to indicate an offset in the call stack. This is useful when building helper // functions that contain matchers. To learn more, read about `ExpectWithOffset`. +<<<<<<< HEAD // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.ConsistentlyWithOffset(offset, actual, intervals...) +======= +func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { + if globalFailWrapper == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) +>>>>>>> 33cbc1d (add batchrelease controller) } // SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. 
Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. func SetDefaultEventuallyTimeout(t time.Duration) { +<<<<<<< HEAD Default.SetDefaultEventuallyTimeout(t) +======= + defaultEventuallyTimeout = t +>>>>>>> 33cbc1d (add batchrelease controller) } // SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually. func SetDefaultEventuallyPollingInterval(t time.Duration) { +<<<<<<< HEAD Default.SetDefaultEventuallyPollingInterval(t) +======= + defaultEventuallyPollingInterval = t +>>>>>>> 33cbc1d (add batchrelease controller) } // SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long. func SetDefaultConsistentlyDuration(t time.Duration) { +<<<<<<< HEAD Default.SetDefaultConsistentlyDuration(t) +======= + defaultConsistentlyDuration = t +>>>>>>> 33cbc1d (add batchrelease controller) } // SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently. func SetDefaultConsistentlyPollingInterval(t time.Duration) { +<<<<<<< HEAD Default.SetDefaultConsistentlyPollingInterval(t) +======= + defaultConsistentlyPollingInterval = t +>>>>>>> 33cbc1d (add batchrelease controller) } // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against @@ -390,10 +618,20 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { // // Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") // Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." }) +<<<<<<< HEAD type AsyncAssertion = types.AsyncAssertion // GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter. type GomegaAsyncAssertion = types.AsyncAssertion +======= +type AsyncAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter. +type GomegaAsyncAssertion = AsyncAssertion +>>>>>>> 33cbc1d (add batchrelease controller) // Assertion is returned by Ω and Expect and compares the actual value to the matcher // passed to the Should/ShouldNot and To/ToNot/NotTo methods. @@ -412,6 +650,7 @@ type GomegaAsyncAssertion = types.AsyncAssertion // Example: // // Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +<<<<<<< HEAD type Assertion = types.Assertion // GomegaAssertion is deprecated in favor of Assertion, which does not stutter. @@ -419,3 +658,151 @@ type GomegaAssertion = types.Assertion // OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it type OmegaMatcher = types.GomegaMatcher +======= +type Assertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + + To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +// GomegaAssertion is deprecated in favor of Assertion, which does not stutter. 
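// Reviewer sketch (illustrative, not part of the vendored file): the
// SetDefault* knobs in this hunk are typically tuned once per suite, for
// example in a BeforeSuite, since rollout workloads converge slowly; the
// durations below are hypothetical:
//
//	var _ = ginkgo.BeforeSuite(func() {
//		gomega.SetDefaultEventuallyTimeout(5 * time.Minute)
//		gomega.SetDefaultEventuallyPollingInterval(time.Second)
//	})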
+type GomegaAssertion = Assertion + +// OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it +type OmegaMatcher types.GomegaMatcher + +// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage +// Gomega's rich ecosystem of matchers in standard `testing` test suites. +// +// Use `NewWithT` to instantiate a `WithT` +type WithT struct { + t types.GomegaTestingT +} + +// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter. +type GomegaWithT = WithT + +// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with +// Gomega's rich ecosystem of matchers in standard `testing` test suits. +// +// func TestFarmHasCow(t *testing.T) { +// g := gomega.NewWithT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +func NewWithT(t types.GomegaTestingT) *WithT { + return &WithT{ + t: t, + } +} + +// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter. +func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { + return NewWithT(t) +} + +// ExpectWithOffset is used to make assertions. See documentation for ExpectWithOffset. +func (g *WithT) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { + return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), offset, extra...) +} + +// EventuallyWithOffset is used to make asynchronous assertions. See documentation for EventuallyWithOffset. +func (g *WithT) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset) +} + +// ConsistentlyWithOffset is used to make asynchronous assertions. See documentation for ConsistentlyWithOffset. +func (g *WithT) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset) +} + +// Expect is used to make assertions. See documentation for Expect. +func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { + return g.ExpectWithOffset(0, actual, extra...) +} + +// Eventually is used to make asynchronous assertions. See documentation for Eventually. +func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { + return g.EventuallyWithOffset(0, actual, intervals...) +} + +// Consistently is used to make asynchronous assertions. See documentation for Consistently. 
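// Usage sketch for the WithT wrapper introduced in this hunk (illustrative,
// not part of the vendored file; the counter type and its methods are
// hypothetical). Interval arguments may be time.Duration values, parsable
// duration strings, or bare numbers of seconds, per toDuration below:
//
//	package e2e_test
//
//	import (
//		"testing"
//		"time"
//
//		"github.com/onsi/gomega"
//	)
//
//	func TestCounterConverges(t *testing.T) {
//		g := gomega.NewWithT(t)
//		c := newSafeCounter() // hypothetical concurrency-safe counter
//		go c.incrementEvery(10 * time.Millisecond)
//
//		g.Eventually(c.Count, "2s", "10ms").Should(gomega.BeNumerically(">=", 3))
//		g.Consistently(c.Count, "100ms").Should(gomega.BeNumerically("<", 1000))
//	}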
+func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { + return g.ConsistentlyWithOffset(0, actual, intervals...) +} + +func toDuration(input interface{}) time.Duration { + duration, ok := input.(time.Duration) + if ok { + return duration + } + + value := reflect.ValueOf(input) + kind := reflect.TypeOf(input).Kind() + + if reflect.Int <= kind && kind <= reflect.Int64 { + return time.Duration(value.Int()) * time.Second + } else if reflect.Uint <= kind && kind <= reflect.Uint64 { + return time.Duration(value.Uint()) * time.Second + } else if reflect.Float32 <= kind && kind <= reflect.Float64 { + return time.Duration(value.Float() * float64(time.Second)) + } else if reflect.String == kind { + duration, err := time.ParseDuration(value.String()) + if err != nil { + panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + } + return duration + } + + panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) +} + +// Gomega describes the essential Gomega DSL. This interface allows libraries +// to abstract between the standard package-level function implementations +// and alternatives like *WithT. +type Gomega interface { + Expect(actual interface{}, extra ...interface{}) Assertion + Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion + Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion +} + +type globalFailHandlerGomega struct{} + +// DefaultGomega supplies the standard package-level implementation +var Default Gomega = globalFailHandlerGomega{} + +// Expect is used to make assertions. See documentation for Expect. +func (globalFailHandlerGomega) Expect(actual interface{}, extra ...interface{}) Assertion { + return Expect(actual, extra...) +} + +// Eventually is used to make asynchronous assertions. See documentation for Eventually. +func (globalFailHandlerGomega) Eventually(actual interface{}, extra ...interface{}) AsyncAssertion { + return Eventually(actual, extra...) +} + +// Consistently is used to make asynchronous assertions. See documentation for Consistently. +func (globalFailHandlerGomega) Consistently(actual interface{}, extra ...interface{}) AsyncAssertion { + return Consistently(actual, extra...) +} +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go new file mode 100644 index 0000000..a248298 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go @@ -0,0 +1,109 @@ +package assertion + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/types" +) + +type Assertion struct { + actualInput interface{} + failWrapper *types.GomegaFailWrapper + offset int + extra []interface{} +} + +func New(actualInput interface{}, failWrapper *types.GomegaFailWrapper, offset int, extra ...interface{}) *Assertion { + return &Assertion{ + actualInput: actualInput, + failWrapper: failWrapper, + offset: offset, + extra: extra, + } +} + +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) 
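	// (Editorial aside, not part of the vendored file: vetExtras below
	// enforces gomega's multi-return convention. With a call such as
	// Expect(FetchFromDB()), the trailing error value must be nil/zero,
	// otherwise the assertion fails before the matcher ever runs.)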
+} + +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + case 1: + if describe, ok := optionalDescription[0].(func() string); ok { + return describe() + "\n" + } + } + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + matches, err := matcher.Match(assertion.actualInput) + assertion.failWrapper.TWithHelper.Helper() + if err != nil { + description := assertion.buildDescription(optionalDescription...) + assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(assertion.actualInput) + } else { + message = matcher.NegatedFailureMessage(assertion.actualInput) + } + description := assertion.buildDescription(optionalDescription...) + assertion.failWrapper.Fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { + success, message := vetExtras(assertion.extra) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) 
+ assertion.failWrapper.TWithHelper.Helper() + assertion.failWrapper.Fail(description+message, 2+assertion.offset) + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go new file mode 100644 index 0000000..5204836 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -0,0 +1,198 @@ +// untested sections: 2 + +package asyncassertion + +import ( + "errors" + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + asyncType AsyncAssertionType + actualInput interface{} + timeoutInterval time.Duration + pollingInterval time.Duration + failWrapper *types.GomegaFailWrapper + offset int +} + +func New(asyncType AsyncAssertionType, actualInput interface{}, failWrapper *types.GomegaFailWrapper, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + actualType := reflect.TypeOf(actualInput) + if actualType.Kind() == reflect.Func { + if actualType.NumIn() != 0 || actualType.NumOut() == 0 { + panic("Expected a function with no arguments and one or more return values.") + } + } + + return &AsyncAssertion{ + asyncType: asyncType, + actualInput: actualInput, + failWrapper: failWrapper, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + } +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + case 1: + if describe, ok := optionalDescription[0].(func() string); ok { + return describe() + "\n" + } + } + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
+ "\n" +} + +func (assertion *AsyncAssertion) actualInputIsAFunction() bool { + actualType := reflect.TypeOf(assertion.actualInput) + return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if assertion.actualInputIsAFunction() { + values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) + + extras := []interface{}{} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + + success, message := vetExtras(extras) + + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil + } + + return assertion.actualInput, nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualInputIsAFunction() { + return true + } + + return oraclematcher.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + assertion.failWrapper.TWithHelper.Helper() + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.failWrapper.TWithHelper.Helper() + description := assertion.buildDescription(optionalDescription...) + assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. 
Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go new file mode 100644 index 0000000..66cad88 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go @@ -0,0 +1,25 @@ +package oraclematcher + +import "github.com/onsi/gomega/types" + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. + +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. 
+*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go new file mode 100644 index 0000000..bb27032 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -0,0 +1,60 @@ +package testingtsupport + +import ( + "regexp" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/types" +) + +var StackTracePruneRE = regexp.MustCompile(`\/gomega\/|\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + +type EmptyTWithHelper struct{} + +func (e EmptyTWithHelper) Helper() {} + +type gomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +func BuildTestingTGomegaFailWrapper(t gomegaTestingT) *types.GomegaFailWrapper { + tWithHelper, hasHelper := t.(types.TWithHelper) + if !hasHelper { + tWithHelper = EmptyTWithHelper{} + } + + fail := func(message string, callerSkip ...int) { + if hasHelper { + tWithHelper.Helper() + t.Fatalf("\n%s", message) + } else { + skip := 2 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + stackTrace := pruneStack(string(debug.Stack()), skip) + t.Fatalf("\n%s\n%s\n", stackTrace, message) + } + } + + return &types.GomegaFailWrapper{ + Fail: fail, + TWithHelper: tWithHelper, + } +} + +func pruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n")[1:] + if len(stack) > 2*skip { + stack = stack[2*skip:] + } + prunedStack := []string{} + for i := 0; i < len(stack)/2; i++ { + if !StackTracePruneRE.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index e4aacc9..ebeff14 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -342,6 +342,7 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { } } +<<<<<<< HEAD //HaveField succeeds if actual is a struct and the value at the passed in field //matches the passed in matcher. By default HaveField used Equal() to perform the match, //however a matcher can be passed in in stead. @@ -370,6 +371,8 @@ func HaveField(field string, expected interface{}) types.GomegaMatcher { } } +======= +>>>>>>> 33cbc1d (add batchrelease controller) //BeNumerically performs numerical assertions in a type-agnostic way. //Actual and expected should be numbers, though the specific type of //number is irrelevant (float32, float64, uint8, etc...). @@ -451,6 +454,7 @@ func BeADirectory() types.GomegaMatcher { //Expected must be either an int or a string. 
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200 // Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found" +<<<<<<< HEAD // Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204 func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher { return &matchers.HaveHTTPStatusMatcher{Expected: expected} @@ -474,6 +478,12 @@ func HaveHTTPBody(expected interface{}) types.GomegaMatcher { return &matchers.HaveHTTPBodyMatcher{Expected: expected} } +======= +func HaveHTTPStatus(expected interface{}) types.GomegaMatcher { + return &matchers.HaveHTTPStatusMatcher{Expected: expected} +} + +>>>>>>> 33cbc1d (add batchrelease controller) //And succeeds only if all of the given matchers succeed. //The matchers are tried in order, and will fail-fast if one doesn't succeed. // Expect("hi").To(And(HaveLen(2), Equal("hi")) @@ -513,6 +523,7 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher { } //WithTransform applies the `transform` to the actual value and matches it against `matcher`. +<<<<<<< HEAD //The given transform must be either a function of one parameter that returns one value or a // function of one parameter that returns two values, where the second value must be of the // error type. @@ -522,10 +533,17 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher { // var failingplus1 = func(i int) (int, error) { return 42, "this does not compute" } // Expect(1).To(WithTransform(failingplus1, Equal(2))) // +======= +//The given transform must be a function of one parameter that returns one value. +// var plus1 = func(i int) int { return i + 1 } +// Expect(1).To(WithTransform(plus1, Equal(2)) +// +>>>>>>> 33cbc1d (add batchrelease controller) //And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { return matchers.NewWithTransformMatcher(transform, matcher) } +<<<<<<< HEAD //Satisfy matches the actual value against the `predicate` function. //The given predicate must be a function of one paramter that returns bool. @@ -534,3 +552,5 @@ func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.Gom func Satisfy(predicate interface{}) types.GomegaMatcher { return matchers.NewSatisfyMatcher(predicate) } +======= +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go index 6bd826a..4d84a12 100644 --- a/vendor/github.com/onsi/gomega/matchers/and.go +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -4,6 +4,10 @@ import ( "fmt" "github.com/onsi/gomega/format" +<<<<<<< HEAD +======= + "github.com/onsi/gomega/internal/oraclematcher" +>>>>>>> 33cbc1d (add batchrelease controller) "github.com/onsi/gomega/types" ) @@ -51,12 +55,20 @@ func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { if m.firstFailedMatcher == nil { // so all matchers succeeded.. Any one of them changing would change the result. for _, matcher := range m.Matchers { +<<<<<<< HEAD if types.MatchMayChangeInTheFuture(matcher, actual) { +======= + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { +>>>>>>> 33cbc1d (add batchrelease controller) return true } } return false // none of were going to change } // one of the matchers failed.. 
it must be able to change in order to affect the result +<<<<<<< HEAD return types.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) +======= + return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) +>>>>>>> 33cbc1d (add batchrelease controller) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go index 9ee75a5..8c496c9 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go @@ -18,9 +18,29 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err return false, fmt.Errorf("BeElement matcher expects actual to be typed") } +<<<<<<< HEAD var lastError error for _, m := range flatten(matcher.Elements) { matcher := &EqualMatcher{Expected: m} +======= + length := len(matcher.Elements) + valueAt := func(i int) interface{} { + return matcher.Elements[i] + } + // Special handling of a single element of type Array or Slice + if length == 1 && isArrayOrSlice(valueAt(0)) { + element := valueAt(0) + value := reflect.ValueOf(element) + length = value.Len() + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } + } + + var lastError error + for i := 0; i < length; i++ { + matcher := &EqualMatcher{Expected: valueAt(i)} +>>>>>>> 33cbc1d (add batchrelease controller) success, err := matcher.Match(actual) if err != nil { lastError = err @@ -35,9 +55,17 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err } func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) { +<<<<<<< HEAD return format.Message(actual, "to be an element of", presentable(matcher.Elements)) } func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, "not to be an element of", presentable(matcher.Elements)) +======= + return format.Message(actual, "to be an element of", matcher.Elements) +} + +func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be an element of", matcher.Elements) +>>>>>>> 33cbc1d (add batchrelease controller) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go index 100735d..f467f4b 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -45,7 +45,11 @@ func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, er return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) } if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { +<<<<<<< HEAD return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[1], 1)) +======= + return false, fmt.Errorf("Expected a number. 
Got:\n%s", format.Object(matcher.CompareTo[0], 1)) +>>>>>>> 33cbc1d (add batchrelease controller) } switch matcher.Comparator { diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index e8ef0de..8e23e1a 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -57,6 +57,7 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { return } +<<<<<<< HEAD func flatten(elems []interface{}) []interface{} { if len(elems) != 1 || !isArrayOrSlice(elems[0]) { return elems @@ -72,6 +73,19 @@ func flatten(elems []interface{}) []interface{} { func matchers(expectedElems []interface{}) (matchers []interface{}) { for _, e := range flatten(expectedElems) { +======= +func matchers(expectedElems []interface{}) (matchers []interface{}) { + elems := expectedElems + if len(expectedElems) == 1 && isArrayOrSlice(expectedElems[0]) { + elems = []interface{}{} + value := reflect.ValueOf(expectedElems[0]) + for i := 0; i < value.Len(); i++ { + elems = append(elems, value.Index(i).Interface()) + } + } + + for _, e := range elems { +>>>>>>> 33cbc1d (add batchrelease controller) matcher, isMatcher := e.(omegaMatcher) if !isMatcher { matcher = &EqualMatcher{Expected: e} @@ -81,6 +95,7 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) { return } +<<<<<<< HEAD func presentable(elems []interface{}) interface{} { elems = flatten(elems) @@ -104,6 +119,8 @@ func presentable(elems []interface{}) interface{} { return ss.Interface() } +======= +>>>>>>> 33cbc1d (add batchrelease controller) func valuesOf(actual interface{}) []interface{} { value := reflect.ValueOf(actual) values := []interface{}{} @@ -122,11 +139,19 @@ func valuesOf(actual interface{}) []interface{} { } func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { +<<<<<<< HEAD message = format.Message(actual, "to consist of", presentable(matcher.Elements)) message = appendMissingElements(message, matcher.missingElements) if len(matcher.extraElements) > 0 { message = fmt.Sprintf("%s\nthe extra elements were\n%s", message, format.Object(presentable(matcher.extraElements), 1)) +======= + message = format.Message(actual, "to consist of", matcher.Elements) + message = appendMissingElements(message, matcher.missingElements) + if len(matcher.extraElements) > 0 { + message = fmt.Sprintf("%s\nthe extra elements were\n%s", message, + format.Object(matcher.extraElements, 1)) +>>>>>>> 33cbc1d (add batchrelease controller) } return } @@ -136,9 +161,17 @@ func appendMissingElements(message string, missingElements []interface{}) string return message } return fmt.Sprintf("%s\nthe missing elements were\n%s", message, +<<<<<<< HEAD format.Object(presentable(missingElements), 1)) } func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, "not to consist of", presentable(matcher.Elements)) +======= + format.Object(missingElements, 1)) +} + +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to consist of", matcher.Elements) +>>>>>>> 33cbc1d (add batchrelease controller) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index 946cd8b..a7bb465 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ 
b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -35,10 +35,18 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, } func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) { +<<<<<<< HEAD message = format.Message(actual, "to contain elements", presentable(matcher.Elements)) +======= + message = format.Message(actual, "to contain elements", matcher.Elements) +>>>>>>> 33cbc1d (add batchrelease controller) return appendMissingElements(message, matcher.missingElements) } func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { +<<<<<<< HEAD return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) +======= + return format.Message(actual, "not to contain elements", matcher.Elements) +>>>>>>> 33cbc1d (add batchrelease controller) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go index 85f7764..e3f2b9d 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go @@ -2,17 +2,26 @@ package matchers import ( "fmt" +<<<<<<< HEAD "io" "net/http" "net/http/httptest" "reflect" "strings" +======= + "net/http" + "net/http/httptest" +>>>>>>> 33cbc1d (add batchrelease controller) "github.com/onsi/gomega/format" ) type HaveHTTPStatusMatcher struct { +<<<<<<< HEAD Expected []interface{} +======= + Expected interface{} +>>>>>>> 33cbc1d (add batchrelease controller) } func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) { @@ -26,6 +35,7 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e return false, fmt.Errorf("HaveHTTPStatus matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1)) } +<<<<<<< HEAD if len(matcher.Expected) == 0 { return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. Got nothing") } @@ -93,4 +103,22 @@ func formatHttpResponse(input interface{}) string { s.WriteString(fmt.Sprintf("%s}", format.Indent)) return s.String() +======= + switch e := matcher.Expected.(type) { + case int: + return resp.StatusCode == e, nil + case string: + return resp.Status == e, nil + } + + return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. 
Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have HTTP status", matcher.Expected) +} + +func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have HTTP status", matcher.Expected) +>>>>>>> 33cbc1d (add batchrelease controller) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index c8993a8..3a20da0 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -1,11 +1,18 @@ package matchers import ( +<<<<<<< HEAD "errors" +======= +>>>>>>> 33cbc1d (add batchrelease controller) "fmt" "reflect" "github.com/onsi/gomega/format" +<<<<<<< HEAD +======= + "golang.org/x/xerrors" +>>>>>>> 33cbc1d (add batchrelease controller) ) type MatchErrorMatcher struct { @@ -25,7 +32,11 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e expected := matcher.Expected if isError(expected) { +<<<<<<< HEAD return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil +======= + return reflect.DeepEqual(actualErr, expected) || xerrors.Is(actualErr, expected.(error)), nil +>>>>>>> 33cbc1d (add batchrelease controller) } if isString(expected) { diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go index 78b7191..ad5ad8e 100644 --- a/vendor/github.com/onsi/gomega/matchers/not.go +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -1,6 +1,10 @@ package matchers import ( +<<<<<<< HEAD +======= + "github.com/onsi/gomega/internal/oraclematcher" +>>>>>>> 33cbc1d (add batchrelease controller) "github.com/onsi/gomega/types" ) @@ -25,5 +29,9 @@ func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) } func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +<<<<<<< HEAD return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +======= + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +>>>>>>> 33cbc1d (add batchrelease controller) } diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go index 841ae26..a62977b 100644 --- a/vendor/github.com/onsi/gomega/matchers/or.go +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -4,6 +4,10 @@ import ( "fmt" "github.com/onsi/gomega/format" +<<<<<<< HEAD +======= + "github.com/onsi/gomega/internal/oraclematcher" +>>>>>>> 33cbc1d (add batchrelease controller) "github.com/onsi/gomega/types" ) @@ -53,11 +57,19 @@ func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { if m.firstSuccessfulMatcher != nil { // one of the matchers succeeded.. it must be able to change in order to affect the result +<<<<<<< HEAD return types.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) } else { // so all matchers failed.. Any one of them changing would change the result. for _, matcher := range m.Matchers { if types.MatchMayChangeInTheFuture(matcher, actual) { +======= + return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) + } else { + // so all matchers failed.. Any one of them changing would change the result. 
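// --- Illustrative sketch (not part of the patch above) -----------------------
// A hypothetical test exercising the vendored (pre-merge, gomega v1.10.x)
// behavior of the matchers above, using only real gomega and xerrors APIs:
package vendored_matchers_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	. "github.com/onsi/gomega"
	"golang.org/x/xerrors"
)

func TestHaveHTTPStatusSingleArgument(t *testing.T) {
	g := NewGomegaWithT(t)
	rec := httptest.NewRecorder()
	rec.WriteHeader(http.StatusNotFound)
	// In this version the matcher takes one expected value: an int is
	// compared against StatusCode, a string against Status.
	g.Expect(rec).To(HaveHTTPStatus(http.StatusNotFound))
	g.Expect(rec.Result()).To(HaveHTTPStatus("404 Not Found"))
}

func TestSingleSliceArgumentIsExpanded(t *testing.T) {
	g := NewGomegaWithT(t)
	// Per the special cases above, a lone Array/Slice argument is expanded
	// into its elements rather than compared as one value.
	g.Expect(2).To(BeElementOf([]int{1, 2, 3}))
	g.Expect([]int{1, 2, 3}).To(ConsistOf([]int{3, 2, 1}))
}

func TestMatchErrorUnwrapsWithXerrors(t *testing.T) {
	g := NewGomegaWithT(t)
	base := xerrors.New("boom")
	wrapped := xerrors.Errorf("loading config: %w", base)
	// This MatchErrorMatcher falls back to xerrors.Is, so a wrapped cause
	// still matches.
	g.Expect(wrapped).To(MatchError(base))
}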
+ for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { +>>>>>>> 33cbc1d (add batchrelease controller) return true } } diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go index 6f743b1..df2b106 100644 --- a/vendor/github.com/onsi/gomega/matchers/with_transform.go +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -4,12 +4,20 @@ import ( "fmt" "reflect" +<<<<<<< HEAD +======= + "github.com/onsi/gomega/internal/oraclematcher" +>>>>>>> 33cbc1d (add batchrelease controller) "github.com/onsi/gomega/types" ) type WithTransformMatcher struct { // input +<<<<<<< HEAD Transform interface{} // must be a function of one parameter that returns one value and an optional error +======= + Transform interface{} // must be a function of one parameter that returns one value +>>>>>>> 33cbc1d (add batchrelease controller) Matcher types.GomegaMatcher // cached value @@ -19,9 +27,12 @@ type WithTransformMatcher struct { transformedValue interface{} } +<<<<<<< HEAD // reflect.Type for error var errorT = reflect.TypeOf((*error)(nil)).Elem() +======= +>>>>>>> 33cbc1d (add batchrelease controller) func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { if transform == nil { panic("transform function cannot be nil") @@ -30,10 +41,15 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) if txType.NumIn() != 1 { panic("transform function must have 1 argument") } +<<<<<<< HEAD if numout := txType.NumOut(); numout != 1 { if numout != 2 || !txType.Out(1).AssignableTo(errorT) { panic("transform function must either have 1 return value, or 1 return value plus 1 error value") } +======= + if txType.NumOut() != 1 { + panic("transform function must have 1 return value") +>>>>>>> 33cbc1d (add batchrelease controller) } return &WithTransformMatcher{ @@ -44,6 +60,7 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) } func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { +<<<<<<< HEAD // prepare a parameter to pass to the Transform function var param reflect.Value if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) { @@ -57,16 +74,26 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { } else { return false, fmt.Errorf("Transform function expects '%s' but we have '%T'", m.transformArgType, actual) +======= + // return error if actual's type is incompatible with Transform function's argument type + actualType := reflect.TypeOf(actual) + if !actualType.AssignableTo(m.transformArgType) { + return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) +>>>>>>> 33cbc1d (add batchrelease controller) } // call the Transform function with `actual` fn := reflect.ValueOf(m.Transform) +<<<<<<< HEAD result := fn.Call([]reflect.Value{param}) if len(result) == 2 { if !result[1].IsNil() { return false, fmt.Errorf("Transform function failed: %s", result[1].Interface().(error).Error()) } } +======= + result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) +>>>>>>> 33cbc1d (add batchrelease controller) m.transformedValue = result[0].Interface() // expect exactly one value return m.Matcher.Match(m.transformedValue) @@ -86,5 +113,9 @@ func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { // Querying the next matcher is fine if the transformer always will return 
the same value. // But if the transformer is non-deterministic and returns a different value each time, then there // is no point in querying the next matcher, since it can only comment on the last transformed value. +<<<<<<< HEAD return types.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +======= + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +>>>>>>> 33cbc1d (add batchrelease controller) } diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index c315ef0..e0cbc6a 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -1,5 +1,6 @@ package types +<<<<<<< HEAD import ( "time" ) @@ -30,6 +31,24 @@ type Gomega interface { SetDefaultConsistentlyPollingInterval(time.Duration) } +======= +type TWithHelper interface { + Helper() +} + +type GomegaFailHandler func(message string, callerSkip ...int) + +type GomegaFailWrapper struct { + Fail GomegaFailHandler + TWithHelper TWithHelper +} + +//A simple *testing.T interface wrapper +type GomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +>>>>>>> 33cbc1d (add batchrelease controller) //All Gomega matchers must implement the GomegaMatcher interface // //For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers @@ -38,6 +57,7 @@ type GomegaMatcher interface { FailureMessage(actual interface{}) (message string) NegatedFailureMessage(actual interface{}) (message string) } +<<<<<<< HEAD /* GomegaMatchers that also match the OracleMatcher interface can convey information about @@ -85,3 +105,5 @@ type Assertion interface { Error() Assertion } +======= +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go index e15b7bf..b30319b 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go @@ -57,7 +57,11 @@ loop: err = transform.ErrShortSrc break loop } +<<<<<<< HEAD r, size = utf8.RuneError, 1 +======= + r = utf8.RuneError +>>>>>>> 33cbc1d (add batchrelease controller) goto write } size = 2 diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go index f41aedc..b19e6b2 100644 --- a/vendor/golang.org/x/text/internal/language/language.go +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -303,6 +303,7 @@ func (t Tag) Extensions() []string { // are of the allowed values defined for the Unicode locale extension ('u') in // https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. // TypeForKey will traverse the inheritance chain to get the correct value. +<<<<<<< HEAD // // If there are multiple types associated with a key, only the first will be // returned. If there is no type associated with a key, it returns the empty @@ -314,6 +315,11 @@ func (t Tag) TypeForKey(key string) string { s = s[:p] } return s +======= +func (t Tag) TypeForKey(key string) string { + if start, end, _ := t.findTypeForKey(key); end != start { + return t.str[start:end] +>>>>>>> 33cbc1d (add batchrelease controller) } return "" } @@ -337,6 +343,7 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { // Remove the setting if value is "". 
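// --- Illustrative sketch (not part of the patch above) -----------------------
// The WithTransform vendored above accepts only a transform with exactly one
// parameter and one return value (enforced in NewWithTransformMatcher); the
// (T, error) form belongs to the HEAD side of the conflict. A minimal,
// hypothetical test:
package withtransform_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestWithTransform(t *testing.T) {
	g := NewGomegaWithT(t)
	plus1 := func(i int) int { return i + 1 }
	g.Expect(1).To(WithTransform(plus1, Equal(2)))
}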
if value == "" { +<<<<<<< HEAD start, sep, end, _ := t.findTypeForKey(key) if start != sep { // Remove a possible empty extension. @@ -344,6 +351,15 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { case t.str[start-2] != '-': // has previous elements. case end == len(t.str), // end of string end+2 < len(t.str) && t.str[end+2] == '-': // end of extension +======= + start, end, _ := t.findTypeForKey(key) + if start != end { + // Remove key tag and leading '-'. + start -= 4 + + // Remove a possible empty extension. + if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' { +>>>>>>> 33cbc1d (add batchrelease controller) start -= 2 } if start == int(t.pVariant) && end == len(t.str) { @@ -389,6 +405,7 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { t.str = string(buf[:uStart+len(b)]) } else { s := t.str +<<<<<<< HEAD start, sep, end, hasExt := t.findTypeForKey(key) if start == sep { if hasExt { @@ -397,6 +414,16 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:]) } else { t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:]) +======= + start, end, hasExt := t.findTypeForKey(key) + if start == end { + if hasExt { + b = b[2:] + } + t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:]) + } else { + t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:]) +>>>>>>> 33cbc1d (add batchrelease controller) } } return t, nil @@ -407,10 +434,17 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { // wasn't found. The hasExt return value reports whether an -u extension was present. // Note: the extensions are typically very small and are likely to contain // only one key-type pair. +<<<<<<< HEAD func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { p := int(t.pExt) if len(key) != 2 || p == len(t.str) || p == 0 { return p, p, p, false +======= +func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { + p := int(t.pExt) + if len(key) != 2 || p == len(t.str) || p == 0 { + return p, p, false +>>>>>>> 33cbc1d (add batchrelease controller) } s := t.str @@ -418,10 +452,17 @@ func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { for p++; s[p] != 'u'; p++ { if s[p] > 'u' { p-- +<<<<<<< HEAD return p, p, p, false } if p = nextExtension(s, p); p == len(s) { return len(s), len(s), len(s), false +======= + return p, p, false + } + if p = nextExtension(s, p); p == len(s) { + return len(s), len(s), false +>>>>>>> 33cbc1d (add batchrelease controller) } } // Proceed to the hyphen following the extension name. @@ -432,6 +473,7 @@ func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { // Iterate over keys until we get the end of a section. for { +<<<<<<< HEAD end = p for p++; p < len(s) && s[p] != '-'; p++ { } @@ -454,6 +496,42 @@ func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { } start = end sep = p +======= + // p points to the hyphen preceding the current token. + if p3 := p + 3; s[p3] == '-' { + // Found a key. + // Check whether we just processed the key that was requested. + if curKey == key { + return start, p, true + } + // Set to the next key and continue scanning type tokens. + curKey = s[p+1 : p3] + if curKey > key { + return p, p, true + } + // Start of the type token sequence. + start = p + 4 + // A type is at least 3 characters long. + p += 7 // 4 + 3 + } else { + // Attribute or type, which is at least 3 characters long. 
+ p += 4 + } + // p points past the third character of a type or attribute. + max := p + 5 // maximum length of token plus hyphen. + if len(s) < max { + max = len(s) + } + for ; p < max && s[p] != '-'; p++ { + } + // Bail if we have exhausted all tokens or if the next token starts + // a new extension. + if p == len(s) || s[p+2] == '-' { + if curKey == key { + return start, p, true + } + return p, p, true +>>>>>>> 33cbc1d (add batchrelease controller) } } } diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go index c696fd0..e796d76 100644 --- a/vendor/golang.org/x/text/internal/language/parse.go +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -133,6 +133,7 @@ func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { s.start = oldStart if end := oldStart + newSize; end != oldEnd { diff := end - oldEnd +<<<<<<< HEAD var b []byte if n := len(s.b) + diff; n > cap(s.b) { b = make([]byte, n) @@ -142,6 +143,16 @@ func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { } copy(b[end:], s.b[oldEnd:]) s.b = b +======= + if end < cap(s.b) { + b := make([]byte, len(s.b)+diff) + copy(b, s.b[:oldStart]) + copy(b[end:], s.b[oldEnd:]) + s.b = b + } else { + s.b = append(s.b[end:], s.b[oldEnd:]...) + } +>>>>>>> 33cbc1d (add batchrelease controller) s.next = end + (s.next - s.end) s.end = end } @@ -483,7 +494,11 @@ func parseExtensions(scan *scanner) int { func parseExtension(scan *scanner) int { start, end := scan.start, scan.end switch scan.token[0] { +<<<<<<< HEAD case 'u': // https://www.ietf.org/rfc/rfc6067.txt +======= + case 'u': +>>>>>>> 33cbc1d (add batchrelease controller) attrStart := end scan.scan() for last := []byte{}; len(scan.token) > 2; scan.scan() { @@ -503,6 +518,7 @@ func parseExtension(scan *scanner) int { last = scan.token end = scan.end } +<<<<<<< HEAD // Scan key-type sequences. A key is of length 2 and may be followed // by 0 or more "type" subtags from 3 to the maximum of 8 letters. var last, key []byte @@ -514,18 +530,38 @@ func parseExtension(scan *scanner) int { } // TODO: check key value validity if bytes.Compare(key, last) != 1 || scan.err != nil { +======= + var last, key []byte + for attrEnd := end; len(scan.token) == 2; last = key { + key = scan.token + keyEnd := scan.end + end = scan.acceptMinSize(3) + // TODO: check key value validity + if keyEnd == end || bytes.Compare(key, last) != 1 { +>>>>>>> 33cbc1d (add batchrelease controller) // We have an invalid key or the keys are not sorted. // Start scanning keys from scratch and reorder. 
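// --- Illustrative sketch (not part of the patch above) -----------------------
// findTypeForKey and the parseExtension changes around it back the lookup of
// key/type pairs in the BCP 47 'u' extension. The public API this ultimately
// serves, with an illustrative tag:
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// "de-u-co-phonebk" carries the Unicode extension key "co" (collation)
	// with the type "phonebk".
	tag := language.MustParse("de-u-co-phonebk")
	fmt.Println(tag.TypeForKey("co")) // phonebk
}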
p := attrEnd + 1 scan.next = p keys := [][]byte{} for scan.scan(); len(scan.token) == 2; { +<<<<<<< HEAD keyStart := scan.start end = scan.end for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { end = scan.end } keys = append(keys, scan.b[keyStart:end]) +======= + keyStart, keyEnd := scan.start, scan.end + end = scan.acceptMinSize(3) + if keyEnd != end { + keys = append(keys, scan.b[keyStart:end]) + } else { + scan.setError(ErrSyntax) + end = keyStart + } +>>>>>>> 33cbc1d (add batchrelease controller) } sort.Stable(bytesSort{keys, 2}) if n := len(keys); n > 0 { @@ -549,7 +585,11 @@ func parseExtension(scan *scanner) int { break } } +<<<<<<< HEAD case 't': // https://www.ietf.org/rfc/rfc6497.txt +======= + case 't': +>>>>>>> 33cbc1d (add batchrelease controller) scan.scan() if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { _, end = parseTag(scan) diff --git a/vendor/golang.org/x/text/language/go1_1.go b/vendor/golang.org/x/text/language/go1_1.go index c743558..cabb449 100644 --- a/vendor/golang.org/x/text/language/go1_1.go +++ b/vendor/golang.org/x/text/language/go1_1.go @@ -2,7 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +<<<<<<< HEAD //go:build !go1.2 +======= +>>>>>>> 33cbc1d (add batchrelease controller) // +build !go1.2 package language diff --git a/vendor/golang.org/x/text/language/go1_2.go b/vendor/golang.org/x/text/language/go1_2.go index 77aaaa2..641efd7 100644 --- a/vendor/golang.org/x/text/language/go1_2.go +++ b/vendor/golang.org/x/text/language/go1_2.go @@ -2,7 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +<<<<<<< HEAD //go:build go1.2 +======= +>>>>>>> 33cbc1d (add batchrelease controller) // +build go1.2 package language diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go index 289b3a3..83ed66a 100644 --- a/vendor/golang.org/x/text/language/language.go +++ b/vendor/golang.org/x/text/language/language.go @@ -412,10 +412,13 @@ func (t Tag) Extensions() []Extension { // are of the allowed values defined for the Unicode locale extension ('u') in // https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. // TypeForKey will traverse the inheritance chain to get the correct value. +<<<<<<< HEAD // // If there are multiple types associated with a key, only the first will be // returned. If there is no type associated with a key, it returns the empty // string. 
+======= +>>>>>>> 33cbc1d (add batchrelease controller) func (t Tag) TypeForKey(key string) string { if !compact.Tag(t).MayHaveExtensions() { if key != "rg" && key != "va" { diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go index 96b57f6..fd247b4 100644 --- a/vendor/golang.org/x/text/language/tables.go +++ b/vendor/golang.org/x/text/language/tables.go @@ -47,7 +47,11 @@ const ( _Zzzz = 251 ) +<<<<<<< HEAD var regionToGroups = []uint8{ // 358 elements +======= +var regionToGroups = []uint8{ // 357 elements +>>>>>>> 33cbc1d (add batchrelease controller) // Entry 0 - 3F 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, @@ -98,8 +102,13 @@ var regionToGroups = []uint8{ // 358 elements 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +<<<<<<< HEAD 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } // Size: 382 bytes +======= + 0x00, 0x00, 0x00, 0x00, 0x00, +} // Size: 381 bytes +>>>>>>> 33cbc1d (add batchrelease controller) var paradigmLocales = [][3]uint16{ // 3 elements 0: [3]uint16{0x139, 0x0, 0x7b}, @@ -295,4 +304,8 @@ var matchRegion = []regionIntelligibility{ // 15 elements 14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5}, } // Size: 114 bytes +<<<<<<< HEAD // Total table size 1472 bytes (1KiB); checksum: F86C669 +======= +// Total table size 1471 bytes (1KiB); checksum: 4CB1CD46 +>>>>>>> 33cbc1d (add batchrelease controller) diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE new file mode 100644 index 0000000..e4a47e1 --- /dev/null +++ b/vendor/golang.org/x/xerrors/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/xerrors/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README new file mode 100644 index 0000000..aac7867 --- /dev/null +++ b/vendor/golang.org/x/xerrors/README @@ -0,0 +1,2 @@ +This repository holds the transition packages for the new Go 1.13 error values. +See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go new file mode 100644 index 0000000..4317f24 --- /dev/null +++ b/vendor/golang.org/x/xerrors/adaptor.go @@ -0,0 +1,193 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" +) + +// FormatError calls the FormatError method of f with an errors.Printer +// configured according to s and verb, and writes the result to s. +func FormatError(f Formatter, s fmt.State, verb rune) { + // Assuming this function is only called from the Format method, and given + // that FormatError takes precedence over Format, it cannot be called from + // any package that supports errors.Formatter. It is therefore safe to + // disregard that State may be a specific printer implementation and use one + // of our choice instead. + + // limitations: does not support printing error as Go struct. + + var ( + sep = " " // separator before next error + p = &state{State: s} + direct = true + ) + + var err error = f + + switch verb { + // Note that this switch must match the preference order + // for ordinary string printing (%#v before %+v, and so on). 
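// --- Illustrative sketch (not part of the patch above) -----------------------
// FormatError above drives the Formatter/Printer protocol. A self-contained,
// hypothetical error type that opts in (only real xerrors APIs are used):
package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

type myError struct {
	msg   string
	frame xerrors.Frame
}

func (e *myError) Error() string { return e.msg }

// Format delegates to xerrors.FormatError, which calls FormatError below with
// a Printer configured for the verb in use (%v versus %+v).
func (e *myError) Format(s fmt.State, v rune) { xerrors.FormatError(e, s, v) }

func (e *myError) FormatError(p xerrors.Printer) (next error) {
	p.Print(e.msg)
	e.frame.Format(p) // emitted only when p.Detail() is true, i.e. under %+v
	return nil
}

func main() {
	err := &myError{msg: "boom", frame: xerrors.Caller(0)}
	fmt.Printf("%v\n", err)  // boom
	fmt.Printf("%+v\n", err) // boom, plus function and file:line detail
}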
+ + case 'v': + if s.Flag('#') { + if stringer, ok := err.(fmt.GoStringer); ok { + io.WriteString(&p.buf, stringer.GoString()) + goto exit + } + // proceed as if it were %v + } else if s.Flag('+') { + p.printDetail = true + sep = "\n - " + } + case 's': + case 'q', 'x', 'X': + // Use an intermediate buffer in the rare cases that precision, + // truncation, or one of the alternative verbs (q, x, and X) are + // specified. + direct = false + + default: + p.buf.WriteString("%!") + p.buf.WriteRune(verb) + p.buf.WriteByte('(') + switch { + case err != nil: + p.buf.WriteString(reflect.TypeOf(f).String()) + default: + p.buf.WriteString("<nil>") + } + p.buf.WriteByte(')') + io.Copy(s, &p.buf) + return + } + +loop: + for { + switch v := err.(type) { + case Formatter: + err = v.FormatError((*printer)(p)) + case fmt.Formatter: + v.Format(p, 'v') + break loop + default: + io.WriteString(&p.buf, v.Error()) + break loop + } + if err == nil { + break + } + if p.needColon || !p.printDetail { + p.buf.WriteByte(':') + p.needColon = false + } + p.buf.WriteString(sep) + p.inDetail = false + p.needNewline = false + } + +exit: + width, okW := s.Width() + prec, okP := s.Precision() + + if !direct || (okW && width > 0) || okP { + // Construct format string from State s. + format := []byte{'%'} + if s.Flag('-') { + format = append(format, '-') + } + if s.Flag('+') { + format = append(format, '+') + } + if s.Flag(' ') { + format = append(format, ' ') + } + if okW { + format = strconv.AppendInt(format, int64(width), 10) + } + if okP { + format = append(format, '.') + format = strconv.AppendInt(format, int64(prec), 10) + } + format = append(format, string(verb)...) + fmt.Fprintf(s, string(format), p.buf.String()) + } else { + io.Copy(s, &p.buf) + } +} + +var detailSep = []byte("\n ") + +// state tracks error printing state. It implements fmt.State. +type state struct { + fmt.State + buf bytes.Buffer + + printDetail bool + inDetail bool + needColon bool + needNewline bool +} + +func (s *state) Write(b []byte) (n int, err error) { + if s.printDetail { + if len(b) == 0 { + return 0, nil + } + if s.inDetail && s.needColon { + s.needNewline = true + if b[0] == '\n' { + b = b[1:] + } + } + k := 0 + for i, c := range b { + if s.needNewline { + if s.inDetail && s.needColon { + s.buf.WriteByte(':') + s.needColon = false + } + s.buf.Write(detailSep) + s.needNewline = false + } + if c == '\n' { + s.buf.Write(b[k:i]) + k = i + 1 + s.needNewline = true + } + } + s.buf.Write(b[k:]) + if !s.inDetail { + s.needColon = true + } + } else if !s.inDetail { + s.buf.Write(b) + } + return len(b), nil +} + +// printer wraps a state to implement an xerrors.Printer. +type printer state + +func (s *printer) Print(args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprint((*state)(s), args...) + } +} + +func (s *printer) Printf(format string, args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprintf((*state)(s), format, args...) + } +} + +func (s *printer) Detail() bool { + s.inDetail = true + return s.printDetail +} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg new file mode 100644 index 0000000..3f8b14b --- /dev/null +++ b/vendor/golang.org/x/xerrors/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go new file mode 100644 index 0000000..eef99d9 --- /dev/null +++ b/vendor/golang.org/x/xerrors/doc.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xerrors implements functions to manipulate errors. +// +// This package is based on the Go 2 proposal for error values: +// https://golang.org/design/29934-error-values +// +// These functions were incorporated into the standard library's errors package +// in Go 1.13: +// - Is +// - As +// - Unwrap +// +// Also, Errorf's %w verb was incorporated into fmt.Errorf. +// +// Use this package to get equivalent behavior in all supported Go versions. +// +// No other features of this package were included in Go 1.13, and at present +// there are no plans to include any of them. +package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go new file mode 100644 index 0000000..e88d377 --- /dev/null +++ b/vendor/golang.org/x/xerrors/errors.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import "fmt" + +// errorString is a trivial implementation of error. +type errorString struct { + s string + frame Frame +} + +// New returns an error that formats as the given text. +// +// The returned error contains a Frame set to the caller's location and +// implements Formatter to show this information when printed with details. +func New(text string) error { + return &errorString{text, Caller(1)} +} + +func (e *errorString) Error() string { + return e.s +} + +func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *errorString) FormatError(p Printer) (next error) { + p.Print(e.s) + e.frame.Format(p) + return nil +} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go new file mode 100644 index 0000000..829862d --- /dev/null +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -0,0 +1,187 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/xerrors/internal" +) + +const percentBangString = "%!" + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. +// +// The returned error includes the file and line number of the caller when +// formatted with additional detail enabled. If the last argument is an error +// the returned error's Format method will return it if the format string ends +// with ": %s", ": %v", or ": %w". If the last argument is an error and the +// format string ends with ": %w", the returned error implements an Unwrap +// method returning it. +// +// If the format specifier includes a %w verb with an error operand in a +// position other than at the end, the returned error will still implement an +// Unwrap method returning the operand, but the error's Format method will not +// return the wrapped error. +// +// It is invalid to include more than one %w verb or to supply it with an +// operand that does not implement the error interface. The %w verb is otherwise +// a synonym for %v. +func Errorf(format string, a ...interface{}) error { + format = formatPlusW(format) + // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. 
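// --- Illustrative sketch (not part of the patch above) -----------------------
// Per the Errorf documentation above, wrapping happens only when the format
// ends in ": %w"; ": %v" and ": %s" record the cause for formatting but do
// not expose it through Unwrap. A short, hypothetical demonstration:
package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

func main() {
	base := xerrors.New("file missing")

	wrapped := xerrors.Errorf("loading config: %w", base)
	fmt.Println(xerrors.Is(wrapped, base))       // true: %w exposes base
	fmt.Println(xerrors.Unwrap(wrapped) == base) // true

	opaque := xerrors.Errorf("loading config: %v", base)
	fmt.Println(xerrors.Is(opaque, base)) // false: %v does not wrap
	fmt.Println(xerrors.Unwrap(opaque))   // <nil>
}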
+ wrap := strings.HasSuffix(format, ": %w") + idx, format2, ok := parsePercentW(format) + percentWElsewhere := !wrap && idx >= 0 + if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { + err := errorAt(a, len(a)-1) + if err == nil { + return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} + } + // TODO: this is not entirely correct. The error value could be + // printed elsewhere in format if it mixes numbered with unnumbered + // substitutions. With relatively small changes to doPrintf we can + // have it optionally ignore extra arguments and pass the argument + // list in its entirety. + msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + if wrap { + return &wrapError{msg, err, frame} + } + return &noWrapError{msg, err, frame} + } + // Support %w anywhere. + // TODO: don't repeat the wrapped error's message when %w occurs in the middle. + msg := fmt.Sprintf(format2, a...) + if idx < 0 { + return &noWrapError{msg, nil, Caller(1)} + } + err := errorAt(a, idx) + if !ok || err == nil { + // Too many %ws or argument of %w is not an error. Approximate the Go + // 1.13 fmt.Errorf message. + return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} + } + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + return &wrapError{msg, err, frame} +} + +func errorAt(args []interface{}, i int) error { + if i < 0 || i >= len(args) { + return nil + } + err, ok := args[i].(error) + if !ok { + return nil + } + return err +} + +// formatPlusW is used to avoid the vet check that will barf at %w. +func formatPlusW(s string) string { + return s +} + +// Return the index of the only %w in format, or -1 if none. +// Also return a rewritten format string with %w replaced by %v, and +// false if there is more than one %w. +// TODO: handle "%[N]w". +func parsePercentW(format string) (idx int, newFormat string, ok bool) { + // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. + idx = -1 + ok = true + n := 0 + sz := 0 + var isW bool + for i := 0; i < len(format); i += sz { + if format[i] != '%' { + sz = 1 + continue + } + // "%%" is not a format directive. + if i+1 < len(format) && format[i+1] == '%' { + sz = 2 + continue + } + sz, isW = parsePrintfVerb(format[i:]) + if isW { + if idx >= 0 { + ok = false + } else { + idx = n + } + // "Replace" the last character, the 'w', with a 'v'. + p := i + sz - 1 + format = format[:p] + "v" + format[p+1:] + } + n++ + } + return idx, format, ok +} + +// Parse the printf verb starting with a % at s[0]. +// Return how many bytes it occupies and whether the verb is 'w'. +func parsePrintfVerb(s string) (int, bool) { + // Assume only that the directive is a sequence of non-letters followed by a single letter. 
+ sz := 0 + var r rune + for i := 1; i < len(s); i += sz { + r, sz = utf8.DecodeRuneInString(s[i:]) + if unicode.IsLetter(r) { + return i + sz, r == 'w' + } + } + return len(s), false +} + +type noWrapError struct { + msg string + err error + frame Frame +} + +func (e *noWrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *noWrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +type wrapError struct { + msg string + err error + frame Frame +} + +func (e *wrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *wrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +func (e *wrapError) Unwrap() error { + return e.err +} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go new file mode 100644 index 0000000..1bc9c26 --- /dev/null +++ b/vendor/golang.org/x/xerrors/format.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +// A Formatter formats error messages. +type Formatter interface { + error + + // FormatError prints the receiver's first error and returns the next error in + // the error chain, if any. + FormatError(p Printer) (next error) +} + +// A Printer formats error messages. +// +// The most common implementation of Printer is the one provided by package fmt +// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message +// typically provide their own implementations. +type Printer interface { + // Print appends args to the message output. + Print(args ...interface{}) + + // Printf writes a formatted string. + Printf(format string, args ...interface{}) + + // Detail reports whether error detail is requested. + // After the first call to Detail, all text written to the Printer + // is formatted as additional detail, or ignored when + // detail has not been requested. + // If Detail returns false, the caller can avoid printing the detail at all. + Detail() bool +} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go new file mode 100644 index 0000000..0de628e --- /dev/null +++ b/vendor/golang.org/x/xerrors/frame.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "runtime" +) + +// A Frame contains part of a call stack. +type Frame struct { + // Make room for three PCs: the one we were asked for, what it called, + // and possibly a PC for skipPleaseUseCallersFrames. See: + // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 + frames [3]uintptr +} + +// Caller returns a Frame that describes a frame on the caller's stack. +// The argument skip is the number of frames to skip over. +// Caller(0) returns the frame for the caller of Caller. +func Caller(skip int) Frame { + var s Frame + runtime.Callers(skip+1, s.frames[:]) + return s +} + +// location reports the file, line, and function of a frame. +// +// The returned function may be "" even if file and line are not. 
+func (f Frame) location() (function, file string, line int) { + frames := runtime.CallersFrames(f.frames[:]) + if _, ok := frames.Next(); !ok { + return "", "", 0 + } + fr, ok := frames.Next() + if !ok { + return "", "", 0 + } + return fr.Function, fr.File, fr.Line +} + +// Format prints the stack as error detail. +// It should be called from an error's Format implementation +// after printing any other error detail. +func (f Frame) Format(p Printer) { + if p.Detail() { + function, file, line := f.location() + if function != "" { + p.Printf("%s\n ", function) + } + if file != "" { + p.Printf("%s:%d\n", file, line) + } + } +} diff --git a/vendor/golang.org/x/xerrors/go.mod b/vendor/golang.org/x/xerrors/go.mod new file mode 100644 index 0000000..870d4f6 --- /dev/null +++ b/vendor/golang.org/x/xerrors/go.mod @@ -0,0 +1,3 @@ +module golang.org/x/xerrors + +go 1.11 diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go new file mode 100644 index 0000000..89f4eca --- /dev/null +++ b/vendor/golang.org/x/xerrors/internal/internal.go @@ -0,0 +1,8 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// EnableTrace indicates whether stack information should be recorded in errors. +var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go new file mode 100644 index 0000000..9a3b510 --- /dev/null +++ b/vendor/golang.org/x/xerrors/wrap.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "reflect" +) + +// A Wrapper provides context around another error. +type Wrapper interface { + // Unwrap returns the next error in the error chain. + // If there is no next error, Unwrap returns nil. + Unwrap() error +} + +// Opaque returns an error with the same error formatting as err +// but that does not match err and cannot be unwrapped. +func Opaque(err error) error { + return noWrapper{err} +} + +type noWrapper struct { + error +} + +func (e noWrapper) FormatError(p Printer) (next error) { + if f, ok := e.error.(Formatter); ok { + return f.FormatError(p) + } + p.Print(e.error) + return nil +} + +// Unwrap returns the result of calling the Unwrap method on err, if err implements +// Unwrap. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + u, ok := err.(Wrapper) + if !ok { + return nil + } + return u.Unwrap() +} + +// Is reports whether any error in err's chain matches target. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + // TODO: consider supporing target.Is(err). This would allow + // user-definable predicates, but also may allow for coping with sloppy + // APIs, thereby making it easier to get away with them. 
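// --- Illustrative sketch (not part of the patch above) -----------------------
// Is above also honors a custom `Is(error) bool` method on errors in the
// chain, enabling sentinel-style matching for rich error types. Everything
// named here (notFoundError, ErrNotFound) is hypothetical:
package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

var ErrNotFound = xerrors.New("not found")

type notFoundError struct{ key string }

func (e *notFoundError) Error() string { return "not found: " + e.key }

// Is makes any *notFoundError match the generic ErrNotFound sentinel.
func (e *notFoundError) Is(target error) bool { return target == ErrNotFound }

func main() {
	err := xerrors.Errorf("lookup: %w", &notFoundError{key: "user:42"})
	fmt.Println(xerrors.Is(err, ErrNotFound)) // true, via the custom Is hook
}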
+ if err = Unwrap(err); err == nil { + return false + } + } +} + +// As finds the first error in err's chain that matches the type to which target +// points, and if so, sets the target to its value and returns true. An error +// matches a type if it is assignable to the target type, or if it has a method +// As(interface{}) bool such that As(target) returns true. As will panic if target +// is not a non-nil pointer to a type which implements error or is of interface type. +// +// The As method should set the target to its value and return true if err +// matches the type to which target points. +func As(err error, target interface{}) bool { + if target == nil { + panic("errors: target cannot be nil") + } + val := reflect.ValueOf(target) + typ := val.Type() + if typ.Kind() != reflect.Ptr || val.IsNil() { + panic("errors: target must be a non-nil pointer") + } + if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { + panic("errors: *target must be interface or implement error") + } + targetType := typ.Elem() + for err != nil { + if reflect.TypeOf(err).AssignableTo(targetType) { + val.Elem().Set(reflect.ValueOf(err)) + return true + } + if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { + return true + } + err = Unwrap(err) + } + return false +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/modules.txt b/vendor/modules.txt index 15cdba3..be3736d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -69,17 +69,28 @@ github.com/matttproud/golang_protobuf_extensions/pbutil github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 +<<<<<<< HEAD # github.com/nxadm/tail v1.4.8 +======= +# github.com/nxadm/tail v1.4.4 +>>>>>>> 33cbc1d (add batchrelease controller) github.com/nxadm/tail github.com/nxadm/tail/ratelimiter github.com/nxadm/tail/util github.com/nxadm/tail/watch github.com/nxadm/tail/winfile +<<<<<<< HEAD # github.com/onsi/ginkgo v1.16.5 ## explicit github.com/onsi/ginkgo github.com/onsi/ginkgo/config github.com/onsi/ginkgo/formatter +======= +# github.com/onsi/ginkgo v1.14.1 +## explicit +github.com/onsi/ginkgo +github.com/onsi/ginkgo/config +>>>>>>> 33cbc1d (add batchrelease controller) github.com/onsi/ginkgo/internal/codelocation github.com/onsi/ginkgo/internal/containernode github.com/onsi/ginkgo/internal/failer @@ -97,11 +108,22 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types +<<<<<<< HEAD # github.com/onsi/gomega v1.17.0 ## explicit github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal +======= +# github.com/onsi/gomega v1.10.2 +## explicit +github.com/onsi/gomega +github.com/onsi/gomega/format +github.com/onsi/gomega/internal/assertion +github.com/onsi/gomega/internal/asyncassertion +github.com/onsi/gomega/internal/oraclematcher +github.com/onsi/gomega/internal/testingtsupport +>>>>>>> 33cbc1d (add batchrelease controller) github.com/onsi/gomega/matchers github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge @@ -166,7 +188,11 @@ golang.org/x/oauth2/jwt golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows +<<<<<<< HEAD # golang.org/x/text v0.3.6 +======= +# golang.org/x/text v0.3.4 +>>>>>>> 33cbc1d (add batchrelease controller) golang.org/x/text/encoding 
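// --- Illustrative sketch (not part of the patch above) -----------------------
// As, defined above, walks the chain for the first error assignable to
// *target. A short demonstration against the standard library:
package main

import (
	"fmt"
	"os"

	"golang.org/x/xerrors"
)

func main() {
	_, err := os.Open("/definitely/not/here")
	err = xerrors.Errorf("startup: %w", err)

	var pathErr *os.PathError
	if xerrors.As(err, &pathErr) {
		fmt.Println("failing path:", pathErr.Path) // the chain is unwrapped to reach *os.PathError
	}
}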
golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -189,6 +215,9 @@ golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e golang.org/x/time/rate +# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 +golang.org/x/xerrors +golang.org/x/xerrors/internal # gomodules.xyz/jsonpatch/v2 v2.1.0 gomodules.xyz/jsonpatch/v2 # google.golang.org/appengine v1.6.6 @@ -237,7 +266,11 @@ google.golang.org/protobuf/types/known/timestamppb gopkg.in/inf.v0 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/tomb.v1 +<<<<<<< HEAD # gopkg.in/yaml.v2 v2.4.0 +======= +# gopkg.in/yaml.v2 v2.3.0 +>>>>>>> 33cbc1d (add batchrelease controller) ## explicit gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 @@ -294,6 +327,7 @@ k8s.io/api/storage/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +<<<<<<< HEAD k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 @@ -301,6 +335,8 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1 +======= +>>>>>>> 33cbc1d (add batchrelease controller) # k8s.io/apimachinery v0.20.10 ## explicit k8s.io/apimachinery/pkg/api/equality