upgrade to latest dependencies (#1571)

bumping knative.dev/networking c7164d4...ec78554:
  > ec78554 Add transport option (#606)
bumping knative.dev/serving a3216ca...a4f8b72:
  > a4f8b72 Update net-kourier nightly (#12531)
  > cec1a47 Update net-gateway-api nightly (#12529)
  > 7826030 Update net-certmanager nightly (#12530)
  > 30f5c96 Move our CI to GKE 1.21 (#12509)
  > bd2a436 💫 Adding k8s 1.23.0 to GA action e2e workflows (#12411)
  > d844f47 CodeReading: Move var to where is used (#12532)
  > 0682e94 Add pvc support (#12458)
bumping knative.dev/eventing e0e3f44...ad895de:
  > ad895de Upgrade test waits for sender deletion (#6050)
  > c94bfb3 upgrade to latest dependencies (#6065)
  > ef00e28 Update and rename roadmap-2021.md to roadmap.md (#6067)

Signed-off-by: Knative Automation <automation@knative.team>
knative-automation 2022-01-21 04:04:04 -08:00 committed by GitHub
parent fc85cff55c
commit 17d6184cc8
11 changed files with 97 additions and 41 deletions

go.mod

@@ -19,10 +19,10 @@ require (
 	k8s.io/cli-runtime v0.21.4
 	k8s.io/client-go v0.22.5
 	k8s.io/code-generator v0.22.5
-	knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8
+	knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8
 	knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30
-	knative.dev/networking v0.0.0-20220118173833-c7164d4dd845
+	knative.dev/networking v0.0.0-20220120043934-ec785540a732
 	knative.dev/pkg v0.0.0-20220118160532-77555ea48cd4
-	knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4
+	knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4
 	sigs.k8s.io/yaml v1.3.0
 )

go.sum

@@ -1850,22 +1850,22 @@ k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
 k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 h1:ZKMMxTvduyf5WUtREOqg5LiXaN1KO/+0oOQPRFrClpo=
 k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 knative.dev/caching v0.0.0-20220113145613-9df2c0c8a931/go.mod h1:kHPJyq2W2ADNAwB6lmmJcleEZfNWioIL5yEs/p5WHU0=
-knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8 h1:MmqEwo3+dw2JJ3KfOyq+FnMhaA+ztcuDvMiLLkLHM6M=
-knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8/go.mod h1:9wxM6SRhgS541dVDVvwMwvSJl3B1MzV7FnxxJ8bSyNs=
+knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8 h1:8b3bndqQAIFdEFzNip7QJEhtp+j9xdiRuJNh0jhSDT4=
+knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8/go.mod h1:u5T5NZTDUsLR7yJwp5MDnBnDX5MhywD3yK3Rq+7gTtI=
 knative.dev/hack v0.0.0-20220111151514-59b0cf17578e/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
 knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30 h1:UkNpCWCMM5C4AeQ8aTrPTuR/6OeARiqk+LEQ6tuMP7c=
 knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
-knative.dev/hack/schema v0.0.0-20220111151514-59b0cf17578e/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
+knative.dev/hack/schema v0.0.0-20220118141833-9b2ed8471e30/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
 knative.dev/networking v0.0.0-20220117015928-52fb6ee37bf9/go.mod h1:h4JrHof14QSggAnthL3DPFw1cieenumKDaT+wg0EPmE=
-knative.dev/networking v0.0.0-20220118173833-c7164d4dd845 h1:5+mqH+t0Y7M0T3izEFjQzPR2ZcPKhfV8F+bsYTsj8Jg=
-knative.dev/networking v0.0.0-20220118173833-c7164d4dd845/go.mod h1:6cKBV/h/vIQWCPOkds/RvzUmMR8Vz6Dks2NWb0+3xks=
+knative.dev/networking v0.0.0-20220120043934-ec785540a732 h1:fUUb5NWfYm8TQkhv1uFYlEn91YNKp+E4BD7ax7WYHW0=
+knative.dev/networking v0.0.0-20220120043934-ec785540a732/go.mod h1:6cKBV/h/vIQWCPOkds/RvzUmMR8Vz6Dks2NWb0+3xks=
 knative.dev/pkg v0.0.0-20220113045912-c0e1594c2fb1/go.mod h1:X5B0/SR3tzOioS2WQtS9uJTO71M52qcrDkWQ5wtyLVc=
 knative.dev/pkg v0.0.0-20220114141842-0a429cba1c73/go.mod h1:X5B0/SR3tzOioS2WQtS9uJTO71M52qcrDkWQ5wtyLVc=
 knative.dev/pkg v0.0.0-20220118160532-77555ea48cd4 h1:b9aXVrcfM/ajjHE/lGvlJOHZNAR5FF2TOTLWG7eMhzQ=
 knative.dev/pkg v0.0.0-20220118160532-77555ea48cd4/go.mod h1:etVT7Tm8pSDf4RKhGk4r7j/hj3dNBpvT7bO6a6wpahs=
-knative.dev/reconciler-test v0.0.0-20220117082429-6a9b91eef10c/go.mod h1:zhcMJ0CfqI7de0Ir0yXTTB6BgShWvLEx+BqQdvg+1pU=
-knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4 h1:ihmnuJ2Aof2h908ZcvSksmFY4yakWc907ZpKQWIk9y8=
-knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4/go.mod h1:M6679d8WkZuG/NpVjPBkC9UppjOZRHNVJP0WIWqA62M=
+knative.dev/reconciler-test v0.0.0-20220118183433-c8bfbe66bada/go.mod h1:XV4cghzCtdASkfUsfMYSnGfGRyd/naDjy9h7Tnae22g=
+knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4 h1:mUz+V30RkB+nKQs1mnePZhKl62/66FvcU2Y9PK1+8dc=
+knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4/go.mod h1:M6679d8WkZuG/NpVjPBkC9UppjOZRHNVJP0WIWqA62M=
 pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=

View File

@@ -54,6 +54,8 @@ func defaultFeaturesConfig() *Features {
 		ContainerSpecAddCapabilities: Disabled,
 		PodSpecTolerations:           Disabled,
 		PodSpecVolumesEmptyDir:       Disabled,
+		PodSpecPersistentVolumeClaim: Disabled,
+		PodSpecPersistentVolumeWrite: Disabled,
 		PodSpecInitContainers:        Disabled,
 		TagHeaderBasedRouting:        Disabled,
 		AutoDetectHTTP2:              Disabled,
@@ -79,6 +81,8 @@ func NewFeaturesConfigFromMap(data map[string]string) (*Features, error) {
 		asFlag("kubernetes.podspec-tolerations", &nc.PodSpecTolerations),
 		asFlag("kubernetes.podspec-volumes-emptydir", &nc.PodSpecVolumesEmptyDir),
 		asFlag("kubernetes.podspec-init-containers", &nc.PodSpecInitContainers),
+		asFlag("kubernetes.podspec-persistent-volume-claim", &nc.PodSpecPersistentVolumeClaim),
+		asFlag("kubernetes.podspec-persistent-volume-write", &nc.PodSpecPersistentVolumeWrite),
 		asFlag("tag-header-based-routing", &nc.TagHeaderBasedRouting),
 		asFlag("autodetect-http2", &nc.AutoDetectHTTP2)); err != nil {
 		return nil, err
@@ -107,6 +111,8 @@ type Features struct {
 	PodSpecTolerations           Flag
 	PodSpecVolumesEmptyDir       Flag
 	PodSpecInitContainers        Flag
+	PodSpecPersistentVolumeClaim Flag
+	PodSpecPersistentVolumeWrite Flag
 	TagHeaderBasedRouting        Flag
 	AutoDetectHTTP2              Flag
 }
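For orientation, a minimal sketch of how the two new feature flags would surface once parsed, assuming the vendored knative.dev/serving/pkg/apis/config package and the lowercase values commonly used in the config-features ConfigMap (that value format is an assumption here, not confirmed by this diff); the map keys mirror the asFlag calls above.

package main

import (
	"fmt"

	"knative.dev/serving/pkg/apis/config"
)

func main() {
	// Keys mirror the asFlag() names added in the hunk above; "enabled" is
	// assumed to be an accepted flag value.
	features, err := config.NewFeaturesConfigFromMap(map[string]string{
		"kubernetes.podspec-persistent-volume-claim": "enabled",
		"kubernetes.podspec-persistent-volume-write": "enabled",
	})
	if err != nil {
		panic(err)
	}

	// Both flags default to Disabled in defaultFeaturesConfig().
	fmt.Println(features.PodSpecPersistentVolumeClaim == config.Enabled) // true
	fmt.Println(features.PodSpecPersistentVolumeWrite == config.Enabled) // true
}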

View File

@@ -45,6 +45,10 @@ func VolumeMask(ctx context.Context, in *corev1.Volume) *corev1.Volume {
 		out.EmptyDir = in.EmptyDir
 	}
+	if cfg.Features.PodSpecPersistentVolumeClaim != config.Disabled {
+		out.PersistentVolumeClaim = in.PersistentVolumeClaim
+	}
 	return out
 }
@@ -67,6 +71,10 @@ func VolumeSourceMask(ctx context.Context, in *corev1.VolumeSource) *corev1.Volu
 		out.EmptyDir = in.EmptyDir
 	}
+	if cfg.Features.PodSpecPersistentVolumeClaim != config.Disabled {
+		out.PersistentVolumeClaim = in.PersistentVolumeClaim
+	}
 	// Too many disallowed fields to list
 	return out
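The masks above copy a field through only when its feature flag is not Disabled, so a PVC source submitted with the flag off is stripped before the disallowed-field check. A stand-alone sketch of that pattern; featureGatedVolumeSource is a hypothetical helper written for illustration, not the vendored API:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// featureGatedVolumeSource mirrors the VolumeSourceMask change: always-allowed
// sources are copied through, feature-gated ones only when their flag is on.
func featureGatedVolumeSource(in corev1.VolumeSource, emptyDirOn, pvcOn bool) corev1.VolumeSource {
	out := corev1.VolumeSource{
		Secret:    in.Secret,
		ConfigMap: in.ConfigMap,
		Projected: in.Projected,
	}
	if emptyDirOn {
		out.EmptyDir = in.EmptyDir
	}
	if pvcOn {
		out.PersistentVolumeClaim = in.PersistentVolumeClaim
	}
	return out
}

func main() {
	in := corev1.VolumeSource{
		PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: "myclaim"},
	}
	// With the PVC flag off the claim is masked away, so it later shows up as a
	// disallowed field; with the flag on it survives masking.
	fmt.Println(featureGatedVolumeSource(in, false, false).PersistentVolumeClaim == nil) // true
	fmt.Println(featureGatedVolumeSource(in, false, true).PersistentVolumeClaim != nil)  // true
}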

View File

@@ -85,12 +85,7 @@ var (
 func ValidateVolumes(ctx context.Context, vs []corev1.Volume, mountedVolumes sets.String) (map[string]corev1.Volume, *apis.FieldError) {
 	volumes := make(map[string]corev1.Volume, len(vs))
 	var errs *apis.FieldError
-	features := config.FromContextOrDefaults(ctx).Features
 	for i, volume := range vs {
-		if volume.EmptyDir != nil && features.PodSpecVolumesEmptyDir != config.Enabled {
-			errs = errs.Also((&apis.FieldError{Message: fmt.Sprintf("EmptyDir volume support is off, "+
-				"but found EmptyDir volume %s", volume.Name)}).ViaIndex(i))
-		}
 		if _, ok := volumes[volume.Name]; ok {
 			errs = errs.Also((&apis.FieldError{
 				Message: fmt.Sprintf("duplicate volume name %q", volume.Name),
@@ -109,14 +104,36 @@ func ValidateVolumes(ctx context.Context, vs []corev1.Volume, mountedVolumes set
 	return volumes, errs
 }

+func validatePersistentVolumeClaims(volume corev1.VolumeSource, features *config.Features) *apis.FieldError {
+	var errs *apis.FieldError
+	if volume.PersistentVolumeClaim == nil {
+		return nil
+	}
+	if features.PodSpecPersistentVolumeClaim != config.Enabled {
+		errs = errs.Also(&apis.FieldError{Message: fmt.Sprintf("Persistent volume claim support is disabled, "+
+			"but found persistent volume claim %s", volume.PersistentVolumeClaim.ClaimName)})
+	}
+	isWriteEnabled := features.PodSpecPersistentVolumeWrite == config.Enabled
+	if !volume.PersistentVolumeClaim.ReadOnly && !isWriteEnabled {
+		errs = errs.Also(&apis.FieldError{Message: fmt.Sprintf("Persistent volume write support is disabled, "+
+			"but found persistent volume claim %s that is not read-only", volume.PersistentVolumeClaim.ClaimName)})
+	}
+	return errs
+}
+
 func validateVolume(ctx context.Context, volume corev1.Volume) *apis.FieldError {
-	errs := apis.CheckDisallowedFields(volume, *VolumeMask(ctx, &volume))
+	features := config.FromContextOrDefaults(ctx).Features
+	errs := validatePersistentVolumeClaims(volume.VolumeSource, features)
+	if volume.EmptyDir != nil && features.PodSpecVolumesEmptyDir != config.Enabled {
+		errs = errs.Also(&apis.FieldError{Message: fmt.Sprintf("EmptyDir volume support is disabled, "+
+			"but found EmptyDir volume %s", volume.Name)})
+	}
+	errs = errs.Also(apis.CheckDisallowedFields(volume, *VolumeMask(ctx, &volume)))
 	if volume.Name == "" {
 		errs = apis.ErrMissingField("name")
 	} else if len(validation.IsDNS1123Label(volume.Name)) != 0 {
 		errs = apis.ErrInvalidValue(volume.Name, "name")
 	}
 	vs := volume.VolumeSource
 	errs = errs.Also(apis.CheckDisallowedFields(vs, *VolumeSourceMask(ctx, &vs)))
 	var specified []string
@@ -142,12 +159,20 @@ func validateVolume(ctx context.Context, volume corev1.Volume) *apis.FieldError
 		specified = append(specified, "emptyDir")
 		errs = errs.Also(validateEmptyDirFields(vs.EmptyDir).ViaField("emptyDir"))
 	}
+	if vs.PersistentVolumeClaim != nil {
+		specified = append(specified, "persistentVolumeClaim")
+	}
 	if len(specified) == 0 {
 		fieldPaths := []string{"secret", "configMap", "projected"}
 		cfg := config.FromContextOrDefaults(ctx)
 		if cfg.Features.PodSpecVolumesEmptyDir == config.Enabled {
 			fieldPaths = append(fieldPaths, "emptyDir")
 		}
+		if cfg.Features.PodSpecPersistentVolumeClaim == config.Enabled {
+			fieldPaths = append(fieldPaths, "persistentVolumeClaim")
+		}
 		errs = errs.Also(apis.ErrMissingOneOf(fieldPaths...))
 	} else if len(specified) > 1 {
 		errs = errs.Also(apis.ErrMultipleOneOf(specified...))
@@ -616,8 +641,21 @@ func validateVolumeMounts(mounts []corev1.VolumeMount, volumes map[string]corev1
 		}
 		seenMountPath.Insert(filepath.Clean(vm.MountPath))
-		if volumes[vm.Name].EmptyDir == nil && !vm.ReadOnly {
-			errs = errs.Also(apis.ErrMissingField("readOnly").ViaIndex(i))
+		shouldCheckReadOnlyVolume := volumes[vm.Name].EmptyDir == nil && volumes[vm.Name].PersistentVolumeClaim == nil
+		if shouldCheckReadOnlyVolume && !vm.ReadOnly {
+			errs = errs.Also((&apis.FieldError{
+				Message: "volume mount should be readOnly for this type of volume",
+				Paths:   []string{"readOnly"},
+			}).ViaIndex(i))
+		}
+		if volumes[vm.Name].PersistentVolumeClaim != nil {
+			if volumes[vm.Name].PersistentVolumeClaim.ReadOnly && !vm.ReadOnly {
+				errs = errs.Also((&apis.FieldError{
+					Message: "volume is readOnly but volume mount is not",
+					Paths:   []string{"readOnly"},
+				}).ViaIndex(i))
+			}
 		}
 	}
 	return errs
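Taken together, the new checks require a PVC-backed volume to be read-only unless kubernetes.podspec-persistent-volume-write is enabled, and a mount of a read-only claim to set readOnly as well. A hypothetical volume/mount pair that would pass these checks with only the claim flag enabled (the claim name, volume name, and mount path are illustrative, and the claim must already exist in the namespace):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	vol := corev1.Volume{
		Name: "data",
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: "myclaim",
				ReadOnly:  true, // required unless podspec-persistent-volume-write is enabled
			},
		},
	}
	mount := corev1.VolumeMount{
		Name:      "data",
		MountPath: "/data",
		ReadOnly:  true, // required because the claim itself is read-only
	}
	// The mount must reference the declared volume by name.
	fmt.Println(vol.Name == mount.Name) // true
}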

View File

@@ -121,7 +121,7 @@ func (rs *RevisionSpec) applyDefault(ctx context.Context, container *corev1.Cont
 	vNames := make(sets.String)
 	for _, v := range rs.PodSpec.Volumes {
-		if v.EmptyDir != nil {
+		if v.EmptyDir != nil || v.PersistentVolumeClaim != nil {
 			vNames.Insert(v.Name)
 		}
 	}

View File

@@ -232,6 +232,22 @@ func WithRevisionInitContainers() RevisionOption {
 	}
 }
+
+func WithRevisionPVC() RevisionOption {
+	return func(r *v1.Revision) {
+		r.Spec.Volumes = []corev1.Volume{{
+			Name: "claimvolume",
+			VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+				ClaimName: "myclaim",
+				ReadOnly:  false,
+			}}},
+		}
+		r.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{
+			Name:      "claimvolume",
+			MountPath: "/data",
+		}}
+	}
+}
 // Revision creates a revision object with given ns/name and options.
 func Revision(namespace, name string, ro ...RevisionOption) *v1.Revision {
 	r := &v1.Revision{
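A usage sketch for the new option, assuming it is called from a test file in the same testing package as the Revision constructor above (the namespace and name are arbitrary, and the fmt import is assumed):

// Composes like the other RevisionOption helpers.
func ExampleWithRevisionPVC() {
	rev := Revision("default", "pvc-revision", WithRevisionPVC())
	// rev.Spec.Volumes carries the "claimvolume" PVC volume and the first
	// container mounts it read-write at /data, matching the option above.
	fmt.Println(rev.Spec.Volumes[0].Name)
	// Output: claimvolume
}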

View File

@@ -158,12 +158,7 @@ function delete_dns_record() {
 }
 # Script entry point.
-# Skip installing istio as an add-on
-# Temporarily increasing the cluster size for serving tests to rule out
-# resource/eviction as causes of flakiness.
-# Pin to 1.20 since scale test is super flakey on 1.21
-initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.20
+initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.21
 # Run the tests
 header "Running tests"

View File

@@ -28,12 +28,7 @@
 source $(dirname $0)/e2e-common.sh
 # Script entry point.
-# Skip installing istio as an add-on.
-# Temporarily increasing the cluster size for serving tests to rule out
-# resource/eviction as causes of flakiness.
-# Pin to 1.20 since scale test is super flakey on 1.21
-initialize --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.20 "$@"
+initialize --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.21 "$@"
 # Run the tests
 header "Running tests"
@@ -112,7 +107,8 @@ kubectl replace cm "config-gc" -n ${SYSTEM_NAMESPACE} -f ${TMP_DIR}/config-gc.ya
 # Note that we use a very high -parallel because each ksvc is run as its own
 # sub-test. If this is not larger than the maximum scale tested then the test
 # simply cannot pass.
-go_test_e2e -timeout=20m -parallel=300 ./test/scale ${TEST_OPTIONS} || failed=1
+# TODO - Renable once we get this reliably passing on GKE 1.21
+# go_test_e2e -timeout=20m -parallel=300 ./test/scale ${TEST_OPTIONS} || failed=1
 # Run HPA tests
 go_test_e2e -timeout=30m -tags=hpa ./test/e2e ${TEST_OPTIONS} || failed=1

View File

@@ -40,10 +40,7 @@ function stage_test_resources() {
 # Script entry point.
 # Skip installing istio as an add-on.
-# Temporarily increasing the cluster size for serving tests to rule out
-# resource/eviction as causes of flakiness.
-# Pin to 1.20 since scale test is super flakey on 1.21
-initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --cluster-version=1.20 \
+initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --cluster-version=1.21 \
   --install-latest-release
 # TODO(#2656): Reduce the timeout after we get this test to consistently passing.

vendor/modules.txt

@@ -748,7 +748,7 @@ k8s.io/utils/buffer
 k8s.io/utils/integer
 k8s.io/utils/pointer
 k8s.io/utils/trace
-# knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8
+# knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8
 ## explicit
 knative.dev/eventing/pkg/apis/config
 knative.dev/eventing/pkg/apis/duck
@@ -777,7 +777,7 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
 # knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30
 ## explicit
 knative.dev/hack
-# knative.dev/networking v0.0.0-20220118173833-c7164d4dd845
+# knative.dev/networking v0.0.0-20220120043934-ec785540a732
 ## explicit
 knative.dev/networking/pkg
 knative.dev/networking/pkg/apis/networking
@@ -835,7 +835,7 @@ knative.dev/pkg/tracing/config
 knative.dev/pkg/tracing/propagation
 knative.dev/pkg/tracing/propagation/tracecontextb3
 knative.dev/pkg/tracker
-# knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4
+# knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4
 ## explicit
 knative.dev/serving/pkg/apis/autoscaling
 knative.dev/serving/pkg/apis/autoscaling/v1alpha1