diff --git a/go.mod b/go.mod
index 479c91d9c..05a75eb59 100644
--- a/go.mod
+++ b/go.mod
@@ -19,10 +19,10 @@ require (
 	k8s.io/cli-runtime v0.21.4
 	k8s.io/client-go v0.22.5
 	k8s.io/code-generator v0.22.5
-	knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8
+	knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8
 	knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30
-	knative.dev/networking v0.0.0-20220118173833-c7164d4dd845
+	knative.dev/networking v0.0.0-20220120043934-ec785540a732
 	knative.dev/pkg v0.0.0-20220118160532-77555ea48cd4
-	knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4
+	knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4
 	sigs.k8s.io/yaml v1.3.0
 )
diff --git a/go.sum b/go.sum
index dc3724ca1..df186da91 100644
--- a/go.sum
+++ b/go.sum
@@ -1850,22 +1850,22 @@ k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
 k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 h1:ZKMMxTvduyf5WUtREOqg5LiXaN1KO/+0oOQPRFrClpo=
 k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 knative.dev/caching v0.0.0-20220113145613-9df2c0c8a931/go.mod h1:kHPJyq2W2ADNAwB6lmmJcleEZfNWioIL5yEs/p5WHU0=
-knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8 h1:MmqEwo3+dw2JJ3KfOyq+FnMhaA+ztcuDvMiLLkLHM6M=
-knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8/go.mod h1:9wxM6SRhgS541dVDVvwMwvSJl3B1MzV7FnxxJ8bSyNs=
+knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8 h1:8b3bndqQAIFdEFzNip7QJEhtp+j9xdiRuJNh0jhSDT4=
+knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8/go.mod h1:u5T5NZTDUsLR7yJwp5MDnBnDX5MhywD3yK3Rq+7gTtI=
 knative.dev/hack v0.0.0-20220111151514-59b0cf17578e/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
 knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30 h1:UkNpCWCMM5C4AeQ8aTrPTuR/6OeARiqk+LEQ6tuMP7c=
 knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
-knative.dev/hack/schema v0.0.0-20220111151514-59b0cf17578e/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
+knative.dev/hack/schema v0.0.0-20220118141833-9b2ed8471e30/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
 knative.dev/networking v0.0.0-20220117015928-52fb6ee37bf9/go.mod h1:h4JrHof14QSggAnthL3DPFw1cieenumKDaT+wg0EPmE=
-knative.dev/networking v0.0.0-20220118173833-c7164d4dd845 h1:5+mqH+t0Y7M0T3izEFjQzPR2ZcPKhfV8F+bsYTsj8Jg=
-knative.dev/networking v0.0.0-20220118173833-c7164d4dd845/go.mod h1:6cKBV/h/vIQWCPOkds/RvzUmMR8Vz6Dks2NWb0+3xks=
+knative.dev/networking v0.0.0-20220120043934-ec785540a732 h1:fUUb5NWfYm8TQkhv1uFYlEn91YNKp+E4BD7ax7WYHW0=
+knative.dev/networking v0.0.0-20220120043934-ec785540a732/go.mod h1:6cKBV/h/vIQWCPOkds/RvzUmMR8Vz6Dks2NWb0+3xks=
 knative.dev/pkg v0.0.0-20220113045912-c0e1594c2fb1/go.mod h1:X5B0/SR3tzOioS2WQtS9uJTO71M52qcrDkWQ5wtyLVc=
 knative.dev/pkg v0.0.0-20220114141842-0a429cba1c73/go.mod h1:X5B0/SR3tzOioS2WQtS9uJTO71M52qcrDkWQ5wtyLVc=
 knative.dev/pkg v0.0.0-20220118160532-77555ea48cd4 h1:b9aXVrcfM/ajjHE/lGvlJOHZNAR5FF2TOTLWG7eMhzQ=
 knative.dev/pkg v0.0.0-20220118160532-77555ea48cd4/go.mod h1:etVT7Tm8pSDf4RKhGk4r7j/hj3dNBpvT7bO6a6wpahs=
-knative.dev/reconciler-test v0.0.0-20220117082429-6a9b91eef10c/go.mod h1:zhcMJ0CfqI7de0Ir0yXTTB6BgShWvLEx+BqQdvg+1pU=
-knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4 h1:ihmnuJ2Aof2h908ZcvSksmFY4yakWc907ZpKQWIk9y8=
-knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4/go.mod h1:M6679d8WkZuG/NpVjPBkC9UppjOZRHNVJP0WIWqA62M=
+knative.dev/reconciler-test v0.0.0-20220118183433-c8bfbe66bada/go.mod h1:XV4cghzCtdASkfUsfMYSnGfGRyd/naDjy9h7Tnae22g=
+knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4 h1:mUz+V30RkB+nKQs1mnePZhKl62/66FvcU2Y9PK1+8dc=
+knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4/go.mod h1:M6679d8WkZuG/NpVjPBkC9UppjOZRHNVJP0WIWqA62M=
 pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
diff --git a/vendor/knative.dev/serving/pkg/apis/config/features.go b/vendor/knative.dev/serving/pkg/apis/config/features.go
index db4f212d6..0998025e3 100644
--- a/vendor/knative.dev/serving/pkg/apis/config/features.go
+++ b/vendor/knative.dev/serving/pkg/apis/config/features.go
@@ -54,6 +54,8 @@ func defaultFeaturesConfig() *Features {
 		ContainerSpecAddCapabilities: Disabled,
 		PodSpecTolerations:           Disabled,
 		PodSpecVolumesEmptyDir:       Disabled,
+		PodSpecPersistentVolumeClaim: Disabled,
+		PodSpecPersistentVolumeWrite: Disabled,
 		PodSpecInitContainers:        Disabled,
 		TagHeaderBasedRouting:        Disabled,
 		AutoDetectHTTP2:              Disabled,
@@ -79,6 +81,8 @@ func NewFeaturesConfigFromMap(data map[string]string) (*Features, error) {
 		asFlag("kubernetes.podspec-tolerations", &nc.PodSpecTolerations),
 		asFlag("kubernetes.podspec-volumes-emptydir", &nc.PodSpecVolumesEmptyDir),
 		asFlag("kubernetes.podspec-init-containers", &nc.PodSpecInitContainers),
+		asFlag("kubernetes.podspec-persistent-volume-claim", &nc.PodSpecPersistentVolumeClaim),
+		asFlag("kubernetes.podspec-persistent-volume-write", &nc.PodSpecPersistentVolumeWrite),
 		asFlag("tag-header-based-routing", &nc.TagHeaderBasedRouting),
 		asFlag("autodetect-http2", &nc.AutoDetectHTTP2)); err != nil {
 		return nil, err
@@ -107,6 +111,8 @@ type Features struct {
 	PodSpecTolerations           Flag
 	PodSpecVolumesEmptyDir       Flag
 	PodSpecInitContainers        Flag
+	PodSpecPersistentVolumeClaim Flag
+	PodSpecPersistentVolumeWrite Flag
 	TagHeaderBasedRouting        Flag
 	AutoDetectHTTP2              Flag
 }
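The features.go hunks above register two new flags, kubernetes.podspec-persistent-volume-claim and kubernetes.podspec-persistent-volume-write, both defaulting to Disabled. A minimal sketch of how they parse, assuming asFlag accepts the usual lowercase "enabled" values and that the package is imported from the vendored path shown above; the main wrapper is ours, for illustration only. In a live cluster these keys would normally be set in the config-features ConfigMap.

package main

import (
	"fmt"

	"knative.dev/serving/pkg/apis/config"
)

func main() {
	// Feed the two new keys through the patched parser; any keys not
	// present in the map keep their defaults (Disabled).
	features, err := config.NewFeaturesConfigFromMap(map[string]string{
		"kubernetes.podspec-persistent-volume-claim": "enabled",
		"kubernetes.podspec-persistent-volume-write": "enabled",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(features.PodSpecPersistentVolumeClaim == config.Enabled) // true
	fmt.Println(features.PodSpecPersistentVolumeWrite == config.Enabled) // true
}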
diff --git a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
index 4ea939b29..32723a1f8 100644
--- a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
+++ b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
@@ -45,6 +45,10 @@ func VolumeMask(ctx context.Context, in *corev1.Volume) *corev1.Volume {
 		out.EmptyDir = in.EmptyDir
 	}
 
+	if cfg.Features.PodSpecPersistentVolumeClaim != config.Disabled {
+		out.PersistentVolumeClaim = in.PersistentVolumeClaim
+	}
+
 	return out
 }
 
@@ -67,6 +71,10 @@ func VolumeSourceMask(ctx context.Context, in *corev1.VolumeSource) *corev1.Volu
 		out.EmptyDir = in.EmptyDir
 	}
 
+	if cfg.Features.PodSpecPersistentVolumeClaim != config.Disabled {
+		out.PersistentVolumeClaim = in.PersistentVolumeClaim
+	}
+
 	// Too many disallowed fields to list
 
 	return out
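VolumeMask and VolumeSourceMask now pass PersistentVolumeClaim through whenever the flag is not Disabled (i.e. Enabled or Allowed); otherwise the field is stripped from the masked copy, which is what apis.CheckDisallowedFields later reports. A sketch of the disabled path, assuming a bare context so config.FromContextOrDefaults falls back to the defaults; the main wrapper is ours.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"knative.dev/serving/pkg/apis/serving"
)

func main() {
	in := &corev1.VolumeSource{
		PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: "myclaim"},
	}
	// No config is attached to this context, so the defaults apply and the
	// PVC feature is Disabled: the masked copy drops the field entirely.
	out := serving.VolumeSourceMask(context.Background(), in)
	fmt.Println(out.PersistentVolumeClaim == nil) // true
}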
diff --git a/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go b/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go
index 2d03ecbdb..eeed43fc8 100644
--- a/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go
+++ b/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go
@@ -85,12 +85,7 @@ var (
 func ValidateVolumes(ctx context.Context, vs []corev1.Volume, mountedVolumes sets.String) (map[string]corev1.Volume, *apis.FieldError) {
 	volumes := make(map[string]corev1.Volume, len(vs))
 	var errs *apis.FieldError
-	features := config.FromContextOrDefaults(ctx).Features
 	for i, volume := range vs {
-		if volume.EmptyDir != nil && features.PodSpecVolumesEmptyDir != config.Enabled {
-			errs = errs.Also((&apis.FieldError{Message: fmt.Sprintf("EmptyDir volume support is off, "+
-				"but found EmptyDir volume %s", volume.Name)}).ViaIndex(i))
-		}
 		if _, ok := volumes[volume.Name]; ok {
 			errs = errs.Also((&apis.FieldError{
 				Message: fmt.Sprintf("duplicate volume name %q", volume.Name),
@@ -109,14 +104,36 @@ func ValidateVolumes(ctx context.Context, vs []corev1.Volume, mountedVolumes set
 	return volumes, errs
 }
 
+func validatePersistentVolumeClaims(volume corev1.VolumeSource, features *config.Features) *apis.FieldError {
+	var errs *apis.FieldError
+	if volume.PersistentVolumeClaim == nil {
+		return nil
+	}
+	if features.PodSpecPersistentVolumeClaim != config.Enabled {
+		errs = errs.Also(&apis.FieldError{Message: fmt.Sprintf("Persistent volume claim support is disabled, "+
+			"but found persistent volume claim %s", volume.PersistentVolumeClaim.ClaimName)})
+	}
+	isWriteEnabled := features.PodSpecPersistentVolumeWrite == config.Enabled
+	if !volume.PersistentVolumeClaim.ReadOnly && !isWriteEnabled {
+		errs = errs.Also(&apis.FieldError{Message: fmt.Sprintf("Persistent volume write support is disabled, "+
+			"but found persistent volume claim %s that is not read-only", volume.PersistentVolumeClaim.ClaimName)})
+	}
+	return errs
+}
+
 func validateVolume(ctx context.Context, volume corev1.Volume) *apis.FieldError {
-	errs := apis.CheckDisallowedFields(volume, *VolumeMask(ctx, &volume))
+	features := config.FromContextOrDefaults(ctx).Features
+	errs := validatePersistentVolumeClaims(volume.VolumeSource, features)
+	if volume.EmptyDir != nil && features.PodSpecVolumesEmptyDir != config.Enabled {
+		errs = errs.Also(&apis.FieldError{Message: fmt.Sprintf("EmptyDir volume support is disabled, "+
+			"but found EmptyDir volume %s", volume.Name)})
+	}
+	errs = errs.Also(apis.CheckDisallowedFields(volume, *VolumeMask(ctx, &volume)))
 	if volume.Name == "" {
 		errs = apis.ErrMissingField("name")
 	} else if len(validation.IsDNS1123Label(volume.Name)) != 0 {
 		errs = apis.ErrInvalidValue(volume.Name, "name")
 	}
-
 	vs := volume.VolumeSource
 	errs = errs.Also(apis.CheckDisallowedFields(vs, *VolumeSourceMask(ctx, &vs)))
 
 	var specified []string
@@ -142,12 +159,20 @@ func validateVolume(ctx context.Context, volume corev1.Volume) *apis.FieldError
 		specified = append(specified, "emptyDir")
 		errs = errs.Also(validateEmptyDirFields(vs.EmptyDir).ViaField("emptyDir"))
 	}
+
+	if vs.PersistentVolumeClaim != nil {
+		specified = append(specified, "persistentVolumeClaim")
+	}
+
 	if len(specified) == 0 {
 		fieldPaths := []string{"secret", "configMap", "projected"}
 		cfg := config.FromContextOrDefaults(ctx)
 		if cfg.Features.PodSpecVolumesEmptyDir == config.Enabled {
 			fieldPaths = append(fieldPaths, "emptyDir")
 		}
+		if cfg.Features.PodSpecPersistentVolumeClaim == config.Enabled {
+			fieldPaths = append(fieldPaths, "persistentVolumeClaim")
+		}
 		errs = errs.Also(apis.ErrMissingOneOf(fieldPaths...))
 	} else if len(specified) > 1 {
 		errs = errs.Also(apis.ErrMultipleOneOf(specified...))
@@ -616,8 +641,21 @@ func validateVolumeMounts(mounts []corev1.VolumeMount, volumes map[string]corev1
 		}
 		seenMountPath.Insert(filepath.Clean(vm.MountPath))
 
-		if volumes[vm.Name].EmptyDir == nil && !vm.ReadOnly {
-			errs = errs.Also(apis.ErrMissingField("readOnly").ViaIndex(i))
-		}
+		shouldCheckReadOnlyVolume := volumes[vm.Name].EmptyDir == nil && volumes[vm.Name].PersistentVolumeClaim == nil
+		if shouldCheckReadOnlyVolume && !vm.ReadOnly {
+			errs = errs.Also((&apis.FieldError{
+				Message: "volume mount should be readOnly for this type of volume",
+				Paths:   []string{"readOnly"},
+			}).ViaIndex(i))
+		}
+
+		if volumes[vm.Name].PersistentVolumeClaim != nil {
+			if volumes[vm.Name].PersistentVolumeClaim.ReadOnly && !vm.ReadOnly {
+				errs = errs.Also((&apis.FieldError{
+					Message: "volume is readOnly but volume mount is not",
+					Paths:   []string{"readOnly"},
+				}).ViaIndex(i))
+			}
+		}
 	}
 	return errs
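The new validatePersistentVolumeClaims rejects any PVC volume unless kubernetes.podspec-persistent-volume-claim is Enabled, and rejects writable claims unless kubernetes.podspec-persistent-volume-write is also Enabled. The mount-level rules above are unexported, so the sketch below restates them as a standalone predicate for illustration; mountReadOnlyError is our name and not part of the serving package.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// mountReadOnlyError reports the validation message a mount would trigger,
// or "" if the mount is acceptable for its backing volume.
func mountReadOnlyError(vol corev1.Volume, vm corev1.VolumeMount) string {
	pvc := vol.PersistentVolumeClaim
	// Secret/ConfigMap/projected volumes must still be mounted read-only;
	// only emptyDir and PVC volumes are exempt from that blanket rule.
	if vol.EmptyDir == nil && pvc == nil && !vm.ReadOnly {
		return "volume mount should be readOnly for this type of volume"
	}
	// A read-only claim may not be mounted writable.
	if pvc != nil && pvc.ReadOnly && !vm.ReadOnly {
		return "volume is readOnly but volume mount is not"
	}
	return ""
}

func main() {
	vol := corev1.Volume{
		Name: "claimvolume",
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: "myclaim",
				ReadOnly:  true,
			},
		},
	}
	vm := corev1.VolumeMount{Name: "claimvolume", MountPath: "/data"} // ReadOnly defaults to false
	fmt.Println(mountReadOnlyError(vol, vm)) // volume is readOnly but volume mount is not
}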
diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go
index 88724199f..354b12d89 100644
--- a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go
+++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go
@@ -121,7 +121,7 @@ func (rs *RevisionSpec) applyDefault(ctx context.Context, container *corev1.Cont
 
 	vNames := make(sets.String)
 	for _, v := range rs.PodSpec.Volumes {
-		if v.EmptyDir != nil {
+		if v.EmptyDir != nil || v.PersistentVolumeClaim != nil {
 			vNames.Insert(v.Name)
 		}
 	}
diff --git a/vendor/knative.dev/serving/pkg/testing/v1/revision.go b/vendor/knative.dev/serving/pkg/testing/v1/revision.go
index 3445dc0ab..6a5cdfa82 100644
--- a/vendor/knative.dev/serving/pkg/testing/v1/revision.go
+++ b/vendor/knative.dev/serving/pkg/testing/v1/revision.go
@@ -232,6 +232,22 @@ func WithRevisionInitContainers() RevisionOption {
 	}
 }
 
+func WithRevisionPVC() RevisionOption {
+	return func(r *v1.Revision) {
+		r.Spec.Volumes = []corev1.Volume{{
+			Name: "claimvolume",
+			VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+				ClaimName: "myclaim",
+				ReadOnly:  false,
+			}}},
+		}
+		r.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{
+			Name:      "claimvolume",
+			MountPath: "/data",
+		}}
+	}
+}
+
 // Revision creates a revision object with given ns/name and options.
 func Revision(namespace, name string, ro ...RevisionOption) *v1.Revision {
 	r := &v1.Revision{
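A sketch of how the new testing option might be used, assuming the Revision constructor shown above seeds a default container for the volume mount to attach to, and with the PVC feature flags enabled in the code under test; the rtesting alias and main wrapper are ours.

package main

import (
	"fmt"

	rtesting "knative.dev/serving/pkg/testing/v1"
)

func main() {
	// Builds a revision whose pod spec carries the "claimvolume" PVC volume
	// and a writable /data mount, exactly as WithRevisionPVC sets them up.
	rev := rtesting.Revision("default", "pvc-rev", rtesting.WithRevisionPVC())
	fmt.Println(rev.Spec.Volumes[0].PersistentVolumeClaim.ClaimName) // myclaim
}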
diff --git a/vendor/knative.dev/serving/test/e2e-auto-tls-tests.sh b/vendor/knative.dev/serving/test/e2e-auto-tls-tests.sh
index 0b881e8f3..d893e0d4c 100644
--- a/vendor/knative.dev/serving/test/e2e-auto-tls-tests.sh
+++ b/vendor/knative.dev/serving/test/e2e-auto-tls-tests.sh
@@ -158,12 +158,7 @@ function delete_dns_record() {
 }
 
 # Script entry point.
-
-# Skip installing istio as an add-on
-# Temporarily increasing the cluster size for serving tests to rule out
-# resource/eviction as causes of flakiness.
-# Pin to 1.20 since scale test is super flakey on 1.21
-initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.20
+initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.21
 
 # Run the tests
 header "Running tests"
diff --git a/vendor/knative.dev/serving/test/e2e-tests.sh b/vendor/knative.dev/serving/test/e2e-tests.sh
index 2425a3622..4fc6f283b 100644
--- a/vendor/knative.dev/serving/test/e2e-tests.sh
+++ b/vendor/knative.dev/serving/test/e2e-tests.sh
@@ -28,12 +28,7 @@
 source $(dirname $0)/e2e-common.sh
 
 # Script entry point.
-
-# Skip installing istio as an add-on.
-# Temporarily increasing the cluster size for serving tests to rule out
-# resource/eviction as causes of flakiness.
-# Pin to 1.20 since scale test is super flakey on 1.21
-initialize --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.20 "$@"
+initialize --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.21 "$@"
 
 # Run the tests
 header "Running tests"
@@ -112,7 +107,8 @@ kubectl replace cm "config-gc" -n ${SYSTEM_NAMESPACE} -f ${TMP_DIR}/config-gc.ya
 # Note that we use a very high -parallel because each ksvc is run as its own
 # sub-test. If this is not larger than the maximum scale tested then the test
 # simply cannot pass.
-go_test_e2e -timeout=20m -parallel=300 ./test/scale ${TEST_OPTIONS} || failed=1
+# TODO - Re-enable once we get this reliably passing on GKE 1.21
+# go_test_e2e -timeout=20m -parallel=300 ./test/scale ${TEST_OPTIONS} || failed=1
 
 # Run HPA tests
 go_test_e2e -timeout=30m -tags=hpa ./test/e2e ${TEST_OPTIONS} || failed=1
diff --git a/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh b/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh
index b1f08d2d4..396c821f9 100644
--- a/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh
+++ b/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh
@@ -40,10 +40,7 @@ function stage_test_resources() {
 
 # Script entry point.
 
 # Skip installing istio as an add-on.
-# Temporarily increasing the cluster size for serving tests to rule out
-# resource/eviction as causes of flakiness.
-# Pin to 1.20 since scale test is super flakey on 1.21
-initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --cluster-version=1.20 \
+initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --cluster-version=1.21 \
 	--install-latest-release
 
 # TODO(#2656): Reduce the timeout after we get this test to consistently passing.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2da336071..82fa73b55 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -748,7 +748,7 @@ k8s.io/utils/buffer
 k8s.io/utils/integer
 k8s.io/utils/pointer
 k8s.io/utils/trace
-# knative.dev/eventing v0.28.1-0.20220118233852-e0e3f446c2d8
+# knative.dev/eventing v0.28.1-0.20220119171353-ad895de5fdd8
 ## explicit
 knative.dev/eventing/pkg/apis/config
 knative.dev/eventing/pkg/apis/duck
@@ -777,7 +777,7 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
 # knative.dev/hack v0.0.0-20220118141833-9b2ed8471e30
 ## explicit
 knative.dev/hack
-# knative.dev/networking v0.0.0-20220118173833-c7164d4dd845
+# knative.dev/networking v0.0.0-20220120043934-ec785540a732
 ## explicit
 knative.dev/networking/pkg
 knative.dev/networking/pkg/apis/networking
@@ -835,7 +835,7 @@ knative.dev/pkg/tracing/config
 knative.dev/pkg/tracing/propagation
 knative.dev/pkg/tracing/propagation/tracecontextb3
 knative.dev/pkg/tracker
-# knative.dev/serving v0.28.1-0.20220118185833-a3216ca8e4a4
+# knative.dev/serving v0.28.1-0.20220121012810-a4f8b72183c4
 ## explicit
 knative.dev/serving/pkg/apis/autoscaling
 knative.dev/serving/pkg/apis/autoscaling/v1alpha1