mirror of https://github.com/knative/client.git
upgrade to latest dependencies (#1513)
bumping knative.dev/pkg 92b8dc6...3cc697a:
> 3cc697a upgrade to latest dependencies (#2344)
> 51b9e7f Build downstream tests with go 1.17 (#2341)
> e7547ea Update community files (#2343)
bumping knative.dev/eventing cdeae54...a72cfc9:
> a72cfc9 Remove unsupported stackdriver tracing backend (#5881)
> 36bf0bd Add securityContext to Knative Eventing core objects (#5863)
> 81afe0b upgrade to latest dependencies (#5879)
> ee96d0b removed ginkgo from the project and ran update-codegen and mod tidy (#5878)
> 4cc5ecf Omit empty status fields (#5800)
> b62c8f0 Update community files (#5873)
bumping knative.dev/serving 782db02...fdaccb7:
> fdaccb7 Aligns the last istio-net version with serving (#12232)
> 94329db Update net-istio nightly (#12228)
> 6c3e0fa Update net-kourier nightly (#12226)
> 268bea8 Tidy up comment and loop a bit (#12239)
> fb6a3ab add requestLogHandler to user container as well as healthcheck (#12229)
> 7aaca9a Update net-certmanager nightly (#12237)
> f556724 Update net-contour nightly (#12236)
> 77ba3bd Allow probes to explicitly set the port to the containerPort (#8288) (#12225)
> af13029 Fix linting error from new version (#12234)
> afbc81c upgrade to latest dependencies (#12233)
> 9f61159 Update net-contour nightly (#12208)
> 2584021 Update net-certmanager nightly (#12227)
> 525a4cb user pkg drain for draining in queue proxy (#12033)
> cad72a3 Freeze pod after startup if container-freezer enabled (#12168)
> c48ebf1 upgrade to latest dependencies (#12224)
> 241eada Update net-kourier nightly (#12222)
> 416ded2 Tidy up probePodIPs a bit (#12203)
> 506bd98 Update net-istio nightly (#12223)
> 7513fa2 Update community files (#12220)
> a758daf Update net-certmanager nightly (#12207)
bumping knative.dev/hack b284d49...96aac1c:
> 96aac1c Add wait_until_object_exists library function (#96)
> 29f86c2 fix latest version when running in GitHub actions (#103)
> 0f69979 Update community files (#102)
bumping knative.dev/networking 6ef2676...79a1ce1:
> 79a1ce1 upgrade to latest dependencies (#556)
> 6871f98 asLabelSelector - don't mutate the target if the property is not set (#543)
> 1dcda56 Update community files (#554)

Signed-off-by: Knative Automation <automation@knative.team>
parent 8e3a09cf7f
commit 780e1e1fae

go.mod | 10
@@ -18,10 +18,10 @@ require (
 	k8s.io/cli-runtime v0.21.4
 	k8s.io/client-go v0.21.4
 	k8s.io/code-generator v0.21.4
-	knative.dev/eventing v0.27.1-0.20211103173047-cdeae54e3c74
-	knative.dev/hack v0.0.0-20211102174540-b284d49386cc
-	knative.dev/networking v0.0.0-20211103165948-6ef2676b9073
-	knative.dev/pkg v0.0.0-20211103165848-92b8dc6750de
-	knative.dev/serving v0.27.1-0.20211104011508-782db02f55f6
+	knative.dev/eventing v0.27.1-0.20211108221643-a72cfc91c421
+	knative.dev/hack v0.0.0-20211108170701-96aac1c30be3
+	knative.dev/networking v0.0.0-20211108064904-79a1ce1e1952
+	knative.dev/pkg v0.0.0-20211108064904-3cc697a3cb09
+	knative.dev/serving v0.27.1-0.20211108215642-fdaccb7d2675
 	sigs.k8s.io/yaml v1.3.0
 )
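Each bump pins a Go pseudo-version rather than a tagged release: in v0.27.1-0.20211108221643-a72cfc91c421, the middle segment is the commit timestamp (2021-11-08 22:16:43 UTC) and the trailing segment is the commit hash, matching the a72cfc9 short hash quoted in the commit message. The v0.27.1-0. prefix makes it sort after v0.27.0 but before any eventual v0.27.1 tag.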
go.sum | 27
@@ -1710,23 +1710,26 @@ k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAl
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
 k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-knative.dev/caching v0.0.0-20211101214239-1e1ac2e932c7/go.mod h1:hz6tnVAmRLvPoprw+0wsA9IrZC3MF5l2Hn/2JT28Yk0=
-knative.dev/eventing v0.27.1-0.20211103173047-cdeae54e3c74 h1:95zn+xVLdHVQMULAbA+lMfikhj9gwx06F/vo4137Oxo=
-knative.dev/eventing v0.27.1-0.20211103173047-cdeae54e3c74/go.mod h1:T1rafFeW26ZWlEHcGEWVIYVTG6spstS8e8N/C0v0U9s=
+knative.dev/caching v0.0.0-20211104075903-bfa1e3f2d544/go.mod h1:Gd9oLbbRLbLlc3Z5PIIF0QR7gPI0wt4dvvESdxmNLPE=
+knative.dev/eventing v0.27.1-0.20211108221643-a72cfc91c421 h1:RSA8U3xuGlX7S8ZF8IZKp6t7kbm4jWpBimpslvO0EI8=
+knative.dev/eventing v0.27.1-0.20211108221643-a72cfc91c421/go.mod h1:lM+1rTzjcNmrPjTGaEk+QrmQGuM3/O+e3aN96GH4V0Y=
 knative.dev/hack v0.0.0-20211101195839-11d193bf617b/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
-knative.dev/hack v0.0.0-20211102174540-b284d49386cc h1:eO3AtpzYua2uPO5LyQtse/i84B4isDyeQubNzIkTHEg=
-knative.dev/hack v0.0.0-20211102174540-b284d49386cc/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
-knative.dev/hack/schema v0.0.0-20211102174540-b284d49386cc/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
-knative.dev/networking v0.0.0-20211101214339-4f29f65588a1/go.mod h1:7SKKM4MsBANrXNRZhb/zMkNjTdxYbNjwQDWgu+Fyye4=
-knative.dev/networking v0.0.0-20211103165948-6ef2676b9073 h1:5wkz5q82TS0ZyxrWoueOem32FpY2bZUi3vPkeFZUIGA=
-knative.dev/networking v0.0.0-20211103165948-6ef2676b9073/go.mod h1:gP1Wha/MhUkANNu5ac2nINkH+fh/TtIU54105JxmA9I=
+knative.dev/hack v0.0.0-20211105231158-29f86c2653b5/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
+knative.dev/hack v0.0.0-20211108170701-96aac1c30be3 h1:oSvRgnKoU308k7aXbPV3iL5Zh5kBGM2Ptar4hyeda+A=
+knative.dev/hack v0.0.0-20211108170701-96aac1c30be3/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
+knative.dev/hack/schema v0.0.0-20211105231158-29f86c2653b5/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
+knative.dev/networking v0.0.0-20211104064801-6871f98f7b4d/go.mod h1:gP1Wha/MhUkANNu5ac2nINkH+fh/TtIU54105JxmA9I=
+knative.dev/networking v0.0.0-20211108064904-79a1ce1e1952 h1:VOYJf0B2AAuO0GWKLrIMg92MnvrCpRJ82mPcd8bkj3w=
+knative.dev/networking v0.0.0-20211108064904-79a1ce1e1952/go.mod h1:6la3jzmZXWsmvoxaNJhLXMYMxjTQIsizx19Sq4ud6Cc=
 knative.dev/pkg v0.0.0-20211101212339-96c0204a70dc/go.mod h1:SkfDk9bWIiNZD7XtILGkG7AKVyF/M6M0bGxLgl0SYL8=
-knative.dev/pkg v0.0.0-20211103135647-464034912f7e/go.mod h1:a+9AWOb0zOaBVU6xoPgTvEA7vfMnLjPnK4SX3ciSHKE=
-knative.dev/pkg v0.0.0-20211103165848-92b8dc6750de h1:pU0ZAtnPXmw06B1qqJDDuZMAgEiSAgrTUxa5nTCFpsI=
-knative.dev/pkg v0.0.0-20211103165848-92b8dc6750de/go.mod h1:EKhRrRSAHrnJwQPfgUyrmHgLCam9xcRh79t+G3MJu4k=
+knative.dev/pkg v0.0.0-20211104101302-51b9e7f161b4/go.mod h1:EKhRrRSAHrnJwQPfgUyrmHgLCam9xcRh79t+G3MJu4k=
+knative.dev/pkg v0.0.0-20211108064904-3cc697a3cb09 h1:EuziitWfjhtDEI7vyNV79AtqFia5rACAFOy2rbinVo4=
+knative.dev/pkg v0.0.0-20211108064904-3cc697a3cb09/go.mod h1:YiSfFDMdYHZTuO5XS8UzxSeOEmocy+Kb2YOGik9C8lw=
 knative.dev/reconciler-test v0.0.0-20211101213739-c5f938aa9974/go.mod h1:gTsbLk496j/M9xqMpx/liyCQ0X3bwDpRtcs2Zzws364=
-knative.dev/serving v0.27.1-0.20211104011508-782db02f55f6 h1:EHLD2nwP10m5prYuhlCKvwmmimSPwQuPBsngLgTXNHs=
-knative.dev/serving v0.27.1-0.20211104011508-782db02f55f6/go.mod h1:eynL0SzqKQceez0kCA5T0v/aj7ffr5USWsOcy3TYaQ8=
+knative.dev/serving v0.27.1-0.20211108215642-fdaccb7d2675 h1:MbjjILIroDXQy6YFENbQcKOJy+2ztegD6DbnOgEhhE4=
+knative.dev/serving v0.27.1-0.20211108215642-fdaccb7d2675/go.mod h1:SWF3VPVAZuKjEewtqjCsvwBZyP9PqGpF5oHH6hz1iwo=
 pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -118,11 +118,13 @@ type SequenceStatus struct {

 	// SubscriptionStatuses is an array of corresponding Subscription statuses.
 	// Matches the Spec.Steps array in the order.
-	SubscriptionStatuses []SequenceSubscriptionStatus `json:"subscriptionStatuses"`
+	// +optional
+	SubscriptionStatuses []SequenceSubscriptionStatus `json:"subscriptionStatuses,omitempty"`

 	// ChannelStatuses is an array of corresponding Channel statuses.
 	// Matches the Spec.Steps array in the order.
-	ChannelStatuses []SequenceChannelStatus `json:"channelStatuses"`
+	// +optional
+	ChannelStatuses []SequenceChannelStatus `json:"channelStatuses,omitempty"`

 	// Address is the starting point to this Sequence. Sending to this
 	// will target the first subscriber.
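The `,omitempty` change (with the `+optional` marker) means empty status arrays are now dropped from serialized objects instead of showing up as explicit nulls, per the eventing "Omit empty status fields" change (#5800). A minimal standalone sketch of the JSON effect, using plain encoding/json rather than the real eventing types:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type withoutOmitempty struct {
        SubscriptionStatuses []string `json:"subscriptionStatuses"`
    }

    type withOmitempty struct {
        SubscriptionStatuses []string `json:"subscriptionStatuses,omitempty"`
    }

    func main() {
        a, _ := json.Marshal(withoutOmitempty{})
        b, _ := json.Marshal(withOmitempty{})
        fmt.Println(string(a)) // {"subscriptionStatuses":null}
        fmt.Println(string(b)) // {}
    }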
@@ -186,6 +186,32 @@ function wait_until_object_does_not_exist() {
   return 1
 }

+# Waits until the given object exists.
+# Parameters: $1 - the kind of the object.
+#             $2 - object's name.
+#             $3 - namespace (optional).
+function wait_until_object_exists() {
+  local KUBECTL_ARGS="get $1 $2"
+  local DESCRIPTION="$1 $2"
+
+  if [[ -n $3 ]]; then
+    KUBECTL_ARGS="get -n $3 $1 $2"
+    DESCRIPTION="$1 $3/$2"
+  fi
+  echo -n "Waiting until ${DESCRIPTION} exists"
+  for i in {1..150}; do  # timeout after 5 minutes
+    if kubectl ${KUBECTL_ARGS} > /dev/null 2>&1; then
+      echo -e "\n${DESCRIPTION} exists"
+      return 0
+    fi
+    echo -n "."
+    sleep 2
+  done
+  echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} to exist"
+  kubectl ${KUBECTL_ARGS}
+  return 1
+}
+
 # Waits until all pods are running in the given namespace.
 # This function handles some edge cases that `kubectl wait` does not support,
 # and it provides nice debug info on the state of the pod if it failed,
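Usage follows the existing `wait_until_object_does_not_exist` helper directly above it, e.g. `wait_until_object_exists ksvc hello default` (a hypothetical kind/name/namespace): the loop polls kubectl every 2 seconds, gives up after 150 attempts (5 minutes), and dumps the final kubectl output on timeout.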
@@ -771,6 +797,7 @@ function current_branch() {
   # Get the branch name from Prow's env var, see https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md.
   # Otherwise, try getting the current branch from git.
   (( IS_PROW )) && branch_name="${PULL_BASE_REF:-}"
+  [[ -z "${branch_name}" ]] && branch_name="${GITHUB_BASE_REF:-}"
   [[ -z "${branch_name}" ]] && branch_name="$(git rev-parse --abbrev-ref HEAD)"
   echo "${branch_name}"
 }
@@ -835,30 +862,56 @@ function shellcheck_new_files() {
   fi
 }

+# Note: if using Github checkout action please ensure you fetch all tags prior to calling
+# this function
+#
+# ie.
+# - uses: actions/checkout@v2
+#   with:
+#     fetch-depth: 0
+#
+# See: https://github.com/actions/checkout#fetch-all-history-for-all-tags-and-branches
 function latest_version() {
   local branch_name="$(current_branch)"

-  if [ "$branch_name" = "master" ] || [ "$branch_name" = "main" ]; then
-    # For main branch, simply use git tag without major version, this will work even
-    # if the release tag is not in the main
+  # Use the latest release for main
+  if [[ "$branch_name" == "main" ]] || [[ "$branch_name" == "master" ]]; then
     git tag -l "*$(git tag -l "*v[0-9]*" | cut -d '-' -f2 | sort -r --version-sort | head -n1)*"
+    return
+  fi
+
+  # Ideally we shouldn't need to treat release branches differently but
+  # there are scenarios where git describe will return newer tags than
+  # the ones on the current branch
+  #
+  # ie. create a PR pulling commits from 0.24 into a release-0.23 branch
+  if [[ "$branch_name" == "release-"* ]]; then
+    # Infer major, minor version from the branch name
+    local tag="${branch_name##release-}"
   else
-    ## Assumption here is we are on a release branch
-    local major_minor="${branch_name##release-}"
-    local major_version="$(major_version $major_minor)"
-    local minor_version="$(minor_version $major_minor)"
+    # Nearest tag with the `knative-` prefix
+    local tag=$(git describe --abbrev=0 --match "knative-v[0-9]*")
+
+    # Fallback to older tag scheme vX.Y.Z
+    [[ -z "${tag}" ]] && tag=$(git describe --abbrev=0 --match "v[0-9]*")
+
+    # Drop the prefix
+    tag="${tag##knative-}"
   fi
+
+  local major_version="$(major_version ${tag})"
+  local minor_version="$(minor_version ${tag})"
+
   # Hardcode the jump back from 1.0
-  if [ "$major_version" = "1" ] && [ "$minor_version" == 0 ]; then
-    local tag='v0.26*'
+  if [ "$major_version" = "1" ] && [ "$minor_version" = "0" ]; then
+    local tag_filter='v0.26*'
   else
     # Adjust the minor down by one
-    local tag="*v$major_version.$(( minor_version - 1 ))*"
+    local tag_filter="*v$major_version.$(( minor_version - 1 ))*"
   fi

   # Get the latest patch release for the major minor
-  git tag -l "${tag}*" | sort -r --version-sort | head -n1
-  fi
+  git tag -l "${tag_filter}" | sort -r --version-sort | head -n1
 }

 # Initializations that depend on previous functions.
@@ -594,13 +594,13 @@ func asMode(key string, target *MeshCompatibilityMode) cm.ParseFunc {
 // asLabelSelector returns a LabelSelector extracted from a given configmap key.
 func asLabelSelector(key string, target **metav1.LabelSelector) cm.ParseFunc {
 	return func(data map[string]string) error {
-		*target = nil
 		if raw, ok := data[key]; ok {
 			if len(raw) > 0 {
-				*target = &metav1.LabelSelector{}
-				if err := yaml.Unmarshal([]byte(raw), *target); err != nil {
+				var selector *metav1.LabelSelector
+				if err := yaml.Unmarshal([]byte(raw), &selector); err != nil {
 					return err
 				}
+				*target = selector
 			}
 		}
 		return nil
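The fix (networking #543, "asLabelSelector - don't mutate the target if the property is not set") unmarshals into a local variable and assigns through the double pointer only when the key is present and non-empty, so a configmap that omits the key no longer resets a previously parsed selector to nil. A dependency-free sketch of the pattern, with encoding/json and a local struct standing in for sigs.k8s.io/yaml and metav1.LabelSelector:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type labelSelector struct {
        MatchLabels map[string]string `json:"matchLabels"`
    }

    // parseInto mutates target only when key is present and parses cleanly.
    func parseInto(data map[string]string, key string, target **labelSelector) error {
        if raw, ok := data[key]; ok && len(raw) > 0 {
            var s *labelSelector
            if err := json.Unmarshal([]byte(raw), &s); err != nil {
                return err // target is left untouched on parse errors too
            }
            *target = s
        }
        return nil
    }

    func main() {
        existing := &labelSelector{MatchLabels: map[string]string{"app": "v1"}}
        _ = parseInto(map[string]string{}, "selector", &existing) // key absent
        fmt.Println(existing.MatchLabels) // map[app:v1] (the old code would have nil'd it)
    }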
@@ -125,6 +125,8 @@ const (
 	// points will be missed entirely by the panic window which is
 	// smaller than the stable window. Anything less than 6 seconds
 	// isn't going to work well.
+	//
+	// nolint:revive // False positive, Min means minimum, not minutes.
 	WindowMin = 6 * time.Second
 	// WindowMax is the maximum permitted stable autoscaling window.
 	// This keeps the event horizon to a reasonable enough limit.
@@ -366,6 +366,7 @@ func HTTPGetActionMask(in *corev1.HTTPGetAction) *corev1.HTTPGetAction {
 	out.Path = in.Path
 	out.Scheme = in.Scheme
 	out.HTTPHeaders = in.HTTPHeaders
+	out.Port = in.Port

 	return out
 }
@@ -381,6 +382,7 @@ func TCPSocketActionMask(in *corev1.TCPSocketAction) *corev1.TCPSocketAction {

 	// Allowed fields
 	out.Host = in.Host
+	out.Port = in.Port

 	return out
 }
@@ -324,14 +324,19 @@ func ValidatePodSpec(ctx context.Context, ps corev1.PodSpec) *apis.FieldError {
 		errs = errs.Also(err.ViaField("volumes"))
 	}

+	port, err := validateContainersPorts(ps.Containers)
+	if err != nil {
+		errs = errs.Also(err.ViaField("containers[*]"))
+	}
+
 	switch len(ps.Containers) {
 	case 0:
 		errs = errs.Also(apis.ErrMissingField("containers"))
 	case 1:
-		errs = errs.Also(ValidateContainer(ctx, ps.Containers[0], volumes).
+		errs = errs.Also(ValidateContainer(ctx, ps.Containers[0], volumes, port).
 			ViaFieldIndex("containers", 0))
 	default:
-		errs = errs.Also(validateContainers(ctx, ps.Containers, volumes))
+		errs = errs.Also(validateContainers(ctx, ps.Containers, volumes, port))
 	}
 	if ps.ServiceAccountName != "" {
 		for _, err := range validation.IsDNS1123Subdomain(ps.ServiceAccountName) {
@@ -356,20 +361,20 @@ func validateInitContainers(ctx context.Context, containers []corev1.Container,
 	return errs
 }

-func validateContainers(ctx context.Context, containers []corev1.Container, volumes map[string]corev1.Volume) (errs *apis.FieldError) {
+func validateContainers(ctx context.Context, containers []corev1.Container, volumes map[string]corev1.Volume, port corev1.ContainerPort) (errs *apis.FieldError) {
 	features := config.FromContextOrDefaults(ctx).Features
 	if features.MultiContainer != config.Enabled {
 		return errs.Also(&apis.FieldError{Message: fmt.Sprintf("multi-container is off, "+
 			"but found %d containers", len(containers))})
 	}
-	errs = errs.Also(validateContainersPorts(containers).ViaField("containers"))
 	for i := range containers {
 		// Probes are not allowed on other than serving container,
 		// ref: http://bit.ly/probes-condition
 		if len(containers[i].Ports) == 0 {
+			// Note, if we allow readiness/liveness checks on sidecars, we should pass in an *empty* port here, not the main container's port.
 			errs = errs.Also(validateSidecarContainer(WithinSidecarContainer(ctx), containers[i], volumes).ViaFieldIndex("containers", i))
 		} else {
-			errs = errs.Also(ValidateContainer(WithinUserContainer(ctx), containers[i], volumes).ViaFieldIndex("containers", i))
+			errs = errs.Also(ValidateContainer(WithinUserContainer(ctx), containers[i], volumes, port).ViaFieldIndex("containers", i))
 		}
 	}
 	return errs
@@ -386,21 +391,41 @@ func AllMountedVolumes(containers []corev1.Container) sets.String {
 	return volumeNames
 }

-// validateContainersPorts validates port when specified multiple containers
-func validateContainersPorts(containers []corev1.Container) *apis.FieldError {
+// validateContainersPorts validates that the expected number of container ports are present
+// and returns the single serving port if error is nil.
+func validateContainersPorts(containers []corev1.Container) (corev1.ContainerPort, *apis.FieldError) {
 	var count int
+	var port corev1.ContainerPort
 	for i := range containers {
-		count += len(containers[i].Ports)
+		if c := len(containers[i].Ports); c > 0 {
+			count += c
+			port = containers[i].Ports[0]
+		}
 	}
+
+	if port.Name == "" {
+		port.Name = "http"
+	}
+
+	if port.ContainerPort == 0 {
+		port.ContainerPort = 8080
+	}
+
 	// When no container ports are specified.
-	if count == 0 {
-		return apis.ErrMissingField("ports")
+	if count == 0 && len(containers) > 1 {
+		return port, apis.ErrMissingField("ports")
 	}
+
 	// More than one container sections have ports.
 	if count > 1 {
-		return apis.ErrMultipleOneOf("ports")
+		return port, &apis.FieldError{
+			Message: "more than one container port is set",
+			Paths:   []string{"ports"},
+			Details: "Only a single port is allowed across all containers",
+		}
 	}
-	return nil
+
+	return port, nil
 }

 // validateSidecarContainer validate fields for non serving containers
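The reworked helper now does double duty: it still enforces the one-port-across-all-containers rule, but it also returns that single serving port, defaulted to name "http" and containerPort 8080 when unset, so probe validation has a concrete value to compare against. A dependency-free sketch of the selection logic, with local stand-ins for corev1.ContainerPort and apis.FieldError:

    package main

    import (
        "errors"
        "fmt"
    )

    // containerPort is a local stand-in for corev1.ContainerPort.
    type containerPort struct {
        Name string
        Port int32
    }

    type container struct{ Ports []containerPort }

    func singleServingPort(containers []container) (containerPort, error) {
        var count int
        var port containerPort
        for _, c := range containers {
            if n := len(c.Ports); n > 0 {
                count += n
                port = c.Ports[0]
            }
        }
        // Defaults mirror the diff above: an unset port becomes "http":8080.
        if port.Name == "" {
            port.Name = "http"
        }
        if port.Port == 0 {
            port.Port = 8080
        }
        if count == 0 && len(containers) > 1 {
            return port, errors.New("missing field: ports")
        }
        if count > 1 {
            return port, errors.New("only a single port is allowed across all containers")
        }
        return port, nil
    }

    func main() {
        p, err := singleServingPort([]container{{Ports: []containerPort{{Port: 9090}}}, {}})
        fmt.Println(p, err) // {http 9090} <nil>
    }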
@@ -447,27 +472,14 @@ func validateInitContainer(ctx context.Context, container corev1.Container, volu
 }

 // ValidateContainer validate fields for serving containers
-func ValidateContainer(ctx context.Context, container corev1.Container, volumes map[string]corev1.Volume) (errs *apis.FieldError) {
-	// Single container cannot have multiple ports
-	errs = errs.Also(portValidation(container.Ports).ViaField("ports"))
+func ValidateContainer(ctx context.Context, container corev1.Container, volumes map[string]corev1.Volume, port corev1.ContainerPort) (errs *apis.FieldError) {
 	// Liveness Probes
-	errs = errs.Also(validateProbe(container.LivenessProbe).ViaField("livenessProbe"))
+	errs = errs.Also(validateProbe(container.LivenessProbe, port).ViaField("livenessProbe"))
 	// Readiness Probes
-	errs = errs.Also(validateReadinessProbe(container.ReadinessProbe).ViaField("readinessProbe"))
+	errs = errs.Also(validateReadinessProbe(container.ReadinessProbe, port).ViaField("readinessProbe"))
 	return errs.Also(validate(ctx, container, volumes))
 }

-func portValidation(containerPorts []corev1.ContainerPort) *apis.FieldError {
-	if len(containerPorts) > 1 {
-		return &apis.FieldError{
-			Message: "More than one container port is set",
-			Paths:   []string{apis.CurrentField},
-			Details: "Only a single port is allowed",
-		}
-	}
-	return nil
-}
-
 func validate(ctx context.Context, container corev1.Container, volumes map[string]corev1.Volume) *apis.FieldError {
 	if equality.Semantic.DeepEqual(container, corev1.Container{}) {
 		return apis.ErrMissingField(apis.CurrentField)
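With the port count now validated once for the whole pod in validateContainersPorts, the per-container portValidation helper and its call site are deleted outright rather than adapted.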
@@ -653,12 +665,12 @@ func validateContainerPortBasic(port corev1.ContainerPort) *apis.FieldError {
 	return errs
 }

-func validateReadinessProbe(p *corev1.Probe) *apis.FieldError {
+func validateReadinessProbe(p *corev1.Probe, port corev1.ContainerPort) *apis.FieldError {
 	if p == nil {
 		return nil
 	}

-	errs := validateProbe(p)
+	errs := validateProbe(p, port)

 	if p.PeriodSeconds < 0 {
 		errs = errs.Also(apis.ErrOutOfBoundsValue(p.PeriodSeconds, 0, math.MaxInt32, "periodSeconds"))
@@ -700,7 +712,7 @@ func validateReadinessProbe(p *corev1.Probe) *apis.FieldError {
 	return errs
 }

-func validateProbe(p *corev1.Probe) *apis.FieldError {
+func validateProbe(p *corev1.Probe, port corev1.ContainerPort) *apis.FieldError {
 	if p == nil {
 		return nil
 	}
@@ -714,10 +726,18 @@ func validateProbe(p *corev1.Probe) *apis.FieldError {
 	if h.HTTPGet != nil {
 		handlers = append(handlers, "httpGet")
 		errs = errs.Also(apis.CheckDisallowedFields(*h.HTTPGet, *HTTPGetActionMask(h.HTTPGet))).ViaField("httpGet")
+		getPort := h.HTTPGet.Port
+		if (getPort.StrVal != "" && getPort.StrVal != port.Name) || (getPort.IntVal != 0 && getPort.IntVal != port.ContainerPort) {
+			errs = errs.Also(apis.ErrInvalidValue(getPort.String(), "httpGet.port", "May only probe containerPort"))
+		}
 	}
 	if h.TCPSocket != nil {
 		handlers = append(handlers, "tcpSocket")
 		errs = errs.Also(apis.CheckDisallowedFields(*h.TCPSocket, *TCPSocketActionMask(h.TCPSocket))).ViaField("tcpSocket")
+		tcpPort := h.TCPSocket.Port
+		if (tcpPort.StrVal != "" && tcpPort.StrVal != port.Name) || (tcpPort.IntVal != 0 && tcpPort.IntVal != port.ContainerPort) {
+			errs = errs.Also(apis.ErrInvalidValue(tcpPort.String(), "tcpSocket.port", "May only probe containerPort"))
+		}
 	}
 	if h.Exec != nil {
 		handlers = append(handlers, "exec")
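This is the heart of serving "Allow probes to explicitly set the port to the containerPort" (#8288, #12225): a probe may leave its port unset, or set it to the single serving port, matched either by name or by number; anything else is rejected. A dependency-free sketch of the comparison, with intstr.IntOrString flattened into separate string and int values:

    package main

    import "fmt"

    // probePortAllowed mirrors the new check: an unset probe port (""/0)
    // passes, otherwise it must match the serving port's name or number.
    func probePortAllowed(strVal string, intVal int32, servingName string, servingPort int32) bool {
        if strVal != "" && strVal != servingName {
            return false
        }
        if intVal != 0 && intVal != servingPort {
            return false
        }
        return true
    }

    func main() {
        fmt.Println(probePortAllowed("", 0, "http", 8080))    // true: unset, defaulted later
        fmt.Println(probePortAllowed("", 8080, "http", 8080)) // true: matches containerPort
        fmt.Println(probePortAllowed("", 9999, "http", 8080)) // false: "May only probe containerPort"
    }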
@@ -55,14 +55,22 @@ export SYSTEM_NAMESPACE="${SYSTEM_NAMESPACE:-$(uuidgen | tr 'A-Z' 'a-z')}"
 readonly REPLICAS=3
 readonly BUCKETS=10

+# Receives the latest serving version and searches for the same version with major and minor and searches for the latest patch
+function latest_net_istio_version() {
+  local serving_version=$1
+  local major_minor
+  major_minor=$(echo "$serving_version" | cut -d '.' -f 1,2)
+
+  curl -L --silent "https://api.github.com/repos/knative/net-istio/releases" | jq --arg major_minor "$major_minor" -r '[.[].tag_name] | map(select(. | startswith($major_minor))) | sort_by( sub("knative-";"") | sub("v";"") | split(".") | map(tonumber) ) | reverse[0]'
+}
+
 # Latest serving release. If user does not supply this as a flag, the latest
 # tagged release on the current branch will be used.
 LATEST_SERVING_RELEASE_VERSION=$(latest_version)

 # Latest net-istio release.
-LATEST_NET_ISTIO_RELEASE_VERSION=$(
-  curl -L --silent "https://api.github.com/repos/knative/net-istio/releases" | \
-  jq -r '[.[].tag_name] | sort_by( sub("knative-";"") | sub("v";"") | split(".") | map(tonumber) ) | reverse[0]')
+LATEST_NET_ISTIO_RELEASE_VERSION=$(latest_net_istio_version "$LATEST_SERVING_RELEASE_VERSION")
+

 # Parse our custom flags.
 function parse_flags() {
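Previously the script picked the newest net-istio release overall; the new jq filter first narrows the tag list to tags starting with serving's major.minor (via startswith($major_minor)), so a v0.27.x serving run now pairs with the latest net-istio v0.27 patch release instead of whatever shipped most recently. This is the "Aligns the last istio-net version with serving" change (#12232).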
@@ -731,7 +731,7 @@ k8s.io/utils/buffer
 k8s.io/utils/integer
 k8s.io/utils/pointer
 k8s.io/utils/trace
-# knative.dev/eventing v0.27.1-0.20211103173047-cdeae54e3c74
+# knative.dev/eventing v0.27.1-0.20211108221643-a72cfc91c421
 ## explicit
 knative.dev/eventing/pkg/apis/config
 knative.dev/eventing/pkg/apis/duck
@@ -757,10 +757,10 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/fake
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
-# knative.dev/hack v0.0.0-20211102174540-b284d49386cc
+# knative.dev/hack v0.0.0-20211108170701-96aac1c30be3
 ## explicit
 knative.dev/hack
-# knative.dev/networking v0.0.0-20211103165948-6ef2676b9073
+# knative.dev/networking v0.0.0-20211108064904-79a1ce1e1952
 ## explicit
 knative.dev/networking/pkg
 knative.dev/networking/pkg/apis/networking
@@ -768,7 +768,7 @@ knative.dev/networking/pkg/apis/networking/v1alpha1
 knative.dev/networking/pkg/client/clientset/versioned
 knative.dev/networking/pkg/client/clientset/versioned/scheme
 knative.dev/networking/pkg/client/clientset/versioned/typed/networking/v1alpha1
-# knative.dev/pkg v0.0.0-20211103165848-92b8dc6750de
+# knative.dev/pkg v0.0.0-20211108064904-3cc697a3cb09
 ## explicit
 knative.dev/pkg/apis
 knative.dev/pkg/apis/duck
@@ -817,7 +817,7 @@ knative.dev/pkg/tracing/config
 knative.dev/pkg/tracing/propagation
 knative.dev/pkg/tracing/propagation/tracecontextb3
 knative.dev/pkg/tracker
-# knative.dev/serving v0.27.1-0.20211104011508-782db02f55f6
+# knative.dev/serving v0.27.1-0.20211108215642-fdaccb7d2675
 ## explicit
 knative.dev/serving/pkg/apis/autoscaling
 knative.dev/serving/pkg/apis/autoscaling/v1alpha1