mirror of https://github.com/knative/serving.git
e2e - drop imperative bash yaml for declarative ytt/kapp (#11175)
* e2e - drop imperative bash yaml for declarative ytt/kapp
* don't override knative_setup
* fix namespace of some test-resources
* move ingress class to a property file
* change overlay file names to indicate which configmaps they overlay
* domain-mapping controller is HA
* better coordination between generate-yamls.sh and our e2e scripts
* post-install job support
* fix autotls tests
* fix kourier - do a global search when replacing the system namespace
* drop setting ambassador namespace
* pin kapp to a specific sha
* first pass at upgrade test fixes
* drop ignore-unknown-comments flag - no longer needed with v0.32.0
* fix istio latest-release install
* hardcode kapp app names; upgrade tests change the E2E_SCRIPT name, thus we can end up with two installations
* kind tweaks
* drop unused constant
* minor cleanup
* fix stray # in istio mesh ytt file
* drop bash wait since kapp will do this for us
* pull back the global rename and just do it where we need to, otherwise we end up with cluster roles with weird names
* apply injection to namespaces
* custom-yamls now accepts the generated env file output from generate-yamls.sh
* include peer auth for net-certmanager webhook
* fix kong ingress value
This commit is contained in:
parent e04223ba6a
commit e2a823714b
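At its core, the new flow renders every manifest with ytt and hands the result to kapp, which orders the resources and waits for them to become ready. A minimal sketch of that flow, assuming the file layout introduced by this commit (the real scripts wrap the CLIs in run_ytt/run_kapp helpers):

    # Render all manifests declaratively with ytt...
    ytt -f test/config/ytt/lib \
        -f test/config/ytt/values.yaml \
        -f test/config/ytt/overlay-system-namespace.yaml \
        -f test/config/ytt/core \
        --data-value serving.namespaces.system="${SYSTEM_NAMESPACE}" \
        --data-value k8s.cluster.domain="${CLUSTER_DOMAIN}" \
        > serving.yaml

    # ...then apply them with kapp, which replaces the old imperative
    # "kubectl apply && wait" loops.
    kapp deploy --yes --app serving --file serving.yaml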
@@ -55,7 +55,7 @@ jobs:
      env:
        GOPATH: ${{ github.workspace }}
        GO111MODULE: off
        GO111MODULE: on
        GOFLAGS: -tags=nostackdriver
        # https://github.com/google/go-containerregistry/pull/125 allows insecure registry for
        # '*.local' hostnames. This works both for `ko` and our own tag-to-digest resolution logic,
@@ -170,112 +170,31 @@ jobs:
          # local registry, even when pushing $REGISTRY_NAME:$REGISTRY_PORT/some/image
          sudo echo "127.0.0.1 $REGISTRY_NAME" | sudo tee -a /etc/hosts

      - name: Install Knative Serving
        env:
          GO111MODULE: on
          GOFLAGS: -mod=vendor
      - name: Install Serving & Ingress
        working-directory: ./src/knative.dev/serving
        run: |
          set -o pipefail
          source ./test/e2e-common.sh

          kubectl apply -f config/core/300-imagecache.yaml
          KIND=1
          INGRESS_CLASS="${{ matrix.kingress }}.ingress.networking.knative.dev"
          CLUSTER_DOMAIN="${{ matrix.cluster-suffix }}"

          # Build and Publish our containers to the docker daemon (including test assets)
          ko apply --platform=linux/amd64 -PRf config/core
          ko apply --platform=linux/amd64 -Pf test/config/
          knative_setup
          test_setup

          # Have Serving use the kingress option.
          kubectl patch configmap/config-network \
            --namespace knative-serving \
            --type merge \
            --patch '{"data":{"ingress.class":"${{ matrix.kingress }}.ingress.networking.knative.dev"}}'
          echo "INGRESS_CLASS=$INGRESS_CLASS" >> $GITHUB_ENV
          echo "CLUSTER_DOMAIN=$CLUSTER_DOMAIN" >> $GITHUB_ENV
          echo "SYSTEM_NAMESPACE=$SYSTEM_NAMESPACE" >> $GITHUB_ENV
          echo "GATEWAY_OVERRIDE=$GATEWAY_OVERRIDE" >> $GITHUB_ENV
          echo "GATEWAY_NAMESPACE_OVERRIDE=$GATEWAY_NAMESPACE_OVERRIDE" >> $GITHUB_ENV

          # Be KinD to these tests.
          kubectl scale -nknative-serving deployment/chaosduck --replicas=0

          # Setting deadline progress to a shorter value.
          kubectl patch cm "config-deployment" -n knative-serving \
            -p '{"data":{"progressDeadline":"120s"}}'

      - name: Install Alpha Components
        if: contains(matrix.test-flags, '--enable-alpha')
        env:
          GO111MODULE: on
          GOFLAGS: -mod=vendor
        run: |
          ko apply --platform=all -PRf config/domain-mapping

      - name: Wait for components to be up
        run: |
          # We need the webhook to be up
          kubectl wait --for=condition=Available deployment -n knative-serving --all

      - name: Install kingress provider (Contour)
        if: matrix.kingress == 'contour'
        run: |
          set -o pipefail

          # Apply a kingress option.
          ko resolve -f third_party/contour-latest | \
            sed 's/LoadBalancer/NodePort/g' | \
            sed 's/imagePullPolicy:/# DISABLED: imagePullPolicy:/g' | \
            kubectl apply -f -

          # This tells the tests what namespace to look in for our kingress LB.
          echo "GATEWAY_OVERRIDE=envoy" >> $GITHUB_ENV
          echo "GATEWAY_NAMESPACE_OVERRIDE=contour-external" >> $GITHUB_ENV

      - name: Install kingress provider (Kourier)
        if: matrix.kingress == 'kourier'
        run: |
          set -o pipefail

          # Apply a kingress option.
          ko resolve -f third_party/kourier-latest | \
            sed 's/LoadBalancer/NodePort/g' | \
            kubectl apply -f -

          # This tells the tests what namespace to look in for our kingress LB.
          echo "GATEWAY_OVERRIDE=kourier" >> $GITHUB_ENV
          echo "GATEWAY_NAMESPACE_OVERRIDE=kourier-system" >> $GITHUB_ENV

      - name: Install kingress provider (Istio)
        if: matrix.kingress == 'istio'
        run: |
          set -o pipefail

          source test/e2e-networking-library.sh

          PATCHED_YAML=$(mktemp)
          ko resolve -f third_party/istio-latest/net-istio.yaml > $PATCHED_YAML

          # TODO: figure out how to use e2e-common.sh directly even if no
          # custom namespace is used here.
          TMP_DIR=$(mktemp -d -t ci-$(date +%Y-%m-%d-%H-%M-%S)-XXXXXXXXXX)
          KNATIVE_DEFAULT_NAMESPACE="knative-serving"
          SYSTEM_NAMESPACE=$KNATIVE_DEFAULT_NAMESPACE
          export KIND=true
          export CLUSTER_DOMAIN="${{ matrix.cluster-suffix }}"
          install_istio $PATCHED_YAML

          echo "GATEWAY_NAMESPACE_OVERRIDE=istio-system" >> $GITHUB_ENV

      - name: Upload Test Images
        run: |
          # Build and Publish our test images to the docker daemon.
          ./test/upload-test-images.sh

      - name: Wait for Serving and KIngress to be up
        run: |
          kubectl wait pod --for=condition=Ready -n knative-serving -l '!job-name'
          kubectl wait pod --for=condition=Ready -n "${GATEWAY_NAMESPACE_OVERRIDE}" -l '!job-name'

      - name: Run e2e Tests
      - name: Run Test
        working-directory: ./src/knative.dev/serving
        run: |
          set -x

          # Exclude the control-plane node, which doesn't seem to expose the nodeport service.
          IPS=( $(kubectl get nodes -lkubernetes.io/hostname!=kind-control-plane -ojsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}') )

          # Run the tests tagged as e2e on the KinD cluster.
          go test -race -count=1 -timeout=20m -tags=e2e ${{ matrix.test-suite }} \
            --ingressendpoint="${IPS[0]}" \
@@ -40,6 +40,7 @@ set -o pipefail

readonly YAML_REPO_ROOT=${1:?"First argument must be the repo root dir"}
readonly YAML_LIST_FILE=${2:?"Second argument must be the output file"}
readonly YAML_ENV_FILE=${3:-$(mktemp)}

# Set output directory
if [[ -z "${YAML_OUTPUT_DIR:-}" ]]; then
@@ -134,3 +135,15 @@ ${SERVING_DOMAINMAPPING_CRD_YAML}
${SERVING_CRD_YAML}
${SERVING_NSCERT_YAML}
EOF

cat << EOF > "${YAML_ENV_FILE}"
export SERVING_CORE_YAML=${SERVING_CORE_YAML}
export SERVING_DEFAULT_DOMAIN_YAML=${SERVING_DEFAULT_DOMAIN_YAML}
export SERVING_STORAGE_VERSION_MIGRATE_YAML=${SERVING_STORAGE_VERSION_MIGRATE_YAML}
export SERVING_POST_INSTALL_JOBS_YAML=${SERVING_POST_INSTALL_JOBS_YAML}
export SERVING_HPA_YAML=${SERVING_HPA_YAML}
export SERVING_DOMAINMAPPING_YAML=${SERVING_DOMAINMAPPING_YAML}
export SERVING_DOMAINMAPPING_CRD_YAML=${SERVING_DOMAINMAPPING_CRD_YAML}
export SERVING_CRD_YAML=${SERVING_CRD_YAML}
export SERVING_NSCERT_YAML=${SERVING_NSCERT_YAML}
EOF
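With this change generate-yamls.sh emits, alongside the manifests, an env file recording where each generated YAML lives. Per the commit message, --custom-yamls now consumes that env file instead of a comma-separated YAML list. A hypothetical invocation (the test script name is illustrative):

    ENV_FILE="$(mktemp)"
    # $1 = repo root, $2 = YAML list file, $3 = env file (see the arguments above)
    ./hack/generate-yamls.sh "${REPO_ROOT_DIR}" "$(mktemp)" "${ENV_FILE}"
    ./test/e2e-tests.sh --custom-yamls "${ENV_FILE}"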
@@ -1,28 +0,0 @@
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: serving-tests
---
apiVersion: v1
kind: Namespace
metadata:
  name: serving-tests-alt
---
apiVersion: v1
kind: Namespace
metadata:
  name: tls
@@ -1,21 +0,0 @@
# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: ConfigMap
metadata:
  name: conformance-test-configmap
  namespace: serving-tests
data:
  testKey: testValue
@@ -1,22 +0,0 @@
# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Secret
metadata:
  name: conformance-test-secret
  namespace: serving-tests
type: Opaque
data:
  testKey: dGVzdFZhbHVl
@@ -1,59 +0,0 @@
# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: ConfigMap
metadata:
  name: config-logging
  namespace: knative-serving
  labels:
    serving.knative.dev/release: devel

data:
  zap-logger-config: |
    {
      "level": "debug",
      "development": false,
      "outputPaths": ["stdout"],
      "errorOutputPaths": ["stderr"],
      "encoding": "json",
      "encoderConfig": {
        "timeKey": "timestamp",
        "levelKey": "severity",
        "nameKey": "logger",
        "callerKey": "caller",
        "messageKey": "message",
        "stacktraceKey": "stacktrace",
        "lineEnding": "",
        "levelEncoder": "",
        "timeEncoder": "iso8601",
        "durationEncoder": "",
        "callerEncoder": ""
      }
    }

  # Log level overrides
  # For all components except the autoscaler and queue proxy,
  # changes are picked up immediately.
  # For autoscaler and queue proxy, changes require recreation of the pods.
  loglevel.controller: "debug"
  loglevel.autoscaler: "debug"
  loglevel.queueproxy: "debug"
  loglevel.webhook: "debug"
  loglevel.activator: "debug"
  loglevel.hpaautoscaler: "debug"
  loglevel.domainmapping: "debug"
  loglevel.certcontroller: "debug"
  loglevel.istiocontroller: "debug"
  loglevel.nscontroller: "debug"
@@ -1,27 +0,0 @@
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: ConfigMap
metadata:
  name: config-observability
  namespace: knative-serving
  labels:
    serving.knative.dev/release: devel

data:
  # Enable the following features for testing.
  logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}'
  logging.enable-probe-request-log: "true"
  logging.enable-request-log: "true"
@@ -0,0 +1,11 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")

#@ load("helpers.lib.yaml", "system_resource")

#@overlay/match by=system_resource(name="activator", kind="HorizontalPodAutoscaler"), expects=1
---
#@overlay/match-child-defaults missing_ok=True
spec:
  minReplicas: 15
  maxReplicas: 15
@@ -0,0 +1,16 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:yaml", "yaml")
#@ load("@ytt:data", "data")

#@ def rename_cluster_domain(old, new):
#@   def replace(left, right):
#@     yaml_left = yaml.encode(left)
#@     yaml_left = yaml_left.replace(old, new)
#@     return yaml.decode(yaml_left)
#@   end
#@   return replace
#@ end

#@overlay/match by=overlay.all, expects="1+"
#@overlay/replace via=rename_cluster_domain("cluster.local", data.values.k8s.cluster.domain)
---
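This overlay re-encodes every document and rewrites each occurrence of the default cluster domain, driven by the k8s.cluster.domain data value. A sketch of rendering with a non-default domain (the overlay and library paths are assumed from this commit's layout):

    ytt -f test/config/ytt/lib \
        -f test/config/ytt/values.yaml \
        -f test/config/ytt/core \
        -f serving.yaml \
        --data-value k8s.cluster.domain=c1.testing.dev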
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "system_configmap")

#@overlay/match by=system_configmap("config-deployment"), expects=1
---
#@overlay/match-child-defaults missing_ok=True
data:
  progressDeadline: "120s"
@@ -0,0 +1,38 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "system_configmap")

#@overlay/match by=system_configmap("config-logging"), expects=1
---
#@overlay/match-child-defaults missing_ok=True
data:
  zap-logger-config: |
    {
      "level": "debug",
      "development": false,
      "outputPaths": ["stdout"],
      "errorOutputPaths": ["stderr"], "encoding": "json",
      "encoderConfig": {
        "timeKey": "timestamp",
        "levelKey": "severity",
        "nameKey": "logger",
        "callerKey": "caller",
        "messageKey": "message",
        "stacktraceKey": "stacktrace",
        "lineEnding": "",
        "levelEncoder": "",
        "timeEncoder": "iso8601",
        "durationEncoder": "",
        "callerEncoder": ""
      }
    }

  loglevel.controller: "debug"
  loglevel.autoscaler: "debug"
  loglevel.queueproxy: "debug"
  loglevel.webhook: "debug"
  loglevel.activator: "debug"
  loglevel.hpaautoscaler: "debug"
  loglevel.domainmapping: "debug"
  loglevel.certcontroller: "debug"
  loglevel.istiocontroller: "debug"
  loglevel.nscontroller: "debug"
@@ -0,0 +1,12 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "system_configmap")

#@overlay/match by=system_configmap("config-observability"), expects=1
---
#@overlay/match-child-defaults missing_ok=True
data:
  logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}'

  logging.enable-probe-request-log: "true"
  logging.enable-request-log: "true"
  profiling.enable: "true"
@@ -0,0 +1,9 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "system_configmap")

#@overlay/match by=system_configmap("config-network"), expects=1
---
#@overlay/match-child-defaults missing_ok=True
data:
  ingress.class: #@ data.values.serving.ingress_class
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "subset", "serving_system_namespace")

#@overlay/match by=subset(kind="Namespace", name="knative-serving"), expects="1+"
---
metadata:
  name: #@ serving_system_namespace()
@@ -0,0 +1,33 @@
#@ load("@ytt:data", "data")

#@ namespaces = [
#@   data.values.serving.namespaces.test.default,
#@   data.values.serving.namespaces.test.alternate,
#@   data.values.serving.namespaces.test.tls,
#@ ]

#@ for namespace in namespaces:
---
apiVersion: v1
kind: Namespace
metadata:
  name: #@ namespace
#@ end
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: conformance-test-configmap
  namespace: #@ data.values.serving.namespaces.test.default
data:
  testKey: testValue

---
apiVersion: v1
kind: Secret
metadata:
  name: conformance-test-secret
  namespace: #@ data.values.serving.namespaces.test.default
type: Opaque
data:
  testKey: dGVzdFZhbHVl
@@ -0,0 +1,23 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "system_deployment", "system_configmap")

#@ def ha_components():
#@   components = []
#@   for component in data.values.serving.ha.components:
#@     components.append(system_deployment(name=component))
#@   end
#@   return overlay.or_op(*components)
#@ end

#@overlay/match by=ha_components(), expects="1+"
---
spec:
  #@overlay/match missing_ok=True
  replicas: #@ data.values.serving.ha.replicas

#@overlay/match by=system_configmap("config-leader-election"), expects=1
---
data:
  #@overlay/match missing_ok=True
  buckets: #@ str(data.values.serving.ha.buckets)
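Every deployment named in serving.ha.components gets the same replica count, and the leader-election bucket count is stamped into config-leader-election. A sketch of spot-checking the result after a deploy (component names taken from the values.yaml introduced later in this diff):

    kubectl -n "${SYSTEM_NAMESPACE}" get deploy controller webhook \
        -o custom-columns=NAME:.metadata.name,REPLICAS:.spec.replicas
    kubectl -n "${SYSTEM_NAMESPACE}" get cm config-leader-election \
        -o jsonpath='{.data.buckets}'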
@@ -0,0 +1,6 @@
#@ load("@ytt:overlay", "overlay")

#@data/values
---
serving:
  ingress_class: ambassador.ingress.networking.knative.dev
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(kind="Deployment", name="ambassador")
---
spec:
  #@overlay/match missing_ok=True
  replicas: 6
@@ -0,0 +1,10 @@
#@ load("@ytt:overlay", "overlay")

#@data/values
---
serving:
  #@overlay/match missing_ok=True
  ha:
    components:
      #@overlay/append
      - contour-ingress-controller
@@ -0,0 +1,6 @@
#@ load("@ytt:overlay", "overlay")

#@data/values
---
serving:
  ingress_class: contour.ingress.networking.knative.dev
@@ -0,0 +1,14 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(name="envoy", kind="DaemonSet"), expects=2
---
spec:
  template:
    spec:
      containers:
      #@overlay/match by="name"
      - name: envoy
        args:
        #@overlay/match by=lambda index,left,right: left.find("log-level") != -1
        - --log-level debug
@@ -0,0 +1,14 @@
apiVersion: "security.istio.io/v1beta1"
kind: "PeerAuthentication"
metadata:
  name: "net-certmanager-webhook"
  namespace: "knative-serving"
  labels:
    networking.knative.dev/ingress-provider: istio
spec:
  selector:
    matchLabels:
      app: net-certmanager-webhook
  portLevelMtls:
    8443:
      mode: PERMISSIVE
@@ -0,0 +1,20 @@
#! The istio profile selected _may_ include a required config-istio.yaml
#! that we've downloaded.
#!
#! As a side-effect we end up with *two* configmap/config-istio resources
#! in our YAML. We want to keep the profile-specific version - which is
#! the one with the 'serving.knative.dev/release: devel' label.

#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "system_configmap")

#@ def keep_only_devel(left, right):
#@   if left["metadata"]["labels"]["serving.knative.dev/release"] == "devel":
#@     return left
#@   end
#@ end

#@overlay/match by=system_configmap("config-istio"), when="2+"
#@overlay/replace via=keep_only_devel
---
@@ -0,0 +1,39 @@
#! An istio issue causes the ingress pods to not start,
#! see: https://github.com/istio/istio/issues/31084
#!
#! We work around this by hinting to kapp about the order in which
#! these resources should be applied: wait for istio to be running
#! before applying the net-istio customizations.
#!
#! TODO - remove when it's fixed

#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset", "label_subset")


#! istio's YAMLs have some null values which ytt doesn't replace by default,
#! so instead we remove them.
#@overlay/match by=overlay.subset({"metadata":{"annotations":None}}), expects="1+"
---
metadata:
  #@overlay/remove
  annotations: null

#@overlay/match by=subset(namespace="istio-system"), expects="1+"
---
metadata:
  #@overlay/match missing_ok=True
  annotations:
    #@overlay/match missing_ok=True
    kapp.k14s.io/change-group: "istio.io"

#@overlay/match by=label_subset("networking.knative.dev/ingress-provider", "istio"), expects="1+"
---
metadata:
  #@overlay/match missing_ok=True
  #@overlay/match-child-defaults missing_ok=True
  annotations:
    kapp.k14s.io/change-group: "knative.dev/net-istio"
    kapp.k14s.io/change-rule: "upsert after upserting istio.io"
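The two annotations give kapp a declarative ordering constraint: everything labeled as net-istio's is upserted only after everything in the istio.io change group has been applied successfully. One way to inspect the resulting ordered plan without touching the cluster (a sketch; --diff-run asks kapp to print its change set and exit):

    kapp deploy --app serving --file serving.yaml --diff-run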
@@ -0,0 +1,12 @@
#@ load("@ytt:overlay", "overlay")

#@data/values
---
serving:
  #@overlay/match missing_ok=True
  ha:
    components:
      #@overlay/append
      - networking-istio
      #@overlay/append
      - istio-webhook
@@ -0,0 +1,6 @@
#@ load("@ytt:overlay", "overlay")

#@data/values
---
serving:
  ingress_class: istio.ingress.networking.knative.dev
@@ -0,0 +1,6 @@
#@ load("@ytt:overlay", "overlay")

#@data/values
---
serving:
  ingress_class: kong
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(kind="Deployment", name="ingress-kong")
---
spec:
  #@overlay/match missing_ok=True
  replicas: 6
@@ -0,0 +1,6 @@
#@ load("@ytt:overlay", "overlay")

#@data/values
---
serving:
  ingress_class: kourier.ingress.networking.knative.dev
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(kind="Deployment", name="3scale-kourier-gateway")
---
spec:
  #@overlay/match missing_ok=True
  replicas: 6
@@ -0,0 +1,18 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:yaml", "yaml")
#@ load("helpers.lib.yaml", "serving_system_namespace", "subset")

#@ def rename_namespace(old, new):
#@   def replace(left, right):
#@     yaml_left = yaml.encode(left)
#@     yaml_left = yaml_left.replace(old, new)
#@     return yaml.decode(yaml_left)
#@   end
#@   return replace
#@ end

#@overlay/match by=subset(kind="ConfigMap", name="kourier-bootstrap")
---
data:
  #@overlay/replace via=rename_namespace("knative-serving", serving_system_namespace())
  envoy-bootstrap.yaml: ~ #! this value is ignored
@@ -0,0 +1,11 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")

#@ load("helpers.lib.yaml", "system_resource")

#@overlay/match by=system_resource(name="activator", kind="HorizontalPodAutoscaler"), expects=1
---
#@overlay/match-child-defaults missing_ok=True
spec:
  minReplicas: 1
  maxReplicas: 1
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(kind="Deployment", name="ambassador")
---
spec:
  #@overlay/match missing_ok=True
  replicas: 1
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(kind="Service"), expects="1+"
---
spec:
  #@overlay/match by=lambda key,l,_: key == "type" and l == "LoadBalancer", when=1
  type: NodePort
@@ -0,0 +1,8 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(kind="Deployment", name="ingress-kong")
---
spec:
  #@overlay/match missing_ok=True
  replicas: 1
@@ -0,0 +1,14 @@
#@ load("@ytt:overlay", "overlay")
#@ load("helpers.lib.yaml", "subset")

#@overlay/match by=subset(kind="Service"), expects="1+"
---
spec:
  #@overlay/match by=lambda key,l,_: key == "type" and l == "LoadBalancer", when=1
  type: NodePort

#@overlay/match by=subset(kind="Deployment", name="3scale-kourier-gateway")
---
spec:
  #@overlay/match missing_ok=True
  replicas: 1
@@ -0,0 +1,37 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")

#@ def label_subset(key, value):
#@   return overlay.subset({"metadata": {"labels":{key:value}}})
#@ end

#@ def subset(kind="", name="", namespace=""):
#@   subsets=[]
#@   if kind != "":
#@     subsets.append(overlay.subset({"kind":kind}))
#@   end
#@   if name != "":
#@     subsets.append(overlay.subset({"metadata": {"name":name}}))
#@   end
#@   if namespace != "":
#@     subsets.append(overlay.subset({"metadata": {"namespace":namespace}}))
#@   end
#@   return overlay.and_op(*subsets)
#@ end

#@ def system_resource(kind="", name=""):
#@   return subset(kind=kind, name=name, namespace=serving_system_namespace())
#@ end

#@ def system_configmap(name=""):
#@   return system_resource(kind="ConfigMap", name=name)
#@ end

#@ def system_deployment(name=""):
#@   return system_resource(kind="Deployment", name=name)
#@ end

#@ def serving_system_namespace():
#@   return data.values.serving.namespaces.system
#@ end
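These helpers compose overlay.subset matchers so that individual overlays stay one-line matchers. A hypothetical overlay built on them, rendered together with the library (the file name scale-webhook.yaml is illustrative; paths follow this commit's layout):

    # Write a tiny overlay that uses the helpers, then render it against
    # a staged manifest. The matcher resolves against whatever
    # serving.namespaces.system is set to at render time.
    cat > scale-webhook.yaml <<'EOF'
    #@ load("@ytt:overlay", "overlay")
    #@ load("helpers.lib.yaml", "system_deployment")

    #@overlay/match by=system_deployment(name="webhook"), expects=1
    ---
    spec:
      #@overlay/match missing_ok=True
      replicas: 2
    EOF

    ytt -f test/config/ytt/lib -f test/config/ytt/values.yaml \
        -f serving.yaml -f scale-webhook.yaml \
        --data-value serving.namespaces.system="${SYSTEM_NAMESPACE}"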
@@ -0,0 +1,25 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")
#@ load("helpers.lib.yaml", "subset")

#@ def namespaces():
#@   names = [
#@     data.values.serving.namespaces.system,
#@     data.values.serving.namespaces.test.default,
#@     data.values.serving.namespaces.test.alternate,
#@     data.values.serving.namespaces.test.tls,
#@   ]
#@   matches = []
#@   for name in names:
#@     matches.append(subset(name=name, kind="Namespace"))
#@   end
#@   return overlay.or_op(*matches)
#@ end

#@overlay/match by=namespaces(), expects="1+"
---
#@overlay/match-child-defaults missing_ok=True
metadata:
  labels:
    istio-injection: enabled
@@ -0,0 +1,16 @@
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:yaml", "yaml")
#@ load("helpers.lib.yaml", "serving_system_namespace")

#@ def rename_namespace(old, new):
#@   def replace(left, right):
#@     yaml_left = yaml.encode(left)
#@     yaml_left = yaml_left.replace("namespace: %s" % old, "namespace: %s" % new)
#@     return yaml.decode(yaml_left)
#@   end
#@   return replace
#@ end

#@overlay/match by=overlay.all, expects="0+"
#@overlay/replace via=rename_namespace("knative-serving", serving_system_namespace())
---
@@ -0,0 +1,37 @@
#! kapp doesn't handle generateName
#! workaround source: https://github.com/vmware-tanzu/carvel-kapp/issues/76
#!
#! steps:
#! - set the resource's name (generateName + "0")
#! - remove generateName
#! - use kapp annotations nonce and update-strategy to delete/recreate
#!   jobs when re-applying
#!
#@ load("@ytt:overlay", "overlay")

#@ def has_generate_name(index, left, right):
#@   return not("name" in left["metadata"]) and "generateName" in left["metadata"]
#@ end

#@ def generate_name(left, right):
#@   return overlay.apply(left, right, set_name(left["generateName"]))
#@ end

#@ def set_name(generateName):
#@overlay/match missing_ok=True
name: #@ generateName + "0"
#@overlay/remove
generateName: ~
#@ end

#@overlay/match by=has_generate_name, expects="1+"
---
#@overlay/replace via=generate_name
metadata:
  #@overlay/match missing_ok=True
  #@overlay/match-child-defaults missing_ok=True
  annotations:
    kapp.k14s.io/update-strategy: "always-replace"
    kapp.k14s.io/nonce: ""
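The net effect on a rendered post-install job is that metadata.generateName is replaced by a fixed metadata.name (the prefix plus "0") and the two kapp annotations, so re-running a deploy deletes and recreates the job instead of failing on a server-generated name. A sketch of checking the rendered output (post-install-jobs.yaml stands in for whatever jobs manifest is being staged):

    ytt -f test/config/ytt/lib -f test/config/ytt/values.yaml \
        -f test/config/ytt/post-install -f post-install-jobs.yaml > jobs.yaml
    grep -c generateName jobs.yaml           # expect 0
    grep -c 'kapp.k14s.io/nonce' jobs.yaml   # expect one per job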
@@ -0,0 +1,23 @@
#@data/values
---
k8s:
  cluster:
    domain: cluster.local
serving:
  ingress_class: istio.ingress.networking.knative.dev
  namespaces:
    system: knative-serving
    test:
      default: serving-tests
      alternate: serving-tests-alt
      tls: tls
  ha:
    replicas: 3
    buckets: 10
    components:
      - controller
      - webhook
      - autoscaler-hpa
      - autoscaler
      - domainmapping-webhook
      - domain-mapping
@@ -16,10 +16,6 @@

source $(dirname $0)/e2e-common.sh

function knative_setup() {
  install_knative_serving
}

function setup_auto_tls_env_variables() {
  # DNS zone for the testing domain.
  export AUTO_TLS_TEST_DNS_ZONE="knative-e2e"
@@ -92,12 +88,12 @@ function setup_http01_auto_tls() {

  if [[ -z "${MESH}" ]]; then
    echo "Install cert-manager no-mesh ClusterIssuer"
    kubectl apply -f ${TMP_DIR}/test/config/autotls/certmanager/http01/issuer.yaml
    kubectl apply -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/http01/issuer.yaml
  else
    echo "Install cert-manager mesh ClusterIssuer"
    kubectl apply -f ${TMP_DIR}/test/config/autotls/certmanager/http01/mesh-issuer.yaml
    kubectl apply -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/http01/mesh-issuer.yaml
  fi
  kubectl apply -f ${TMP_DIR}/test/config/autotls/certmanager/http01/config-certmanager.yaml
  kubectl apply -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/http01/config-certmanager.yaml
  setup_dns_record
}
@@ -108,7 +104,7 @@ function setup_selfsigned_per_ksvc_auto_tls() {
  export TLS_SERVICE_NAME="self-per-ksvc"

  kubectl delete kcert --all -n "${TLS_TEST_NAMESPACE}"
  kubectl apply -f ${TMP_DIR}/test/config/autotls/certmanager/selfsigned/
  kubectl apply -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/selfsigned/
}

function setup_selfsigned_per_namespace_auto_tls() {
@@ -123,7 +119,7 @@ function setup_selfsigned_per_namespace_auto_tls() {
  export NAMESPACE_WITH_CERT="${TLS_TEST_NAMESPACE}"
  go run ./test/e2e/autotls/config/disablenscert

  kubectl apply -f ${TMP_DIR}/test/config/autotls/certmanager/selfsigned/
  kubectl apply -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/selfsigned/

  # SERVING_NSCERT_YAML is set in the build_knative_from_source function
  # when building knative.
@@ -132,9 +128,7 @@ function setup_selfsigned_per_namespace_auto_tls() {
    echo "Error: variable SERVING_NSCERT_YAML is not set."
    exit 1
  fi
  local YAML_NAME=${TMP_DIR}/${SERVING_NSCERT_YAML##*/}
  sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_NSCERT_YAML} > ${YAML_NAME}
  kubectl apply -f ${YAML_NAME}
  overlay_system_namespace "${SERVING_NSCERT_YAML}" | kubectl apply -f -
}

function cleanup_per_selfsigned_namespace_auto_tls() {
@@ -146,7 +140,7 @@ function cleanup_per_selfsigned_namespace_auto_tls() {
  kubectl delete -f ${SERVING_NSCERT_YAML} --ignore-not-found=true

  kubectl delete kcert --all -n "${TLS_TEST_NAMESPACE}"
  kubectl delete -f ${TMP_DIR}/test/config/autotls/certmanager/selfsigned/ --ignore-not-found=true
  kubectl delete -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/selfsigned/ --ignore-not-found=true
}

function setup_dns_record() {
@@ -175,19 +169,7 @@ function delete_dns_record() {
# Temporarily increasing the cluster size for serving tests to rule out
# resource/eviction as causes of flakiness.
# Pin to 1.18 since the scale test is super flaky on 1.19
initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --cluster-version=1.18

header "Enabling high-availability"

scale_controlplane "${HA_COMPONENTS[@]}"

# Wait for a new leader Controller to prevent race conditions during service reconciliation
wait_for_leader_controller || failed=1

# Dump the leases post-setup.
header "Leaders"
kubectl get lease -n "${SYSTEM_NAMESPACE}"

initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.18

# Run the tests
header "Running tests"
@@ -203,7 +185,7 @@ add_trap "cleanup_auto_tls_common" EXIT SIGKILL SIGTERM SIGQUIT
subheader "Auto TLS test for per-ksvc certificate provision using self-signed CA"
setup_selfsigned_per_ksvc_auto_tls
go_test_e2e -timeout=10m ./test/e2e/autotls/ || failed=1
kubectl delete -f ${TMP_DIR}/test/config/autotls/certmanager/selfsigned/
kubectl delete -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/selfsigned/

subheader "Auto TLS test for per-namespace certificate provision using self-signed CA"
setup_selfsigned_per_namespace_auto_tls
@@ -216,7 +198,7 @@ if [[ ${RUN_HTTP01_AUTO_TLS_TESTS} -eq 1 ]]; then
  setup_http01_auto_tls
  add_trap "delete_dns_record" SIGKILL SIGTERM SIGQUIT
  go_test_e2e -timeout=10m ./test/e2e/autotls/ || failed=1
  kubectl delete -f ${TMP_DIR}/test/config/autotls/certmanager/http01/
  kubectl delete -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/http01/
  delete_dns_record
fi
@@ -19,37 +19,39 @@
source "$(dirname "${BASH_SOURCE[0]}")/../vendor/knative.dev/hack/e2e-tests.sh"
source "$(dirname "${BASH_SOURCE[0]}")/e2e-networking-library.sh"

CERT_MANAGER_VERSION="latest"
export CERT_MANAGER_VERSION="latest"
# Since the default is istio, make istio the default ingress.
INGRESS_CLASS=${INGRESS_CLASS:-istio.ingress.networking.knative.dev}
ISTIO_VERSION=""
KOURIER_VERSION=""
AMBASSADOR_VERSION=""
CONTOUR_VERSION=""
CERTIFICATE_CLASS=""
export RUN_HTTP01_AUTO_TLS_TESTS=0
export INGRESS_CLASS=${INGRESS_CLASS:-istio.ingress.networking.knative.dev}
export ISTIO_VERSION="stable"
export KOURIER_VERSION=""
export AMBASSADOR_VERSION=""
export CONTOUR_VERSION=""
export CERTIFICATE_CLASS=""
# Only build linux/amd64 images
KO_FLAGS="${KO_FLAGS:---platform=linux/amd64}"
export KO_FLAGS="${KO_FLAGS:---platform=linux/amd64}"

HTTPS=0
export RUN_HTTP01_AUTO_TLS_TESTS=0
export HTTPS=0
export ENABLE_HA=0
export MESH=0
export KIND=0
export CLUSTER_DOMAIN=cluster.local

# List of custom YAMLs to install, if specified (space-separated).
INSTALL_CUSTOM_YAMLS=""
export INSTALL_CUSTOM_YAMLS=""
export INSTALL_SERVING_VERSION="HEAD"
export YTT_FILES=()

export TMP_DIR="${TMP_DIR:-$(mktemp -d -t ci-$(date +%Y-%m-%d-%H-%M-%S)-XXXXXXXXXX)}"

readonly E2E_YAML_DIR="${TMP_DIR}/e2e-yaml"

UNINSTALL_LIST=()
export TMP_DIR
TMP_DIR="${TMP_DIR:-$(mktemp -d -t ci-$(date +%Y-%m-%d-%H-%M-%S)-XXXXXXXXXX)}"
readonly KNATIVE_DEFAULT_NAMESPACE="knative-serving"
# This is the namespace used to install Knative Serving. Use a generated UUID as the namespace.
export SYSTEM_NAMESPACE
SYSTEM_NAMESPACE="${SYSTEM_NAMESPACE:-$(uuidgen | tr 'A-Z' 'a-z')}"

export SYSTEM_NAMESPACE="${SYSTEM_NAMESPACE:-$(uuidgen | tr 'A-Z' 'a-z')}"

# Keep this in sync with test/ha/ha.go
readonly REPLICAS=3
readonly BUCKETS=10
HA_COMPONENTS=()

# Latest serving release. If user does not supply this as a flag, the latest
# tagged release on the current branch will be used.
@@ -74,6 +76,10 @@ function parse_flags() {
      LATEST_SERVING_RELEASE_VERSION=$2
      return 2
      ;;
    --install-latest-release)
      INSTALL_SERVING_VERSION="latest-release"
      return 2
      ;;
    --cert-manager-version)
      [[ $2 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'"
      readonly CERT_MANAGER_VERSION=$2
@@ -92,14 +98,26 @@ function parse_flags() {
      readonly MESH=0
      return 1
      ;;
    --enable-ha)
      readonly ENABLE_HA=1
      return 1
      ;;
    --kind)
      readonly KIND=1
      return 1
      ;;
    --https)
      readonly HTTPS=1
      return 1
      ;;
    --cluster-domain)
      [[ -z "$2" ]] && fail_test "Missing argument to --cluster-domain"
      readonly CLUSTER_DOMAIN="$2"
      return 2
      ;;
    --custom-yamls)
      [[ -z "$2" ]] && fail_test "Missing argument to --custom-yamls"
      # Expect a list of comma-separated YAMLs.
      INSTALL_CUSTOM_YAMLS="${2//,/ }"
      INSTALL_CUSTOM_YAMLS="${2}"
      readonly INSTALL_CUSTOM_YAMLS
      return 2
      ;;
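Taken together, a local run against a KinD cluster exercising the new flags might look like this (the test script name is illustrative; ENV_FILE is the env file produced by generate-yamls.sh as described above):

    ./test/e2e-tests.sh --kind --enable-ha \
        --cluster-domain c1.testing.dev \
        --custom-yamls "${ENV_FILE}"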
@@ -140,193 +158,167 @@ function parse_flags() {
  return 0
}

# Create all manifests required to install Knative Serving.
# This will build everything from the current source.
# All generated YAMLs will be available and pointed to by the corresponding
# environment variables as set in /hack/generate-yamls.sh.
function build_knative_from_source() {
  local FULL_OUTPUT YAML_LIST LOG_OUTPUT ENV_OUTPUT
  YAML_LIST="$(mktemp)"

# Gather all the YAML we require to run all our tests.
# We stage these files into ${E2E_YAML_DIR}
#
# > serving built from HEAD    > $E2E_YAML_DIR/serving/HEAD/install
#                              > $E2E_YAML_DIR/serving/HEAD/post-install
#
# > serving latest-release     > $E2E_YAML_DIR/serving/latest-release/install
#                              > $E2E_YAML_DIR/serving/latest-release/post-install
#
# > net-istio HEAD             > $E2E_YAML_DIR/istio/HEAD/install
# > net-istio latest-release   > $E2E_YAML_DIR/istio/latest-release/install
#
# We download istio.yaml for our given test profile (i.e. mesh on KinD).
# The files downloaded are istio.yaml & config-istio.yaml.
#
# config-istio.yaml is to be applied _after_ we install net-istio.yaml since
# it includes profile-specific configuration.
#
# > test/config/**.yaml        > $E2E_YAML_DIR/serving/test/config
#
# These resources will be passed through `ko` if there exists a `ko://`
# strict reference. Second, namespace overlays will be applied to place
# them in the correct namespace.
#
function knative_setup() {
  local need_latest_version=0

  # Generate manifests, capture environment variables pointing to the YAML files.
  FULL_OUTPUT="$( \
    source "$(dirname "${BASH_SOURCE[0]}")/../hack/generate-yamls.sh" "${REPO_ROOT_DIR}" "${YAML_LIST}" ; \
    set | grep _YAML=/)"
  LOG_OUTPUT="$(echo "${FULL_OUTPUT}" | grep -v _YAML=/)"
  ENV_OUTPUT="$(echo "${FULL_OUTPUT}" | grep '^[_0-9A-Z]\+_YAML=/')"
  [[ -z "${LOG_OUTPUT}" || -z "${ENV_OUTPUT}" ]] && fail_test "Error generating manifests"
  # Only import the environment variables pointing to the YAML files.
  echo "${LOG_OUTPUT}"
  echo -e "Generated manifests:\n${ENV_OUTPUT}"
  eval "${ENV_OUTPUT}"
  if [[ "$(basename "${E2E_SCRIPT}")" == *upgrade* ]]; then
    need_latest_version=1
  fi

  if [[ "${INSTALL_SERVING_VERSION}" == "latest-release" ]]; then
    need_latest_version=1
  fi

  if [[ -z "${INSTALL_CUSTOM_YAMLS}" ]]; then
    stage_serving_head
  else
    stage_serving_custom
  fi

  # Download resources we need for upgrade tests
  if (( need_latest_version )); then
    stage_serving_latest
  fi

  # Download Istio YAMLs
  if is_ingress_class istio; then
    stage_istio_head

    # Download istio resources we need for upgrade tests
    if (( need_latest_version )); then
      stage_istio_latest
    fi
  fi

  stage_test_resources

  install "${INSTALL_SERVING_VERSION}"
}

# Installs Knative Serving in the current cluster.
# If no parameters are passed, installs the current source-based build, unless custom
# YAML files were passed using the --custom-yamls flag.
# Parameters: $1 - Knative Serving version "HEAD" or "latest-release". Default is "HEAD".
#             $2 - Knative Monitoring YAML file (optional)
function install_knative_serving() {
  local version=${1:-"HEAD"}
  if [[ -z "${INSTALL_CUSTOM_YAMLS}" ]]; then
    install_knative_serving_standard "$version" "${2:-}"
    return
  fi
  echo ">> Installing Knative serving from custom YAMLs"
  echo "Custom YAML files: ${INSTALL_CUSTOM_YAMLS}"
  for yaml in ${INSTALL_CUSTOM_YAMLS}; do
    local YAML_NAME=${TMP_DIR}/${yaml##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${yaml} > ${YAML_NAME}
    echo "Installing '${YAML_NAME}'"
    kubectl create -f "${YAML_NAME}" || return 1
  done
}

# Parameters: $1 - serving version "HEAD" or "latest-release". Default is "HEAD".
# Parameters: $2 - ingress version "HEAD" or "latest-release". Default is "HEAD".
#
# TODO - ingress version toggle only works for istio
# TODO - allow latest-release for cert-manager
function install() {
  header "Installing Knative Serving"

  local ingress=${INGRESS_CLASS%%.*}
  local serving_version="${1:-"HEAD"}"
  local ingress_version="${2:-"HEAD"}"

  YTT_FILES=(
    "${REPO_ROOT_DIR}/test/config/ytt/lib"
    "${REPO_ROOT_DIR}/test/config/ytt/values.yaml"

    # see cluster_setup for how the files are staged
    "${E2E_YAML_DIR}/serving/${serving_version}/install"
    "${REPO_ROOT_DIR}/test/config/ytt/overlay-system-namespace.yaml"
    "${REPO_ROOT_DIR}/test/config/ytt/core"
  )

  if is_ingress_class istio; then
    # Istio - see cluster_setup for how the files are staged
    YTT_FILES+=("${E2E_YAML_DIR}/istio/${ingress_version}/install")
  else
    YTT_FILES+=("${REPO_ROOT_DIR}/third_party/${ingress}-latest")
  fi

  YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/ingress/${ingress}")
  YTT_FILES+=("${REPO_ROOT_DIR}/third_party/cert-manager-${CERT_MANAGER_VERSION}/cert-manager.yaml")
  YTT_FILES+=("${REPO_ROOT_DIR}/third_party/cert-manager-${CERT_MANAGER_VERSION}/net-certmanager.yaml")

# Installs Knative Serving in the current cluster.
# If no parameters are passed, installs the current source-based build.
# Parameters: $1 - Knative Serving version "HEAD" or "latest-release".
#             $2 - Knative Monitoring YAML file (optional)
function install_knative_serving_standard() {
  echo ">> Creating ${SYSTEM_NAMESPACE} namespace if it does not exist"
  kubectl get ns ${SYSTEM_NAMESPACE} || kubectl create namespace ${SYSTEM_NAMESPACE}
  if (( MESH )); then
    kubectl label namespace ${SYSTEM_NAMESPACE} istio-injection=enabled
  fi
  # Delete the test namespace
  add_trap "kubectl delete namespace ${SYSTEM_NAMESPACE} --ignore-not-found=true" SIGKILL SIGTERM SIGQUIT

  echo ">> Installing Knative CRD"
  SERVING_RELEASE_YAML=""
  SERVING_POST_INSTALL_JOBS_YAML=""
  if [[ "$1" == "HEAD" ]]; then
    # If we need to build from source, then kick that off first.
    build_knative_from_source

    echo "CRD YAML: ${SERVING_CRD_YAML}"
    kubectl apply -f "${SERVING_CRD_YAML}" || return 1
    UNINSTALL_LIST+=( "${SERVING_CRD_YAML}" )

    echo "DOMAIN MAPPING CRD YAML: ${SERVING_DOMAINMAPPING_CRD_YAML}"
    kubectl apply -f "${SERVING_DOMAINMAPPING_CRD_YAML}" || return 1
    UNINSTALL_LIST+=( "${SERVING_DOMAINMAPPING_CRD_YAML}" )
  else
    # Download the latest release of Knative Serving.
    local url="https://github.com/knative/serving/releases/download/${LATEST_SERVING_RELEASE_VERSION}"

    local SERVING_RELEASE_YAML=${TMP_DIR}/"serving-${LATEST_SERVING_RELEASE_VERSION}.yaml"
    local SERVING_POST_INSTALL_JOBS_YAML=${TMP_DIR}/"serving-${LATEST_SERVING_RELEASE_VERSION}-post-install-jobs.yaml"

    wget "${url}/serving-crds.yaml" -O "${SERVING_RELEASE_YAML}" \
      || fail_test "Unable to download latest knative/serving CRD file."
    wget "${url}/serving-core.yaml" -O ->> "${SERVING_RELEASE_YAML}" \
      || fail_test "Unable to download latest knative/serving core file."
    # TODO - switch to upgrade yaml (SERVING_POST_INSTALL_JOBS_YAML) after 0.16 is released
    wget "${url}/serving-storage-version-migration.yaml" -O "${SERVING_POST_INSTALL_JOBS_YAML}" \
      || fail_test "Unable to download latest knative/serving post install file."

    # Replace the default system namespace with the test's system namespace.
    sed -i "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_RELEASE_YAML}
    sed -i "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_POST_INSTALL_JOBS_YAML}

    echo "Knative YAML: ${SERVING_RELEASE_YAML}"
    ko apply -f "${SERVING_RELEASE_YAML}" --selector=knative.dev/crd-install=true || return 1
    YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/mesh")
  fi

  if [[ -z "${REUSE_INGRESS:-}" ]]; then
    echo ">> Installing Ingress"
    if [[ -n "${KOURIER_VERSION:-}" ]]; then
      install_kourier || return 1
    elif [[ -n "${AMBASSADOR_VERSION:-}" ]]; then
      install_ambassador || return 1
    elif [[ -n "${CONTOUR_VERSION:-}" ]]; then
      install_contour || return 1
    elif [[ -n "${KONG_VERSION:-}" ]]; then
      install_kong || return 1
    else
      if [[ "$1" == "HEAD" ]]; then
        install_istio "./third_party/istio-latest/net-istio.yaml" || return 1
      else
        # Download the latest release of net-istio.
        local url="https://github.com/knative/net-istio/releases/download/${LATEST_NET_ISTIO_RELEASE_VERSION}"
        local yaml="net-istio.yaml"
        local YAML_NAME=${TMP_DIR}/"net-istio-${LATEST_NET_ISTIO_RELEASE_VERSION}.yaml"
        wget "${url}/${yaml}" -O "${YAML_NAME}" \
          || fail_test "Unable to download latest knative/net-istio release."
        echo "net-istio YAML: ${YAML_NAME}"
        install_istio $YAML_NAME || return 1
      fi
  if (( ENABLE_HA )); then
    YTT_FILES+=("${E2E_YAML_DIR}/test/config/chaosduck.yaml")
    YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/ha")
  fi

  if (( KIND )); then
    YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/kind/core")
    YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/kind/ingress/${ingress}-kind.yaml")
  fi

  local ytt_result=$(mktemp)
  local ytt_post_install_result=$(mktemp)
  local ytt_flags=""

  for file in "${YTT_FILES[@]}"; do
    if [[ -f "${file}" ]] || [[ -d "${file}" ]]; then
      echo "including ${file}"
      ytt_flags+=" -f ${file}"
    fi
  done

  # use ytt to wrangle the yaml & kapp to apply the resources
  # to the cluster and wait
  run_ytt ${ytt_flags} \
    --data-value serving.namespaces.system="${SYSTEM_NAMESPACE}" \
    --data-value k8s.cluster.domain="${CLUSTER_DOMAIN}" \
    > "${ytt_result}" \
    || fail_test "failed to create deployment configuration"

  # Post install jobs configuration
  run_ytt \
    -f "${REPO_ROOT_DIR}/test/config/ytt/lib" \
    -f "${REPO_ROOT_DIR}/test/config/ytt/values.yaml" \
    -f "${REPO_ROOT_DIR}/test/config/ytt/overlay-system-namespace.yaml" \
    -f "${REPO_ROOT_DIR}/test/config/ytt/post-install" \
    -f "${E2E_YAML_DIR}/serving/${serving_version}/post-install" \
    --data-value serving.namespaces.system="${SYSTEM_NAMESPACE}" \
    --data-value k8s.cluster.domain="${CLUSTER_DOMAIN}" \
    > "${ytt_post_install_result}" \
    || fail_test "failed to create post-install jobs configuration"

  echo "serving config at ${ytt_result}"
  echo "serving post-install config at ${ytt_post_install_result}"

  run_kapp deploy --yes --app serving --file "${ytt_result}" \
    || fail_test "failed to setup knative"

  run_kapp deploy --yes --app serving-post-install --file "${ytt_post_install_result}" \
    || fail_test "failed to run serving post-install"

  setup_ingress_env_vars

  if (( ENABLE_HA )); then
    # # Changing the bucket count and cycling the controllers will leave around stale
    # # lease resources at the old sharding factor, so clean these up.
    # kubectl -n ${SYSTEM_NAMESPACE} delete leases --all
    wait_for_leader_controller || return 1
  fi

  echo ">> Installing Cert-Manager"
  readonly INSTALL_CERT_MANAGER_YAML="./third_party/cert-manager-${CERT_MANAGER_VERSION}/cert-manager.yaml"
  echo "Cert Manager YAML: ${INSTALL_CERT_MANAGER_YAML}"
  # We skip installing cert-manager if it is already installed, as "kubectl apply"
  # gets stuck when cert-manager is already present. https://github.com/jetstack/cert-manager/issues/3367
  kubectl get ns cert-manager || kubectl apply -f "${INSTALL_CERT_MANAGER_YAML}" --validate=false || return 1
  UNINSTALL_LIST+=( "${INSTALL_CERT_MANAGER_YAML}" )
  readonly NET_CERTMANAGER_YAML="./third_party/cert-manager-${CERT_MANAGER_VERSION}/net-certmanager.yaml"
  echo "net-certmanager YAML: ${NET_CERTMANAGER_YAML}"
  local CERT_YAML_NAME=${TMP_DIR}/${NET_CERTMANAGER_YAML##*/}
  sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${NET_CERTMANAGER_YAML} > ${CERT_YAML_NAME}
  kubectl apply \
    -f "${CERT_YAML_NAME}" || return 1
  UNINSTALL_LIST+=( "${CERT_YAML_NAME}" )

  echo ">> Installing Knative serving"
  HA_COMPONENTS+=( "controller" "webhook" "autoscaler-hpa" "autoscaler" "domain-mapping" "domainmapping-webhook" )
  if [[ "$1" == "HEAD" ]]; then
    local CORE_YAML_NAME=${TMP_DIR}/${SERVING_CORE_YAML##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_CORE_YAML} > ${CORE_YAML_NAME}
    local HPA_YAML_NAME=${TMP_DIR}/${SERVING_HPA_YAML##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_HPA_YAML} > ${HPA_YAML_NAME}
    local DOMAINMAPPING_YAML_NAME=${TMP_DIR}/${SERVING_DOMAINMAPPING_YAML##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_DOMAINMAPPING_YAML} > ${DOMAINMAPPING_YAML_NAME}
    local POST_INSTALL_JOBS_YAML_NAME=${TMP_DIR}/${SERVING_POST_INSTALL_JOBS_YAML##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_POST_INSTALL_JOBS_YAML} > ${POST_INSTALL_JOBS_YAML_NAME}

    echo "Knative YAML: ${CORE_YAML_NAME} and ${HPA_YAML_NAME} and ${DOMAINMAPPING_YAML_NAME}"
    kubectl apply \
      -f "${CORE_YAML_NAME}" \
      -f "${DOMAINMAPPING_YAML_NAME}" \
      -f "${HPA_YAML_NAME}" || return 1
    UNINSTALL_LIST+=( "${CORE_YAML_NAME}" "${HPA_YAML_NAME}" "${DOMAINMAPPING_YAML_NAME}" )
    kubectl create -f ${POST_INSTALL_JOBS_YAML_NAME}
  else
    echo "Knative YAML: ${SERVING_RELEASE_YAML}"
    # We use ko because it has better filtering support for CRDs.
    ko apply -f "${SERVING_RELEASE_YAML}" || return 1
    ko create -f "${SERVING_POST_INSTALL_JOBS_YAML}" || return 1
    UNINSTALL_LIST+=( "${SERVING_RELEASE_YAML}" )
  fi

  echo ">> Configuring the default Ingress: ${INGRESS_CLASS}"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-network
  namespace: ${SYSTEM_NAMESPACE}
  labels:
    serving.knative.dev/release: devel
data:
  ingress.class: ${INGRESS_CLASS}
EOF

  echo ">> Turning on profiling.enable"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-observability
  namespace: ${SYSTEM_NAMESPACE}
data:
  profiling.enable: "true"
EOF

  echo ">> Patching activator HPA"
  # We set min replicas to 15 for testing multiple activator pods.
  kubectl -n ${SYSTEM_NAMESPACE} patch hpa activator --patch '{"spec":{"minReplicas":15}}' || return 1
}

# Check if we should use --resolvabledomain. In case the ingress only has
@ -338,35 +330,12 @@ function use_resolvable_domain() {

# Uninstalls Knative Serving from the current cluster.
function knative_teardown() {
  if [[ -z "${INSTALL_CUSTOM_YAMLS}" ]] && (( ${#UNINSTALL_LIST[@]} == 0 )); then
    echo "install_knative_serving() was not called, nothing to uninstall"
    return 0
  fi
  if [[ -n "${INSTALL_CUSTOM_YAMLS}" ]]; then
    echo ">> Uninstalling Knative serving from custom YAMLs"
    for yaml in ${INSTALL_CUSTOM_YAMLS}; do
      echo "Uninstalling '${yaml}'"
      kubectl delete --ignore-not-found=true -f "${yaml}" || return 1
    done
  else
    echo ">> Uninstalling Knative serving"
    for i in ${!UNINSTALL_LIST[@]}; do
      # We uninstall elements in the reverse of the order they were installed.
      local YAML="${UNINSTALL_LIST[$(( ${#UNINSTALL_LIST[@]} - $i - 1 ))]}"
      echo ">> Bringing down YAML: ${YAML}"
      kubectl delete --ignore-not-found=true -f "${YAML}" || return 1
    done
  fi
  run_kapp delete --yes --app "serving-post-install"
  run_kapp delete --yes --app "serving"
}
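
# A worked example of the reverse-order indexing used in knative_teardown,
# with a toy array instead of cluster state (illustration only): for a
# 3-element list, i=0,1,2 maps to indices 2,1,0.
function demo_reverse_uninstall_order() {
  local demo=( first.yaml second.yaml third.yaml )
  local i
  for i in "${!demo[@]}"; do
    echo "${demo[$(( ${#demo[@]} - i - 1 ))]}"  # third.yaml, second.yaml, first.yaml
  done
}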

# Create test resources and images
function test_setup() {
  echo ">> Replacing ${KNATIVE_DEFAULT_NAMESPACE} with the actual namespace for Knative Serving..."
  local TEST_DIR=${TMP_DIR}/test
  mkdir -p ${TEST_DIR}
  cp -r test/* ${TEST_DIR}
  find ${TEST_DIR} -type f -name "*.yaml" -exec sed -i "s/${KNATIVE_DEFAULT_NAMESPACE}/${SYSTEM_NAMESPACE}/g" {} +

  echo ">> Setting up logging..."

  # Install kail if needed.

@ -380,54 +349,8 @@ function test_setup() {
  # Clean up kail so it doesn't interfere with job shutting down
  add_trap "kill $kail_pid || true" EXIT

  # Capture lease changes
  kubectl get lease -A -w -o yaml > ${ARTIFACTS}/leases-$(basename ${E2E_SCRIPT}).log &
  local leases_pid=$!
  # Clean up the lease logging so it doesn't interfere with job shutting down
  add_trap "kill $leases_pid || true" EXIT

  echo ">> Waiting for Serving components to be running..."
  wait_until_pods_running ${SYSTEM_NAMESPACE} || return 1

  local TEST_CONFIG_DIR=${TEST_DIR}/config
  echo ">> Creating test resources (${TEST_CONFIG_DIR}/)"
  ko apply ${KO_FLAGS} -f ${TEST_CONFIG_DIR}/ || return 1
  if (( MESH )); then
    kubectl label namespace serving-tests istio-injection=enabled
    kubectl label namespace serving-tests-alt istio-injection=enabled
  fi

  # Set the progress deadline to a shorter value.
  kubectl patch cm "config-deployment" -n "${SYSTEM_NAMESPACE}" \
    -p '{"data":{"progressDeadline":"120s"}}'

  echo ">> Uploading test images..."
  ${REPO_ROOT_DIR}/test/upload-test-images.sh || return 1

  echo ">> Waiting for Cert Manager components to be running..."
  wait_until_pods_running cert-manager || return 1

  echo ">> Waiting for Ingress provider to be running..."
  wait_until_ingress_running || return 1
}
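
# The capture-and-cleanup pattern used above for kail and the lease watcher,
# in isolation (a sketch: `sleep` stands in for the long-running watcher and
# plain `trap` for this repo's add_trap helper).
function demo_background_capture() {
  sleep 600 > /tmp/capture.log &
  local capture_pid=$!
  # Make sure the watcher dies with the script instead of blocking job shutdown.
  trap "kill ${capture_pid} 2>/dev/null || true" EXIT
}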

# Apply the logging config for testing. This should be called after test_setup has been triggered.
function test_logging_config_setup() {
  echo ">> Setting up test logging config..."
  ko apply ${KO_FLAGS} -f ${TMP_DIR}/test/config/config-logging.yaml || return 1
}

# Delete test resources
function test_teardown() {
  local TEST_CONFIG_DIR=${TMP_DIR}/test/config
  echo ">> Removing test resources (${TEST_CONFIG_DIR}/)"
  ko delete --ignore-not-found=true --now -f ${TEST_CONFIG_DIR}/

  echo ">> Ensuring test namespaces are clean"
  kubectl delete all --all --ignore-not-found --now --timeout 60s -n serving-tests
  kubectl delete --ignore-not-found --now --timeout 60s namespace serving-tests
  kubectl delete all --all --ignore-not-found --now --timeout 60s -n serving-tests-alt
  kubectl delete --ignore-not-found --now --timeout 60s namespace serving-tests-alt
}

# Dump more information when test fails.

@ -484,47 +407,146 @@ function immediate_gc() {
  sleep 30
}

function scale_controlplane() {
  for deployment in "$@"; do
    # Make sure all pods run in leader-elected mode.
    kubectl -n "${SYSTEM_NAMESPACE}" scale deployment "$deployment" --replicas=0 || fail_test
    # Give it time to kill the pods.
    sleep 5
    # Scale up components for HA tests
    kubectl -n "${SYSTEM_NAMESPACE}" scale deployment "$deployment" --replicas="${REPLICAS}" || fail_test
  done
}
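
# Illustrative invocation (not called here): with REPLICAS=2 this bounces each
# named deployment to zero and back up to two replicas.
#   REPLICAS=2 scale_controlplane controller webhook autoscaler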

function disable_chaosduck() {
  kubectl -n "${SYSTEM_NAMESPACE}" scale deployment "chaosduck" --replicas=0 || fail_test
}

function enable_chaosduck() {
  kubectl -n "${SYSTEM_NAMESPACE}" scale deployment "chaosduck" --replicas=1 || fail_test
}

function install_latest_release() {
  header "Installing Knative latest public release"

  install latest-release latest-release \
    || fail_test "Knative latest release installation failed"
  test_logging_config_setup

  wait_until_pods_running ${SYSTEM_NAMESPACE}
  wait_until_batch_job_complete ${SYSTEM_NAMESPACE}
}

function install_head_reuse_ingress() {
  header "Installing Knative head release and reusing ingress"
  # Keep the existing ingress and do not upgrade it. The ingress upgrade
  # makes ongoing requests fail.
  install HEAD latest-release \
    || fail_test "Knative head release installation failed"
}
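
# Judging from the two call sites above, `install` takes the Serving version
# first and the ingress version second; a hypothetical mixed call would be:
#   install HEAD latest-release   # HEAD Serving against the staged ingress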

# Create all manifests required to install Knative Serving.
# This will build everything from the current source.
# All generated YAMLs will be available and pointed by the corresponding
# environment variables as set in /hack/generate-yamls.sh.
function build_knative_from_source() {
  YAML_ENV_FILES="$(mktemp)"
  "${REPO_ROOT_DIR}/hack/generate-yamls.sh" "${REPO_ROOT_DIR}" "$(mktemp)" "${YAML_ENV_FILES}" || fail_test "failed to build"
  source "${YAML_ENV_FILES}"
}
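
# A sketch of the env file generate-yamls.sh writes (made-up paths; the real
# contents come from the script itself). Sourcing it is what populates
# SERVING_CORE_YAML and friends for the staging helpers below.
#   SERVING_CORE_YAML=/tmp/tmp.abc123/serving-core.yaml
#   SERVING_HPA_YAML=/tmp/tmp.abc123/serving-hpa.yaml
#   SERVING_DOMAINMAPPING_YAML=/tmp/tmp.abc123/serving-domainmapping.yaml
#   SERVING_POST_INSTALL_JOBS_YAML=/tmp/tmp.abc123/serving-post-install-jobs.yaml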

function stage_serving_head() {
  header "Building Serving HEAD"
  build_knative_from_source

  local head_dir="${E2E_YAML_DIR}/serving/HEAD/install"
  local head_post_install_dir="${E2E_YAML_DIR}/serving/HEAD/post-install"

  mkdir -p "${head_dir}"
  mkdir -p "${head_post_install_dir}"

  cp "${SERVING_CORE_YAML}" "${head_dir}"
  cp "${SERVING_DOMAINMAPPING_YAML}" "${head_dir}"
  cp "${SERVING_HPA_YAML}" "${head_dir}"
  cp "${SERVING_POST_INSTALL_JOBS_YAML}" "${head_post_install_dir}"
}

function stage_serving_custom() {
  source "${INSTALL_CUSTOM_YAMLS}"

  local head_dir="${E2E_YAML_DIR}/serving/HEAD/install"
  local head_post_install_dir="${E2E_YAML_DIR}/serving/HEAD/post-install"

  mkdir -p "${head_dir}"
  mkdir -p "${head_post_install_dir}"

  cp "${SERVING_CORE_YAML}" "${head_dir}"
  cp "${SERVING_DOMAINMAPPING_YAML}" "${head_dir}"
  cp "${SERVING_HPA_YAML}" "${head_dir}"
  cp "${SERVING_POST_INSTALL_JOBS_YAML}" "${head_post_install_dir}"
}
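
# stage_serving_head and stage_serving_custom share their whole copy step; a
# possible shared helper, sketched here but not part of this repo:
function demo_stage_head_manifests() {
  local head_dir="${E2E_YAML_DIR}/serving/HEAD/install"
  local head_post_install_dir="${E2E_YAML_DIR}/serving/HEAD/post-install"
  mkdir -p "${head_dir}" "${head_post_install_dir}"
  cp "${SERVING_CORE_YAML}" "${SERVING_DOMAINMAPPING_YAML}" "${SERVING_HPA_YAML}" "${head_dir}"
  cp "${SERVING_POST_INSTALL_JOBS_YAML}" "${head_post_install_dir}"
}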

function stage_serving_latest() {
  header "Staging Serving ${LATEST_SERVING_RELEASE_VERSION}"
  local latest_dir="${E2E_YAML_DIR}/serving/latest-release/install"
  local latest_post_install_dir="${E2E_YAML_DIR}/serving/latest-release/post-install"
  local version="${LATEST_SERVING_RELEASE_VERSION}"

  mkdir -p "${latest_dir}"
  mkdir -p "${latest_post_install_dir}"

  # Download the latest release of Knative Serving.
  local url="https://github.com/knative/serving/releases/download/${version}"

  wget "${url}/serving-core.yaml" -P "${latest_dir}" \
    || fail_test "Unable to download latest knative/serving core file."

  wget "${url}/serving-domainmapping.yaml" -P "${latest_dir}" \
    || fail_test "Unable to download latest knative/serving domain mapping file."

  wget "${url}/serving-hpa.yaml" -P "${latest_dir}" \
    || fail_test "Unable to download latest knative/serving hpa file."

  wget "${url}/serving-post-install-jobs.yaml" -P "${latest_post_install_dir}" \
    || fail_test "Unable to download latest knative/serving post install file."
}
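
# After staging, the tree consumed by the install step looks roughly like this
# (paths assembled from the variables above):
#   ${E2E_YAML_DIR}/serving/latest-release/install/serving-core.yaml
#   ${E2E_YAML_DIR}/serving/latest-release/install/serving-domainmapping.yaml
#   ${E2E_YAML_DIR}/serving/latest-release/install/serving-hpa.yaml
#   ${E2E_YAML_DIR}/serving/latest-release/post-install/serving-post-install-jobs.yaml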

function stage_test_resources() {
  header "Staging Test Resources"

  local source_dir="${REPO_ROOT_DIR}/test/config"
  local target_dir="${E2E_YAML_DIR}/test/config"

  mkdir -p "${target_dir}"

  for file in $(find -L "${source_dir}" -type f -name "*.yaml"); do
    if [[ "${file}" == *"test/config/ytt"* ]]; then
      continue
    fi
    target="${file/${source_dir}/$target_dir}"
    mkdir -p "$(dirname "$target")"

    if grep -Fq "ko://" "${file}"; then
      local ko_target=$(mktemp)
      echo building "${file/$REPO_ROOT_DIR/}"
      ko resolve $(ko_flags) -f "${file}" > "${ko_target}" || fail_test "failed to build test resource"
      file="${ko_target}"
    fi

    echo templating "${file/$REPO_ROOT_DIR/}" to "${target}"
    overlay_system_namespace "${file}" > "${target}" || fail_test "failed to template"
  done
}
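
# A standalone demo of the ${var/pattern/replacement} expansion used above to
# remap files from source_dir into target_dir (toy paths, illustration only):
function demo_prefix_swap() {
  local source_dir=/repo/test/config
  local target_dir=/tmp/e2e-yaml/test/config
  local file=/repo/test/config/security/psp.yaml
  echo "${file/${source_dir}/$target_dir}"  # /tmp/e2e-yaml/test/config/security/psp.yaml
}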

function ko_flags() {
  local KO_YAML_FLAGS="-P"
  local KO_FLAGS="${KO_FLAGS:-}"

  [[ "${KO_DOCKER_REPO}" != gcr.io/* ]] && KO_YAML_FLAGS=""

  if [[ "${KO_FLAGS}" != *"--platform"* ]]; then
    KO_YAML_FLAGS="${KO_YAML_FLAGS} --platform=all"
  fi

  echo "${KO_YAML_FLAGS} ${KO_FLAGS}"
}
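
# Example outputs, assuming nothing else sets KO_FLAGS: for a gcr.io repo the
# -P (preserve import paths) flag is kept, for a local registry it is dropped,
# and --platform=all is appended whenever no platform was given.
#   KO_DOCKER_REPO=gcr.io/foo ko_flags            # "-P --platform=all"
#   KO_DOCKER_REPO=registry.local:5000 ko_flags   # " --platform=all"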

function overlay_system_namespace() {
  run_ytt \
    -f "${REPO_ROOT_DIR}/test/config/ytt/lib" \
    -f "${REPO_ROOT_DIR}/test/config/ytt/values.yaml" \
    -f "${REPO_ROOT_DIR}/test/config/ytt/overlay-system-namespace.yaml" \
    -f "${1}" \
    --data-value serving.namespaces.system="${SYSTEM_NAMESPACE}"
}

function run_ytt() {
  run_go_tool github.com/k14s/ytt/cmd/ytt ytt "$@"
}

function run_kapp() {
  # TODO drop the sha when kapp releases a version with the
  # following bug fix included
  #
  # https://github.com/vmware-tanzu/carvel-kapp/pull/213
  run_go_tool github.com/k14s/kapp/cmd/kapp@d5b8c43b5678 kapp "$@"
}
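
# An illustrative deploy through this wrapper (hypothetical arguments; the app
# names mirror the ones torn down in knative_teardown):
#   run_kapp deploy --yes --app serving -f "${E2E_YAML_DIR}/serving/HEAD/install"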

@ -14,181 +14,104 @@
# See the License for the specific language governing permissions and
# limitations under the License.

function is_ingress_class() {
  [[ "${INGRESS_CLASS}" == *"${1}"* ]]
}

function stage_istio_head() {
  header "Staging Istio YAML (HEAD)"
  local istio_head_dir="${E2E_YAML_DIR}/istio/HEAD/install"
  mkdir -p "${istio_head_dir}"
  download_net_istio_yamls "${REPO_ROOT_DIR}/third_party/istio-latest/net-istio.yaml" "${istio_head_dir}"
}

function stage_istio_latest() {
  header "Staging Istio YAML (${LATEST_NET_ISTIO_RELEASE_VERSION})"
  local istio_latest_dir="${E2E_YAML_DIR}/istio/latest-release/install"
  mkdir -p "${istio_latest_dir}"

  download_net_istio_yamls \
    "https://github.com/knative-sandbox/net-istio/releases/download/${LATEST_NET_ISTIO_RELEASE_VERSION}/net-istio.yaml" \
    "${istio_latest_dir}"
}

function download_net_istio_yamls() {
  local net_istio_yaml="$1"
  local target_dir="$2"

  if [[ "${net_istio_yaml}" == "http"* ]]; then
    wget "${net_istio_yaml}" -P "${target_dir}" \
      || fail_test "Unable to download istio file ${net_istio_yaml}"
  else
    cp "${net_istio_yaml}" "${target_dir}"
  fi

  # Point to our local copy
  net_istio_yaml="${target_dir}/$(basename "${net_istio_yaml}")"

  local sha=$(head -n 1 ${net_istio_yaml} | grep "# Generated when HEAD was" | sed 's/^.* //')
  # TODO: remove this when all the net-istio.yaml in use contain a commit ID
  if [[ -z "${sha:-}" ]]; then
    sha="191bc5fe5a4b35b64f70577c3e44e44fb699cc5f"
    echo "Hard coded NET_ISTIO_COMMIT: ${sha}"
  else
    echo "Got NET_ISTIO_COMMIT from ${1}: ${sha}"
  fi

  local istio_yaml="$(net_istio_file_url "$sha" istio.yaml)"
  local istio_config_yaml="$(net_istio_file_url "$sha" config-istio.yaml)"

  wget -P "${target_dir}" "${istio_yaml}" \
    || fail_test "Unable to get istio install file ${istio_yaml}"

  # Some istio profiles don't have a config-istio so do a HEAD request to check
  # before downloading
  if wget -S --spider "${istio_config_yaml}" &> /dev/null; then
    wget -P "${target_dir}" "${istio_config_yaml}" \
      || fail_test "Unable to get istio install file ${istio_config_yaml}"
  else
    echo "istio profile does not have a config-istio.yaml upstream"
  fi
}

function net_istio_file_url() {
  local sha="$1"
  local file="$2"

  local profile="istio"
  if (( KIND )); then
    profile+="-kind"
  else
    profile+="-ci"
  fi
  if [[ $MESH -eq 0 ]]; then
    profile+="-no"
  fi
  profile+="-mesh"

  echo "https://raw.githubusercontent.com/knative-sandbox/net-istio/${sha}/third_party/istio-${ISTIO_VERSION}/${profile}/${file}"
}

function setup_ingress_env_vars() {
  if is_ingress_class istio; then
    export GATEWAY_OVERRIDE=istio-ingressgateway
    export GATEWAY_NAMESPACE_OVERRIDE=istio-system
  fi
}
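
# An example of the URL net_istio_file_url assembles, with made-up inputs
# (KIND=0, MESH=1, ISTIO_VERSION=stable):
#   net_istio_file_url "0123abc" istio.yaml
#   # -> https://raw.githubusercontent.com/knative-sandbox/net-istio/0123abc/third_party/istio-stable/istio-ci-mesh/istio.yaml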

function install_kourier() {
  local INSTALL_KOURIER_YAML="./third_party/kourier-latest/kourier.yaml"
  local YAML_NAME=$(mktemp -p $TMP_DIR --suffix=.$(basename "${INSTALL_KOURIER_YAML}"))
  sed "s/${KNATIVE_DEFAULT_NAMESPACE}/${SYSTEM_NAMESPACE}/g" ${INSTALL_KOURIER_YAML} > ${YAML_NAME}
  echo "Kourier YAML: ${YAML_NAME}"
  echo ">> Bringing up Kourier"

  kubectl apply -f ${YAML_NAME} || return 1
  UNINSTALL_LIST+=( "${YAML_NAME}" )

  echo ">> Patching Kourier"
  # Scale replicas of the Kourier gateways to handle large qps
  kubectl scale -n kourier-system deployment 3scale-kourier-gateway --replicas=6
}

function install_kong() {
  local INSTALL_KONG_YAML="./third_party/kong-latest/kong.yaml"
  echo "Kong YAML: ${INSTALL_KONG_YAML}"
  echo ">> Bringing up Kong"

  kubectl apply -f ${INSTALL_KONG_YAML} || return 1
  UNINSTALL_LIST+=( "${INSTALL_KONG_YAML}" )

  echo ">> Patching Kong"
  # Scale replicas of the Kong gateways to handle large qps
  kubectl scale -n kong deployment ingress-kong --replicas=6
}

function install_ambassador() {
  local AMBASSADOR_MANIFESTS_PATH="./third_party/ambassador-latest/"
  echo "Ambassador YAML: ${AMBASSADOR_MANIFESTS_PATH}"

  echo ">> Creating namespace 'ambassador'"
  kubectl create namespace ambassador || return 1

  echo ">> Installing Ambassador"
  kubectl apply -n ambassador -f ${AMBASSADOR_MANIFESTS_PATH} || return 1
  UNINSTALL_LIST+=( "${AMBASSADOR_MANIFESTS_PATH}" )

  # echo ">> Fixing Ambassador's permissions"
  # kubectl patch clusterrolebinding ambassador -p '{"subjects":[{"kind": "ServiceAccount", "name": "ambassador", "namespace": "ambassador"}]}' || return 1

  # echo ">> Enabling Knative support in Ambassador"
  # kubectl set env --namespace ambassador deployments/ambassador AMBASSADOR_KNATIVE_SUPPORT=true || return 1

  echo ">> Patching Ambassador"
  # Scale replicas of the Ambassador gateway to handle large qps
  kubectl scale -n ambassador deployment ambassador --replicas=6
}

function install_contour() {
  local INSTALL_CONTOUR_YAML="./third_party/contour-latest/contour.yaml"
  local INSTALL_NET_CONTOUR_YAML="./third_party/contour-latest/net-contour.yaml"
  echo "Contour YAML: ${INSTALL_CONTOUR_YAML}"
  echo "Contour KIngress YAML: ${INSTALL_NET_CONTOUR_YAML}"

  echo ">> Bringing up Contour"
  sed 's/--log-level info/--log-level debug/g' "${INSTALL_CONTOUR_YAML}" | kubectl apply -f - || return 1

  UNINSTALL_LIST+=( "${INSTALL_CONTOUR_YAML}" )
  HA_COMPONENTS+=( "contour-ingress-controller" )

  local NET_CONTOUR_YAML_NAME=${TMP_DIR}/${INSTALL_NET_CONTOUR_YAML##*/}
  sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${INSTALL_NET_CONTOUR_YAML} > ${NET_CONTOUR_YAML_NAME}
  echo ">> Bringing up net-contour"
  kubectl apply -f ${NET_CONTOUR_YAML_NAME} || return 1

  # Disable verbosity until https://github.com/golang/go/issues/40771 is fixed.
  export GO_TEST_VERBOSITY=standard-quiet

  UNINSTALL_LIST+=( "${NET_CONTOUR_YAML_NAME}" )
}

function wait_until_ingress_running() {
  if [[ -n "${ISTIO_VERSION:-}" ]]; then
    wait_until_pods_running istio-system || return 1
    wait_until_service_has_external_http_address istio-system istio-ingressgateway || return 1
  fi
  # we must set these override values to allow the test spoofing client to work with Kourier
  # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
  if is_ingress_class kourier; then
    export GATEWAY_OVERRIDE=kourier
    export GATEWAY_NAMESPACE_OVERRIDE=kourier-system
    wait_until_pods_running kourier-system || return 1
    wait_until_service_has_external_http_address kourier-system kourier
  fi
  # we must set these override values to allow the test spoofing client to work with Ambassador
  # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
  if is_ingress_class ambassador; then
    export GATEWAY_OVERRIDE=ambassador
    export GATEWAY_NAMESPACE_OVERRIDE=ambassador
    wait_until_pods_running ambassador || return 1
    wait_until_service_has_external_http_address ambassador ambassador
  fi
  # we must set these override values to allow the test spoofing client to work with Contour
  # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
  if is_ingress_class contour; then
    export GATEWAY_OVERRIDE=envoy
    export GATEWAY_NAMESPACE_OVERRIDE=contour-external
    wait_until_pods_running contour-external || return 1
    wait_until_pods_running contour-internal || return 1
    wait_until_service_has_external_ip "${GATEWAY_NAMESPACE_OVERRIDE}" "${GATEWAY_OVERRIDE}"
  fi
  # we must set these override values to allow the test spoofing client to work with Kong
  # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
  if is_ingress_class kong; then
    export GATEWAY_OVERRIDE=kong-proxy
    export GATEWAY_NAMESPACE_OVERRIDE=kong
    wait_until_pods_running kong || return 1
    wait_until_service_has_external_http_address kong kong-proxy
  fi
}
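
# is_ingress_class does a substring match on INGRESS_CLASS, so the fully
# qualified class set at install time still selects the right block above
# (toy value, illustration only):
function demo_ingress_class_match() {
  local class="kourier.ingress.networking.knative.dev"
  [[ "${class}" == *"kourier"* ]] && echo "kourier selected"
}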


@ -27,19 +27,13 @@

source $(dirname $0)/e2e-common.sh

# Helper functions.

# Script entry point.

# Skip installing istio as an add-on.
# Temporarily increasing the cluster size for serving tests to rule out
# resource/eviction as causes of flakiness.
# Pin to 1.18 since the scale test is super flaky on 1.19.
initialize --skip-istio-addon --min-nodes=4 --max-nodes=4 --enable-ha --cluster-version=1.18 "$@"

# Run the tests
header "Running tests"

@ -56,35 +50,10 @@ fi
if (( HTTPS )); then
  use_https="--https"
  toggle_feature autoTLS Enabled config-network
  kubectl apply -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/caissuer/
  add_trap "kubectl delete -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/caissuer/ --ignore-not-found" SIGKILL SIGTERM SIGQUIT
fi
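
# toggle_feature flips a single key in a ConfigMap; enabling autoTLS above is
# roughly equivalent to this sketch (the helper's exact internals may differ):
#   kubectl patch cm config-network -n "${SYSTEM_NAMESPACE}" \
#     --type merge -p '{"data":{"autoTLS":"Enabled"}}'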

# Keep the bucket count in sync with test/ha/ha.go.
kubectl -n "${SYSTEM_NAMESPACE}" patch configmap/config-leader-election --type=merge \
  --patch='{"data":{"buckets": "'${BUCKETS}'"}}' || fail_test

kubectl patch hpa activator -n "${SYSTEM_NAMESPACE}" \
  --type "merge" \
  --patch '{"spec": {"minReplicas": '${REPLICAS}', "maxReplicas": '${REPLICAS}'}}' || fail_test

# Scale up all of the HA components in knative-serving.
scale_controlplane "${HA_COMPONENTS[@]}"

# Changing the bucket count and cycling the controllers will leave around stale
# lease resources at the old sharding factor, so clean these up.
kubectl -n ${SYSTEM_NAMESPACE} delete leases --all

# Wait for a new leader Controller to prevent race conditions during service reconciliation.
wait_for_leader_controller || fail_test

# Dump the leases post-setup.
header "Leaders"
kubectl get lease -n "${SYSTEM_NAMESPACE}"

# Give the controller time to sync with the rest of the system components.
sleep 30

# Run conformance and e2e tests.

# Currently only Istio, Contour and Kourier implement the alpha features.
@ -97,7 +66,8 @@ if [[ -z "${INGRESS_CLASS}" \
fi

go_test_e2e -timeout=30m \
  ./test/conformance/api/... \
  ./test/conformance/runtime/... \
  ./test/e2e \
  ${parallelism} \
  ${alpha} \

@ -105,7 +75,7 @@ go_test_e2e -timeout=30m \
  "--resolvabledomain=$(use_resolvable_domain)" "${use_https}" || failed=1

if (( HTTPS )); then
  kubectl delete -f ${E2E_YAML_DIR}/test/config/autotls/certmanager/caissuer/ --ignore-not-found
  toggle_feature autoTLS Disabled config-network
fi

@ -130,6 +100,7 @@ go_test_e2e -timeout=2m ./test/e2e/gc || failed=1
kubectl replace cm "config-gc" -n ${SYSTEM_NAMESPACE} -f ${TMP_DIR}/config-gc.yaml
toggle_feature responsive-revision-gc Disabled

# Run scale tests.
# Note that we use a very high -parallel because each ksvc is run as its own
# sub-test. If this is not larger than the maximum scale tested then the test
@ -139,9 +110,11 @@ go_test_e2e -timeout=20m -parallel=300 ./test/scale || failed=1
# Run HA tests separately as they stop core Knative Serving pods.
# Define a short -spoofinterval to ensure frequent probing while stopping pods.
go_test_e2e -timeout=25m -failfast -parallel=1 ./test/ha \
  ${alpha} \
  --enable-beta \
  -replicas="${REPLICAS:-1}" \
  -buckets="${BUCKETS:-1}" \
  -spoofinterval="10ms" || failed=1

(( failed )) && fail_test

@ -37,12 +37,8 @@ source "$(dirname "${BASH_SOURCE[0]}")/e2e-common.sh"
# Temporarily increasing the cluster size for serving tests to rule out
# resource/eviction as causes of flakiness.
# Pin to 1.18 since the scale test is super flaky on 1.19.
initialize "$@" --skip-istio-addon --min-nodes=4 --max-nodes=4 --cluster-version=1.18 \
  --install-latest-release

# TODO(#2656): Reduce the timeout after we get this test consistently passing.
TIMEOUT=30m