mirror of https://github.com/istio/istio.io.git

commit 580ea900c7 (parent 28f320c4b7)

    Replace "sleep" sample throughout Istio docs (#15767)

    * The big one.
    * fix gencheck
    * fix gen
    * Change sort order of "curl" to be above "helloworld"
    * switch another order
@@ -9,19 +9,19 @@
 [configuration profile](/docs/setup/additional-setup/config-profiles/).
 {{< /tip >}}

-* Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests.
+* Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests.
   If you have
   [automatic sidecar injection](/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection)
   enabled, run the following command to deploy the sample app:

   {{< text bash >}}
-  $ kubectl apply -f @samples/sleep/sleep.yaml@
+  $ kubectl apply -f @samples/curl/curl.yaml@
   {{< /text >}}

-  Otherwise, manually inject the sidecar before deploying the `sleep` application with the following command:
+  Otherwise, manually inject the sidecar before deploying the `curl` application with the following command:

   {{< text bash >}}
-  $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@)
+  $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@)
   {{< /text >}}

 {{< tip >}}

@@ -31,5 +31,5 @@
 * Set the `SOURCE_POD` environment variable to the name of your source pod:

   {{< text bash >}}
-  $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+  $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
   {{< /text >}}
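Once set, the variable plugs straight into `kubectl exec`. A minimal smoke test, assuming the renamed sample's container is called `curl` (the target URL below is illustrative only, not part of the original page):

{{< text bash >}}
$ echo "$SOURCE_POD"
$ kubectl exec "$SOURCE_POD" -c curl -- curl -sS -o /dev/null -w "%{http_code}\n" http://httpbin.org/get
{{< /text >}}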
@@ -21,13 +21,13 @@
 ####################################################################################################

 bpsnip_before_you_begin_egress_before_you_begin_1() {
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
 }

 bpsnip_before_you_begin_egress_before_you_begin_2() {
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml)
 }

 bpsnip_before_you_begin_egress_before_you_begin_3() {
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
 }
@@ -77,7 +77,7 @@ inpod_mark: 1337
 Follow the steps below to confirm that the sockets on ports 15001, 15006, and 15008 are open and in the listening state.

 {{< text bash >}}
-$ kubectl debug $(kubectl get pod -l app=sleep -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it -n ambient-demo --image nicolaka/netshoot -- ss -ntlp
+$ kubectl debug $(kubectl get pod -l app=curl -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it -n ambient-demo --image nicolaka/netshoot -- ss -ntlp
 Defaulting debug container name to debugger-nhd4d.
 State Recv-Q Send-Q Local Address:Port Peer Address:PortProcess
 LISTEN 0 128 127.0.0.1:15080 0.0.0.0:*

@@ -91,7 +91,7 @@ LISTEN 0 128 *:15008 *:*
 To view the iptables rules set up inside one of the application pods, execute this command:

 {{< text bash >}}
-$ kubectl debug $(kubectl get pod -l app=sleep -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it --image gcr.io/istio-release/base --profile=netadmin -n ambient-demo -- iptables-save
+$ kubectl debug $(kubectl get pod -l app=curl -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it --image gcr.io/istio-release/base --profile=netadmin -n ambient-demo -- iptables-save
 Defaulting debug container name to debugger-m44qc.
 # Generated by iptables-save
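The same debug command can be narrowed to just the three in-pod redirection ports. This filtered variant is a convenience sketch, not part of the original page:

{{< text bash >}}
$ kubectl debug $(kubectl get pod -l app=curl -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it -n ambient-demo --image nicolaka/netshoot -- sh -c 'ss -ntl | grep -E ":(15001|15006|15008) "'
{{< /text >}}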
@@ -36,12 +36,12 @@ $ kubectl delete namespace istio-system

 ## Remove the sample application

-To delete the Bookinfo sample application and the `sleep` deployment, run the following:
+To delete the Bookinfo sample application and the `curl` deployment, run the following:

 {{< text bash >}}
-$ kubectl delete -f {{< github_file >}}/samples/bookinfo/platform/kube/bookinfo.yaml
-$ kubectl delete -f {{< github_file >}}/samples/bookinfo/platform/kube/bookinfo-versions.yaml
-$ kubectl delete -f {{< github_file >}}/samples/sleep/sleep.yaml
+$ kubectl delete -f samples/bookinfo/platform/kube/bookinfo.yaml
+$ kubectl delete -f samples/bookinfo/platform/kube/bookinfo-versions.yaml
+$ kubectl delete -f samples/curl/curl.yaml
 {{< /text >}}

 ## Remove the Kubernetes Gateway API CRDs
@@ -31,7 +31,7 @@ istioctl waypoint delete --all
 }

 snip_remove_the_sample_application_1() {
-kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/platform/kube/bookinfo.yaml
-kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/platform/kube/bookinfo-versions.yaml
-kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/sleep/sleep.yaml
+kubectl delete -f samples/bookinfo/platform/kube/bookinfo.yaml
+kubectl delete -f samples/bookinfo/platform/kube/bookinfo-versions.yaml
+kubectl delete -f samples/curl/curl.yaml
 }
@@ -37,16 +37,16 @@ EOF
 If you open the Bookinfo application in your browser (`http://localhost:8080/productpage`), you will see the product page, just as before. However, if you try to access the `productpage` service from a different service account, you should see an error.

-Let's try accessing the Bookinfo application from a `sleep` pod:
+Let's try accessing the Bookinfo application from a `curl` pod:

-{{< text syntax=bash snip_id=deploy_sleep >}}
-$ kubectl apply -f {{< github_file >}}/samples/sleep/sleep.yaml
+{{< text syntax=bash snip_id=deploy_curl >}}
+$ kubectl apply -f samples/curl/curl.yaml
 {{< /text >}}

-Since the `sleep` pod is using a different service account, it will not have access to the `productpage` service:
+Since the `curl` pod is using a different service account, it will not have access to the `productpage` service:

 {{< text bash >}}
-$ kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage"
+$ kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage"
 command terminated with exit code 56
 {{< /text >}}

@@ -68,7 +68,7 @@ NAME CLASS ADDRESS PROGRAMMED AGE
 waypoint istio-waypoint 10.96.58.95 True 42s
 {{< /text >}}

-Adding an [L7 authorization policy](/docs/ambient/usage/l7-features/) will explicitly allow the `sleep` service to send `GET` requests to the `productpage` service, but perform no other operations:
+Adding an [L7 authorization policy](/docs/ambient/usage/l7-features/) will explicitly allow the `curl` service to send `GET` requests to the `productpage` service, but perform no other operations:

 {{< text syntax=bash snip_id=deploy_l7_policy >}}
 $ kubectl apply -f - <<EOF

@@ -87,7 +87,7 @@ spec:
 - from:
   - source:
       principals:
-      - cluster.local/ns/default/sa/sleep
+      - cluster.local/ns/default/sa/curl
   to:
   - operation:
       methods: ["GET"]

@@ -104,7 +104,7 @@ Confirm the new waypoint proxy is enforcing the updated authorization policy:

 {{< text bash >}}
 $ # This fails with an RBAC error because we're not using a GET operation
-$ kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage" -X DELETE
+$ kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage" -X DELETE
 RBAC: access denied
 {{< /text >}}

@@ -115,8 +115,8 @@ RBAC: access denied
 {{< /text >}}

 {{< text bash >}}
-$ # This works as we're explicitly allowing GET requests from the sleep pod
-$ kubectl exec deploy/sleep -- curl -s http://productpage:9080/productpage | grep -o "<title>.*</title>"
+$ # This works as we're explicitly allowing GET requests from the curl pod
+$ kubectl exec deploy/curl -- curl -s http://productpage:9080/productpage | grep -o "<title>.*</title>"
 <title>Simple Bookstore App</title>
 {{< /text >}}
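For reference, the policy fragments scattered across the hunks above assemble into a complete L7 `AuthorizationPolicy` along these lines. This is a sketch: the `metadata.name` and the `targetRefs` binding to the `waypoint` Gateway are assumptions not shown in the diff, while the `rules` section is taken verbatim from the hunks:

{{< text yaml >}}
apiVersion: security.istio.io/v1
kind: AuthorizationPolicy
metadata:
  name: productpage-viewer    # hypothetical name
  namespace: default
spec:
  targetRefs:                 # assumed binding to the waypoint shown above
  - kind: Gateway
    group: gateway.networking.k8s.io
    name: waypoint
  action: ALLOW
  rules:
  - from:
    - source:
        principals:
        - cluster.local/ns/default/sa/curl
    to:
    - operation:
        methods: ["GET"]
{{< /text >}}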
@@ -40,12 +40,12 @@ spec:
 EOF
 }

-snip_deploy_sleep() {
-kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/sleep/sleep.yaml
+snip_deploy_curl() {
+kubectl apply -f samples/curl/curl.yaml
 }

 snip_enforce_layer_4_authorization_policy_3() {
-kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage"
+kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage"
 }

 ! IFS=$'\n' read -r -d '' snip_enforce_layer_4_authorization_policy_3_out <<\ENDSNIP

@@ -87,7 +87,7 @@ spec:
 - from:
   - source:
       principals:
-      - cluster.local/ns/default/sa/sleep
+      - cluster.local/ns/default/sa/curl
   to:
   - operation:
       methods: ["GET"]

@@ -96,7 +96,7 @@ EOF

 snip_enforce_layer_7_authorization_policy_4() {
 # This fails with an RBAC error because we're not using a GET operation
-kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage" -X DELETE
+kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage" -X DELETE
 }

 ! IFS=$'\n' read -r -d '' snip_enforce_layer_7_authorization_policy_4_out <<\ENDSNIP

@@ -113,8 +113,8 @@ RBAC: access denied
 ENDSNIP

 snip_enforce_layer_7_authorization_policy_6() {
-# This works as we're explicitly allowing GET requests from the sleep pod
-kubectl exec deploy/sleep -- curl -s http://productpage:9080/productpage | grep -o "<title>.*</title>"
+# This works as we're explicitly allowing GET requests from the curl pod
+kubectl exec deploy/curl -- curl -s http://productpage:9080/productpage | grep -o "<title>.*</title>"
 }

 ! IFS=$'\n' read -r -d '' snip_enforce_layer_7_authorization_policy_6_out <<\ENDSNIP
@@ -40,7 +40,7 @@ EOF
 To confirm that roughly 10% of the traffic from 100 requests goes to `reviews-v2`, you can run the following command:

 {{< text syntax=bash snip_id=test_traffic_split >}}
-$ kubectl exec deploy/sleep -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done"
+$ kubectl exec deploy/curl -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done"
 {{< /text >}}

 You'll notice the majority of requests go to `reviews-v1`. You can confirm the same if you open the Bookinfo application in your browser and refresh the page multiple times. Notice the requests from `reviews-v1` don't have any stars, while the requests from `reviews-v2` have black stars.
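The 90/10 split itself is applied just above this hunk, behind the `EOF` context line. One plausible shape for it, using the Kubernetes Gateway API; the resource name and exact weights are assumptions inferred from the surrounding prose:

{{< text yaml >}}
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: reviews            # hypothetical name
spec:
  parentRefs:
  - group: ""
    kind: Service
    name: reviews
    port: 9080
  rules:
  - backendRefs:
    - name: reviews-v1
      port: 9080
      weight: 90           # ~90% of traffic, per the prose above
    - name: reviews-v2
      port: 9080
      weight: 10           # ~10% of traffic
{{< /text >}}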
@@ -44,5 +44,5 @@ EOF
 }

 snip_test_traffic_split() {
-kubectl exec deploy/sleep -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done"
+kubectl exec deploy/curl -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done"
 }
@@ -39,8 +39,8 @@ _verify_like snip_deploy_and_configure_the_ingress_gateway_3 "$snip_deploy_and_c
 _verify_contains snip_add_bookinfo_to_the_mesh_1 "$snip_add_bookinfo_to_the_mesh_1_out"

 snip_deploy_l4_policy
-snip_deploy_sleep
-_wait_for_deployment default sleep
+snip_deploy_curl
+_wait_for_deployment default curl
 _verify_contains snip_enforce_layer_4_authorization_policy_3 "$snip_enforce_layer_4_authorization_policy_3_out"

 _verify_contains snip_deploy_waypoint "$snip_deploy_waypoint_out"
@@ -18,10 +18,10 @@ One of the key advantages of Wasm extensibility is that extensions can be loaded
 1. Set up Istio by following the instructions in the [ambient mode Getting Started guide](/docs/ambient/getting-started).
 1. Deploy the [Bookinfo sample application](/docs/ambient/getting-started/deploy-sample-app).
 1. [Add the default namespace to the ambient mesh](/docs/ambient/getting-started/secure-and-visualize).
-1. Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests.
+1. Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests.

    {{< text syntax=bash >}}
-   $ kubectl apply -f @samples/sleep/sleep.yaml@
+   $ kubectl apply -f @samples/curl/curl.yaml@
    {{< /text >}}

 ## At a gateway

@@ -73,14 +73,14 @@ The Istio agent will interpret the WasmPlugin configuration, download remote Was
 1. Test `/productpage` without credentials:

    {{< text syntax=bash snip_id=test_gateway_productpage_without_credentials >}}
-   $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
+   $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
    401
    {{< /text >}}

 1. Test `/productpage` with the credentials configured in the WasmPlugin resource:

    {{< text syntax=bash snip_id=test_gateway_productpage_with_credentials >}}
-   $ kubectl exec deploy/sleep -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
+   $ kubectl exec deploy/curl -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
    200
    {{< /text >}}

@@ -99,7 +99,7 @@ $ istioctl waypoint apply --enroll-namespace --wait
 Verify traffic reaches the service:

 {{< text syntax=bash snip_id=verify_traffic >}}
-$ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
+$ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
 200
 {{< /text >}}

@@ -153,14 +153,14 @@ basic-auth-at-waypoint 14m
 1. Test internal `/productpage` without credentials:

    {{< text syntax=bash snip_id=test_waypoint_productpage_without_credentials >}}
-   $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
+   $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
    401
    {{< /text >}}

 1. Test internal `/productpage` with credentials:

    {{< text syntax=bash snip_id=test_waypoint_productpage_with_credentials >}}
-   $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
+   $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
    200
    {{< /text >}}

@@ -200,21 +200,21 @@ EOF
 1. Test the internal `/productpage` with the credentials configured at the generic `waypoint` proxy:

    {{< text syntax=bash snip_id=test_waypoint_service_productpage_with_credentials >}}
-   $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
+   $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
    200
    {{< /text >}}

 1. Test the internal `/reviews` with credentials configured at the specific `reviews-svc-waypoint` proxy:

    {{< text syntax=bash snip_id=test_waypoint_service_reviews_with_credentials >}}
-   $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1
+   $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1
    200
    {{< /text >}}

 1. Test internal `/reviews` without credentials:

    {{< text syntax=bash snip_id=test_waypoint_service_reviews_without_credentials >}}
-   $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1
+   $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1
    401
    {{< /text >}}
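The `WasmPlugin` resource these tests exercise is applied outside the hunks shown here. A sketch of its likely shape, assuming the `basic_auth` example module from the istio-ecosystem wasm-extensions repository — the module URL, version, `targetRefs`, and config schema are all assumptions; `YWRtaW4zOmFkbWluMw==` decodes to `admin3:admin3`, matching the credentials used in the tests:

{{< text yaml >}}
apiVersion: extensions.istio.io/v1alpha1
kind: WasmPlugin
metadata:
  name: basic-auth                # hypothetical name
  namespace: default
spec:
  targetRefs:                     # assumed binding to the Bookinfo gateway
  - kind: Gateway
    group: gateway.networking.k8s.io
    name: bookinfo-gateway
  url: oci://ghcr.io/istio-ecosystem/wasm-extensions/basic_auth:1.12.0  # assumed module
  phase: AUTHN
  pluginConfig:
    basic_auth_rules:
    - prefix: "/productpage"
      request_methods:
      - "GET"
      credentials:
      - "admin3:admin3"           # base64-encodes to YWRtaW4zOmFkbWluMw==
{{< /text >}}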
@@ -21,7 +21,7 @@
 ####################################################################################################

 snip_before_you_begin_1() {
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
 }

 snip_get_gateway() {

@@ -59,7 +59,7 @@ EOF
 }

 snip_test_gateway_productpage_without_credentials() {
-kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
+kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
 }

 ! IFS=$'\n' read -r -d '' snip_test_gateway_productpage_without_credentials_out <<\ENDSNIP

@@ -67,7 +67,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null "http://book
 ENDSNIP

 snip_test_gateway_productpage_with_credentials() {
-kubectl exec deploy/sleep -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
+kubectl exec deploy/curl -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage"
 }

 ! IFS=$'\n' read -r -d '' snip_test_gateway_productpage_with_credentials_out <<\ENDSNIP

@@ -79,7 +79,7 @@ istioctl waypoint apply --enroll-namespace --wait
 }

 snip_verify_traffic() {
-kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
+kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
 }

 ! IFS=$'\n' read -r -d '' snip_verify_traffic_out <<\ENDSNIP

@@ -132,7 +132,7 @@ basic-auth-at-waypoint 14m
 ENDSNIP

 snip_test_waypoint_productpage_without_credentials() {
-kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
+kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage
 }

 ! IFS=$'\n' read -r -d '' snip_test_waypoint_productpage_without_credentials_out <<\ENDSNIP

@@ -140,7 +140,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://produ
 ENDSNIP

 snip_test_waypoint_productpage_with_credentials() {
-kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
+kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
 }

 ! IFS=$'\n' read -r -d '' snip_test_waypoint_productpage_with_credentials_out <<\ENDSNIP

@@ -173,7 +173,7 @@ EOF
 }

 snip_test_waypoint_service_productpage_with_credentials() {
-kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
+kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage
 }

 ! IFS=$'\n' read -r -d '' snip_test_waypoint_service_productpage_with_credentials_out <<\ENDSNIP

@@ -181,7 +181,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authoriz
 ENDSNIP

 snip_test_waypoint_service_reviews_with_credentials() {
-kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1
+kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1
 }

 ! IFS=$'\n' read -r -d '' snip_test_waypoint_service_reviews_with_credentials_out <<\ENDSNIP

@@ -189,7 +189,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authoriz
 ENDSNIP

 snip_test_waypoint_service_reviews_without_credentials() {
-kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1
+kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1
 }

 ! IFS=$'\n' read -r -d '' snip_test_waypoint_service_reviews_without_credentials_out <<\ENDSNIP
@@ -25,7 +25,7 @@ install_gateway_api_crds
 # deploy test application
 source "tests/util/samples.sh"
 startup_bookinfo_sample
-startup_sleep_sample
+startup_curl_sample

 # snip_annotate_bookinfo_gateway
 kubectl annotate gateway bookinfo-gateway networking.istio.io/service-type=ClusterIP --namespace=default

@@ -78,6 +78,6 @@ kubectl label namespace default istio.io/use-waypoint-
 istioctl x waypoint delete --all

-cleanup_sleep_sample
+cleanup_curl_sample
 cleanup_bookinfo_sample
 remove_gateway_api_crds
@@ -20,7 +20,7 @@ A basic L4 authorization policy looks like this:
 apiVersion: security.istio.io/v1
 kind: AuthorizationPolicy
 metadata:
-  name: allow-sleep-to-httpbin
+  name: allow-curl-to-httpbin
 spec:
   selector:
     matchLabels:

@@ -30,12 +30,12 @@ spec:
 - from:
   - source:
       principals:
-      - cluster.local/ns/ambient-demo/sa/sleep
+      - cluster.local/ns/ambient-demo/sa/curl
 {{< /text >}}

 This policy can be used in both {{< gloss "sidecar" >}}sidecar mode{{< /gloss >}} and ambient mode.

-The L4 (TCP) features of the Istio `AuthorizationPolicy` API have the same functional behavior in ambient mode as in sidecar mode. When there is no authorization policy provisioned, the default action is `ALLOW`. Once a policy is provisioned, pods targeted by the policy only permit traffic which is explicitly allowed. In the above example, pods with the label `app: httpbin` only permit traffic from sources with an identity principal of `cluster.local/ns/ambient-demo/sa/sleep`. Traffic from all other sources will be denied.
+The L4 (TCP) features of the Istio `AuthorizationPolicy` API have the same functional behavior in ambient mode as in sidecar mode. When there is no authorization policy provisioned, the default action is `ALLOW`. Once a policy is provisioned, pods targeted by the policy only permit traffic which is explicitly allowed. In the above example, pods with the label `app: httpbin` only permit traffic from sources with an identity principal of `cluster.local/ns/ambient-demo/sa/curl`. Traffic from all other sources will be denied.

 ## Targeting policies

@@ -71,7 +71,7 @@ This example adds a check for the HTTP GET method:
 apiVersion: security.istio.io/v1
 kind: AuthorizationPolicy
 metadata:
-  name: allow-sleep-to-httpbin
+  name: allow-curl-to-httpbin
 spec:
   selector:
     matchLabels:

@@ -81,7 +81,7 @@ spec:
 - from:
   - source:
       principals:
-      - cluster.local/ns/ambient-demo/sa/sleep
+      - cluster.local/ns/ambient-demo/sa/curl
   to:
   - operation:
       methods: ["GET"]
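Assembled from the two hunks above plus the prose (which names the `app: httpbin` selector), the complete L4 policy reads as follows. `action: ALLOW` is the default and is shown only for clarity:

{{< text yaml >}}
apiVersion: security.istio.io/v1
kind: AuthorizationPolicy
metadata:
  name: allow-curl-to-httpbin
spec:
  selector:
    matchLabels:
      app: httpbin
  action: ALLOW
  rules:
  - from:
    - source:
        principals:
        - cluster.local/ns/ambient-demo/sa/curl
{{< /text >}}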
@@ -10,17 +10,17 @@ This guide describes what to do if you have enrolled a namespace, service or wor
 ## Problems with traffic routing or security policy

-To send some requests to the `reviews` service via the `productpage` service from the `sleep` pod:
+To send some requests to the `reviews` service via the `productpage` service from the `curl` pod:

 {{< text bash >}}
-$ kubectl exec deploy/sleep -- curl -s http://productpage:9080/productpage
+$ kubectl exec deploy/curl -- curl -s http://productpage:9080/productpage
 {{< /text >}}

-To send some requests to the `reviews` `v2` pod from the `sleep` pod:
+To send some requests to the `reviews` `v2` pod from the `curl` pod:

 {{< text bash >}}
 $ export REVIEWS_V2_POD_IP=$(kubectl get pod -l version=v2,app=reviews -o jsonpath='{.items[0].status.podIP}')
-$ kubectl exec deploy/sleep -- curl -s http://$REVIEWS_V2_POD_IP:9080/reviews/1
+$ kubectl exec deploy/curl -- curl -s http://$REVIEWS_V2_POD_IP:9080/reviews/1
 {{< /text >}}

 Requests to the `reviews` service should be enforced by the `reviews-svc-waypoint` for any L7 policies.

@@ -46,7 +46,6 @@ Requests to the `reviews` `v2` pod should be enforced by the `reviews-v2-pod-way
 default bookinfo-gateway-istio 10.43.164.194 waypoint
 default details 10.43.160.119 waypoint
 default kubernetes 10.43.0.1 waypoint
-default notsleep 10.43.156.147 waypoint
 default productpage 10.43.172.254 waypoint
 default ratings 10.43.71.236 waypoint
 default reviews 10.43.162.105 reviews-svc-waypoint

@@ -60,7 +59,6 @@ Requests to the `reviews` `v2` pod should be enforced by the `reviews-v2-pod-way
 NAMESPACE POD NAME IP NODE WAYPOINT PROTOCOL
 default bookinfo-gateway-istio-7c57fc4647-wjqvm 10.42.2.8 k3d-k3s-default-server-0 None TCP
 default details-v1-698d88b-wwsnv 10.42.2.4 k3d-k3s-default-server-0 None HBONE
-default notsleep-685df55c6c-nwhs6 10.42.0.9 k3d-k3s-default-agent-0 None HBONE
 default productpage-v1-675fc69cf-fp65z 10.42.2.6 k3d-k3s-default-server-0 None HBONE
 default ratings-v1-6484c4d9bb-crjtt 10.42.0.4 k3d-k3s-default-agent-0 None HBONE
 default reviews-svc-waypoint-c49f9f569-b492t 10.42.2.10 k3d-k3s-default-server-0 None TCP
@@ -21,13 +21,12 @@ $ istioctl ztunnel-config workloads
 NAMESPACE POD NAME IP NODE WAYPOINT PROTOCOL
 default bookinfo-gateway-istio-59dd7c96db-q9k6v 10.244.1.11 ambient-worker None TCP
 default details-v1-cf74bb974-5sqkp 10.244.1.5 ambient-worker None HBONE
-default notsleep-5c785bc478-zpg7j 10.244.2.7 ambient-worker2 None HBONE
 default productpage-v1-87d54dd59-fn6vw 10.244.1.10 ambient-worker None HBONE
 default ratings-v1-7c4bbf97db-zvkdw 10.244.1.6 ambient-worker None HBONE
 default reviews-v1-5fd6d4f8f8-knbht 10.244.1.16 ambient-worker None HBONE
 default reviews-v2-6f9b55c5db-c94m2 10.244.1.17 ambient-worker None HBONE
 default reviews-v3-7d99fd7978-7rgtd 10.244.1.18 ambient-worker None HBONE
-default sleep-7656cf8794-r7zb9 10.244.1.12 ambient-worker None HBONE
+default curl-7656cf8794-r7zb9 10.244.1.12 ambient-worker None HBONE
 istio-system istiod-7ff4959459-qcpvp 10.244.2.5 ambient-worker2 None TCP
 istio-system ztunnel-6hvcw 10.244.1.4 ambient-worker None TCP
 istio-system ztunnel-mf476 10.244.2.6 ambient-worker2 None TCP

@@ -51,8 +50,8 @@ spiffe://cluster.local/ns/default/sa/bookinfo-ratings Leaf Available
 spiffe://cluster.local/ns/default/sa/bookinfo-ratings Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z
 spiffe://cluster.local/ns/default/sa/bookinfo-reviews Leaf Available true 285697fb2cf806852d3293298e300c86 2024-05-05T09:17:47Z 2024-05-04T09:15:47Z
 spiffe://cluster.local/ns/default/sa/bookinfo-reviews Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z
-spiffe://cluster.local/ns/default/sa/sleep Leaf Available true fa33bbb783553a1704866842586e4c0b 2024-05-05T09:25:49Z 2024-05-04T09:23:49Z
-spiffe://cluster.local/ns/default/sa/sleep Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z
+spiffe://cluster.local/ns/default/sa/curl Leaf Available true fa33bbb783553a1704866842586e4c0b 2024-05-05T09:25:49Z 2024-05-04T09:23:49Z
+spiffe://cluster.local/ns/default/sa/curl Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z
 {{< /text >}}

 Using these commands, you can check that ztunnel proxies are configured with all the expected workloads and TLS certificates, and any missing information can be used to troubleshoot networking errors.
@@ -83,7 +82,7 @@ $ kubectl debug -it $ISTIOD -n istio-system --image=curlimages/curl -- curl loca
 ztunnel's traffic logs can be queried using the standard Kubernetes log facilities.

 {{< text bash >}}
-$ kubectl -n default exec deploy/sleep -- sh -c 'for i in $(seq 1 10); do curl -s -I http://productpage:9080/; done'
+$ kubectl -n default exec deploy/curl -- sh -c 'for i in $(seq 1 10); do curl -s -I http://productpage:9080/; done'
 HTTP/1.1 200 OK
 Server: Werkzeug/3.0.1 Python/3.12.1
 --snip--

@@ -93,8 +92,8 @@ The response displayed confirms the client pod receives responses from the servi
 {{< text bash >}}
 $ kubectl -n istio-system logs -l app=ztunnel | grep -E "inbound|outbound"
-2024-05-04T09:59:05.028709Z info access connection complete src.addr=10.244.1.12:60059 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.10:9080 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="inbound" bytes_sent=175 bytes_recv=80 duration="1ms"
-2024-05-04T09:59:05.028771Z info access connection complete src.addr=10.244.1.12:58508 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.10:15008 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="outbound" bytes_sent=80 bytes_recv=175 duration="1ms"
+2024-05-04T09:59:05.028709Z info access connection complete src.addr=10.244.1.12:60059 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.10:9080 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="inbound" bytes_sent=175 bytes_recv=80 duration="1ms"
+2024-05-04T09:59:05.028771Z info access connection complete src.addr=10.244.1.12:58508 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.10:15008 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="outbound" bytes_sent=80 bytes_recv=175 duration="1ms"
 --snip--
 {{< /text >}}

@@ -115,16 +114,16 @@ If the destination is a service with multiple instances or pods and there is no
 By calling a service with multiple backends, we can validate that client traffic is balanced across the service replicas.

 {{< text bash >}}
-$ kubectl -n default exec deploy/sleep -- sh -c 'for i in $(seq 1 10); do curl -s -I http://reviews:9080/; done'
+$ kubectl -n default exec deploy/curl -- sh -c 'for i in $(seq 1 10); do curl -s -I http://reviews:9080/; done'
 {{< /text >}}

 {{< text bash >}}
 $ kubectl -n istio-system logs -l app=ztunnel | grep -E "outbound"
 --snip--
-2024-05-04T10:11:04.964851Z info access connection complete src.addr=10.244.1.12:35520 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
-2024-05-04T10:11:04.969578Z info access connection complete src.addr=10.244.1.12:35526 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
-2024-05-04T10:11:04.974720Z info access connection complete src.addr=10.244.1.12:35536 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.7:15008 dst.hbone_addr="10.244.1.7:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v1-5fd6d4f8f8-26j92" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
-2024-05-04T10:11:04.979462Z info access connection complete src.addr=10.244.1.12:35552 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.8:15008 dst.hbone_addr="10.244.1.8:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v2-6f9b55c5db-c2dtw" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
+2024-05-04T10:11:04.964851Z info access connection complete src.addr=10.244.1.12:35520 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
+2024-05-04T10:11:04.969578Z info access connection complete src.addr=10.244.1.12:35526 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
+2024-05-04T10:11:04.974720Z info access connection complete src.addr=10.244.1.12:35536 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.7:15008 dst.hbone_addr="10.244.1.7:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v1-5fd6d4f8f8-26j92" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
+2024-05-04T10:11:04.979462Z info access connection complete src.addr=10.244.1.12:35552 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.8:15008 dst.hbone_addr="10.244.1.8:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v2-6f9b55c5db-c2dtw" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms"
 {{< /text >}}

 This is a round-robin load balancing algorithm, separate from and independent of any load balancing algorithm that may be configured in a `DestinationRule`'s `trafficPolicy` field, since, as discussed previously, all aspects of the `VirtualService` API are instantiated on the waypoint proxies and not the ztunnel proxies.
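To tally the round-robin distribution from those logs rather than eyeballing it, the `dst.workload` field can be counted. A convenience one-liner, not part of the original page:

{{< text bash >}}
$ kubectl -n istio-system logs -l app=ztunnel | grep outbound | grep -o 'dst.workload="reviews-v[0-9][a-z0-9-]*"' | sort | uniq -c
{{< /text >}}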
@@ -45,8 +45,8 @@ istio_tcp_connections_opened_total{
 reporter="source",
 request_protocol="tcp",
 response_flags="-",
-source_app="sleep",
-source_principal="spiffe://cluster.local/ns/default/sa/sleep",source_workload_namespace="default",
+source_app="curl",
+source_principal="spiffe://cluster.local/ns/default/sa/curl",source_workload_namespace="default",
 ...}
 {{< /text >}}
@@ -54,11 +54,11 @@ Validate that the `connection_security_policy` value is set to `mutual_tls` alon
 ## Validate mTLS from logs

-You can also view either the source or destination ztunnel log to confirm mTLS is enabled, along with peer identities. Below is an example of the source ztunnel's log for a request from the `sleep` service to the `details` service:
+You can also view either the source or destination ztunnel log to confirm mTLS is enabled, along with peer identities. Below is an example of the source ztunnel's log for a request from the `curl` service to the `details` service:

 {{< text syntax=plain >}}
-2024-08-21T15:32:05.754291Z info access connection complete src.addr=10.42.0.9:33772 src.workload="sleep-7656cf8794-6lsm4" src.namespace="default"
-src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.42.0.5:15008 dst.hbone_addr=10.42.0.5:9080 dst.service="details.default.svc.cluster.local"
+2024-08-21T15:32:05.754291Z info access connection complete src.addr=10.42.0.9:33772 src.workload="curl-7656cf8794-6lsm4" src.namespace="default"
+src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.42.0.5:15008 dst.hbone_addr=10.42.0.5:9080 dst.service="details.default.svc.cluster.local"
 dst.workload="details-v1-857849f66-ft8wx" dst.namespace="default" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-details"
 direction="outbound" bytes_sent=84 bytes_recv=358 duration="15ms"
 {{< /text >}}
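A quick way to pull just the peer identities out of the ztunnel log — a convenience variation on the log excerpt above, not part of the original page:

{{< text bash >}}
$ kubectl -n istio-system logs -l app=ztunnel | grep -oE '(src|dst)\.identity="[^"]*"' | sort | uniq -c
{{< /text >}}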
@@ -570,7 +570,7 @@ An authorization policy includes a selector, an action, and a list of rules:
 - The `when` field specifies the conditions needed to apply the rule

 The following example shows an authorization policy that allows two sources, the
-`cluster.local/ns/default/sa/sleep` service account and the `dev` namespace, to
+`cluster.local/ns/default/sa/curl` service account and the `dev` namespace, to
 access the workloads with the `app: httpbin` and `version: v1` labels in the
 `foo` namespace when requests sent have a valid JWT token.

@@ -589,7 +589,7 @@ spec:
   rules:
   - from:
     - source:
-        principals: ["cluster.local/ns/default/sa/sleep"]
+        principals: ["cluster.local/ns/default/sa/curl"]
     - source:
         namespaces: ["dev"]
     to:

@@ -832,7 +832,7 @@ spec:
   rules:
   - from:
     - source:
-        principals: ["cluster.local/ns/default/sa/sleep"]
+        principals: ["cluster.local/ns/default/sa/curl"]
     to:
     - operation:
         methods: ["GET"]
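Pieced together from the hunk at line 589 and the prose above it, the full two-source policy looks roughly like this. The `metadata` block and the exact `when` condition are assumptions (the prose only says "a valid JWT token"); the `rules` fragment is verbatim from the diff:

{{< text yaml >}}
apiVersion: security.istio.io/v1
kind: AuthorizationPolicy
metadata:
  name: httpbin              # hypothetical name
  namespace: foo
spec:
  selector:
    matchLabels:
      app: httpbin
      version: v1
  action: ALLOW
  rules:
  - from:
    - source:
        principals: ["cluster.local/ns/default/sa/curl"]
    - source:
        namespaces: ["dev"]
    to:
    - operation:
        methods: ["GET"]
    when:
    - key: request.auth.claims[iss]   # assumed condition standing in for "a valid JWT token"
      values: ["https://accounts.google.com"]
{{< /text >}}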
@@ -55,7 +55,7 @@ disrupt your application, it continues to run and serve user requests.
 reviews-v2-56f6855586-cnrjp 1/1 Running 0 7h
 reviews-v2-56f6855586-lxc49 1/1 Running 0 7h
 reviews-v2-56f6855586-qh84k 1/1 Running 0 7h
-sleep-88ddbcfdd-cc85s 1/1 Running 0 7h
+curl-88ddbcfdd-cc85s 1/1 Running 0 7h
 {{< /text >}}

 1. Kubernetes replaced the original pods of `productpage` with the
@@ -44,14 +44,14 @@ tests, end-to-end tests and tests in a staging environment.
 1. Send a request to the pod and see that it returns the correct result:

    {{< text bash >}}
-   $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -- curl -sS "$REVIEWS_V2_POD_IP:9080/reviews/7"
+   $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -- curl -sS "$REVIEWS_V2_POD_IP:9080/reviews/7"
    {"id": "7","reviews": [{ "reviewer": "Reviewer1", "text": "An extremely entertaining play by Shakespeare. The slapstick humour is refreshing!", "rating": {"stars": 5, "color": "black"}},{ "reviewer": "Reviewer2", "text": "Absolutely fun and entertaining. The play lacks thematic depth when compared to other plays by Shakespeare.", "rating": {"stars": 4, "color": "black"}}]}
    {{< /text >}}

 1. Perform primitive load testing by sending a request 10 times in a row:

    {{< text bash >}}
-   $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -- sh -c "for i in 1 2 3 4 5 6 7 8 9 10; do curl -o /dev/null -s -w '%{http_code}\n' $REVIEWS_V2_POD_IP:9080/reviews/7; done"
+   $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -- sh -c "for i in 1 2 3 4 5 6 7 8 9 10; do curl -o /dev/null -s -w '%{http_code}\n' $REVIEWS_V2_POD_IP:9080/reviews/7; done"
    200
    200
    ...
@@ -86,18 +86,18 @@ microservice.
 {{< /text >}}

 1. After the services achieve the `Running` status, deploy a testing pod,
-   [sleep]({{< github_tree >}}/samples/sleep), to use for sending requests
+   [curl]({{< github_tree >}}/samples/curl), to use for sending requests
    to your microservices:

    {{< text bash >}}
-   $ kubectl apply -f {{< github_file >}}/samples/sleep/sleep.yaml
+   $ kubectl apply -f {{< github_file >}}/samples/curl/curl.yaml
    {{< /text >}}

 1. To confirm that the Bookinfo application is running, send a request to it
    with a curl command from your testing pod:

    {{< text bash >}}
-   $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sS productpage:9080/productpage | grep -o "<title>.*</title>"
+   $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -c curl -- curl -sS productpage:9080/productpage | grep -o "<title>.*</title>"
    <title>Simple Bookstore App</title>
    {{< /text >}}
@@ -53,7 +53,7 @@ enable Istio on all the remaining microservices in one step.
 productpage-v1-59b4f9f8d5-d4prx 2/2 Running 0 2m
 ratings-v1-b7b7fbbc9-sggxf 2/2 Running 0 2m
 reviews-v2-dfbcf859c-27dvk 2/2 Running 0 2m
-sleep-88ddbcfdd-cc85s 1/1 Running 0 7h
+curl-88ddbcfdd-cc85s 1/1 Running 0 7h
 {{< /text >}}

 1. Access the Istio dashboard using the custom URL you set in your `/etc/hosts` file
@@ -17,7 +17,7 @@ Test your microservice, in production!
 1. Issue an HTTP request from the testing pod to one of your services:

    {{< text bash >}}
-   $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -- curl -sS http://ratings:9080/ratings/7
+   $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -- curl -sS http://ratings:9080/ratings/7
    {{< /text >}}

 ## Chaos testing
@@ -50,7 +50,7 @@ the pods' status with `kubectl get pods`.
 reviews-v1-77c65dc5c6-5wt8g 1/1 Running 0 47m
 reviews-v1-77c65dc5c6-kjvxs 1/1 Running 0 48m
 reviews-v1-77c65dc5c6-r55tl 1/1 Running 0 47m
-sleep-88ddbcfdd-l9zq4 1/1 Running 0 47m
+curl-88ddbcfdd-l9zq4 1/1 Running 0 47m
 {{< /text >}}

 Note that the first pod was restarted once.

@@ -87,7 +87,7 @@ the pods' status with `kubectl get pods`.
 reviews-v1-77c65dc5c6-5wt8g 1/1 Running 0 48m
 reviews-v1-77c65dc5c6-kjvxs 1/1 Running 0 49m
 reviews-v1-77c65dc5c6-r55tl 1/1 Running 0 48m
-sleep-88ddbcfdd-l9zq4 1/1 Running 0 48m
+curl-88ddbcfdd-l9zq4 1/1 Running 0 48m
 {{< /text >}}

 The first pod restarted twice and two other `details` pods
@@ -83,7 +83,7 @@ kubectl label namespace bookinfo istio-injection=enabled --overwrite
 kubectl apply -n bookinfo -f samples/bookinfo/platform/kube/bookinfo.yaml
 kubectl apply -n bookinfo -f samples/bookinfo/networking/bookinfo-gateway.yaml
 kubectl apply -n bookinfo -f samples/bookinfo/networking/destination-rule-all.yaml
-startup_sleep_sample
+startup_curl_sample
 for deploy in "productpage-v1" "details-v1" "ratings-v1" "reviews-v1" "reviews-v2" "reviews-v3"; do
     _wait_for_deployment bookinfo "$deploy"
 done

@@ -105,5 +105,5 @@ _verify_elided run_curl "${snip_reaching_kubernetes_services_from_the_virtual_ma
 docker stop vm
 kubectl delete -f samples/multicluster/expose-istiod.yaml -n istio-system --ignore-not-found=true
 echo y | istioctl uninstall --revision=default
-cleanup_sleep_sample
+cleanup_curl_sample
 kubectl delete namespace istio-system vm bookinfo --ignore-not-found=true
@@ -115,11 +115,11 @@ of injected sidecar when it was.
 to force the sidecar to be injected:

 {{< text bash yaml >}}
-$ kubectl get deployment sleep -o yaml | grep "sidecar.istio.io/inject:" -B4
+$ kubectl get deployment curl -o yaml | grep "sidecar.istio.io/inject:" -B4
 template:
   metadata:
     labels:
-      app: sleep
+      app: curl
       sidecar.istio.io/inject: "true"
 {{< /text >}}
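If the label is missing, it can be added without editing the manifest by hand. One way is a JSON patch — a sketch; note the `~1` escape that JSON Pointer requires for the `/` in the label key:

{{< text bash >}}
$ kubectl patch deployment curl --type=json \
    -p='[{"op": "add", "path": "/spec/template/metadata/labels/sidecar.istio.io~1inject", "value": "true"}]'
{{< /text >}}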
@@ -169,10 +169,10 @@ will also appear in the events of the namespace associated with the deployment.
 For example, if the `istiod` control plane pod was not running when you tried to deploy your pod, the events would show the following error:

 {{< text bash >}}
-$ kubectl get events -n sleep
+$ kubectl get events -n curl
 ...
-23m Normal SuccessfulCreate replicaset/sleep-9454cc476 Created pod: sleep-9454cc476-khp45
-22m Warning FailedCreate replicaset/sleep-9454cc476 Error creating: Internal error occurred: failed calling webhook "namespace.sidecar-injector.istio.io": failed to call webhook: Post "https://istiod.istio-system.svc:443/inject?timeout=10s": dial tcp 10.96.44.51:443: connect: connection refused
+23m Normal SuccessfulCreate replicaset/curl-9454cc476 Created pod: curl-9454cc476-khp45
+22m Warning FailedCreate replicaset/curl-9454cc476 Error creating: Internal error occurred: failed calling webhook "namespace.sidecar-injector.istio.io": failed to call webhook: Post "https://istiod.istio-system.svc:443/inject?timeout=10s": dial tcp 10.96.44.51:443: connect: connection refused
 {{< /text >}}

 {{< text bash >}}
@@ -272,12 +272,12 @@ spec:
 The port name `http-web` in the Service definition explicitly specifies the http protocol for that port.

-Let us assume we have a [sleep]({{< github_tree >}}/samples/sleep) pod `Deployment` as well in the default namespace.
-When `nginx` is accessed from this `sleep` pod using its Pod IP (this is one of the common ways to access a headless service), the request goes via the `PassthroughCluster` to the server side, but the sidecar proxy on the server side fails to find the route entry to `nginx` and fails with `HTTP 503 UC`.
+Let us assume we have a [curl]({{< github_tree >}}/samples/curl) pod `Deployment` as well in the default namespace.
+When `nginx` is accessed from this `curl` pod using its Pod IP (this is one of the common ways to access a headless service), the request goes via the `PassthroughCluster` to the server side, but the sidecar proxy on the server side fails to find the route entry to `nginx` and fails with `HTTP 503 UC`.

 {{< text bash >}}
-$ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}')
-$ kubectl exec -it $SOURCE_POD -c sleep -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}"
+$ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}')
+$ kubectl exec -it $SOURCE_POD -c curl -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}"
 503
 {{< /text >}}

@@ -290,8 +290,8 @@ Here are some of the ways to avoid this 503 error:
 The Host header in the curl request above will be the Pod IP by default. Specifying the Host header as `nginx.default` in our request to `nginx` successfully returns `HTTP 200 OK`.

 {{< text bash >}}
-$ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}')
-$ kubectl exec -it $SOURCE_POD -c sleep -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}"
+$ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}')
+$ kubectl exec -it $SOURCE_POD -c curl -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}"
 200
 {{< /text >}}

@@ -304,13 +304,13 @@ Here are some of the ways to avoid this 503 error:
 This is useful in certain scenarios where a client may not be able to include header information in the request.

 {{< text bash >}}
-$ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}')
-$ kubectl exec -it $SOURCE_POD -c sleep -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}"
+$ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}')
+$ kubectl exec -it $SOURCE_POD -c curl -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}"
 200
 {{< /text >}}

 {{< text bash >}}
-$ kubectl exec -it $SOURCE_POD -c sleep -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}"
+$ kubectl exec -it $SOURCE_POD -c curl -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}"
 200
 {{< /text >}}

@@ -319,8 +319,8 @@ Here are some of the ways to avoid this 503 error:
 A specific instance of a headless service can also be accessed using just the domain name.

 {{< text bash >}}
-$ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}')
-$ kubectl exec -it $SOURCE_POD -c sleep -- curl web-0.nginx.default -s -o /dev/null -w "%{http_code}"
+$ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}')
+$ kubectl exec -it $SOURCE_POD -c curl -- curl web-0.nginx.default -s -o /dev/null -w "%{http_code}"
 200
 {{< /text >}}
@@ -171,7 +171,7 @@ you ensure Istiod is working as expected:
 2021-04-23T20:53:29.507641Z info ads XDS: Pushing:2021-04-23T20:53:29Z/23 Services:15 ConnectedEndpoints:2 Version:2021-04-23T20:53:29Z/23
 2021-04-23T20:53:29.507911Z debug authorization Processed authorization policy for httpbin-74fb669cc6-lpscm.foo with details:
 * found 0 CUSTOM actions
-2021-04-23T20:53:29.508077Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details:
+2021-04-23T20:53:29.508077Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details:
 * found 0 CUSTOM actions
 2021-04-23T20:53:29.508128Z debug authorization Processed authorization policy for httpbin-74fb669cc6-lpscm.foo with details:
 * found 1 DENY actions, 0 ALLOW actions, 0 AUDIT actions

@@ -179,11 +179,11 @@ you ensure Istiod is working as expected:
 * built 1 HTTP filters for DENY action
 * added 1 HTTP filters to filter chain 0
 * added 1 HTTP filters to filter chain 1
-2021-04-23T20:53:29.508158Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details:
+2021-04-23T20:53:29.508158Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details:
 * found 0 DENY actions, 0 ALLOW actions, 0 AUDIT actions
-2021-04-23T20:53:29.509097Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details:
+2021-04-23T20:53:29.509097Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details:
 * found 0 CUSTOM actions
-2021-04-23T20:53:29.509167Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details:
+2021-04-23T20:53:29.509167Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details:
 * found 0 DENY actions, 0 ALLOW actions, 0 AUDIT actions
 2021-04-23T20:53:29.509501Z debug authorization Processed authorization policy for httpbin-74fb669cc6-lpscm.foo with details:
 * found 0 CUSTOM actions

@@ -198,7 +198,7 @@ you ensure Istiod is working as expected:
 * added 1 TCP filters to filter chain 2
 * added 1 TCP filters to filter chain 3
 * added 1 TCP filters to filter chain 4
-2021-04-23T20:53:29.510903Z info ads LDS: PUSH for node:sleep-557747455f-6dxbl.foo resources:18 size:85.0kB
+2021-04-23T20:53:29.510903Z info ads LDS: PUSH for node:curl-557747455f-6dxbl.foo resources:18 size:85.0kB
 2021-04-23T20:53:29.511487Z info ads LDS: PUSH for node:httpbin-74fb669cc6-lpscm.foo resources:18 size:86.4kB
 {{< /text >}}
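The `rbac` debug output shown in the following hunks is only emitted once debug logging is enabled for the workload's Envoy proxy. One way to do that, assuming the `httpbin` workload in the `foo` namespace from the surrounding text (a sketch, not part of the original page):

{{< text bash >}}
$ istioctl proxy-config log "$(kubectl get pod -l app=httpbin -n foo -o jsonpath='{.items[0].metadata.name}')" -n foo --level rbac:debug
{{< /text >}}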
@@ -323,7 +323,7 @@ are not using `httpbin`.
 {{< text plain >}}
 ...
-2021-04-23T20:43:18.552857Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:46180, directRemoteIP: 10.44.3.13:46180, remoteIP: 10.44.3.13:46180,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/sleep, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000'
+2021-04-23T20:43:18.552857Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:46180, directRemoteIP: 10.44.3.13:46180, remoteIP: 10.44.3.13:46180,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/curl, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000'
 ':path', '/headers'
 ':method', 'GET'
 ':scheme', 'http'

@@ -335,14 +335,14 @@ are not using `httpbin`.
 'x-b3-traceid', '8a124905edf4291a21df326729b264e9'
 'x-b3-spanid', '21df326729b264e9'
 'x-b3-sampled', '0'
-'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/sleep'
+'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/curl'
 , dynamicMetadata: filter_metadata {
 key: "istio_authn"
 value {
 fields {
 key: "request.auth.principal"
 value {
-string_value: "cluster.local/ns/foo/sa/sleep"
+string_value: "cluster.local/ns/foo/sa/curl"
 }
 }
 fields {

@@ -354,13 +354,13 @@ are not using `httpbin`.
 fields {
 key: "source.principal"
 value {
-string_value: "cluster.local/ns/foo/sa/sleep"
+string_value: "cluster.local/ns/foo/sa/curl"
 }
 }
 fields {
 key: "source.user"
 value {
-string_value: "cluster.local/ns/foo/sa/sleep"
+string_value: "cluster.local/ns/foo/sa/curl"
 }
 }
 }

@@ -377,7 +377,7 @@ are not using `httpbin`.
 {{< text plain >}}
 ...
-2021-04-23T20:59:11.838468Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:49826, directRemoteIP: 10.44.3.13:49826, remoteIP: 10.44.3.13:49826,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/sleep, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000'
+2021-04-23T20:59:11.838468Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:49826, directRemoteIP: 10.44.3.13:49826, remoteIP: 10.44.3.13:49826,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/curl, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000'
 ':path', '/headers'
 ':method', 'GET'
 ':scheme', 'http'

@@ -389,14 +389,14 @@ are not using `httpbin`.
 'x-b3-traceid', '696607fc4382b50017c1f7017054c751'
 'x-b3-spanid', '17c1f7017054c751'
 'x-b3-sampled', '0'
-'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/sleep'
+'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/curl'
 , dynamicMetadata: filter_metadata {
 key: "istio_authn"
 value {
 fields {
 key: "request.auth.principal"
 value {
-string_value: "cluster.local/ns/foo/sa/sleep"
+string_value: "cluster.local/ns/foo/sa/curl"
 }
 }
 fields {

@@ -408,13 +408,13 @@ are not using `httpbin`.
 fields {
 key: "source.principal"
 value {
-string_value: "cluster.local/ns/foo/sa/sleep"
+string_value: "cluster.local/ns/foo/sa/curl"
 }
 }
 fields {
 key: "source.user"
 value {
-string_value: "cluster.local/ns/foo/sa/sleep"
+string_value: "cluster.local/ns/foo/sa/curl"
 }
 }
 }

@@ -436,7 +436,7 @@ are not using `httpbin`.
 If you suspect that some of the keys and/or certificates used by Istio aren't correct, you can inspect the contents from any pod:

 {{< text bash >}}
-$ istioctl proxy-config secret sleep-8f795f47d-4s4t7
+$ istioctl proxy-config secret curl-8f795f47d-4s4t7
 RESOURCE NAME TYPE STATUS VALID CERT SERIAL NUMBER NOT AFTER NOT BEFORE
 default Cert Chain ACTIVE true 138092480869518152837211547060273851586 2020-11-11T16:39:48Z 2020-11-10T16:39:48Z
|
||||
ROOTCA CA ACTIVE true 288553090258624301170355571152070165215 2030-11-08T16:34:52Z 2020-11-10T16:34:52Z
|
||||
|
@ -445,7 +445,7 @@ ROOTCA CA ACTIVE true 288553090258624301170
|
|||
By passing the `-o json` flag, you can pass the full certificate content to `openssl` to analyze its contents:
|
||||
|
||||
{{< text bash >}}
|
||||
$ istioctl proxy-config secret sleep-8f795f47d-4s4t7 -o json | jq '[.dynamicActiveSecrets[] | select(.name == "default")][0].secret.tlsCertificate.certificateChain.inlineBytes' -r | base64 -d | openssl x509 -noout -text
|
||||
$ istioctl proxy-config secret curl-8f795f47d-4s4t7 -o json | jq '[.dynamicActiveSecrets[] | select(.name == "default")][0].secret.tlsCertificate.certificateChain.inlineBytes' -r | base64 -d | openssl x509 -noout -text
|
||||
Certificate:
|
||||
Data:
|
||||
Version: 3 (0x2)
|
||||
|
|
|
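If only the identity baked into the chain is in question, a quicker check than reading the full `openssl x509 -text` dump is to print just the subject alternative names. This is a sketch under the same assumptions as the commands above (the `curl-8f795f47d-4s4t7` pod name, the `default` secret, and an `openssl` recent enough to support `-ext`); the output shown is illustrative:

{{< text bash >}}
$ istioctl proxy-config secret curl-8f795f47d-4s4t7 -o json | jq -r \
    '[.dynamicActiveSecrets[] | select(.name == "default")][0].secret.tlsCertificate.certificateChain.inlineBytes' \
    | base64 -d | openssl x509 -noout -ext subjectAltName
X509v3 Subject Alternative Name: critical
    URI:spiffe://cluster.local/ns/default/sa/curl
{{< /text >}}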
@@ -64,23 +64,23 @@ kubectl label namespace default istio-injection=enabled --overwrite
 export IFS=
 echo "${snip_proxyIstioConfig}" > proxyConfig.yaml
 unset IFS
-# yq m -d2 samples/sleep/sleep.yaml proxyConfig.yaml > sleep_istioconfig.yaml
-yq 'select(document_index != 2)' samples/sleep/sleep.yaml > tmp1.yaml
-yq 'select(document_index == 2)' samples/sleep/sleep.yaml > tmp2.yaml
+# yq m -d2 samples/curl/curl.yaml proxyConfig.yaml > curl_istioconfig.yaml
+yq 'select(document_index != 2)' samples/curl/curl.yaml > tmp1.yaml
+yq 'select(document_index == 2)' samples/curl/curl.yaml > tmp2.yaml
 # shellcheck disable=SC2016
 yq eval-all '. as $item ireduce ({}; . *+ $item)' tmp2.yaml proxyConfig.yaml > new2.yaml
-yq . tmp1.yaml new2.yaml > sleep_istioconfig.yaml
+yq . tmp1.yaml new2.yaml > curl_istioconfig.yaml

-kubectl apply -f sleep_istioconfig.yaml
-_wait_for_deployment default sleep
-POD="$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')"
+kubectl apply -f curl_istioconfig.yaml
+_wait_for_deployment default curl
+POD="$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')"
 export POD
 _verify_contains snip_get_stats "circuit_breakers"

 # @cleanup
 set +e
 cleanup_httpbin_sample
-cleanup_sleep_sample
+cleanup_curl_sample
 echo y | istioctl uninstall --revision=default
 kubectl delete ns istio-system
 kubectl label namespace default istio-injection-
@@ -38,7 +38,7 @@ This can also be enabled on a per-pod basis with the [`proxy.istio.io/config` an
 {{< text syntax=yaml snip_id=none >}}
 kind: Deployment
 metadata:
-  name: sleep
+  name: curl
 spec:
 ...
   template:
@@ -81,13 +81,13 @@ Bring up a client application to initiate the DNS request:

 {{< text bash >}}
 $ kubectl label namespace default istio-injection=enabled --overwrite
-$ kubectl apply -f @samples/sleep/sleep.yaml@
+$ kubectl apply -f @samples/curl/curl.yaml@
 {{< /text >}}

 Without the DNS capture, a request to `address.internal` would likely fail to resolve. Once this is enabled, you should instead get a response back based on the configured `address`:

 {{< text bash >}}
-$ kubectl exec deploy/sleep -- curl -sS -v address.internal
+$ kubectl exec deploy/curl -- curl -sS -v address.internal
 * Trying 198.51.100.1:80...
 {{< /text >}}
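For context, `address.internal` is not a real DNS name; it resolves only because a `ServiceEntry` defines it for the mesh. A minimal sketch of such an entry (the exact manifest used earlier in this guide may differ) looks like this:

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1beta1
kind: ServiceEntry
metadata:
  name: external-address
spec:
  hosts:
  - address.internal
  ports:
  - name: http
    number: 80
    protocol: HTTP
  resolution: STATIC
  endpoints:
  - address: 198.51.100.1
EOF
{{< /text >}}

With DNS proxying enabled, the sidecar answers the lookup for `address.internal` itself, which is why `curl` reports `Trying 198.51.100.1:80...`.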
@@ -128,7 +128,7 @@ EOF
 Now, send a request:

 {{< text bash >}}
-$ kubectl exec deploy/sleep -- curl -sS -v auto.internal
+$ kubectl exec deploy/curl -- curl -sS -v auto.internal
 * Trying 240.240.0.1:80...
 {{< /text >}}
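The `auto.internal` case differs: the `ServiceEntry` declares no address at all, so with automatic address allocation enabled Istio hands out a virtual IP from the `240.240.0.0/16` range, which is what the `240.240.0.1` response above reflects. A sketch of such an entry (again, the guide's exact manifest may differ):

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1beta1
kind: ServiceEntry
metadata:
  name: auto-address
spec:
  hosts:
  - auto.internal
  ports:
  - name: http
    number: 80
    protocol: HTTP
  resolution: DNS
EOF
{{< /text >}}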
@@ -214,7 +214,7 @@ A virtual IP address will be assigned to every service entry so that client side
 1. Verify listeners are configured separately for each service at the client side:

 {{< text bash >}}
-$ istioctl pc listener deploy/sleep | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}'
+$ istioctl pc listener deploy/curl | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}'
 ADDRESS=240.240.105.94, DESTINATION=Cluster: outbound|9000||tcp-echo.external-2.svc.cluster.local
 ADDRESS=240.240.69.138, DESTINATION=Cluster: outbound|9000||tcp-echo.external-1.svc.cluster.local
 {{< /text >}}
@@ -224,7 +224,7 @@ A virtual IP address will be assigned to every service entry so that client side
 {{< text bash >}}
 $ kubectl -n external-1 delete -f @samples/tcp-echo/tcp-echo.yaml@
 $ kubectl -n external-2 delete -f @samples/tcp-echo/tcp-echo.yaml@
-$ kubectl delete -f @samples/sleep/sleep.yaml@
+$ kubectl delete -f @samples/curl/curl.yaml@
 $ istioctl uninstall --purge -y
 $ kubectl delete ns istio-system external-1 external-2
 $ kubectl label namespace default istio-injection-
@@ -55,11 +55,11 @@ EOF

 snip_dns_capture_in_action_2() {
 kubectl label namespace default istio-injection=enabled --overwrite
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
 }

 snip_dns_capture_in_action_3() {
-kubectl exec deploy/sleep -- curl -sS -v address.internal
+kubectl exec deploy/curl -- curl -sS -v address.internal
 }

 ! IFS=$'\n' read -r -d '' snip_dns_capture_in_action_3_out <<\ENDSNIP
@@ -84,7 +84,7 @@ EOF
 }

 snip_address_auto_allocation_2() {
-kubectl exec deploy/sleep -- curl -sS -v auto.internal
+kubectl exec deploy/curl -- curl -sS -v auto.internal
 }

 ! IFS=$'\n' read -r -d '' snip_address_auto_allocation_2_out <<\ENDSNIP
@@ -152,7 +152,7 @@ EOF
 }

 snip_external_tcp_services_without_vips_5() {
-istioctl pc listener deploy/sleep | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}'
+istioctl pc listener deploy/curl | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}'
 }

 ! IFS=$'\n' read -r -d '' snip_external_tcp_services_without_vips_5_out <<\ENDSNIP
@@ -163,7 +163,7 @@ ENDSNIP
 snip_cleanup_1() {
 kubectl -n external-1 delete -f samples/tcp-echo/tcp-echo.yaml
 kubectl -n external-2 delete -f samples/tcp-echo/tcp-echo.yaml
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
 istioctl uninstall --purge -y
 kubectl delete ns istio-system external-1 external-2
 kubectl label namespace default istio-injection-
@@ -108,7 +108,7 @@ meshConfig:
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: sleep
+  name: curl
 spec:
 ...
   template:
@@ -16,7 +16,7 @@ and read the [Deployment Models](/docs/ops/deployment/deployment-models/) guide.
 The most common, and also broadest, problem with multi-network installations is that cross-cluster load balancing doesn't work. Usually this manifests as seeing responses only from the cluster-local instance of a Service:

 {{< text bash >}}
-$ for i in $(seq 10); do kubectl --context=$CTX_CLUSTER1 -n sample exec sleep-dd98b5f48-djwdw -c sleep -- curl -s helloworld:5000/hello; done
+$ for i in $(seq 10); do kubectl --context=$CTX_CLUSTER1 -n sample exec curl-dd98b5f48-djwdw -c curl -- curl -s helloworld:5000/hello; done
 Hello version: v1, instance: helloworld-v1-578dd69f69-j69pf
 Hello version: v1, instance: helloworld-v1-578dd69f69-j69pf
 Hello version: v1, instance: helloworld-v1-578dd69f69-j69pf
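To put a number on the skew rather than eyeballing a few responses, one option is to bucket a larger sample by instance. This sketch assumes the `curl` deployment from the verification guide is present in the `sample` namespace:

{{< text bash >}}
$ for i in $(seq 100); do
    kubectl --context="$CTX_CLUSTER1" -n sample exec deploy/curl -c curl \
      -- curl -s helloworld:5000/hello
  done | sort | uniq -c
{{< /text >}}

A healthy multi-network mesh should show responses from both `helloworld-v1` and `helloworld-v2`; seeing only one instance, as above, confirms the load balancing problem.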
@@ -65,9 +65,9 @@ $ kubectl apply --context="${CTX_CLUSTER2}" \
 -f samples/helloworld/helloworld.yaml \
 -l version=v2 -n uninjected-sample
 $ kubectl apply --context="${CTX_CLUSTER1}" \
--f samples/sleep/sleep.yaml -n uninjected-sample
+-f samples/curl/curl.yaml -n uninjected-sample
 $ kubectl apply --context="${CTX_CLUSTER2}" \
--f samples/sleep/sleep.yaml -n uninjected-sample
+-f samples/curl/curl.yaml -n uninjected-sample
 {{< /text >}}

 Verify that there is a helloworld pod running in `cluster2`, using the `-o wide` flag, so we can get the Pod IP:
@@ -75,8 +75,8 @@ Verify that there is a helloworld pod running in `cluster2`, using the `-o wide`
 {{< text bash >}}
 $ kubectl --context="${CTX_CLUSTER2}" -n uninjected-sample get pod -o wide
 NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+curl-557747455f-jdsd8 1/1 Running 0 41s 10.100.0.2 node-2 <none> <none>
 helloworld-v2-54df5f84b-z28p5 1/1 Running 0 43s 10.100.0.1 node-1 <none> <none>
-sleep-557747455f-jdsd8 1/1 Running 0 41s 10.100.0.2 node-2 <none> <none>
 {{< /text >}}

 Take note of the `IP` column for `helloworld`. In this case, it is `10.100.0.1`:
@@ -85,12 +85,12 @@ Take note of the `IP` column for `helloworld`. In this case, it is `10.100.0.1`:
 $ REMOTE_POD_IP=10.100.0.1
 {{< /text >}}

-Next, attempt to send traffic from the `sleep` pod in `cluster1` directly to this Pod IP:
+Next, attempt to send traffic from the `curl` pod in `cluster1` directly to this Pod IP:

 {{< text bash >}}
-$ kubectl exec --context="${CTX_CLUSTER1}" -n uninjected-sample -c sleep \
+$ kubectl exec --context="${CTX_CLUSTER1}" -n uninjected-sample -c curl \
 "$(kubectl get pod --context="${CTX_CLUSTER1}" -n uninjected-sample -l \
-app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS $REMOTE_POD_IP:5000/hello
 Hello version: v2, instance: helloworld-v2-54df5f84b-z28p5
 {{< /text >}}
@@ -133,12 +133,12 @@ guide, ensuring to run the steps for every cluster.
 If you've gone through the sections above and are still having issues, then it's time to dig a little deeper.

 The following steps assume you're following the [HelloWorld verification](/docs/setup/install/multicluster/verify/).
-Before continuing, make sure both `helloworld` and `sleep` are deployed in each cluster.
+Before continuing, make sure both `helloworld` and `curl` are deployed in each cluster.

-From each cluster, find the endpoints the `sleep` service has for `helloworld`:
+From each cluster, find the endpoints the `curl` service has for `helloworld`:

 {{< text bash >}}
-$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld
+$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld
 {{< /text >}}

 Troubleshooting information differs based on the cluster that is the source of traffic:
@@ -148,7 +148,7 @@ Troubleshooting information differs based on the cluster that is the source of t
 {{< tab name="Primary cluster" category-value="primary" >}}

 {{< text bash >}}
-$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld
+$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld
 10.0.0.11:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local
 {{< /text >}}

@@ -171,7 +171,7 @@ $ kubectl get secrets --context=$CTX_CLUSTER1 -n istio-system -l "istio/multiClu
 {{< tab name="Remote cluster" category-value="remote" >}}

 {{< text bash >}}
-$ istioctl --context $CTX_CLUSTER2 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld
+$ istioctl --context $CTX_CLUSTER2 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld
 10.0.1.11:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local
 {{< /text >}}

@@ -201,7 +201,7 @@ $ kubectl get secrets --context=$CTX_CLUSTER1 -n istio-system -l "istio/multiClu
 The steps for Primary and Remote clusters still apply for multi-network, although multi-network has an additional case:

 {{< text bash >}}
-$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld
+$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld
 10.0.5.11:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local
 10.0.6.13:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local
 {{< /text >}}
@@ -234,7 +234,7 @@ value. If that is incorrect, reinstall the gateway and make sure to set the --ne
 On the source pod, check the proxy metadata.

 {{< text bash >}}
-$ kubectl get pod $SLEEP_POD_NAME \
+$ kubectl get pod $CURL_POD_NAME \
 -o jsonpath="{.spec.containers[*].env[?(@.name=='ISTIO_META_NETWORK')].value}"
 {{< /text >}}
@@ -396,17 +396,17 @@ $ istioctl proxy-config bootstrap -n istio-system istio-ingressgateway-7d6874b48

 Verifying connectivity to Istiod is a useful troubleshooting step. Every proxy container in the service mesh should be able to communicate with Istiod. This can be accomplished in a few simple steps:

-1. Create a `sleep` pod:
+1. Create a `curl` pod:

 {{< text bash >}}
 $ kubectl create namespace foo
-$ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
+$ kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
 {{< /text >}}

 1. Test connectivity to Istiod using `curl`. The following example invokes the v1 registration API using default Istiod configuration parameters and mutual TLS enabled:

 {{< text bash >}}
-$ kubectl exec $(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name}) -c sleep -n foo -- curl -sS istiod.istio-system:15014/version
+$ kubectl exec $(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name}) -c curl -n foo -- curl -sS istiod.istio-system:15014/version
 {{< /text >}}

 You should receive a response listing the version of Istiod.
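A complementary check, once the `curl` pod can reach Istiod, is to confirm that its proxy is actually connected and in sync with the control plane. `istioctl proxy-status` lists every proxy and its xDS sync state; the output below is illustrative only:

{{< text bash >}}
$ istioctl proxy-status
NAME                        CLUSTER     CDS      LDS      EDS      RDS      ISTIOD
curl-557747455f-6dxbl.foo   Kubernetes  SYNCED   SYNCED   SYNCED   SYNCED   istiod-...
{{< /text >}}

A proxy stuck in `STALE` or missing from the list entirely points at a connectivity or discovery problem rather than an application one.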
@@ -39,23 +39,23 @@ set -u # Exit on unset value
 _wait_for_deployment istio-system istiod
 _wait_for_deployment istio-system istio-ingressgateway

-# Deploy sleep application with registration label
-snip_apply_sleep
-_wait_for_deployment default sleep
+# Deploy curl application with registration label
+snip_apply_curl
+_wait_for_deployment default curl

 # Set spire-server pod variable
 snip_set_spire_server_pod_name_var

-# Set sleep pod and pod uid variables
-snip_set_sleep_pod_var
+# Set curl pod and pod uid variables
+snip_set_curl_pod_var

-# Verify sleep workload identity was issued by SPIRE
-snip_get_sleep_svid
+# Verify curl workload identity was issued by SPIRE
+snip_get_curl_svid
 _verify_contains snip_get_svid_subject "O = SPIRE"

 # @cleanup
 #
-kubectl delete -f samples/security/spire/sleep-spire.yaml
+kubectl delete -f samples/security/spire/curl-spire.yaml
 istioctl uninstall --purge --skip-confirmation
 kubectl delete ns istio-system
 snip_uninstall_spire
@@ -138,7 +138,7 @@ Below are the equivalent manual registrations based off the automatic registrati
 {{< text bash >}}
 $ kubectl exec -n spire "$SPIRE_SERVER_POD" -- \
 /opt/spire/bin/spire-server entry create \
--spiffeID spiffe://example.org/ns/default/sa/sleep \
+-spiffeID spiffe://example.org/ns/default/sa/curl \
 -parentID spiffe://example.org/ns/spire/sa/spire-agent \
 -selector k8s:ns:default \
 -selector k8s:pod-label:spiffe.io/spire-managed-identity:true \
@@ -253,8 +253,8 @@ Below are the equivalent manual registrations based off the automatic registrati

 1. Deploy an example workload:

-{{< text syntax=bash snip_id=apply_sleep >}}
-$ istioctl kube-inject --filename @samples/security/spire/sleep-spire.yaml@ | kubectl apply -f -
+{{< text syntax=bash snip_id=apply_curl >}}
+$ istioctl kube-inject --filename @samples/security/spire/curl-spire.yaml@ | kubectl apply -f -
 {{< /text >}}

 In addition to needing the `spiffe.io/spire-managed-identity` label, the workload will need the SPIFFE CSI Driver volume to access the SPIRE Agent socket. To accomplish this,
@@ -265,24 +265,24 @@ Below are the equivalent manual registrations based off the automatic registrati
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: sleep
+  name: curl
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app: sleep
+      app: curl
   template:
     metadata:
       labels:
-        app: sleep
+        app: curl
       # Injects custom sidecar template
       annotations:
         inject.istio.io/templates: "sidecar,spire"
     spec:
       terminationGracePeriodSeconds: 0
-      serviceAccountName: sleep
+      serviceAccountName: curl
       containers:
-      - name: sleep
+      - name: curl
         image: curlimages/curl
         command: ["/bin/sleep", "3650d"]
         imagePullPolicy: IfNotPresent
@@ -322,7 +322,7 @@ JWT-SVID TTL : default
 Selector : k8s:pod-uid:88b71387-4641-4d9c-9a89-989c88f7509d

 Entry ID : af7b53dc-4cc9-40d3-aaeb-08abbddd8e54
-SPIFFE ID : spiffe://example.org/ns/default/sa/sleep
+SPIFFE ID : spiffe://example.org/ns/default/sa/curl
 Parent ID : spiffe://example.org/spire/agent/k8s_psat/demo-cluster/bea19580-ae04-4679-a22e-472e18ca4687
 Revision : 0
 X509-SVID TTL : default
@@ -345,14 +345,14 @@ After registering an entry for the Ingress-gateway pod, Envoy receives the ident

 1. Get pod information:

-{{< text syntax=bash snip_id=set_sleep_pod_var >}}
-$ SLEEP_POD=$(kubectl get pod -l app=sleep -o jsonpath="{.items[0].metadata.name}")
+{{< text syntax=bash snip_id=set_curl_pod_var >}}
+$ CURL_POD=$(kubectl get pod -l app=curl -o jsonpath="{.items[0].metadata.name}")
 {{< /text >}}

-1. Retrieve sleep's SVID identity document using the istioctl proxy-config secret command:
+1. Retrieve curl's SVID identity document using the `istioctl proxy-config secret` command:

-{{< text syntax=bash snip_id=get_sleep_svid >}}
-$ istioctl proxy-config secret "$SLEEP_POD" -o json | jq -r \
+{{< text syntax=bash snip_id=get_curl_svid >}}
+$ istioctl proxy-config secret "$CURL_POD" -o json | jq -r \
 '.dynamicActiveSecrets[0].secret.tlsCertificate.certificateChain.inlineBytes' | base64 --decode > chain.pem
 {{< /text >}}

@@ -360,7 +360,7 @@ After registering an entry for the Ingress-gateway pod, Envoy receives the ident

 {{< text syntax=bash snip_id=get_svid_subject >}}
 $ openssl x509 -in chain.pem -text | grep SPIRE
-Subject: C = US, O = SPIRE, CN = sleep-5f4d47c948-njvpk
+Subject: C = US, O = SPIRE, CN = curl-5f4d47c948-njvpk
 {{< /text >}}

 ## SPIFFE federation
@@ -86,7 +86,7 @@ ENDSNIP
 snip_option_2_manual_registration_3() {
 kubectl exec -n spire "$SPIRE_SERVER_POD" -- \
 /opt/spire/bin/spire-server entry create \
--spiffeID spiffe://example.org/ns/default/sa/sleep \
+-spiffeID spiffe://example.org/ns/default/sa/curl \
 -parentID spiffe://example.org/ns/spire/sa/spire-agent \
 -selector k8s:ns:default \
 -selector k8s:pod-label:spiffe.io/spire-managed-identity:true \
@@ -178,16 +178,16 @@ snip_apply_istio_operator_configuration() {
 istioctl install --skip-confirmation -f ./istio.yaml
 }

-snip_apply_sleep() {
-istioctl kube-inject --filename samples/security/spire/sleep-spire.yaml | kubectl apply -f -
+snip_apply_curl() {
+istioctl kube-inject --filename samples/security/spire/curl-spire.yaml | kubectl apply -f -
 }

-snip_set_sleep_pod_var() {
-SLEEP_POD=$(kubectl get pod -l app=sleep -o jsonpath="{.items[0].metadata.name}")
+snip_set_curl_pod_var() {
+CURL_POD=$(kubectl get pod -l app=curl -o jsonpath="{.items[0].metadata.name}")
 }

-snip_get_sleep_svid() {
-istioctl proxy-config secret "$SLEEP_POD" -o json | jq -r \
+snip_get_curl_svid() {
+istioctl proxy-config secret "$CURL_POD" -o json | jq -r \
 '.dynamicActiveSecrets[0].secret.tlsCertificate.certificateChain.inlineBytes' | base64 --decode > chain.pem
 }

@@ -196,7 +196,7 @@ openssl x509 -in chain.pem -text | grep SPIRE
 }

 ! IFS=$'\n' read -r -d '' snip_get_svid_subject_out <<\ENDSNIP
-Subject: C = US, O = SPIRE, CN = sleep-5f4d47c948-njvpk
+Subject: C = US, O = SPIRE, CN = curl-5f4d47c948-njvpk
 ENDSNIP

 snip_uninstall_spire() {
@@ -31,7 +31,7 @@ spec:
 rules:
 - from:
   - source:
-      principals: ["cluster.local/ns/default/sa/sleep"]
+      principals: ["cluster.local/ns/default/sa/curl"]
   - source:
       namespaces: ["httpbin"]
   to:
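To exercise a policy like the one above, compare a request made with the allowed `curl` service account against one made from a different identity. A sketch, assuming the policy guards an `httpbin` workload on port 8000 and the `curl` sample runs in the `default` namespace:

{{< text bash >}}
$ kubectl exec deploy/curl -- curl -s -o /dev/null -w "%{http_code}\n" httpbin:8000/get
200
{{< /text >}}

A request from a pod running under any other service account (and outside the allowed namespaces) should instead return `403`.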
@@ -47,12 +47,12 @@ _wait_for_deployment istio-system istiod
 _wait_for_daemonset istio-system istio-cni-node

 startup_bookinfo_sample
-startup_sleep_sample
+startup_curl_sample

 _verify_contains get_productpage "glyphicon glyphicon-star"

 # @cleanup
 cleanup_bookinfo_sample
-cleanup_sleep_sample
+cleanup_curl_sample
 echo y | istioctl uninstall --revision=default
 kubectl delete ns istio-system
@@ -109,30 +109,30 @@ values:
 $ kubectl apply --namespace ipv6 -f @samples/tcp-echo/tcp-echo-ipv6.yaml@
 {{< /text >}}

-1. Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests.
+1. Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests.

 {{< text bash >}}
-$ kubectl apply -f @samples/sleep/sleep.yaml@
+$ kubectl apply -f @samples/curl/curl.yaml@
 {{< /text >}}

 1. Verify the traffic reaches the dual-stack pods:

 {{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000"
+$ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000"
 hello dualstack
 {{< /text >}}

 1. Verify the traffic reaches the IPv4 pods:

 {{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000"
+$ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000"
 hello ipv4
 {{< /text >}}

 1. Verify the traffic reaches the IPv6 pods:

 {{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000"
+$ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000"
 hello ipv6
 {{< /text >}}

@@ -193,7 +193,7 @@ values:
 1. Verify envoy endpoints are configured to route to both IPv4 and IPv6:

 {{< text syntax=bash snip_id=none >}}
-$ istioctl proxy-config endpoints "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" --port 9000
+$ istioctl proxy-config endpoints "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" --port 9000
 ENDPOINT STATUS OUTLIER CHECK CLUSTER
 10.244.0.19:9000 HEALTHY OK outbound|9000||tcp-echo.ipv4.svc.cluster.local
 10.244.0.26:9000 HEALTHY OK outbound|9000||tcp-echo.dual-stack.svc.cluster.local
@@ -208,6 +208,6 @@ Now you can experiment with dual-stack services in your environment!
 1. Cleanup application namespaces and deployments

 {{< text bash >}}
-$ kubectl delete -f @samples/sleep/sleep.yaml@
+$ kubectl delete -f @samples/curl/curl.yaml@
 $ kubectl delete ns dual-stack ipv4 ipv6
 {{< /text >}}
@@ -40,11 +40,11 @@ kubectl apply --namespace ipv6 -f samples/tcp-echo/tcp-echo-ipv6.yaml
 }

 snip_verification_4() {
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
 }

 snip_verification_5() {
-kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000"
+kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000"
 }

 ! IFS=$'\n' read -r -d '' snip_verification_5_out <<\ENDSNIP
@@ -52,7 +52,7 @@ hello dualstack
 ENDSNIP

 snip_verification_6() {
-kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000"
+kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000"
 }

 ! IFS=$'\n' read -r -d '' snip_verification_6_out <<\ENDSNIP
@@ -60,7 +60,7 @@ hello ipv4
 ENDSNIP

 snip_verification_7() {
-kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000"
+kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000"
 }

 ! IFS=$'\n' read -r -d '' snip_verification_7_out <<\ENDSNIP
@@ -68,6 +68,6 @@ hello ipv6
 ENDSNIP

 snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
 kubectl delete ns dual-stack ipv4 ipv6
 }
@@ -28,7 +28,7 @@ snip_verification_3
 snip_verification_4

 # wait for deployments to be up and running
-_wait_for_deployment default sleep
+_wait_for_deployment default curl
 _wait_for_deployment dual-stack tcp-echo
 _wait_for_deployment ipv4 tcp-echo
 _wait_for_deployment ipv6 tcp-echo
@@ -35,8 +35,8 @@ kubectl label namespace default istio-injection-
 snip_install_istio_2

 # TODO: how to make sure previous tests cleaned up everything?
-# Cleanup sleep
-cleanup_sleep_sample
+# Cleanup curl
+cleanup_curl_sample

 # Deploy the sample Application
 snip_deploy_the_sample_application_1
@@ -40,19 +40,19 @@ Note that unlike manual injection, automatic injection occurs at the pod-level.

 #### Deploying an app

-Deploy sleep app. Verify both deployment and pod have a single container.
+Deploy the curl app. Verify that both the deployment and the pod have a single container.

 {{< text bash >}}
-$ kubectl apply -f @samples/sleep/sleep.yaml@
+$ kubectl apply -f @samples/curl/curl.yaml@
 $ kubectl get deployment -o wide
 NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
-sleep 1/1 1 1 12s sleep curlimages/curl app=sleep
+curl 1/1 1 1 12s curl curlimages/curl app=curl
 {{< /text >}}

 {{< text bash >}}
 $ kubectl get pod
 NAME READY STATUS RESTARTS AGE
-sleep-8f795f47d-hdcgs 1/1 Running 0 42s
+curl-8f795f47d-hdcgs 1/1 Running 0 42s
 {{< /text >}}

 Label the `default` namespace with `istio-injection=enabled`:
@@ -68,18 +68,18 @@ default Active 5m9s enabled
 Injection occurs at pod creation time. Kill the running pod and verify that a new pod is created with the injected sidecar. The original pod has `1/1 READY` containers, and the pod with the injected sidecar has `2/2 READY` containers.

 {{< text bash >}}
-$ kubectl delete pod -l app=sleep
-$ kubectl get pod -l app=sleep
-pod "sleep-776b7bcdcd-7hpnk" deleted
+$ kubectl delete pod -l app=curl
+$ kubectl get pod -l app=curl
+pod "curl-776b7bcdcd-7hpnk" deleted
 NAME READY STATUS RESTARTS AGE
-sleep-776b7bcdcd-7hpnk 1/1 Terminating 0 1m
-sleep-776b7bcdcd-bhn9m 2/2 Running 0 7s
+curl-776b7bcdcd-7hpnk 1/1 Terminating 0 1m
+curl-776b7bcdcd-bhn9m 2/2 Running 0 7s
 {{< /text >}}

 View the detailed state of the injected pod. You should see the injected `istio-proxy` container and corresponding volumes.

 {{< text bash >}}
-$ kubectl describe pod -l app=sleep
+$ kubectl describe pod -l app=curl
 ...
 Events:
 Type Reason Age From Message
@@ -88,8 +88,8 @@ Events:
 Normal Created 11s kubelet Created container istio-init
 Normal Started 11s kubelet Started container istio-init
 ...
-Normal Created 10s kubelet Created container sleep
-Normal Started 10s kubelet Started container sleep
+Normal Created 10s kubelet Created container curl
+Normal Started 10s kubelet Started container curl
 ...
 Normal Created 9s kubelet Created container istio-proxy
 Normal Started 8s kubelet Started container istio-proxy
@@ -99,13 +99,13 @@ Disable injection for the `default` namespace and verify new pods are created wi

 {{< text bash >}}
 $ kubectl label namespace default istio-injection-
-$ kubectl delete pod -l app=sleep
+$ kubectl delete pod -l app=curl
 $ kubectl get pod
 namespace/default labeled
-pod "sleep-776b7bcdcd-bhn9m" deleted
+pod "curl-776b7bcdcd-bhn9m" deleted
 NAME READY STATUS RESTARTS AGE
-sleep-776b7bcdcd-bhn9m 2/2 Terminating 0 2m
-sleep-776b7bcdcd-gmvnr 1/1 Running 0 2s
+curl-776b7bcdcd-bhn9m 2/2 Terminating 0 2m
+curl-776b7bcdcd-gmvnr 1/1 Running 0 2s
 {{< /text >}}
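When toggling per-namespace injection labels like this, it helps to see the current state across all namespaces at once; the `-L` flag surfaces the label as a column:

{{< text bash >}}
$ kubectl get namespace -L istio-injection
{{< /text >}}

Namespaces with an empty `ISTIO-INJECTION` column are not opted in to automatic injection.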
 #### Controlling the injection policy
@@ -140,10 +140,10 @@ The injector is configured with the following logic:
 To manually inject a deployment, use [`istioctl kube-inject`](/docs/reference/commands/istioctl/#istioctl-kube-inject):

 {{< text bash >}}
-$ istioctl kube-inject -f @samples/sleep/sleep.yaml@ | kubectl apply -f -
-serviceaccount/sleep created
-service/sleep created
-deployment.apps/sleep created
+$ istioctl kube-inject -f @samples/curl/curl.yaml@ | kubectl apply -f -
+serviceaccount/curl created
+service/curl created
+deployment.apps/curl created
 {{< /text >}}

 By default, this will use the in-cluster configuration. Alternatively, injection can be done using local copies of the configuration.
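The three local configuration files referenced below have to be extracted from the cluster first. One way to obtain them (a sketch, assuming a default installation where istiod publishes its configuration in the `istio-sidecar-injector` and `istio` config maps):

{{< text bash >}}
$ kubectl -n istio-system get configmap istio-sidecar-injector -o=jsonpath='{.data.config}' > inject-config.yaml
$ kubectl -n istio-system get configmap istio-sidecar-injector -o=jsonpath='{.data.values}' > inject-values.yaml
$ kubectl -n istio-system get configmap istio -o=jsonpath='{.data.mesh}' > mesh-config.yaml
{{< /text >}}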
@@ -161,19 +161,19 @@ $ istioctl kube-inject \
 --injectConfigFile inject-config.yaml \
 --meshConfigFile mesh-config.yaml \
 --valuesFile inject-values.yaml \
---filename @samples/sleep/sleep.yaml@ \
+--filename @samples/curl/curl.yaml@ \
 | kubectl apply -f -
-serviceaccount/sleep created
-service/sleep created
-deployment.apps/sleep created
+serviceaccount/curl created
+service/curl created
+deployment.apps/curl created
 {{< /text >}}

-Verify that the sidecar has been injected into the sleep pod with `2/2` under the READY column.
+Verify that the sidecar has been injected into the curl pod with `2/2` under the READY column.

 {{< text bash >}}
-$ kubectl get pod -l app=sleep
+$ kubectl get pod -l app=curl
 NAME READY STATUS RESTARTS AGE
-sleep-64c6f57bc8-f5n4x 2/2 Running 0 24s
+curl-64c6f57bc8-f5n4x 2/2 Running 0 24s
 {{< /text >}}

 ## Customizing injection
@@ -206,7 +206,7 @@ spec:
 lifecycle:
   preStop:
     exec:
-      command: ["sleep", "10"]
+      command: ["curl", "10"]
 volumes:
 - name: certs
   secret:
@@ -499,28 +499,28 @@ See the [Istioctl-proxy Ecosystem project](https://github.com/istio-ecosystem/is
 $ kubectl label --context="${CTX_REMOTE_CLUSTER}" namespace sample istio-injection=enabled
 {{< /text >}}

-1. Deploy the `helloworld` (`v1`) and `sleep` samples:
+1. Deploy the `helloworld` (`v1`) and `curl` samples:

 {{< text bash >}}
 $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l service=helloworld -n sample --context="${CTX_REMOTE_CLUSTER}"
 $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l version=v1 -n sample --context="${CTX_REMOTE_CLUSTER}"
-$ kubectl apply -f @samples/sleep/sleep.yaml@ -n sample --context="${CTX_REMOTE_CLUSTER}"
+$ kubectl apply -f @samples/curl/curl.yaml@ -n sample --context="${CTX_REMOTE_CLUSTER}"
 {{< /text >}}

-1. Wait a few seconds for the `helloworld` and `sleep` pods to be running with sidecars injected:
+1. Wait a few seconds for the `helloworld` and `curl` pods to be running with sidecars injected:

 {{< text bash >}}
 $ kubectl get pod -n sample --context="${CTX_REMOTE_CLUSTER}"
 NAME READY STATUS RESTARTS AGE
+curl-64d7d56698-wqjnm 2/2 Running 0 9s
 helloworld-v1-776f57d5f6-s7zfc 2/2 Running 0 10s
-sleep-64d7d56698-wqjnm 2/2 Running 0 9s
 {{< /text >}}

-1. Send a request from the `sleep` pod to the `helloworld` service:
+1. Send a request from the `curl` pod to the `helloworld` service:

 {{< text bash >}}
-$ kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c sleep \
-"$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+$ kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c curl \
+"$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 Hello version: v1, instance: helloworld-v1-776f57d5f6-s7zfc
 {{< /text >}}
@@ -855,28 +855,28 @@ $ export SECOND_CLUSTER_NAME=<your second remote cluster name>
 $ kubectl label --context="${CTX_SECOND_CLUSTER}" namespace sample istio-injection=enabled
 {{< /text >}}

-1. Deploy the `helloworld` (`v2`) and `sleep` samples:
+1. Deploy the `helloworld` (`v2`) and `curl` samples:

 {{< text bash >}}
 $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l service=helloworld -n sample --context="${CTX_SECOND_CLUSTER}"
 $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l version=v2 -n sample --context="${CTX_SECOND_CLUSTER}"
-$ kubectl apply -f @samples/sleep/sleep.yaml@ -n sample --context="${CTX_SECOND_CLUSTER}"
+$ kubectl apply -f @samples/curl/curl.yaml@ -n sample --context="${CTX_SECOND_CLUSTER}"
 {{< /text >}}

-1. Wait a few seconds for the `helloworld` and `sleep` pods to be running with sidecars injected:
+1. Wait a few seconds for the `helloworld` and `curl` pods to be running with sidecars injected:

 {{< text bash >}}
 $ kubectl get pod -n sample --context="${CTX_SECOND_CLUSTER}"
 NAME READY STATUS RESTARTS AGE
+curl-557747455f-wtdbr 2/2 Running 0 9s
 helloworld-v2-54df5f84b-9hxgw 2/2 Running 0 10s
-sleep-557747455f-wtdbr 2/2 Running 0 9s
 {{< /text >}}

-1. Send a request from the `sleep` pod to the `helloworld` service:
+1. Send a request from the `curl` pod to the `helloworld` service:

 {{< text bash >}}
-$ kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c sleep \
-"$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+$ kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c curl \
+"$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 Hello version: v2, instance: helloworld-v2-54df5f84b-9hxgw
 {{< /text >}}
@@ -334,7 +334,7 @@ kubectl label --context="${CTX_REMOTE_CLUSTER}" namespace sample istio-injection
 snip_deploy_a_sample_application_2() {
 kubectl apply -f samples/helloworld/helloworld.yaml -l service=helloworld -n sample --context="${CTX_REMOTE_CLUSTER}"
 kubectl apply -f samples/helloworld/helloworld.yaml -l version=v1 -n sample --context="${CTX_REMOTE_CLUSTER}"
-kubectl apply -f samples/sleep/sleep.yaml -n sample --context="${CTX_REMOTE_CLUSTER}"
+kubectl apply -f samples/curl/curl.yaml -n sample --context="${CTX_REMOTE_CLUSTER}"
 }

 snip_deploy_a_sample_application_3() {
@@ -343,13 +343,13 @@ kubectl get pod -n sample --context="${CTX_REMOTE_CLUSTER}"

 ! IFS=$'\n' read -r -d '' snip_deploy_a_sample_application_3_out <<\ENDSNIP
 NAME READY STATUS RESTARTS AGE
+curl-64d7d56698-wqjnm 2/2 Running 0 9s
 helloworld-v1-776f57d5f6-s7zfc 2/2 Running 0 10s
-sleep-64d7d56698-wqjnm 2/2 Running 0 9s
 ENDSNIP

 snip_deploy_a_sample_application_4() {
-kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c sleep \
-"$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c curl \
+"$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 }

@@ -549,7 +549,7 @@ kubectl label --context="${CTX_SECOND_CLUSTER}" namespace sample istio-injection
 snip_validate_the_installation_2() {
 kubectl apply -f samples/helloworld/helloworld.yaml -l service=helloworld -n sample --context="${CTX_SECOND_CLUSTER}"
 kubectl apply -f samples/helloworld/helloworld.yaml -l version=v2 -n sample --context="${CTX_SECOND_CLUSTER}"
-kubectl apply -f samples/sleep/sleep.yaml -n sample --context="${CTX_SECOND_CLUSTER}"
+kubectl apply -f samples/curl/curl.yaml -n sample --context="${CTX_SECOND_CLUSTER}"
 }

 snip_validate_the_installation_3() {
@@ -558,13 +558,13 @@ kubectl get pod -n sample --context="${CTX_SECOND_CLUSTER}"

 ! IFS=$'\n' read -r -d '' snip_validate_the_installation_3_out <<\ENDSNIP
 NAME READY STATUS RESTARTS AGE
+curl-557747455f-wtdbr 2/2 Running 0 9s
 helloworld-v2-54df5f84b-9hxgw 2/2 Running 0 10s
-sleep-557747455f-wtdbr 2/2 Running 0 9s
 ENDSNIP

 snip_validate_the_installation_4() {
-kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c sleep \
-"$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c curl \
+"$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 }
@@ -110,14 +110,14 @@ function verify_load_balancing
 snip_deploy_helloworld_v1_1
 snip_deploy_helloworld_v2_1

-# Deploy Sleep
-snip_deploy_sleep_1
+# Deploy curl
+snip_deploy_curl_1

 # Wait for all the deployments.
 _wait_for_deployment sample helloworld-v1 "${CTX_CLUSTER1}"
-_wait_for_deployment sample sleep "${CTX_CLUSTER1}"
+_wait_for_deployment sample curl "${CTX_CLUSTER1}"
 _wait_for_deployment sample helloworld-v2 "${CTX_CLUSTER2}"
-_wait_for_deployment sample sleep "${CTX_CLUSTER2}"
+_wait_for_deployment sample curl "${CTX_CLUSTER2}"

 # Verify everything is deployed as expected.
 VERIFY_TIMEOUT=0 # Don't retry.
@@ -125,10 +125,10 @@ function verify_load_balancing
 _verify_like snip_deploy_helloworld_v1_2 "$snip_deploy_helloworld_v1_2_out"
 echo "Verifying helloworld v2 deployment"
 _verify_like snip_deploy_helloworld_v2_2 "$snip_deploy_helloworld_v2_2_out"
-echo "Verifying sleep deployment in ${CTX_CLUSTER1}"
-_verify_like snip_deploy_sleep_2 "$snip_deploy_sleep_2_out"
-echo "Verifying sleep deployment in ${CTX_CLUSTER2}"
-_verify_like snip_deploy_sleep_3 "$snip_deploy_sleep_3_out"
+echo "Verifying curl deployment in ${CTX_CLUSTER1}"
+_verify_like snip_deploy_curl_2 "$snip_deploy_curl_2_out"
+echo "Verifying curl deployment in ${CTX_CLUSTER2}"
+_verify_like snip_deploy_curl_3 "$snip_deploy_curl_3_out"
 unset VERIFY_TIMEOUT # Restore default

 local EXPECTED_RESPONSE_FROM_CLUSTER1="Hello version: v1, instance:"
@@ -17,7 +17,7 @@ In this guide, we will deploy the `HelloWorld` application `V1` to `cluster1`
 and `V2` to `cluster2`. Upon receiving a request, `HelloWorld` will include
 its version in its response.

-We will also deploy the `Sleep` container to both clusters. We will use these
+We will also deploy the `curl` container to both clusters. We will use these
 pods as the source of requests to the `HelloWorld` service,
 simulating in-mesh traffic. Finally, after generating traffic, we will observe
 which cluster received the requests.
@@ -97,50 +97,50 @@ helloworld-v2-758dd55874-6x4t8 2/2 Running 0 40s

 Wait until the status of `helloworld-v2` is `Running`.

-## Deploy `Sleep`
+## Deploy `curl`

-Deploy the `Sleep` application to both clusters:
+Deploy the `curl` application to both clusters:

 {{< text bash >}}
 $ kubectl apply --context="${CTX_CLUSTER1}" \
--f @samples/sleep/sleep.yaml@ -n sample
+-f @samples/curl/curl.yaml@ -n sample
 $ kubectl apply --context="${CTX_CLUSTER2}" \
--f @samples/sleep/sleep.yaml@ -n sample
+-f @samples/curl/curl.yaml@ -n sample
 {{< /text >}}

-Confirm the status of the `Sleep` pod on `cluster1`:
+Confirm the status of the `curl` pod on `cluster1`:

 {{< text bash >}}
-$ kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=sleep
+$ kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=curl
 NAME READY STATUS RESTARTS AGE
-sleep-754684654f-n6bzf 2/2 Running 0 5s
+curl-754684654f-n6bzf 2/2 Running 0 5s
 {{< /text >}}

-Wait until the status of the `Sleep` pod is `Running`.
+Wait until the status of the `curl` pod is `Running`.

-Confirm the status of the `Sleep` pod on `cluster2`:
+Confirm the status of the `curl` pod on `cluster2`:

 {{< text bash >}}
-$ kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=sleep
+$ kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=curl
 NAME READY STATUS RESTARTS AGE
-sleep-754684654f-dzl9j 2/2 Running 0 5s
+curl-754684654f-dzl9j 2/2 Running 0 5s
 {{< /text >}}

-Wait until the status of the `Sleep` pod is `Running`.
+Wait until the status of the `curl` pod is `Running`.

 ## Verifying Cross-Cluster Traffic

 To verify that cross-cluster load balancing works as expected, call the
-`HelloWorld` service several times using the `Sleep` pod. To ensure load
+`HelloWorld` service several times using the `curl` pod. To ensure load
 balancing is working properly, call the `HelloWorld` service from all
 clusters in your deployment.

-Send one request from the `Sleep` pod on `cluster1` to the `HelloWorld` service:
+Send one request from the `curl` pod on `cluster1` to the `HelloWorld` service:

 {{< text bash >}}
-$ kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_CLUSTER1}" -n sample -c curl \
 "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
-app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 {{< /text >}}
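Rather than invoking the command repeatedly by hand, a short loop makes the v1/v2 split visible at a glance. A sketch, assuming the `curl` deployment created above:

{{< text bash >}}
$ for i in $(seq 10); do
    kubectl exec --context="${CTX_CLUSTER1}" -n sample -c curl deploy/curl \
      -- curl -sS helloworld.sample:5000/hello
  done | sort | uniq -c
{{< /text >}}

Roughly even counts for the `v1` and `v2` responses indicate that cross-cluster load balancing is working.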
@@ -153,12 +153,12 @@ Hello version: v1, instance: helloworld-v1-86f77cd7bd-cpxhv
 ...
 {{< /text >}}

-Now repeat this process from the `Sleep` pod on `cluster2`:
+Now repeat this process from the `curl` pod on `cluster2`:

 {{< text bash >}}
-$ kubectl exec --context="${CTX_CLUSTER2}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_CLUSTER2}" -n sample -c curl \
 "$(kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l \
-app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 {{< /text >}}
@@ -71,35 +71,35 @@ NAME READY STATUS RESTARTS AGE
 helloworld-v2-758dd55874-6x4t8 2/2 Running 0 40s
 ENDSNIP

-snip_deploy_sleep_1() {
+snip_deploy_curl_1() {
 kubectl apply --context="${CTX_CLUSTER1}" \
--f samples/sleep/sleep.yaml -n sample
+-f samples/curl/curl.yaml -n sample
 kubectl apply --context="${CTX_CLUSTER2}" \
--f samples/sleep/sleep.yaml -n sample
+-f samples/curl/curl.yaml -n sample
 }

-snip_deploy_sleep_2() {
-kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=sleep
+snip_deploy_curl_2() {
+kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=curl
 }

-! IFS=$'\n' read -r -d '' snip_deploy_sleep_2_out <<\ENDSNIP
+! IFS=$'\n' read -r -d '' snip_deploy_curl_2_out <<\ENDSNIP
 NAME READY STATUS RESTARTS AGE
-sleep-754684654f-n6bzf 2/2 Running 0 5s
+curl-754684654f-n6bzf 2/2 Running 0 5s
 ENDSNIP

-snip_deploy_sleep_3() {
-kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=sleep
+snip_deploy_curl_3() {
+kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=curl
 }

-! IFS=$'\n' read -r -d '' snip_deploy_sleep_3_out <<\ENDSNIP
+! IFS=$'\n' read -r -d '' snip_deploy_curl_3_out <<\ENDSNIP
 NAME READY STATUS RESTARTS AGE
-sleep-754684654f-dzl9j 2/2 Running 0 5s
+curl-754684654f-dzl9j 2/2 Running 0 5s
 ENDSNIP

 snip_verifying_crosscluster_traffic_1() {
-kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep \
+kubectl exec --context="${CTX_CLUSTER1}" -n sample -c curl \
 "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
-app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 }

@@ -110,9 +110,9 @@ Hello version: v1, instance: helloworld-v1-86f77cd7bd-cpxhv
 ENDSNIP

 snip_verifying_crosscluster_traffic_3() {
-kubectl exec --context="${CTX_CLUSTER2}" -n sample -c sleep \
+kubectl exec --context="${CTX_CLUSTER2}" -n sample -c curl \
 "$(kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l \
-app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+app=curl -o jsonpath='{.items[0].metadata.name}')" \
 -- curl -sS helloworld.sample:5000/hello
 }
@@ -173,38 +173,38 @@ Istio revisions and `discoverySelectors` are then used to scope the resources an
 $ kubectl label ns app-ns-3 usergroup=usergroup-2 istio.io/rev=usergroup-2
 {{< /text >}}

-1. Deploy one `sleep` and `httpbin` application per namespace:
+1. Deploy one `curl` and `httpbin` application per namespace:

 {{< text bash >}}
-$ kubectl -n app-ns-1 apply -f samples/sleep/sleep.yaml
+$ kubectl -n app-ns-1 apply -f samples/curl/curl.yaml
 $ kubectl -n app-ns-1 apply -f samples/httpbin/httpbin.yaml
-$ kubectl -n app-ns-2 apply -f samples/sleep/sleep.yaml
+$ kubectl -n app-ns-2 apply -f samples/curl/curl.yaml
 $ kubectl -n app-ns-2 apply -f samples/httpbin/httpbin.yaml
-$ kubectl -n app-ns-3 apply -f samples/sleep/sleep.yaml
+$ kubectl -n app-ns-3 apply -f samples/curl/curl.yaml
 $ kubectl -n app-ns-3 apply -f samples/httpbin/httpbin.yaml
 {{< /text >}}

-1. Wait a few seconds for the `httpbin` and `sleep` pods to be running with sidecars injected:
+1. Wait a few seconds for the `httpbin` and `curl` pods to be running with sidecars injected:

 {{< text bash >}}
 $ kubectl get pods -n app-ns-1
 NAME READY STATUS RESTARTS AGE
 httpbin-9dbd644c7-zc2v4 2/2 Running 0 115m
-sleep-78ff5975c6-fml7c 2/2 Running 0 115m
+curl-78ff5975c6-fml7c 2/2 Running 0 115m
 {{< /text >}}

 {{< text bash >}}
 $ kubectl get pods -n app-ns-2
 NAME READY STATUS RESTARTS AGE
 httpbin-9dbd644c7-sd9ln 2/2 Running 0 115m
-sleep-78ff5975c6-sz728 2/2 Running 0 115m
+curl-78ff5975c6-sz728 2/2 Running 0 115m
 {{< /text >}}

 {{< text bash >}}
 $ kubectl get pods -n app-ns-3
 NAME READY STATUS RESTARTS AGE
 httpbin-9dbd644c7-8ll27 2/2 Running 0 115m
-sleep-78ff5975c6-sg4tq 2/2 Running 0 115m
+curl-78ff5975c6-sg4tq 2/2 Running 0 115m
 {{< /text >}}

 ### Verify the application to control plane mapping
@@ -215,7 +215,7 @@ Now that the applications are deployed, you can use the `istioctl ps` command to
 $ istioctl ps -i usergroup-1
 NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
 httpbin-9dbd644c7-hccpf.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
-sleep-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
+curl-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
 {{< /text >}}

 {{< text bash >}}
@@ -223,16 +223,16 @@ $ istioctl ps -i usergroup-2
 NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
 httpbin-9dbd644c7-vvcqj.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
 httpbin-9dbd644c7-xzgfm.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
-sleep-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
-sleep-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
+curl-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
+curl-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
 {{< /text >}}

 ### Verify the application connectivity is ONLY within the respective usergroup

-1. Send a request from the `sleep` pod in `app-ns-1` in `usergroup-1` to the `httpbin` service in `app-ns-2` in `usergroup-2`. The communication should fail:
+1. Send a request from the `curl` pod in `app-ns-1` in `usergroup-1` to the `httpbin` service in `app-ns-2` in `usergroup-2`. The communication should fail:

 {{< text bash >}}
-$ kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000
+$ kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000
 HTTP/1.1 503 Service Unavailable
 content-length: 95
 content-type: text/plain
@@ -240,10 +240,10 @@ sleep-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED
 server: envoy
 {{< /text >}}

-1. Send a request from the `sleep` pod in `app-ns-2` in `usergroup-2` to the `httpbin` service in `app-ns-3` in `usergroup-2`. The communication should work:
+1. Send a request from the `curl` pod in `app-ns-2` in `usergroup-2` to the `httpbin` service in `app-ns-3` in `usergroup-2`. The communication should work:

 {{< text bash >}}
-$ kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000
+$ kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000
 HTTP/1.1 200 OK
 server: envoy
 date: Thu, 22 Dec 2022 15:01:36 GMT
@@ -151,11 +151,11 @@ kubectl label ns app-ns-3 usergroup=usergroup-2 istio.io/rev=usergroup-2
}

snip_deploy_application_workloads_per_usergroup_3() {
-kubectl -n app-ns-1 apply -f samples/sleep/sleep.yaml
+kubectl -n app-ns-1 apply -f samples/curl/curl.yaml
kubectl -n app-ns-1 apply -f samples/httpbin/httpbin.yaml
-kubectl -n app-ns-2 apply -f samples/sleep/sleep.yaml
+kubectl -n app-ns-2 apply -f samples/curl/curl.yaml
kubectl -n app-ns-2 apply -f samples/httpbin/httpbin.yaml
-kubectl -n app-ns-3 apply -f samples/sleep/sleep.yaml
+kubectl -n app-ns-3 apply -f samples/curl/curl.yaml
kubectl -n app-ns-3 apply -f samples/httpbin/httpbin.yaml
}
@@ -166,7 +166,7 @@ kubectl get pods -n app-ns-1
! IFS=$'\n' read -r -d '' snip_deploy_application_workloads_per_usergroup_4_out <<\ENDSNIP
NAME READY STATUS RESTARTS AGE
httpbin-9dbd644c7-zc2v4 2/2 Running 0 115m
-sleep-78ff5975c6-fml7c 2/2 Running 0 115m
+curl-78ff5975c6-fml7c 2/2 Running 0 115m
ENDSNIP

snip_deploy_application_workloads_per_usergroup_5() {

@@ -176,7 +176,7 @@ kubectl get pods -n app-ns-2
! IFS=$'\n' read -r -d '' snip_deploy_application_workloads_per_usergroup_5_out <<\ENDSNIP
NAME READY STATUS RESTARTS AGE
httpbin-9dbd644c7-sd9ln 2/2 Running 0 115m
-sleep-78ff5975c6-sz728 2/2 Running 0 115m
+curl-78ff5975c6-sz728 2/2 Running 0 115m
ENDSNIP

snip_deploy_application_workloads_per_usergroup_6() {

@@ -186,7 +186,7 @@ kubectl get pods -n app-ns-3
! IFS=$'\n' read -r -d '' snip_deploy_application_workloads_per_usergroup_6_out <<\ENDSNIP
NAME READY STATUS RESTARTS AGE
httpbin-9dbd644c7-8ll27 2/2 Running 0 115m
-sleep-78ff5975c6-sg4tq 2/2 Running 0 115m
+curl-78ff5975c6-sg4tq 2/2 Running 0 115m
ENDSNIP

snip_verify_the_application_to_control_plane_mapping_1() {
@@ -196,7 +196,7 @@ istioctl ps -i usergroup-1
! IFS=$'\n' read -r -d '' snip_verify_the_application_to_control_plane_mapping_1_out <<\ENDSNIP
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
httpbin-9dbd644c7-hccpf.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
-sleep-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
+curl-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
ENDSNIP

snip_verify_the_application_to_control_plane_mapping_2() {

@@ -207,12 +207,12 @@ istioctl ps -i usergroup-2
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
httpbin-9dbd644c7-vvcqj.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
httpbin-9dbd644c7-xzgfm.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
-sleep-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
-sleep-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
+curl-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
+curl-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117
ENDSNIP

snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_1() {
-kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000
+kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000
}

! IFS=$'\n' read -r -d '' snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_1_out <<\ENDSNIP

@@ -224,7 +224,7 @@ server: envoy
ENDSNIP

snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_2() {
-kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000
+kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000
}

! IFS=$'\n' read -r -d '' snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_2_out <<\ENDSNIP
@@ -41,11 +41,11 @@ snip_deploy_application_workloads_per_usergroup_2
snip_deploy_application_workloads_per_usergroup_3

_wait_for_deployment app-ns-1 httpbin
-_wait_for_deployment app-ns-1 sleep
+_wait_for_deployment app-ns-1 curl
_wait_for_deployment app-ns-2 httpbin
-_wait_for_deployment app-ns-2 sleep
+_wait_for_deployment app-ns-2 curl
_wait_for_deployment app-ns-3 httpbin
-_wait_for_deployment app-ns-3 sleep
+_wait_for_deployment app-ns-3 curl

# verification of connectivity
_verify_first_line snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_1 "HTTP/1.1 503 Service Unavailable"

@@ -32,7 +32,7 @@ istioctl install --set profile=default --revision="$previousVersionRevision1" -y
snip_data_plane_2
snip_data_plane_3
snip_data_plane_4
-_wait_for_deployment test-ns sleep
+_wait_for_deployment test-ns curl

# precheck before upgrade
_verify_lines snip_before_you_upgrade_1 "$snip_before_you_upgrade_1_out"
@@ -88,7 +88,7 @@ However, simply installing the new revision has no impact on the existing sideca
you must configure them to point to the new `istiod-canary` control plane. This is controlled during sidecar injection
based on the namespace label `istio.io/rev`.

-Create a namespace `test-ns` with `istio-injection` enabled. In the `test-ns` namespace, deploy a sample sleep pod:
+Create a namespace `test-ns` with `istio-injection` enabled. In the `test-ns` namespace, deploy a sample curl pod:

1. Create a namespace `test-ns`.

@@ -102,10 +102,10 @@ Create a namespace `test-ns` with `istio-injection` enabled. In the `test-ns` na
$ kubectl label namespace test-ns istio-injection=enabled
{{< /text >}}

-1. Bring up a sample sleep pod in `test-ns` namespace.
+1. Bring up a sample curl pod in `test-ns` namespace.

{{< text bash >}}
-$ kubectl apply -n test-ns -f samples/sleep/sleep.yaml
+$ kubectl apply -n test-ns -f samples/curl/curl.yaml
{{< /text >}}

To upgrade the namespace `test-ns`, remove the `istio-injection` label, and add the `istio.io/rev` label to point to the `canary` revision. The `istio-injection` label must be removed because it takes precedence over the `istio.io/rev` label for backward compatibility.
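A minimal sketch of that relabeling step (these two commands are an illustration, not part of this diff; the revision name `canary` is taken from the surrounding text):

{{< text bash >}}
$ kubectl label namespace test-ns istio-injection-
$ kubectl label namespace test-ns istio.io/rev=canary
{{< /text >}}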
@@ -166,12 +166,12 @@ If you're using Helm, refer to the [Helm upgrade documentation](/docs/setup/upgr
$ kubectl label ns app-ns-3 istio.io/rev=prod-canary
{{< /text >}}

-1. Bring up a sample sleep pod in each namespace:
+1. Bring up a sample curl pod in each namespace:

{{< text bash >}}
-$ kubectl apply -n app-ns-1 -f samples/sleep/sleep.yaml
-$ kubectl apply -n app-ns-2 -f samples/sleep/sleep.yaml
-$ kubectl apply -n app-ns-3 -f samples/sleep/sleep.yaml
+$ kubectl apply -n app-ns-1 -f samples/curl/curl.yaml
+$ kubectl apply -n app-ns-2 -f samples/curl/curl.yaml
+$ kubectl apply -n app-ns-3 -f samples/curl/curl.yaml
{{< /text >}}

1. Verify application to control plane mapping using `istioctl proxy-status` command:
@@ -179,9 +179,9 @@ If you're using Helm, refer to the [Helm upgrade documentation](/docs/setup/upgr
{{< text bash >}}
$ istioctl ps
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-sleep-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-s8zfg {{< istio_full_version >}}
-sleep-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version >}}.1
-sleep-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version >}}.1
+curl-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-s8zfg {{< istio_full_version >}}
+curl-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version >}}.1
+curl-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version >}}.1
{{< /text >}}

{{< boilerplate revision-tags-middle >}}

@@ -202,9 +202,9 @@ Verify the application to control plane mapping using `istioctl proxy-status` co
{{< text bash >}}
$ istioctl ps
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-sleep-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
-sleep-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
-sleep-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
+curl-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
+curl-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
+curl-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
{{< /text >}}

### Default tag
@@ -36,9 +36,9 @@ snip_usage_2
# deploy app namespaces and label them
snip_usage_3
snip_usage_4
-_wait_for_deployment app-ns-1 sleep
-_wait_for_deployment app-ns-2 sleep
-_wait_for_deployment app-ns-3 sleep
+_wait_for_deployment app-ns-1 curl
+_wait_for_deployment app-ns-2 curl
+_wait_for_deployment app-ns-3 curl

# verify both the revisions are managing workloads
_verify_contains snip_usage_5 "istiod-$previousVersionRevision1"

@@ -82,7 +82,7 @@ kubectl label namespace test-ns istio-injection=enabled
}

snip_data_plane_4() {
-kubectl apply -n test-ns -f samples/sleep/sleep.yaml
+kubectl apply -n test-ns -f samples/curl/curl.yaml
}

snip_data_plane_5() {

@@ -117,9 +117,9 @@ kubectl label ns app-ns-3 istio.io/rev=prod-canary
}

snip_usage_4() {
-kubectl apply -n app-ns-1 -f samples/sleep/sleep.yaml
-kubectl apply -n app-ns-2 -f samples/sleep/sleep.yaml
-kubectl apply -n app-ns-3 -f samples/sleep/sleep.yaml
+kubectl apply -n app-ns-1 -f samples/curl/curl.yaml
+kubectl apply -n app-ns-2 -f samples/curl/curl.yaml
+kubectl apply -n app-ns-3 -f samples/curl/curl.yaml
}

snip_usage_5() {

@@ -128,9 +128,9 @@ istioctl ps

! IFS=$'\n' read -r -d '' snip_usage_5_out <<\ENDSNIP
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-sleep-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-s8zfg 1.24.0
-sleep-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1.23.1
-sleep-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1.23.1
+curl-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-s8zfg 1.24.0
+curl-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1.23.1
+curl-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1.23.1
ENDSNIP

snip_usage_6() {

@@ -148,9 +148,9 @@ istioctl ps

! IFS=$'\n' read -r -d '' snip_usage_8_out <<\ENDSNIP
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-sleep-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
-sleep-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
-sleep-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
+curl-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
+curl-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
+curl-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
ENDSNIP

snip_default_tag_1() {
@@ -115,14 +115,14 @@ EOF

You can add the `proxy.istio.io/config` annotation to your Pod metadata
specification to override any mesh-wide tracing settings.
-For instance, to modify the `sleep` deployment shipped with Istio you would add
-the following to `samples/sleep/sleep.yaml`:
+For instance, to modify the `curl` deployment shipped with Istio you would add
+the following to `samples/curl/curl.yaml`:

{{< text yaml >}}
apiVersion: apps/v1
kind: Deployment
metadata:
-  name: sleep
+  name: curl
spec:
  ...
  template:
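The hunk ends before the annotation itself. Purely as a hedged sketch of where such an override sits, one could also patch a running deployment; the `tracing`/`zipkin` values below are illustrative assumptions, not taken from this commit:

{{< text bash >}}
$ kubectl -n default patch deployment curl --type merge --patch-file /dev/stdin <<EOF
spec:
  template:
    metadata:
      annotations:
        # Assumed example values; point this at your own tracer.
        proxy.istio.io/config: |
          tracing:
            zipkin:
              address: zipkin.istio-system:9411
EOF
{{< /text >}}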
@@ -87,7 +87,7 @@ For instance, to override the mesh-wide sampling above, you would add the follow
apiVersion: apps/v1
kind: Deployment
metadata:
-  name: sleep
+  name: curl
spec:
  ...
  template:

@@ -83,10 +83,10 @@ Istio will use the following default access log format if `accessLogFormat` is n
\"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" %UPSTREAM_CLUSTER% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME% %ROUTE_NAME%\n
{{< /text >}}

-The following table shows an example using the default access log format for a request sent from `sleep` to `httpbin`:
+The following table shows an example using the default access log format for a request sent from `curl` to `httpbin`:

-| Log operator | access log in sleep | access log in httpbin |
-|--------------|---------------------|-----------------------|
+| Log operator | access log in curl | access log in httpbin |
+|--------------|--------------------|-----------------------|
| `[%START_TIME%]` | `[2020-11-25T21:26:18.409Z]` | `[2020-11-25T21:26:18.409Z]`
| `\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\"` | `"GET /status/418 HTTP/1.1"` | `"GET /status/418 HTTP/1.1"`
| `%RESPONSE_CODE%` | `418` | `418`
@@ -112,10 +112,10 @@ The following table shows an example using the default access log format for a r

## Test the access log

-1. Send a request from `sleep` to `httpbin`:
+1. Send a request from `curl` to `httpbin`:

{{< text bash >}}
-$ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418
+$ kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v httpbin:8000/status/418
...
< HTTP/1.1 418 Unknown
...

@@ -125,10 +125,10 @@ The following table shows an example using the default access log format for a r
...
{{< /text >}}

-1. Check `sleep`'s log:
+1. Check `curl`'s log:

{{< text bash >}}
-$ kubectl logs -l app=sleep -c istio-proxy
+$ kubectl logs -l app=curl -c istio-proxy
[2020-11-25T21:26:18.409Z] "GET /status/418 HTTP/1.1" 418 - via_upstream - "-" 0 135 4 4 "-" "curl/7.73.0-DEV" "84961386-6d84-929d-98bd-c5aee93b5c88" "httpbin:8000" "10.44.1.27:80" outbound|8000||httpbin.foo.svc.cluster.local 10.44.1.23:37652 10.0.45.184:8000 10.44.1.23:46520 - default
{{< /text >}}

@@ -139,14 +139,14 @@ The following table shows an example using the default access log format for a r
[2020-11-25T21:26:18.409Z] "GET /status/418 HTTP/1.1" 418 - via_upstream - "-" 0 135 3 1 "-" "curl/7.73.0-DEV" "84961386-6d84-929d-98bd-c5aee93b5c88" "httpbin:8000" "127.0.0.1:80" inbound|8000|| 127.0.0.1:41854 10.44.1.27:80 10.44.1.23:37652 outbound_.8000_._.httpbin.foo.svc.cluster.local default
{{< /text >}}

-Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `sleep` and `httpbin`, respectively. You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules).
+Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `curl` and `httpbin`, respectively. You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules).

## Cleanup

-Shutdown the [sleep]({{< github_tree >}}/samples/sleep) and [httpbin]({{< github_tree >}}/samples/httpbin) services:
+Shutdown the [curl]({{< github_tree >}}/samples/curl) and [httpbin]({{< github_tree >}}/samples/httpbin) services:

{{< text bash >}}
-$ kubectl delete -f @samples/sleep/sleep.yaml@
+$ kubectl delete -f @samples/curl/curl.yaml@
$ kubectl delete -f @samples/httpbin/httpbin.yaml@
{{< /text >}}
@@ -47,7 +47,7 @@ ENDSNIP
ENDSNIP

snip_test_the_access_log_1() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v httpbin:8000/status/418
}

! IFS=$'\n' read -r -d '' snip_test_the_access_log_1_out <<\ENDSNIP

@@ -61,7 +61,7 @@ I'm a teapot!
ENDSNIP

snip_test_the_access_log_2() {
-kubectl logs -l app=sleep -c istio-proxy
+kubectl logs -l app=curl -c istio-proxy
}

! IFS=$'\n' read -r -d '' snip_test_the_access_log_2_out <<\ENDSNIP

@@ -77,7 +77,7 @@ kubectl logs -l app=httpbin -c istio-proxy
ENDSNIP

snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
kubectl delete -f samples/httpbin/httpbin.yaml
}

@@ -34,9 +34,9 @@ source "tests/util/samples.sh"

kubectl label namespace default istio-injection=enabled --overwrite

-# Start the sleep sample
-startup_sleep_sample
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')
+# Start the curl sample
+startup_curl_sample
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')

# Start the httpbin sample
startup_httpbin_sample
@@ -66,11 +66,11 @@ $ cat <<EOF | kubectl apply -n default -f -
apiVersion: telemetry.istio.io/v1
kind: Telemetry
metadata:
-  name: sleep-logging
+  name: curl-logging
spec:
  selector:
    matchLabels:
-      app: sleep
+      app: curl
  accessLogging:
  - providers:
    - name: otel

@@ -118,10 +118,10 @@ Istio will use the following default access log format if `accessLogFormat` is n
\"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" %UPSTREAM_CLUSTER% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME% %ROUTE_NAME%\n
{{< /text >}}

-The following table shows an example using the default access log format for a request sent from `sleep` to `httpbin`:
+The following table shows an example using the default access log format for a request sent from `curl` to `httpbin`:

-| Log operator | access log in sleep | access log in httpbin |
-|--------------|---------------------|-----------------------|
+| Log operator | access log in curl | access log in httpbin |
+|--------------|--------------------|-----------------------|
| `[%START_TIME%]` | `[2020-11-25T21:26:18.409Z]` | `[2020-11-25T21:26:18.409Z]`
| `\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\"` | `"GET /status/418 HTTP/1.1"` | `"GET /status/418 HTTP/1.1"`
| `%RESPONSE_CODE%` | `418` | `418`
@@ -147,10 +147,10 @@ The following table shows an example using the default access log format for a r

## Test the access log

-1. Send a request from `sleep` to `httpbin`:
+1. Send a request from `curl` to `httpbin`:

{{< text bash >}}
-$ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418
+$ kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v httpbin:8000/status/418
...
< HTTP/1.1 418 Unknown
...

@@ -167,15 +167,15 @@ The following table shows an example using the default access log format for a r
[2020-11-25T21:26:18.409Z] "GET /status/418 HTTP/1.1" 418 - via_upstream - "-" 0 135 3 1 "-" "curl/7.73.0-DEV" "84961386-6d84-929d-98bd-c5aee93b5c88" "httpbin:8000" "127.0.0.1:80" inbound|8000|| 127.0.0.1:41854 10.44.1.27:80 10.44.1.23:37652 outbound_.8000_._.httpbin.foo.svc.cluster.local default
{{< /text >}}

-Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `sleep` and `httpbin`, respectively. You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules).
+Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `curl` and `httpbin`, respectively. You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules).

## Cleanup

-Shutdown the [sleep]({{< github_tree >}}/samples/sleep) and [httpbin]({{< github_tree >}}/samples/httpbin) services:
+Shutdown the [curl]({{< github_tree >}}/samples/curl) and [httpbin]({{< github_tree >}}/samples/httpbin) services:

{{< text bash >}}
-$ kubectl delete telemetry sleep-logging
-$ kubectl delete -f @samples/sleep/sleep.yaml@
+$ kubectl delete telemetry curl-logging
+$ kubectl delete -f @samples/curl/curl.yaml@
$ kubectl delete -f @samples/httpbin/httpbin.yaml@
$ kubectl delete -f @samples/open-telemetry/otel.yaml@ -n istio-system
$ kubectl delete namespace observability
@@ -62,11 +62,11 @@ cat <<EOF | kubectl apply -n default -f -
apiVersion: telemetry.istio.io/v1
kind: Telemetry
metadata:
-  name: sleep-logging
+  name: curl-logging
spec:
  selector:
    matchLabels:
-      app: sleep
+      app: curl
  accessLogging:
  - providers:
    - name: otel

@@ -95,7 +95,7 @@ ENDSNIP
ENDSNIP

snip_test_the_access_log_1() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v httpbin:8000/status/418
}

! IFS=$'\n' read -r -d '' snip_test_the_access_log_1_out <<\ENDSNIP

@@ -117,8 +117,8 @@ kubectl logs -l app=opentelemetry-collector -n observability
ENDSNIP

snip_cleanup_1() {
-kubectl delete telemetry sleep-logging
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete telemetry curl-logging
+kubectl delete -f samples/curl/curl.yaml
kubectl delete -f samples/httpbin/httpbin.yaml
kubectl delete -f samples/open-telemetry/otel.yaml -n istio-system
kubectl delete namespace observability

@@ -33,9 +33,9 @@ snip_enable_envoys_access_logging_3

kubectl label namespace default istio-injection=enabled --overwrite

-# Start the sleep sample
-startup_sleep_sample
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')
+# Start the curl sample
+startup_curl_sample
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')

# Start the httpbin sample
startup_httpbin_sample
@@ -45,19 +45,19 @@ $ kubectl apply -f @samples/open-telemetry/loki/otel.yaml@ -n istio-system

1. Disable access log for specific workload

-You can disable access log for `sleep` service with the following configuration:
+You can disable access log for `curl` service with the following configuration:

{{< text bash >}}
$ cat <<EOF | kubectl apply -n default -f -
apiVersion: telemetry.istio.io/v1
kind: Telemetry
metadata:
-  name: disable-sleep-logging
+  name: disable-curl-logging
  namespace: default
spec:
  selector:
    matchLabels:
-      app: sleep
+      app: curl
  accessLogging:
  - providers:
    - name: otel
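The tail of this resource falls outside the hunk. Based on the Telemetry API's `disabled` flag and the test assertions later in this commit (the `curl` pod's logs stop reaching Loki), the complete resource plausibly looks like the following sketch:

{{< text bash >}}
$ cat <<EOF | kubectl apply -n default -f -
apiVersion: telemetry.istio.io/v1
kind: Telemetry
metadata:
  name: disable-curl-logging
  namespace: default
spec:
  selector:
    matchLabels:
      app: curl
  accessLogging:
  - providers:
    - name: otel
    disabled: true
EOF
{{< /text >}}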
@@ -97,11 +97,11 @@ $ kubectl apply -f @samples/open-telemetry/loki/otel.yaml@ -n istio-system
apiVersion: telemetry.istio.io/v1alpha1
kind: Telemetry
metadata:
-  name: filter-sleep-logging
+  name: filter-curl-logging
spec:
  selector:
    matchLabels:
-      app: sleep
+      app: curl
  accessLogging:
  - providers:
    - name: otel
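Here too the hunk cuts off before the interesting part. Given the test assertions later in this commit (only 5xx responses are logged), the filter is plausibly a CEL expression along these lines; this is a sketch, not the literal file content:

{{< text bash >}}
$ cat <<EOF | kubectl apply -n default -f -
apiVersion: telemetry.istio.io/v1alpha1
kind: Telemetry
metadata:
  name: filter-curl-logging
spec:
  selector:
    matchLabels:
      app: curl
  accessLogging:
  - providers:
    - name: otel
    filter:
      expression: response.code >= 500
EOF
{{< /text >}}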
@@ -46,12 +46,12 @@ cat <<EOF | kubectl apply -n default -f -
apiVersion: telemetry.istio.io/v1
kind: Telemetry
metadata:
-  name: disable-sleep-logging
+  name: disable-curl-logging
  namespace: default
spec:
  selector:
    matchLabels:
-      app: sleep
+      app: curl
  accessLogging:
  - providers:
    - name: otel

@@ -83,11 +83,11 @@ cat <<EOF | kubectl apply -n default -f -
apiVersion: telemetry.istio.io/v1alpha1
kind: Telemetry
metadata:
-  name: filter-sleep-logging
+  name: filter-curl-logging
spec:
  selector:
    matchLabels:
-      app: sleep
+      app: curl
  accessLogging:
  - providers:
    - name: otel
@@ -61,13 +61,13 @@ kubectl wait pods -n istio-system -l app.kubernetes.io/name=loki --for condition

kubectl label namespace default istio-injection=enabled --overwrite

-startup_sleep_sample
+startup_curl_sample
startup_httpbin_sample

function send_httpbin_requests() {
local request_path="$1"
for _ in {1..10}; do
-kubectl exec deploy/sleep -- curl -sS "http://httpbin:8000/$request_path" > /dev/null
+kubectl exec deploy/curl -- curl -sS "http://httpbin:8000/$request_path" > /dev/null
done
}

@@ -78,8 +78,8 @@ function count_by_pod() {
curl -G -s "http://$loki_address:3100/loki/api/v1/query_range" --data-urlencode "query={namespace=\"$namespace\", pod=\"$name\"}" | jq '.data.result[0].values | length'
}

-count_sleep_pod() {
-local pod=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+count_curl_pod() {
+local pod=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
count_by_pod default $pod
}

@@ -89,16 +89,16 @@ count_httpbin_pod() {
}

rollout_restart_pods() {
-kubectl rollout restart deploy/sleep
+kubectl rollout restart deploy/curl
kubectl rollout restart deploy/httpbin
-_wait_for_deployment default sleep
+_wait_for_deployment default curl
_wait_for_deployment default httpbin
}

send_httpbin_requests "status/200"

# no logs are sent to loki
-_verify_same count_sleep_pod "0"
+_verify_same count_curl_pod "0"
_verify_same count_httpbin_pod "0"

# enable access log via Telemetry API

@@ -107,17 +107,17 @@ rollout_restart_pods

send_httpbin_requests "status/200"

-_verify_same count_sleep_pod "10"
+_verify_same count_curl_pod "10"
_verify_same count_httpbin_pod "10"

-# disable access log for sleep pod
+# disable access log for curl pod
snip_get_started_with_telemetry_api_2
rollout_restart_pods

send_httpbin_requests "status/200"

-# sleep pod logs are not sent to loki
-_verify_same count_sleep_pod "0"
+# curl pod logs are not sent to loki
+_verify_same count_curl_pod "0"
_verify_same count_httpbin_pod "10"

# disable httpbin

@@ -126,24 +126,24 @@ rollout_restart_pods

send_httpbin_requests "status/200"

-_verify_same count_sleep_pod "0"
+_verify_same count_curl_pod "0"
# httpbin pod logs are not sent to loki
_verify_same count_httpbin_pod "0"

-# filter sleep logs
+# filter curl logs
kubectl delete telemetry --all -n default
snip_get_started_with_telemetry_api_4
rollout_restart_pods

# only 5xx logs are sent to loki
send_httpbin_requests "status/200"
-_verify_same count_sleep_pod "0"
+_verify_same count_curl_pod "0"

send_httpbin_requests "status/500"
-_verify_same count_sleep_pod "10"
+_verify_same count_curl_pod "10"

# @cleanup
-cleanup_sleep_sample
+cleanup_curl_sample
cleanup_httpbin_sample

snip_cleanup_1
@@ -27,45 +27,45 @@ $ istioctl install --set profile=default

### Setup

-Our examples use two namespaces `foo` and `bar`, with two services, `httpbin` and `sleep`, both running with an Envoy proxy. We also use second
-instances of `httpbin` and `sleep` running without the sidecar in the `legacy` namespace. If you’d like to use the same examples when trying the tasks,
+Our examples use two namespaces `foo` and `bar`, with two services, `httpbin` and `curl`, both running with an Envoy proxy. We also use second
+instances of `httpbin` and `curl` running without the sidecar in the `legacy` namespace. If you’d like to use the same examples when trying the tasks,
run the following:

{{< text bash >}}
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo
-$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo
+$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo
$ kubectl create ns bar
$ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n bar
-$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n bar
+$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n bar
$ kubectl create ns legacy
$ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n legacy
-$ kubectl apply -f @samples/sleep/sleep.yaml@ -n legacy
+$ kubectl apply -f @samples/curl/curl.yaml@ -n legacy
{{< /text >}}

-You can verify setup by sending an HTTP request with `curl` from any `sleep` pod in the namespace `foo`, `bar` or `legacy` to either `httpbin.foo`,
+You can verify setup by sending an HTTP request with `curl` from any `curl` pod in the namespace `foo`, `bar` or `legacy` to either `httpbin.foo`,
`httpbin.bar` or `httpbin.legacy`. All requests should succeed with HTTP code 200.

-For example, here is a command to check `sleep.bar` to `httpbin.foo` reachability:
+For example, here is a command to check `curl.bar` to `httpbin.foo` reachability:

{{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -n bar -o jsonpath={.items..metadata.name})" -c sleep -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
+$ kubectl exec "$(kubectl get pod -l app=curl -n bar -o jsonpath={.items..metadata.name})" -c curl -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

This one-liner command conveniently iterates through all reachability combinations:

{{< text bash >}}
-$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 200
-sleep.legacy to httpbin.bar: 200
-sleep.legacy to httpbin.legacy: 200
+$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 200
+curl.legacy to httpbin.bar: 200
+curl.legacy to httpbin.legacy: 200
{{< /text >}}

Verify there is no peer authentication policy in the system with the following command:
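The command itself sits outside this hunk; a plausible form, shown here only as a sketch, is:

{{< text bash >}}
$ kubectl get peerauthentication --all-namespaces
No resources found
{{< /text >}}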
@@ -99,14 +99,14 @@ upstream request to the backend. That header's presence is evidence that mutual
used. For example:

{{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=<redacted>;/'
-"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=<redacted>;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"
+$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=<redacted>;/'
+"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=<redacted>;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"
{{< /text >}}

When the server doesn't have a sidecar, the `X-Forwarded-Client-Cert` header is not there, which implies requests are in plain text.

{{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert
+$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert

{{< /text >}}

@@ -140,21 +140,21 @@ Since it doesn't specify a value for the `selector` field, the policy applies to
Run the test command again:

{{< text bash >}}
-$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
command terminated with exit code 56
-sleep.legacy to httpbin.bar: 000
+curl.legacy to httpbin.bar: 000
command terminated with exit code 56
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.legacy: 200
{{< /text >}}

-You see requests still succeed, except for those from the client that doesn't have a proxy, `sleep.legacy`, to the server with a proxy, `httpbin.foo` or `httpbin.bar`. This is expected because mutual TLS is now strictly required, but the workload without a sidecar cannot comply.
+You see requests still succeed, except for those from the client that doesn't have a proxy, `curl.legacy`, to the server with a proxy, `httpbin.foo` or `httpbin.bar`. This is expected because mutual TLS is now strictly required, but the workload without a sidecar cannot comply.
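For reference, the kind of mesh-wide policy that produces this behavior is a `PeerAuthentication` in the root namespace with `STRICT` mode and no `selector`; the sketch below is assembled from the surrounding description, not the literal manifest in this commit:

{{< text bash >}}
$ cat <<EOF | kubectl apply -n istio-system -f -
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: default
spec:
  mtls:
    mode: STRICT
EOF
{{< /text >}}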
### Cleanup part 1

@@ -183,20 +183,20 @@ spec:
EOF
{{< /text >}}

-As this policy is applied on workloads in namespace `foo` only, you should see only requests from the client without a sidecar (`sleep.legacy`) to `httpbin.foo` start to fail.
+As this policy is applied on workloads in namespace `foo` only, you should see only requests from the client without a sidecar (`curl.legacy`) to `httpbin.foo` start to fail.

{{< text bash >}}
-$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
command terminated with exit code 56
-sleep.legacy to httpbin.bar: 200
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.bar: 200
+curl.legacy to httpbin.legacy: 200
{{< /text >}}

### Enable mutual TLS per workload

@@ -219,26 +219,26 @@ spec:
EOF
{{< /text >}}

-Again, run the probing command. As expected, the request from `sleep.legacy` to `httpbin.bar` starts failing for the same reasons.
+Again, run the probing command. As expected, the request from `curl.legacy` to `httpbin.bar` starts failing for the same reasons.

{{< text bash >}}
-$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
command terminated with exit code 56
-sleep.legacy to httpbin.bar: 000
+curl.legacy to httpbin.bar: 000
command terminated with exit code 56
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.legacy: 200
{{< /text >}}

{{< text plain >}}
...
-sleep.legacy to httpbin.bar: 000
+curl.legacy to httpbin.bar: 000
command terminated with exit code 56
{{< /text >}}

@@ -267,24 +267,24 @@ EOF
1. You can only use `portLevelMtls` if the port is bound to a service. Istio ignores it otherwise.
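As a hedged sketch of what such a port-level override looks like (the port number and resource name here are illustrative assumptions, not the manifest elided by this hunk):

{{< text bash >}}
$ cat <<EOF | kubectl apply -n bar -f -
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: httpbin
spec:
  selector:
    matchLabels:
      app: httpbin
  mtls:
    mode: STRICT
  portLevelMtls:
    8080:
      mode: DISABLE
EOF
{{< /text >}}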
{{< text bash >}}
-$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
command terminated with exit code 56
-sleep.legacy to httpbin.bar: 200
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.bar: 200
+curl.legacy to httpbin.legacy: 200
{{< /text >}}

### Policy precedence

A workload-specific peer authentication policy takes precedence over a namespace-wide policy. You can test this behavior if you add a policy to disable mutual TLS for the `httpbin.foo` workload, for example.
Note that you've already created a namespace-wide policy that enables mutual TLS for all services in namespace `foo` and observed that requests from
-`sleep.legacy` to `httpbin.foo` are failing (see above).
+`curl.legacy` to `httpbin.foo` are failing (see above).

{{< text bash >}}
$ cat <<EOF | kubectl apply -n foo -f -
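The policy body is cut off by this hunk; based on the description above (disable mutual TLS for the `httpbin.foo` workload), it plausibly continues along these lines — the resource name below is a guess:

{{< text bash >}}
$ cat <<EOF | kubectl apply -n foo -f -
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: disable-mtls-for-httpbin
spec:
  selector:
    matchLabels:
      app: httpbin
  mtls:
    mode: DISABLE
EOF
{{< /text >}}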
@@ -302,10 +302,10 @@ spec:
EOF
{{< /text >}}

-Re-running the request from `sleep.legacy`, you should see a success return code again (200), confirming service-specific policy overrides the namespace-wide policy.
+Re-running the request from `curl.legacy`, you should see a success return code again (200), confirming service-specific policy overrides the namespace-wide policy.

{{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -n legacy -o jsonpath={.items..metadata.name})" -c sleep -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
+$ kubectl exec "$(kubectl get pod -l app=curl -n legacy -o jsonpath={.items..metadata.name})" -c curl -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@@ -28,17 +28,17 @@ istioctl install --set profile=default
snip_setup_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
kubectl create ns bar
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n bar
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n bar
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n bar
kubectl create ns legacy
kubectl apply -f samples/httpbin/httpbin.yaml -n legacy
-kubectl apply -f samples/sleep/sleep.yaml -n legacy
+kubectl apply -f samples/curl/curl.yaml -n legacy
}

snip_setup_2() {
-kubectl exec "$(kubectl get pod -l app=sleep -n bar -o jsonpath={.items..metadata.name})" -c sleep -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
+kubectl exec "$(kubectl get pod -l app=curl -n bar -o jsonpath={.items..metadata.name})" -c curl -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_setup_2_out <<\ENDSNIP

@@ -46,19 +46,19 @@ kubectl exec "$(kubectl get pod -l app=sleep -n bar -o jsonpath={.items..metadat
ENDSNIP

snip_setup_3() {
-for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
+for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
}

! IFS=$'\n' read -r -d '' snip_setup_3_out <<\ENDSNIP
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 200
-sleep.legacy to httpbin.bar: 200
-sleep.legacy to httpbin.legacy: 200
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 200
+curl.legacy to httpbin.bar: 200
+curl.legacy to httpbin.legacy: 200
ENDSNIP

snip_setup_4() {

@@ -78,15 +78,15 @@ kubectl get destinationrules.networking.istio.io --all-namespaces -o yaml | grep
ENDSNIP

snip_auto_mutual_tls_1() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=<redacted>;/'
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=<redacted>;/'
}

! IFS=$'\n' read -r -d '' snip_auto_mutual_tls_1_out <<\ENDSNIP
-"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=<redacted>;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"
+"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=<redacted>;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"
ENDSNIP

snip_auto_mutual_tls_2() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert
}

! IFS=$'\n' read -r -d '' snip_auto_mutual_tls_2_out <<\ENDSNIP
@@ -107,21 +107,21 @@ EOF
}

snip_globally_enabling_istio_mutual_tls_in_strict_mode_2() {
-for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
+for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
}

! IFS=$'\n' read -r -d '' snip_globally_enabling_istio_mutual_tls_in_strict_mode_2_out <<\ENDSNIP
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
command terminated with exit code 56
-sleep.legacy to httpbin.bar: 000
+curl.legacy to httpbin.bar: 000
command terminated with exit code 56
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.legacy: 200
ENDSNIP

snip_cleanup_part_1_1() {

@@ -142,20 +142,20 @@ EOF
}

snip_namespacewide_policy_2() {
-for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
+for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
}

! IFS=$'\n' read -r -d '' snip_namespacewide_policy_2_out <<\ENDSNIP
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
command terminated with exit code 56
-sleep.legacy to httpbin.bar: 200
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.bar: 200
+curl.legacy to httpbin.legacy: 200
ENDSNIP

snip_enable_mutual_tls_per_workload_1() {

@@ -175,26 +175,26 @@ EOF
}

snip_enable_mutual_tls_per_workload_2() {
-for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
+for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
}

! IFS=$'\n' read -r -d '' snip_enable_mutual_tls_per_workload_2_out <<\ENDSNIP
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
command terminated with exit code 56
-sleep.legacy to httpbin.bar: 000
+curl.legacy to httpbin.bar: 000
command terminated with exit code 56
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.legacy: 200
ENDSNIP

! IFS=$'\n' read -r -d '' snip_enable_mutual_tls_per_workload_3 <<\ENDSNIP
...
-sleep.legacy to httpbin.bar: 000
+curl.legacy to httpbin.bar: 000
command terminated with exit code 56
ENDSNIP
@ -218,20 +218,20 @@ EOF
|
|||
}
|
||||
|
||||
snip_enable_mutual_tls_per_workload_5() {
|
||||
for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
|
||||
for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
|
||||
}
|
||||
|
||||
! IFS=$'\n' read -r -d '' snip_enable_mutual_tls_per_workload_5_out <<\ENDSNIP
|
||||
sleep.foo to httpbin.foo: 200
|
||||
sleep.foo to httpbin.bar: 200
|
||||
sleep.foo to httpbin.legacy: 200
|
||||
sleep.bar to httpbin.foo: 200
|
||||
sleep.bar to httpbin.bar: 200
|
||||
sleep.bar to httpbin.legacy: 200
|
||||
sleep.legacy to httpbin.foo: 000
|
||||
curl.foo to httpbin.foo: 200
|
||||
curl.foo to httpbin.bar: 200
|
||||
curl.foo to httpbin.legacy: 200
|
||||
curl.bar to httpbin.foo: 200
|
||||
curl.bar to httpbin.bar: 200
|
||||
curl.bar to httpbin.legacy: 200
|
||||
curl.legacy to httpbin.foo: 000
|
||||
command terminated with exit code 56
|
||||
sleep.legacy to httpbin.bar: 200
|
||||
sleep.legacy to httpbin.legacy: 200
|
||||
curl.legacy to httpbin.bar: 200
|
||||
curl.legacy to httpbin.legacy: 200
|
||||
ENDSNIP
|
||||
|
||||
snip_policy_precedence_1() {
@ -251,7 +251,7 @@ EOF
}

snip_policy_precedence_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n legacy -o jsonpath={.items..metadata.name})" -c sleep -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n legacy -o jsonpath={.items..metadata.name})" -c curl -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_policy_precedence_2_out <<\ENDSNIP

@ -27,11 +27,11 @@ _wait_for_deployment istio-system istiod
snip_setup_1

_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl
_wait_for_deployment bar httpbin
_wait_for_deployment bar sleep
_wait_for_deployment bar curl
_wait_for_deployment legacy httpbin
_wait_for_deployment legacy sleep
_wait_for_deployment legacy curl

_verify_same snip_setup_2 "$snip_setup_2_out"
_verify_same snip_setup_3 "$snip_setup_3_out"

@ -27,20 +27,20 @@ Before you begin this task, do the following:

* Install Istio using the [Istio installation guide](/docs/setup/install/istioctl/).

* Deploy `httpbin` and `sleep` workloads in namespace `foo` with sidecar injection enabled.
* Deploy `httpbin` and `curl` workloads in namespace `foo` with sidecar injection enabled.
  Deploy the example namespace and workloads using these commands:

{{< text bash >}}
$ kubectl create ns foo
$ kubectl label namespace foo istio-injection=enabled
$ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n foo
$ kubectl apply -f @samples/sleep/sleep.yaml@ -n foo
$ kubectl apply -f @samples/curl/curl.yaml@ -n foo
{{< /text >}}

* Verify that `sleep` successfully communicates with `httpbin` using this command:
* Verify that `curl` successfully communicates with `httpbin` using this command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@ -78,7 +78,7 @@ Before you begin this task, do the following:
1. Verify that a request with an invalid JWT is denied:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
401
{{< /text >}}

@ -92,14 +92,14 @@ Before you begin this task, do the following:
1. Verify that a request with a valid JWT is allowed:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
200
{{< /text >}}

1. Verify that a request contains a valid HTTP header with JWT claim value:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]'
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]'
"bar"
{{< /text >}}
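
The `X-Jwt-Claim-Foo` header checked above is copied out of a JWT claim by the `outputClaimToHeaders` field of a `RequestAuthentication` resource. A minimal sketch follows; the resource name, issuer, and JWKS URI are placeholders, not the exact values used by this task:

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: RequestAuthentication
metadata:
  name: jwt-claim-to-header   # illustrative name
  namespace: foo
spec:
  selector:
    matchLabels:
      app: httpbin
  jwtRules:
  - issuer: "example-issuer"                 # placeholder issuer
    jwksUri: "https://example.com/jwks.json" # placeholder JWKS endpoint
    outputClaimToHeaders:
    - header: "x-jwt-claim-foo"  # exposes the "foo" claim as a request header
      claim: "foo"
EOF
{{< /text >}}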
@ -24,11 +24,11 @@ snip_before_you_begin_1() {
kubectl create ns foo
kubectl label namespace foo istio-injection=enabled
kubectl apply -f samples/httpbin/httpbin.yaml -n foo
kubectl apply -f samples/sleep/sleep.yaml -n foo
kubectl apply -f samples/curl/curl.yaml -n foo
}

snip_before_you_begin_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP

@ -56,7 +56,7 @@ EOF
}

snip_allow_requests_with_valid_jwt_and_listtyped_claims_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_2_out <<\ENDSNIP

@ -72,7 +72,7 @@ TOKEN=$(curl https://raw.githubusercontent.com/istio/istio/master/security/tools
ENDSNIP

snip_allow_requests_with_valid_jwt_and_listtyped_claims_4() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_4_out <<\ENDSNIP

@ -80,7 +80,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat
ENDSNIP

snip_allow_requests_with_valid_jwt_and_listtyped_claims_5() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]'
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]'
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_5_out <<\ENDSNIP

@ -27,7 +27,7 @@ export VERIFY_TIMEOUT=300
snip_before_you_begin_1

_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl

# Pull the Istio branch from the docs configuration file.
ISTIO_BRANCH=$(yq '.source_branch_name' "${REPO_ROOT}"/data/args.yml)

@ -36,35 +36,35 @@ the policies to enforce STRICT mutual TLS between the workloads.

## Set up the cluster

* Create two namespaces, `foo` and `bar`, and deploy [httpbin]({{< github_tree >}}/samples/httpbin) and [sleep]({{< github_tree >}}/samples/sleep) with sidecars on both of them:
* Create two namespaces, `foo` and `bar`, and deploy [httpbin]({{< github_tree >}}/samples/httpbin) and [curl]({{< github_tree >}}/samples/curl) with sidecars on both of them:

{{< text bash >}}
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo
$ kubectl create ns bar
$ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n bar
$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n bar
$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n bar
{{< /text >}}

* Create another namespace, `legacy`, and deploy [sleep]({{< github_tree >}}/samples/sleep) without a sidecar:
* Create another namespace, `legacy`, and deploy [curl]({{< github_tree >}}/samples/curl) without a sidecar:

{{< text bash >}}
$ kubectl create ns legacy
$ kubectl apply -f @samples/sleep/sleep.yaml@ -n legacy
$ kubectl apply -f @samples/curl/curl.yaml@ -n legacy
{{< /text >}}

* Verify the setup by sending HTTP requests (using curl) from the sleep pods, in namespaces `foo`, `bar` and `legacy`, to `httpbin.foo` and `httpbin.bar`.
* Verify the setup by sending HTTP requests (using curl) from the curl pods, in namespaces `foo`, `bar` and `legacy`, to `httpbin.foo` and `httpbin.bar`.
All requests should succeed with return code 200.

{{< text bash >}}
$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
sleep.foo to httpbin.foo: 200
sleep.foo to httpbin.bar: 200
sleep.bar to httpbin.foo: 200
sleep.bar to httpbin.bar: 200
sleep.legacy to httpbin.foo: 200
sleep.legacy to httpbin.bar: 200
$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
curl.foo to httpbin.foo: 200
curl.foo to httpbin.bar: 200
curl.bar to httpbin.foo: 200
curl.bar to httpbin.bar: 200
curl.legacy to httpbin.foo: 200
curl.legacy to httpbin.bar: 200
{{< /text >}}

{{< tip >}}
@ -100,17 +100,17 @@ spec:
EOF
{{< /text >}}

Now, you should see the request from `sleep.legacy` to `httpbin.foo` failing.
Now, you should see the request from `curl.legacy` to `httpbin.foo` failing.

{{< text bash >}}
$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
sleep.foo to httpbin.foo: 200
sleep.foo to httpbin.bar: 200
sleep.bar to httpbin.foo: 200
sleep.bar to httpbin.bar: 200
sleep.legacy to httpbin.foo: 000
$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
curl.foo to httpbin.foo: 200
curl.foo to httpbin.bar: 200
curl.bar to httpbin.foo: 200
curl.bar to httpbin.bar: 200
curl.legacy to httpbin.foo: 000
command terminated with exit code 56
sleep.legacy to httpbin.bar: 200
curl.legacy to httpbin.bar: 200
{{< /text >}}

If you installed Istio with `values.global.proxy.privileged=true`, you can use `tcpdump` to verify
@ -122,7 +122,7 @@ tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on eth0, link-type EN10MB (Ethernet), capture size 262144 bytes
{{< /text >}}

You will see plain text and encrypted text in the output when requests are sent from `sleep.legacy` and `sleep.foo`
You will see plain text and encrypted text in the output when requests are sent from `curl.legacy` and `curl.foo`
respectively.
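
As a sketch of such a capture (assuming privileged proxies, as noted above), you could run `tcpdump` inside the `istio-proxy` container of the `httpbin` pod; the port and flags here are illustrative:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=httpbin -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- sudo tcpdump dst port 80 -A
{{< /text >}}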

If you can't migrate all your services to Istio (i.e., inject an Envoy sidecar into all of them), you will need to continue to use `PERMISSIVE` mode.
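
In `PERMISSIVE` mode a workload accepts both mutual TLS and plain-text traffic. A minimal sketch of an explicit mesh-wide `PERMISSIVE` policy follows, assuming the conventional `default` name in the root namespace; this is an illustration, not a manifest taken from this page:

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: "default"          # conventional mesh-wide policy name (assumption)
  namespace: "istio-system"
spec:
  mtls:
    mode: PERMISSIVE       # accept both mTLS and plain text
EOF
{{< /text >}}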
@ -145,11 +145,11 @@ spec:
EOF
{{< /text >}}

Now, both the `foo` and `bar` namespaces enforce mutual TLS only traffic, so you should see requests from `sleep.legacy`
Now, both the `foo` and `bar` namespaces enforce mutual TLS only traffic, so you should see requests from `curl.legacy`
failing for both.

{{< text bash >}}
$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
{{< /text >}}

## Clean up the example

@ -23,28 +23,28 @@
snip_set_up_the_cluster_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
kubectl create ns bar
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n bar
kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n bar
kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n bar
}

snip_set_up_the_cluster_2() {
kubectl create ns legacy
kubectl apply -f samples/sleep/sleep.yaml -n legacy
kubectl apply -f samples/curl/curl.yaml -n legacy
}

snip_set_up_the_cluster_3() {
for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
}

! IFS=$'\n' read -r -d '' snip_set_up_the_cluster_3_out <<\ENDSNIP
sleep.foo to httpbin.foo: 200
sleep.foo to httpbin.bar: 200
sleep.bar to httpbin.foo: 200
sleep.bar to httpbin.bar: 200
sleep.legacy to httpbin.foo: 200
sleep.legacy to httpbin.bar: 200
curl.foo to httpbin.foo: 200
curl.foo to httpbin.bar: 200
curl.bar to httpbin.foo: 200
curl.bar to httpbin.bar: 200
curl.legacy to httpbin.foo: 200
curl.legacy to httpbin.bar: 200
ENDSNIP

snip_set_up_the_cluster_4() {
@ -76,17 +76,17 @@ EOF
}

snip_lock_down_to_mutual_tls_by_namespace_2() {
for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
}

! IFS=$'\n' read -r -d '' snip_lock_down_to_mutual_tls_by_namespace_2_out <<\ENDSNIP
sleep.foo to httpbin.foo: 200
sleep.foo to httpbin.bar: 200
sleep.bar to httpbin.foo: 200
sleep.bar to httpbin.bar: 200
sleep.legacy to httpbin.foo: 000
curl.foo to httpbin.foo: 200
curl.foo to httpbin.bar: 200
curl.bar to httpbin.foo: 200
curl.bar to httpbin.bar: 200
curl.legacy to httpbin.foo: 000
command terminated with exit code 56
sleep.legacy to httpbin.bar: 200
curl.legacy to httpbin.bar: 200
ENDSNIP

snip_lock_down_to_mutual_tls_by_namespace_3() {
@ -111,7 +111,7 @@ EOF
}

snip_lock_down_mutual_tls_for_the_entire_mesh_2() {
for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
}

snip_clean_up_the_example_1() {

@ -26,10 +26,10 @@ snip_set_up_the_cluster_1
snip_set_up_the_cluster_2

_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl
_wait_for_deployment bar httpbin
_wait_for_deployment bar sleep
_wait_for_deployment legacy sleep
_wait_for_deployment bar curl
_wait_for_deployment legacy curl

# curl_foo_bar_legacy
_verify_same snip_set_up_the_cluster_3 "$snip_set_up_the_cluster_3_out"
@ -66,13 +66,13 @@ set +e
set +o pipefail

# curl_foo_bar_legacy_httpbin_foo_mtls
expected="sleep.foo to httpbin.foo: 200
sleep.foo to httpbin.bar: 200
sleep.bar to httpbin.foo: 200
sleep.bar to httpbin.bar: 200
sleep.legacy to httpbin.foo: 000
expected="curl.foo to httpbin.foo: 200
curl.foo to httpbin.bar: 200
curl.bar to httpbin.foo: 200
curl.bar to httpbin.bar: 200
curl.legacy to httpbin.foo: 000
command terminated with exit code 56
sleep.legacy to httpbin.bar: 000
curl.legacy to httpbin.bar: 000
command terminated with exit code 56"
_verify_same snip_lock_down_mutual_tls_for_the_entire_mesh_2 "$expected"

@ -21,7 +21,7 @@ Before you begin this task, do the following:

* Deploy test workloads:

This task uses two workloads, `httpbin` and `sleep`, both deployed in namespace `foo`.
This task uses two workloads, `httpbin` and `curl`, both deployed in namespace `foo`.
Both workloads run with an Envoy proxy sidecar. Deploy the `foo` namespace
and workloads with the following command:

@ -29,13 +29,13 @@ Before you begin this task, do the following:
$ kubectl create ns foo
$ kubectl label ns foo istio-injection=enabled
$ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n foo
$ kubectl apply -f @samples/sleep/sleep.yaml@ -n foo
$ kubectl apply -f @samples/curl/curl.yaml@ -n foo
{{< /text >}}

* Verify that `sleep` can access `httpbin` with the following command:
* Verify that `curl` can access `httpbin` with the following command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}
|
@ -185,14 +185,14 @@ The external authorizer is now ready to be used by the authorization policy.
|
|||
1. Verify a request to path `/headers` with header `x-ext-authz: deny` is denied by the sample `ext_authz` server:
|
||||
|
||||
{{< text bash >}}
|
||||
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s
|
||||
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s
|
||||
denied by ext_authz for not found header `x-ext-authz: allow` in the request
|
||||
{{< /text >}}
|
||||
|
||||
1. Verify a request to path `/headers` with header `x-ext-authz: allow` is allowed by the sample `ext_authz` server:
|
||||
|
||||
{{< text bash >}}
|
||||
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers'
|
||||
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers'
|
||||
...
|
||||
"X-Ext-Authz-Check-Result": [
|
||||
"allowed"
|
||||
|
@ -203,7 +203,7 @@ The external authorizer is now ready to be used by the authorization policy.
|
|||
1. Verify a request to path `/ip` is allowed and does not trigger the external authorization:
|
||||
|
||||
{{< text bash >}}
|
||||
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n"
|
||||
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n"
|
||||
200
|
||||
{{< /text >}}
|
@ -213,12 +213,12 @@ The external authorizer is now ready to be used by the authorization policy.
$ kubectl logs "$(kubectl get pod -l app=ext-authz -n foo -o jsonpath={.items..metadata.name})" -n foo -c ext-authz
2021/01/07 22:55:47 Starting HTTP server at [::]:8000
2021/01/07 22:55:47 Starting gRPC server at [::]:9000
2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
{{< /text >}}
You can also tell from the log that mTLS is enabled for the connection between the `ext-authz` filter and the
sample `ext-authz` server because the source principal is populated with the value `spiffe://cluster.local/ns/foo/sa/sleep`.
sample `ext-authz` server because the source principal is populated with the value `spiffe://cluster.local/ns/foo/sa/curl`.
You can now apply another authorization policy for the sample `ext-authz` server to control who is allowed to access it.
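
A sketch of such a policy follows; the policy name and the allowed principal are assumptions for illustration, not a manifest from this page. An `ALLOW` rule that only admits the `httpbin` service account could look like:

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: ext-authz-allow   # illustrative name
  namespace: foo
spec:
  selector:
    matchLabels:
      app: ext-authz
  action: ALLOW
  rules:
  - from:
    - source:
        principals: ["cluster.local/ns/foo/sa/httpbin"]  # assumed caller identity
EOF
{{< /text >}}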
@ -24,11 +24,11 @@ snip_before_you_begin_1() {
kubectl create ns foo
kubectl label ns foo istio-injection=enabled
kubectl apply -f samples/httpbin/httpbin.yaml -n foo
kubectl apply -f samples/sleep/sleep.yaml -n foo
kubectl apply -f samples/curl/curl.yaml -n foo
}

snip_before_you_begin_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP

@ -128,7 +128,7 @@ EOF
}

snip_enable_with_external_authorization_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s
}

! IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_2_out <<\ENDSNIP

@ -136,7 +136,7 @@ denied by ext_authz for not found header `x-ext-authz: allow` in the request
ENDSNIP

snip_enable_with_external_authorization_3() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers'
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers'
}

! IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_3_out <<\ENDSNIP

@ -148,7 +148,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat
ENDSNIP

snip_enable_with_external_authorization_4() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_4_out <<\ENDSNIP

@ -162,8 +162,8 @@ kubectl logs "$(kubectl get pod -l app=ext-authz -n foo -o jsonpath={.items..met
! IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_5_out <<\ENDSNIP
2021/01/07 22:55:47 Starting HTTP server at [::]:8000
2021/01/07 22:55:47 Starting gRPC server at [::]:9000
2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{}
ENDSNIP
snip_clean_up_1() {

@ -28,7 +28,7 @@ export VERIFY_TIMEOUT=300
snip_before_you_begin_1

_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl

_verify_same snip_before_you_begin_2 "$snip_before_you_begin_2_out"

@ -21,20 +21,20 @@ Before you begin this task, do the following:

* Deploy workloads:

This task uses two workloads, `httpbin` and `sleep`, deployed in one namespace, `foo`.
This task uses two workloads, `httpbin` and `curl`, deployed in one namespace, `foo`.
Both workloads run with an Envoy proxy in front of them. Deploy the example namespace
and workloads with the following command:

{{< text bash >}}
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo
{{< /text >}}

* Verify that `sleep` talks to `httpbin` with the following command:
* Verify that `curl` talks to `httpbin` with the following command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@ -72,14 +72,14 @@ Caching and propagation overhead can cause some delay.
1. Verify that `GET` requests are denied:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n"
403
{{< /text >}}

1. Verify that `POST` requests are allowed:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@ -113,14 +113,14 @@ Caching and propagation overhead can cause some delay.
1. Verify that `GET` requests with the HTTP header `x-token: admin` are allowed:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

1. Verify that GET requests with the HTTP header `x-token: guest` are denied:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n"
403
{{< /text >}}

|
@ -151,7 +151,7 @@ Caching and propagation overhead can cause some delay.
|
|||
by the `deny-method-get` policy. Deny policies takes precedence over the allow policies:
|
||||
|
||||
{{< text bash >}}
|
||||
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n"
|
||||
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n"
|
||||
403
|
||||
{{< /text >}}
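
For context, a deny policy like `deny-method-get` pairs `action: DENY` with a rule matching the `GET` method. The following is a reconstruction under that assumption, not the exact manifest applied earlier on this page:

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: deny-method-get
  namespace: foo
spec:
  selector:
    matchLabels:
      app: httpbin
  action: DENY
  rules:
  - to:
    - operation:
        methods: ["GET"]  # reject GET requests to httpbin
EOF
{{< /text >}}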
|
@ -159,7 +159,7 @@ Caching and propagation overhead can cause some delay.
|
|||
allowed by the `allow-path-ip` policy:
|
||||
|
||||
{{< text bash >}}
|
||||
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
|
||||
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
|
||||
200
|
||||
{{< /text >}}
|
||||
|
||||
|
@ -167,7 +167,7 @@ Caching and propagation overhead can cause some delay.
|
|||
denied because they don’t match the `allow-path-ip` policy:
|
||||
|
||||
{{< text bash >}}
|
||||
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
|
||||
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
|
||||
403
|
||||
{{< /text >}}
|
||||
|
||||
|
|
|
@ -23,11 +23,11 @@
snip_before_you_begin_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
}

snip_before_you_begin_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP

@ -54,7 +54,7 @@ EOF
}

snip_explicitly_deny_a_request_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_2_out <<\ENDSNIP

@ -62,7 +62,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat
ENDSNIP

snip_explicitly_deny_a_request_3() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_3_out <<\ENDSNIP

@ -92,7 +92,7 @@ EOF
}

snip_explicitly_deny_a_request_5() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_5_out <<\ENDSNIP

@ -100,7 +100,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat
ENDSNIP

snip_explicitly_deny_a_request_6() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_6_out <<\ENDSNIP

@ -127,7 +127,7 @@ EOF
}

snip_explicitly_deny_a_request_8() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_8_out <<\ENDSNIP

@ -135,7 +135,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat
ENDSNIP

snip_explicitly_deny_a_request_9() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_9_out <<\ENDSNIP

@ -143,7 +143,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat
ENDSNIP

snip_explicitly_deny_a_request_10() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_10_out <<\ENDSNIP

@ -28,7 +28,7 @@ export VERIFY_TIMEOUT=300
snip_before_you_begin_1

_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl

_verify_same snip_before_you_begin_2 "$snip_before_you_begin_2_out"

@ -32,14 +32,14 @@ Before you begin this task, do the following:

* Deploy test workloads:

This task uses two workloads, `httpbin` and `sleep`, both deployed in namespace `foo`.
This task uses two workloads, `httpbin` and `curl`, both deployed in namespace `foo`.
Both workloads run with an Envoy proxy sidecar. Create the `foo` namespace and deploy the workloads with the following command:

{{< text bash >}}
$ kubectl create ns foo
$ kubectl label ns foo istio-injection=enabled
$ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n foo
$ kubectl apply -f @samples/sleep/sleep.yaml@ -n foo
$ kubectl apply -f @samples/curl/curl.yaml@ -n foo
{{< /text >}}

* Enable proxy debug level log for checking dry-run logging results:
@ -49,10 +49,10 @@ Before you begin this task, do the following:
rbac: debug
{{< /text >}}
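
A sketch of one way to produce this setting, assuming the `istioctl proxy-config log` command applies to your setup, is:

{{< text bash >}}
$ istioctl proxy-config log deploy/httpbin -n foo --level "rbac:debug"
{{< /text >}}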
* Verify that `sleep` can access `httpbin` with the following command:
* Verify that `curl` can access `httpbin` with the following command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@ -92,10 +92,10 @@ Caching and propagation overhead can cause some delay.
{{< /text >}}
1. Verify that a request to path `/headers` is allowed because the policy is created in dry-run mode. Run the following command
to send 20 requests from `sleep` to `httpbin`; each request includes the header `X-B3-Sampled: 1` to always trigger Zipkin tracing:
to send 20 requests from `curl` to `httpbin`; each request includes the header `X-B3-Sampled: 1` to always trigger Zipkin tracing:
{{< text bash >}}
$ for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done
$ for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done
200
200
200
@ -154,7 +154,7 @@ Also see the [troubleshooting guide](/docs/ops/common-problems/security-issues/#
$ istioctl dashboard zipkin
{{< /text >}}
1. Find the trace result for the request from `sleep` to `httpbin`. Try to send some more requests if you do not see the trace
1. Find the trace result for the request from `curl` to `httpbin`. Try to send some more requests if you do not see the trace
result, due to possible delays in Zipkin.

1. In the trace result, you should find the following custom tags indicating that the request is rejected by the dry-run policy
@ -24,7 +24,7 @@ snip_before_you_begin_1() {
kubectl create ns foo
kubectl label ns foo istio-injection=enabled
kubectl apply -f samples/httpbin/httpbin.yaml -n foo
kubectl apply -f samples/sleep/sleep.yaml -n foo
kubectl apply -f samples/curl/curl.yaml -n foo
}

snip_before_you_begin_2() {
@ -36,7 +36,7 @@ rbac: debug
ENDSNIP

snip_before_you_begin_3() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_before_you_begin_3_out <<\ENDSNIP

@ -68,7 +68,7 @@ kubectl annotate --overwrite authorizationpolicies deny-path-headers -n foo isti
}

snip_create_dryrun_policy_3() {
for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done
for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done
}

! IFS=$'\n' read -r -d '' snip_create_dryrun_policy_3_out <<\ENDSNIP

@ -32,35 +32,35 @@ _deploy_and_wait_for_addons prometheus zipkin
snip_enable_tracing_for_mesh_1
snip_customizing_trace_sampling_1

# Install sleep and httpbin
# Install curl and httpbin
snip_before_you_begin_1
_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl

# Enable RBAC debug logging on httpbin
_verify_contains snip_before_you_begin_2 "$snip_before_you_begin_2_out"

# Send request from sleep to httpbin
# Send request from curl to httpbin
_verify_contains snip_before_you_begin_3 "$snip_before_you_begin_3_out"

# Create authorization policy in dry-run mode
snip_create_dryrun_policy_1
snip_create_dryrun_policy_2

# Send requests from sleep to httpbin
# Send requests from curl to httpbin
_verify_elided snip_create_dryrun_policy_3 "$snip_create_dryrun_policy_3_out"

# Verify Envoy logs for the dry-run result
function check_logs() {
# Send more requests in case the log is not showing
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -s -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -s -o /dev/null -w "%{http_code}\n"
snip_check_dryrun_result_in_proxy_log_1
}
_verify_contains check_logs "ns[foo]-policy[deny-path-headers]-rule[0]"

function query_prometheus() {
# Send more requests in case the metric is not showing
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s
curl -sg "http://localhost:9090/api/v1/query?query=$snip_check_dryrun_result_in_metric_using_prometheus_2" | jq '.data.result[0].value[1]'
}
|
||||
|
||||
|
@ -71,7 +71,7 @@ pgrep istioctl | xargs kill
|
|||
|
||||
function query_zipkin() {
|
||||
# Send more requests in case the trace is not showing
|
||||
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s
|
||||
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s
|
||||
curl -s 'http://localhost:9411/zipkin/api/v2/traces?serviceName=httpbin.foo'
|
||||
}
|
||||
|
||||
|
|
|
@@ -44,11 +44,11 @@ function verify {

goodResponse=0

ingress_url="http://istio-ingressgateway.istio-system/productpage"
sleep_pod=$(kubectl get pod -l app=sleep -n default -o 'jsonpath={.items..metadata.name}')
curl_pod=$(kubectl get pod -l app=curl -n default -o 'jsonpath={.items..metadata.name}')

for ((i=1; i<="$REPEAT"; i++)); do
set +e
response=$(kubectl exec "${sleep_pod}" -c sleep -n "default" -- curl "${ingress_url}" -sS -w "\n%{http_code}\n")
response=$(kubectl exec "${curl_pod}" -c curl -n "default" -- curl "${ingress_url}" -sS -w "\n%{http_code}\n")
set -e
mapfile -t respArray <<< "$response"
code=${respArray[-1]}

@@ -83,7 +83,7 @@ function verify {

}

kubectl label namespace default istio-injection=enabled --overwrite
startup_sleep_sample # needed for sending test requests with curl
startup_curl_sample # needed for sending test requests with curl

# launch the bookinfo app
startup_bookinfo_sample

@@ -123,6 +123,6 @@ verify 200 "William Shakespeare" "Book Details" "Book Reviews"

snip_clean_up_1
# remaining cleanup (undocumented).
cleanup_bookinfo_sample
cleanup_sleep_sample
cleanup_curl_sample
kubectl delete -f samples/bookinfo/networking/virtual-service-reviews-v3.yaml
kubectl label namespace default istio-injection-

@@ -24,20 +24,20 @@ Before you begin this task, do the following:

* Install Istio using [Istio installation guide](/docs/setup/install/istioctl/).

* Deploy two workloads: `httpbin` and `sleep`. Deploy these in one namespace,
* Deploy two workloads: `httpbin` and `curl`. Deploy these in one namespace,
for example `foo`. Both workloads run with an Envoy proxy in front of each.
Deploy the example namespace and workloads using these commands:

{{< text bash >}}
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo
{{< /text >}}

* Verify that `sleep` successfully communicates with `httpbin` using this command:
* Verify that `curl` successfully communicates with `httpbin` using this command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@@ -72,14 +72,14 @@ Caching and propagation can cause a delay.

1. Verify that a request with an invalid JWT is denied:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
401
{{< /text >}}

1. Verify that a request without a JWT is allowed because there is no authorization policy:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@@ -119,14 +119,14 @@ Caching and propagation can cause a delay.

1. Verify that a request with a valid JWT is allowed:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
200
{{< /text >}}

1. Verify that a request without a JWT is denied:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
403
{{< /text >}}

@@ -169,14 +169,14 @@ Caching and propagation can cause a delay.

1. Verify that a request with the JWT that includes `group1` in the `groups` claim is allowed:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n"
200
{{< /text >}}

1. Verify that a request with a JWT that doesn't have the `groups` claim is rejected:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
403
{{< /text >}}

@@ -23,11 +23,11 @@

snip_before_you_begin_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
}

snip_before_you_begin_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP

@@ -52,7 +52,7 @@ EOF

}

snip_allow_requests_with_valid_jwt_and_listtyped_claims_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_2_out <<\ENDSNIP

@@ -60,7 +60,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat

ENDSNIP

snip_allow_requests_with_valid_jwt_and_listtyped_claims_3() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_3_out <<\ENDSNIP

@@ -95,7 +95,7 @@ TOKEN=$(curl https://raw.githubusercontent.com/istio/istio/master/security/tools

ENDSNIP

snip_allow_requests_with_valid_jwt_and_listtyped_claims_6() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_6_out <<\ENDSNIP

@@ -103,7 +103,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat

ENDSNIP

snip_allow_requests_with_valid_jwt_and_listtyped_claims_7() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_7_out <<\ENDSNIP

@@ -141,7 +141,7 @@ TOKEN_GROUP=$(curl https://raw.githubusercontent.com/istio/istio/master/security

ENDSNIP

snip_allow_requests_with_valid_jwt_and_listtyped_claims_10() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_10_out <<\ENDSNIP

@@ -149,7 +149,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat

ENDSNIP

snip_allow_requests_with_valid_jwt_and_listtyped_claims_11() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_11_out <<\ENDSNIP

@@ -27,7 +27,7 @@ export VERIFY_TIMEOUT=300

snip_before_you_begin_1

_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl

# Pull the Istio branch from the docs configuration file.
ISTIO_BRANCH=$(yq '.source_branch_name' "${REPO_ROOT}"/data/args.yml)

@@ -19,7 +19,7 @@ Before you begin this task, do the following:

* Install Istio using the [Istio installation guide](/docs/setup/install/istioctl/).

* Deploy two workloads named `sleep` and `tcp-echo` together in a namespace, for example `foo`.
* Deploy two workloads named `curl` and `tcp-echo` together in a namespace, for example `foo`.
Both workloads run with an Envoy proxy in front of each. The `tcp-echo` workload listens on ports
9000, 9001, and 9002 and echoes back any traffic it receives with a `hello` prefix.
For example, if you send "world" to `tcp-echo`, it will reply with `hello world`.
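
Once the workloads below are deployed, you can exercise that echo behavior directly from the `curl` pod. This is a quick sanity check — a sketch reusing the `kubectl exec`/`nc` pattern used throughout this task, not a step from the original page:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c 'echo "world" | nc tcp-echo 9000'
hello world
{{< /text >}}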

@@ -30,37 +30,37 @@ Before you begin this task, do the following:

{{< text bash >}}
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/tcp-echo/tcp-echo.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo
$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo
{{< /text >}}

* Verify that `sleep` successfully communicates with `tcp-echo` on ports 9000 and 9001
* Verify that `curl` successfully communicates with `tcp-echo` on ports 9000 and 9001
using the following command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
hello port 9000
connection succeeded
{{< /text >}}

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
hello port 9001
connection succeeded
{{< /text >}}

* Verify that `sleep` successfully communicates with `tcp-echo` on port 9002.
* Verify that `curl` successfully communicates with `tcp-echo` on port 9002.
You need to send the traffic directly to the pod IP of `tcp-echo` because port 9002 is not
defined in the Kubernetes service object of `tcp-echo`.
Get the pod IP address and send the request with the following command:

{{< text bash >}}
$ TCP_ECHO_IP=$(kubectl get pod "$(kubectl get pod -l app=tcp-echo -n foo -o jsonpath={.items..metadata.name})" -n foo -o jsonpath="{.status.podIP}")
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
"echo \"port 9002\" | nc $TCP_ECHO_IP 9002" | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
hello port 9002
connection succeeded

@@ -97,8 +97,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

1. Verify that requests to port 9000 are allowed using the following command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
hello port 9000
connection succeeded

@@ -107,8 +107,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

1. Verify that requests to port 9001 are allowed using the following command:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
hello port 9001
connection succeeded

@@ -119,8 +119,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

explicitly in the `tcp-echo` Kubernetes service object. Run the following command and verify the output:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
"echo \"port 9002\" | nc $TCP_ECHO_IP 9002" | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
connection rejected
{{< /text >}}

@@ -153,8 +153,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

Run the following command and verify the output:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
connection rejected
{{< /text >}}

@@ -163,8 +163,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

ALLOW rules. Run the following command and verify the output:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
connection rejected
{{< /text >}}

@@ -196,8 +196,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

HTTP-only fields while creating a DENY rule for a TCP port; due to its restrictive nature, it denies all traffic to the TCP ports:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
connection rejected
{{< /text >}}
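
The DENY policy itself sits outside the changed lines and is therefore elided here. A minimal sketch of the shape being described — assumed names and field values, following the `AuthorizationPolicy` pattern used elsewhere in this task — pairs a TCP port with an HTTP-only field such as `methods`:

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: tcp-policy
  namespace: foo
spec:
  selector:
    matchLabels:
      app: tcp-echo
  action: DENY
  rules:
  - to:
    - operation:
        # methods is an HTTP-only field; on a TCP port the rule cannot match
        # as written, and the restrictive DENY semantics reject all TCP traffic.
        methods: ["GET"]
        ports: ["9000"]
EOF
{{< /text >}}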

@@ -205,8 +205,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

1. Verify that requests to port 9001 are denied, for the same reason as above.

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
connection rejected
{{< /text >}}

@@ -236,8 +236,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

1. Verify that requests to port 9000 are denied. This occurs because the request matches the `ports` in the above-mentioned deny policy.

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
connection rejected
{{< /text >}}

@@ -246,8 +246,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p

the `ports` in the DENY policy:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
hello port 9001
connection succeeded

@@ -23,12 +23,12 @@

snip_before_you_begin_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/tcp-echo/tcp-echo.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
}

snip_before_you_begin_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -38,8 +38,8 @@ connection succeeded

ENDSNIP

snip_before_you_begin_3() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -50,8 +50,8 @@ ENDSNIP

snip_before_you_begin_4() {
TCP_ECHO_IP=$(kubectl get pod "$(kubectl get pod -l app=tcp-echo -n foo -o jsonpath={.items..metadata.name})" -n foo -o jsonpath="{.status.podIP}")
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
"echo \"port 9002\" | nc $TCP_ECHO_IP 9002" | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -80,8 +80,8 @@ EOF

}

snip_configure_allow_authorization_policy_for_a_tcp_workload_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -91,8 +91,8 @@ connection succeeded

ENDSNIP

snip_configure_allow_authorization_policy_for_a_tcp_workload_3() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -102,8 +102,8 @@ connection succeeded

ENDSNIP

snip_configure_allow_authorization_policy_for_a_tcp_workload_4() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
"echo \"port 9002\" | nc $TCP_ECHO_IP 9002" | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -132,8 +132,8 @@ EOF

}

snip_configure_allow_authorization_policy_for_a_tcp_workload_6() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -142,8 +142,8 @@ connection rejected

ENDSNIP

snip_configure_allow_authorization_policy_for_a_tcp_workload_7() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -171,8 +171,8 @@ EOF

}

snip_configure_deny_authorization_policy_for_a_tcp_workload_2() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -181,8 +181,8 @@ connection rejected

ENDSNIP

snip_configure_deny_authorization_policy_for_a_tcp_workload_3() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -211,8 +211,8 @@ EOF

}

snip_configure_deny_authorization_policy_for_a_tcp_workload_5() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -221,8 +221,8 @@ connection rejected

ENDSNIP

snip_configure_deny_authorization_policy_for_a_tcp_workload_6() {
kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-c sleep -n foo -- sh -c \
kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c curl -n foo -- sh -c \
'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}

@@ -24,7 +24,7 @@ set -o pipefail

snip_before_you_begin_1

_wait_for_deployment foo tcp-echo
_wait_for_deployment foo sleep
_wait_for_deployment foo curl

# shellcheck disable=SC2155
export TCP_ECHO_IP=$(kubectl get pod "$(kubectl get pod -l app=tcp-echo -n foo -o jsonpath={.items..metadata.name})" -n foo -o "jsonpath={.status.podIP}")

@@ -28,18 +28,18 @@ Before you begin this task, do the following:

{{< /text >}}

1. Deploy the [httpbin]({{< github_tree >}}/samples/httpbin) sample in the `default` namespace
and the [sleep]({{< github_tree >}}/samples/sleep) sample in the `default` and `sleep-allow` namespaces:
and the [curl]({{< github_tree >}}/samples/curl) sample in the `default` and `curl-allow` namespaces:

{{< text bash >}}
$ kubectl label namespace default istio-injection=enabled
$ kubectl apply -f @samples/httpbin/httpbin.yaml@
$ kubectl apply -f @samples/sleep/sleep.yaml@
$ kubectl create namespace sleep-allow
$ kubectl label namespace sleep-allow istio-injection=enabled
$ kubectl apply -f @samples/sleep/sleep.yaml@ -n sleep-allow
$ kubectl apply -f @samples/curl/curl.yaml@
$ kubectl create namespace curl-allow
$ kubectl label namespace curl-allow istio-injection=enabled
$ kubectl apply -f @samples/curl/curl.yaml@ -n curl-allow
{{< /text >}}

1. Apply the authorization policy below to deny all requests to `httpbin` except from `sleep` in the `sleep-allow` namespace.
1. Apply the authorization policy below to deny all requests to `httpbin` except from `curl` in the `curl-allow` namespace.

{{< text bash >}}
$ kubectl apply -f - <<EOF

@@ -53,7 +53,7 @@ Before you begin this task, do the following:

- from:
- source:
principals:
- old-td/ns/sleep-allow/sa/sleep
- old-td/ns/curl-allow/sa/curl
to:
- operation:
methods:

@@ -69,17 +69,17 @@ Before you begin this task, do the following:

1. Verify that requests to `httpbin` from:

* `sleep` in the `default` namespace are denied.
* `curl` in the `default` namespace are denied.

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
403
{{< /text >}}

* `sleep` in the `sleep-allow` namespace are allowed.
* `curl` in the `curl-allow` namespace are allowed.

{{< text bash >}}
$ kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

@@ -99,32 +99,32 @@ Before you begin this task, do the following:

The Istio mesh is now running with a new trust domain, `new-td`.

1. Redeploy the `httpbin` and `sleep` applications to pick up changes from the new Istio control plane.
1. Redeploy the `httpbin` and `curl` applications to pick up changes from the new Istio control plane.

{{< text bash >}}
$ kubectl delete pod --all
{{< /text >}}

{{< text bash >}}
$ kubectl delete pod --all -n sleep-allow
$ kubectl delete pod --all -n curl-allow
{{< /text >}}

1. Verify that requests to `httpbin` from `sleep` in both the `default` and `sleep-allow` namespaces are denied.
1. Verify that requests to `httpbin` from `curl` in both the `default` and `curl-allow` namespaces are denied.

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
403
{{< /text >}}

{{< text bash >}}
$ kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
403
{{< /text >}}

This is because we specified an authorization policy that denies all requests to `httpbin`, except the ones from
the `old-td/ns/sleep-allow/sa/sleep` identity, which is the old identity of the `sleep` application in the `sleep-allow` namespace.
When we migrated to a new trust domain above, i.e. `new-td`, the identity of this `sleep` application is now `new-td/ns/sleep-allow/sa/sleep`,
which is not the same as `old-td/ns/sleep-allow/sa/sleep`. Therefore, requests from the `sleep` application in the `sleep-allow` namespace
the `old-td/ns/curl-allow/sa/curl` identity, which is the old identity of the `curl` application in the `curl-allow` namespace.
When we migrated to a new trust domain above, i.e. `new-td`, the identity of this `curl` application is now `new-td/ns/curl-allow/sa/curl`,
which is not the same as `old-td/ns/curl-allow/sa/curl`. Therefore, requests from the `curl` application in the `curl-allow` namespace
to `httpbin`, which were allowed before, are now denied. Prior to Istio 1.4, the only way to make this work was to change the authorization
policy manually. In Istio 1.4, we introduced an easier way, as shown below.
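
The easier way is the `trustDomainAliases` mesh configuration field. The `td-installation.yaml` overlay used below is elided from this diff; a minimal sketch of its likely shape — with assumed values — keeps the old identities valid while migrating:

{{< text bash >}}
$ cat <<EOF > td-installation.yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  meshConfig:
    trustDomain: new-td
    # Certificates minted under these aliases remain trusted.
    trustDomainAliases:
      - old-td
EOF
$ istioctl install --set profile=demo -f td-installation.yaml -y
{{< /text >}}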

@@ -147,24 +147,24 @@ Before you begin this task, do the following:

1. Without changing the authorization policy, verify that requests to `httpbin` from:

* `sleep` in the `default` namespace are denied.
* `curl` in the `default` namespace are denied.

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
403
{{< /text >}}

* `sleep` in the `sleep-allow` namespace are allowed.
* `curl` in the `curl-allow` namespace are allowed.

{{< text bash >}}
$ kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
$ kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
200
{{< /text >}}

## Best practices

Starting from Istio 1.4, when writing an authorization policy, you should consider using the value `cluster.local` as the
trust domain part in the policy. For example, instead of `old-td/ns/sleep-allow/sa/sleep`, it should be `cluster.local/ns/sleep-allow/sa/sleep`.
trust domain part in the policy. For example, instead of `old-td/ns/curl-allow/sa/curl`, it should be `cluster.local/ns/curl-allow/sa/curl`.
Notice that in this case, `cluster.local` is not the Istio mesh trust domain (the trust domain is still `old-td`). However,
in an authorization policy, `cluster.local` is a pointer that points to the current trust domain, i.e. `old-td` (and later `new-td`), as well as its aliases.
By using `cluster.local` in the authorization policy, when you migrate to a new trust domain, Istio will detect this and treat the new trust domain
as the old trust domain, without you having to include the aliases.
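
Concretely, a policy following this best practice would look like the sketch below — an adaptation of the policy from the beginning of this task, with the selector and surrounding fields assumed, since they are elided from this diff:

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: service-httpbin.default.svc.cluster.local
  namespace: default
spec:
  # Assumed selector; the original policy's selector is not shown in this diff.
  selector:
    matchLabels:
      app: httpbin
  action: ALLOW
  rules:
  - from:
    - source:
        principals:
        # cluster.local resolves to the current trust domain and its aliases.
        - cluster.local/ns/curl-allow/sa/curl
    to:
    - operation:
        methods: ["GET"]
EOF
{{< /text >}}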

@@ -175,8 +175,8 @@ as the old trust domain without you having to include the aliases.

{{< text bash >}}
$ kubectl delete authorizationpolicy service-httpbin.default.svc.cluster.local
$ kubectl delete deploy httpbin; kubectl delete service httpbin; kubectl delete serviceaccount httpbin
$ kubectl delete deploy sleep; kubectl delete service sleep; kubectl delete serviceaccount sleep
$ kubectl delete deploy curl; kubectl delete service curl; kubectl delete serviceaccount curl
$ istioctl uninstall --purge -y
$ kubectl delete namespace sleep-allow istio-system
$ kubectl delete namespace curl-allow istio-system
$ rm ./td-installation.yaml
{{< /text >}}

@@ -27,10 +27,10 @@ istioctl install --set profile=demo --set meshConfig.trustDomain=old-td

snip_before_you_begin_2() {
kubectl label namespace default istio-injection=enabled
kubectl apply -f samples/httpbin/httpbin.yaml
kubectl apply -f samples/sleep/sleep.yaml
kubectl create namespace sleep-allow
kubectl label namespace sleep-allow istio-injection=enabled
kubectl apply -f samples/sleep/sleep.yaml -n sleep-allow
kubectl apply -f samples/curl/curl.yaml
kubectl create namespace curl-allow
kubectl label namespace curl-allow istio-injection=enabled
kubectl apply -f samples/curl/curl.yaml -n curl-allow
}

snip_before_you_begin_3() {

@@ -45,7 +45,7 @@ spec:

- from:
- source:
principals:
- old-td/ns/sleep-allow/sa/sleep
- old-td/ns/curl-allow/sa/curl
to:
- operation:
methods:

@@ -58,7 +58,7 @@ EOF

}

snip_before_you_begin_4() {
kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_before_you_begin_4_out <<\ENDSNIP

@@ -66,7 +66,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}

ENDSNIP

snip_before_you_begin_5() {
kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_before_you_begin_5_out <<\ENDSNIP

@@ -86,11 +86,11 @@ kubectl delete pod --all

}

snip_migrate_trust_domain_without_trust_domain_aliases_4() {
kubectl delete pod --all -n sleep-allow
kubectl delete pod --all -n curl-allow
}

snip_migrate_trust_domain_without_trust_domain_aliases_5() {
kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_migrate_trust_domain_without_trust_domain_aliases_5_out <<\ENDSNIP

@@ -98,7 +98,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}

ENDSNIP

snip_migrate_trust_domain_without_trust_domain_aliases_6() {
kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_migrate_trust_domain_without_trust_domain_aliases_6_out <<\ENDSNIP

@@ -119,7 +119,7 @@ istioctl install --set profile=demo -f td-installation.yaml -y

}

snip_migrate_trust_domain_with_trust_domain_aliases_2() {
kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_migrate_trust_domain_with_trust_domain_aliases_2_out <<\ENDSNIP

@@ -127,7 +127,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}

ENDSNIP

snip_migrate_trust_domain_with_trust_domain_aliases_3() {
kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}

! IFS=$'\n' read -r -d '' snip_migrate_trust_domain_with_trust_domain_aliases_3_out <<\ENDSNIP

@@ -137,8 +137,8 @@ ENDSNIP

snip_clean_up_1() {
kubectl delete authorizationpolicy service-httpbin.default.svc.cluster.local
kubectl delete deploy httpbin; kubectl delete service httpbin; kubectl delete serviceaccount httpbin
kubectl delete deploy sleep; kubectl delete service sleep; kubectl delete serviceaccount sleep
kubectl delete deploy curl; kubectl delete service curl; kubectl delete serviceaccount curl
istioctl uninstall --purge -y
kubectl delete namespace sleep-allow istio-system
kubectl delete namespace curl-allow istio-system
rm ./td-installation.yaml
}

@@ -33,9 +33,9 @@ _wait_for_deployment istio-system istiod

snip_before_you_begin_2

_wait_for_deployment default sleep
_wait_for_deployment default curl
_wait_for_deployment default httpbin
_wait_for_deployment sleep-allow sleep
_wait_for_deployment curl-allow curl

snip_before_you_begin_3

@@ -246,30 +246,30 @@ $ export BARCA=$(kubectl get clusterissuers bar -o jsonpath='{.spec.ca.secretNam

$ kubectl apply -f ./proxyconfig-foo.yaml
{{< /text >}}

1. Deploy the `httpbin` and `sleep` sample applications in the `foo` and `bar` namespaces.
1. Deploy the `httpbin` and `curl` sample applications in the `foo` and `bar` namespaces.

{{< text bash >}}
$ kubectl label ns foo istio-injection=enabled
$ kubectl label ns bar istio-injection=enabled
$ kubectl apply -f samples/httpbin/httpbin.yaml -n foo
$ kubectl apply -f samples/sleep/sleep.yaml -n foo
$ kubectl apply -f samples/curl/curl.yaml -n foo
$ kubectl apply -f samples/httpbin/httpbin.yaml -n bar
{{< /text >}}

## Verify the network connectivity between `httpbin` and `sleep` within the same namespace
## Verify the network connectivity between `httpbin` and `curl` within the same namespace

When the workloads are deployed, they send CSR requests with the related signer info. Istiod forwards the CSR requests to the custom CA for signing. The custom CA uses the correct cluster issuer to sign the certs: workloads in the `foo` namespace use the `foo` cluster issuer, while workloads in the `bar` namespace use the `bar` cluster issuer. To verify that the certs have indeed been signed by the correct cluster issuers, check that workloads in the same namespace can communicate, while workloads in different namespaces cannot.
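
If you want to inspect the signer directly, the chain a workload presents can be dumped with `openssl`. This is a sketch borrowing the `openssl s_client` pattern used elsewhere in these docs, not a step from the original task, and it assumes `openssl` is also available on your local machine:

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
-c istio-proxy -n foo -- \
openssl s_client -showcerts -connect httpbin.foo:8000 2>/dev/null \
| openssl x509 -noout -issuer
{{< /text >}}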

1. Set the `SLEEP_POD_FOO` environment variable to the name of the `sleep` pod.
1. Set the `CURL_POD_FOO` environment variable to the name of the `curl` pod.

{{< text bash >}}
$ export SLEEP_POD_FOO=$(kubectl get pod -n foo -l app=sleep -o jsonpath={.items..metadata.name})
$ export CURL_POD_FOO=$(kubectl get pod -n foo -l app=curl -o jsonpath={.items..metadata.name})
{{< /text >}}

1. Check network connectivity between service `sleep` and `httpbin` in the `foo` namespace.
1. Check network connectivity between service `curl` and `httpbin` in the `foo` namespace.

{{< text bash >}}
$ kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.foo:8000/html
$ kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.foo:8000/html
<!DOCTYPE html>
<html>
<head>

@@ -285,10 +285,10 @@ When the workloads are deployed, they send CSR requests with related signer info

</body>
{{< /text >}}

1. Check network connectivity between service `sleep` in the `foo` namespace and `httpbin` in the `bar` namespace.
1. Check network connectivity between service `curl` in the `foo` namespace and `httpbin` in the `bar` namespace.

{{< text bash >}}
$ kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.bar:8000/html
$ kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.bar:8000/html
upstream connect error or disconnect/reset before headers. reset reason: connection failure, transport failure reason: TLS error: 268435581:SSL routines:OPENSSL_internal:CERTIFICATE_VERIFY_FAILED
{{< /text >}}

@@ -226,19 +226,19 @@ snip_deploy_istio_with_default_certsigner_info_5() {

kubectl label ns foo istio-injection=enabled
kubectl label ns bar istio-injection=enabled
kubectl apply -f samples/httpbin/httpbin.yaml -n foo
kubectl apply -f samples/sleep/sleep.yaml -n foo
kubectl apply -f samples/curl/curl.yaml -n foo
kubectl apply -f samples/httpbin/httpbin.yaml -n bar
}

snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_1() {
export SLEEP_POD_FOO=$(kubectl get pod -n foo -l app=sleep -o jsonpath={.items..metadata.name})
snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_1() {
export CURL_POD_FOO=$(kubectl get pod -n foo -l app=curl -o jsonpath={.items..metadata.name})
}

snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_2() {
kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.foo:8000/html
snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_2() {
kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.foo:8000/html
}

! IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_2_out <<\ENDSNIP
! IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_2_out <<\ENDSNIP
<!DOCTYPE html>
<html>
<head>

@@ -254,11 +254,11 @@ kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.foo:8000/ht

</body>
ENDSNIP

snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_3() {
kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.bar:8000/html
snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_3() {
kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.bar:8000/html
}

! IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_3_out <<\ENDSNIP
! IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_3_out <<\ENDSNIP
upstream connect error or disconnect/reset before headers. reset reason: connection failure, transport failure reason: TLS error: 268435581:SSL routines:OPENSSL_internal:CERTIFICATE_VERIFY_FAILED
ENDSNIP

@@ -36,14 +36,14 @@ snip_deploy_istio_with_default_certsigner_info_4

# deploy test application
snip_deploy_istio_with_default_certsigner_info_5
_wait_for_deployment foo sleep
_wait_for_deployment foo curl
_wait_for_deployment foo httpbin
_wait_for_deployment bar httpbin

snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_1
_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_2 "Herman Melville - Moby-Dick"
_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_3 "upstream connect error"
snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_1
_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_2 "Herman Melville - Moby-Dick"
_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_3 "upstream connect error"

# @cleanup

@@ -112,12 +112,12 @@ Support for SHA-1 signatures is [disabled by default in Go 1.18](https://github.

## Deploying example services

1. Deploy the `httpbin` and `sleep` sample services.
1. Deploy the `httpbin` and `curl` sample services.

{{< text bash >}}
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
$ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
$ kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
{{< /text >}}

1. Deploy a policy for workloads in the `foo` namespace to only accept mutual TLS traffic.

@@ -145,7 +145,7 @@ the `verify error:num=19:self signed certificate in certificate chain` error ret

openssl command is expected.

{{< text bash >}}
$ sleep 20; kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt
$ sleep 20; kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt
{{< /text >}}

1. Parse the certificates on the certificate chain.
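
The parsing commands themselves are elided from this diff. A generic way to split the captured chain into individual certificates and inspect each one — a sketch, not necessarily the exact commands used by the task — looks like this:

{{< text bash >}}
$ sed -n '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p' httpbin-proxy-cert.txt > certs.pem
$ awk '/-----BEGIN CERTIFICATE-----/{n++} {print > ("proxy-cert-" n ".pem")}' certs.pem
$ openssl x509 -in proxy-cert-1.pem -noout -subject -issuer
{{< /text >}}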

@@ -200,10 +200,10 @@ openssl command is expected.

$ kubectl delete peerauthentication -n foo default
{{< /text >}}

* Remove the sample applications `sleep` and `httpbin`:
* Remove the sample applications `curl` and `httpbin`:

{{< text bash >}}
$ kubectl delete -f samples/sleep/sleep.yaml -n foo
$ kubectl delete -f samples/curl/curl.yaml -n foo
$ kubectl delete -f samples/httpbin/httpbin.yaml -n foo
{{< /text >}}

@@ -53,7 +53,7 @@ istioctl install --set profile=demo

snip_deploying_example_services_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
}

snip_deploying_example_services_2() {

@@ -69,7 +69,7 @@ EOF

}

snip_verifying_the_certificates_1() {
sleep 20; kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt
sleep 20; kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt
}

snip_verifying_the_certificates_2() {

@@ -118,7 +118,7 @@ kubectl delete peerauthentication -n foo default

}

snip_cleanup_4() {
kubectl delete -f samples/sleep/sleep.yaml -n foo
kubectl delete -f samples/curl/curl.yaml -n foo
kubectl delete -f samples/httpbin/httpbin.yaml -n foo
}

@@ -30,12 +30,12 @@ snip_plug_in_certificates_and_key_into_the_cluster_5

echo y | snip_deploy_istio_1
_wait_for_deployment istio-system istiod

# create_ns_foo_with_httpbin_sleep
# create_ns_foo_with_httpbin_curl
snip_deploying_example_services_1
snip_deploying_example_services_2

_wait_for_deployment foo httpbin
_wait_for_deployment foo sleep
_wait_for_deployment foo curl

# Disable errors, since the next command is expected to return an error.
set +e

Some files were not shown because too many files have changed in this diff.