diff --git a/content/en/boilerplates/before-you-begin-egress.md b/content/en/boilerplates/before-you-begin-egress.md index 044e8c4a9bd99..61a07354997e2 100644 --- a/content/en/boilerplates/before-you-begin-egress.md +++ b/content/en/boilerplates/before-you-begin-egress.md @@ -9,19 +9,19 @@ [configuration profile](/docs/setup/additional-setup/config-profiles/). {{< /tip >}} -* Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests. +* Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests. If you have [automatic sidecar injection](/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection) enabled, run the following command to deploy the sample app: {{< text bash >}} - $ kubectl apply -f @samples/sleep/sleep.yaml@ + $ kubectl apply -f @samples/curl/curl.yaml@ {{< /text >}} - Otherwise, manually inject the sidecar before deploying the `sleep` application with the following command: + Otherwise, manually inject the sidecar before deploying the `curl` application with the following command: {{< text bash >}} - $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) + $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) {{< /text >}} {{< tip >}} @@ -31,5 +31,5 @@ * Set the `SOURCE_POD` environment variable to the name of your source pod: {{< text bash >}} - $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) + $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name}) {{< /text >}} diff --git a/content/en/boilerplates/snips/before-you-begin-egress.sh b/content/en/boilerplates/snips/before-you-begin-egress.sh index 15f71f8b19f09..a30182fdadce8 100644 --- a/content/en/boilerplates/snips/before-you-begin-egress.sh +++ b/content/en/boilerplates/snips/before-you-begin-egress.sh @@ -21,13 +21,13 @@ #################################################################################################### bpsnip_before_you_begin_egress_before_you_begin_1() { -kubectl apply -f samples/sleep/sleep.yaml +kubectl apply -f samples/curl/curl.yaml } bpsnip_before_you_begin_egress_before_you_begin_2() { -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) } bpsnip_before_you_begin_egress_before_you_begin_3() { -export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) +export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name}) } diff --git a/content/en/docs/ambient/architecture/traffic-redirection/index.md b/content/en/docs/ambient/architecture/traffic-redirection/index.md index 69ce1c73d006b..096b745fd07e1 100644 --- a/content/en/docs/ambient/architecture/traffic-redirection/index.md +++ b/content/en/docs/ambient/architecture/traffic-redirection/index.md @@ -77,7 +77,7 @@ inpod_mark: 1337 Follow the steps below to confirm that the sockets on ports 15001, 15006, and 15008 are open and in the listening state. {{< text bash >}} -$ kubectl debug $(kubectl get pod -l app=sleep -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it -n ambient-demo --image nicolaka/netshoot -- ss -ntlp +$ kubectl debug $(kubectl get pod -l app=curl -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it -n ambient-demo --image nicolaka/netshoot -- ss -ntlp Defaulting debug container name to debugger-nhd4d. 
State Recv-Q Send-Q Local Address:Port Peer Address:PortProcess LISTEN 0 128 127.0.0.1:15080 0.0.0.0:* @@ -91,7 +91,7 @@ LISTEN 0 128 *:15008 *:* To view the iptables rules setup inside one of the application pods, execute this command: {{< text bash >}} -$ kubectl debug $(kubectl get pod -l app=sleep -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it --image gcr.io/istio-release/base --profile=netadmin -n ambient-demo -- iptables-save +$ kubectl debug $(kubectl get pod -l app=curl -n ambient-demo -o jsonpath='{.items[0].metadata.name}') -it --image gcr.io/istio-release/base --profile=netadmin -n ambient-demo -- iptables-save Defaulting debug container name to debugger-m44qc. # Generated by iptables-save diff --git a/content/en/docs/ambient/getting-started/cleanup/index.md b/content/en/docs/ambient/getting-started/cleanup/index.md index a2da000b4017d..da7cccbd23172 100644 --- a/content/en/docs/ambient/getting-started/cleanup/index.md +++ b/content/en/docs/ambient/getting-started/cleanup/index.md @@ -36,12 +36,12 @@ $ kubectl delete namespace istio-system ## Remove the sample application -To delete the Bookinfo sample application and the `sleep` deployment, run the following: +To delete the Bookinfo sample application and the `curl` deployment, run the following: {{< text bash >}} -$ kubectl delete -f {{< github_file >}}/samples/bookinfo/platform/kube/bookinfo.yaml -$ kubectl delete -f {{< github_file >}}/samples/bookinfo/platform/kube/bookinfo-versions.yaml -$ kubectl delete -f {{< github_file >}}/samples/sleep/sleep.yaml +$ kubectl delete -f samples/bookinfo/platform/kube/bookinfo.yaml +$ kubectl delete -f samples/bookinfo/platform/kube/bookinfo-versions.yaml +$ kubectl delete -f samples/curl/curl.yaml {{< /text >}} ## Remove the Kubernetes Gateway API CRDs diff --git a/content/en/docs/ambient/getting-started/cleanup/snips.sh b/content/en/docs/ambient/getting-started/cleanup/snips.sh index ad3c36c8f7a40..c37d45edfcb62 100644 --- a/content/en/docs/ambient/getting-started/cleanup/snips.sh +++ b/content/en/docs/ambient/getting-started/cleanup/snips.sh @@ -31,7 +31,7 @@ istioctl waypoint delete --all } snip_remove_the_sample_application_1() { -kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/platform/kube/bookinfo.yaml -kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/platform/kube/bookinfo-versions.yaml -kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/sleep/sleep.yaml +kubectl delete -f samples/bookinfo/platform/kube/bookinfo.yaml +kubectl delete -f samples/bookinfo/platform/kube/bookinfo-versions.yaml +kubectl delete -f samples/curl/curl.yaml } diff --git a/content/en/docs/ambient/getting-started/enforce-auth-policies/index.md b/content/en/docs/ambient/getting-started/enforce-auth-policies/index.md index dc2a22d310291..06c97101b69e9 100644 --- a/content/en/docs/ambient/getting-started/enforce-auth-policies/index.md +++ b/content/en/docs/ambient/getting-started/enforce-auth-policies/index.md @@ -37,16 +37,16 @@ EOF If you open the Bookinfo application in your browser (`http://localhost:8080/productpage`), you will see the product page, just as before. However, if you try to access the `productpage` service from a different service account, you should see an error. 
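The identity used by these policies comes from each workload's service account, which Istio embeds in the workload's SPIFFE ID. To confirm which service accounts are in play before testing, you can read them off the pods; a sketch, assuming the default Bookinfo and `curl` sample deployments:

{{< text syntax=bash snip_id=none >}}
$ # productpage runs under the service account created by the Bookinfo sample
$ kubectl get pod -l app=productpage -o jsonpath='{.items[0].spec.serviceAccountName}'
bookinfo-productpage
$ # the curl sample runs under its own service account
$ kubectl get pod -l app=curl -o jsonpath='{.items[0].spec.serviceAccountName}'
curl
{{< /text >}}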
-Let's try accessing Bookinfo application from a `sleep` pod: +Let's try accessing the Bookinfo application from a `curl` pod: -{{< text syntax=bash snip_id=deploy_sleep >}} -$ kubectl apply -f {{< github_file >}}/samples/sleep/sleep.yaml +{{< text syntax=bash snip_id=deploy_curl >}} +$ kubectl apply -f samples/curl/curl.yaml {{< /text >}} -Since the `sleep` pod is using a different service account, it will not have access the `productpage` service: +Since the `curl` pod is using a different service account, it will not have access to the `productpage` service: {{< text bash >}} -$ kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage" +$ kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage" command terminated with exit code 56 {{< /text >}} @@ -68,7 +68,7 @@ NAME CLASS ADDRESS PROGRAMMED AGE waypoint istio-waypoint 10.96.58.95 True 42s {{< /text >}} -Adding a [L7 authorization policy](/docs/ambient/usage/l7-features/) will explicitly allow the `sleep` service to send `GET` requests to the `productpage` service, but perform no other operations: +Adding an [L7 authorization policy](/docs/ambient/usage/l7-features/) will explicitly allow the `curl` service to send `GET` requests to the `productpage` service, but perform no other operations: {{< text syntax=bash snip_id=deploy_l7_policy >}} $ kubectl apply -f - <}} $ # This fails with an RBAC error because we're not using a GET operation -$ kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage" -X DELETE +$ kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage" -X DELETE RBAC: access denied {{< /text >}} @@ -115,8 +115,8 @@ RBAC: access denied {{< /text >}} {{< text bash >}} -$ # This works as we're explicitly allowing GET requests from the sleep pod -$ kubectl exec deploy/sleep -- curl -s http://productpage:9080/productpage | grep -o ".*" +$ # This works as we're explicitly allowing GET requests from the curl pod +$ kubectl exec deploy/curl -- curl -s http://productpage:9080/productpage | grep -o ".*" Simple Bookstore App {{< /text >}} diff --git a/content/en/docs/ambient/getting-started/enforce-auth-policies/snips.sh b/content/en/docs/ambient/getting-started/enforce-auth-policies/snips.sh index 209d1901141e2..9e525969a757f 100644 --- a/content/en/docs/ambient/getting-started/enforce-auth-policies/snips.sh +++ b/content/en/docs/ambient/getting-started/enforce-auth-policies/snips.sh @@ -40,12 +40,12 @@ spec: EOF } -snip_deploy_sleep() { -kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/sleep/sleep.yaml +snip_deploy_curl() { +kubectl apply -f samples/curl/curl.yaml } snip_enforce_layer_4_authorization_policy_3() { -kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage" +kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage" } ! IFS=$'\n' read -r -d '' snip_enforce_layer_4_authorization_policy_3_out <<\ENDSNIP @@ -87,7 +87,7 @@ spec: - from: - source: principals: - - cluster.local/ns/default/sa/sleep + - cluster.local/ns/default/sa/curl to: - operation: methods: ["GET"] @@ -96,7 +96,7 @@ EOF snip_enforce_layer_7_authorization_policy_4() { # This fails with an RBAC error because we're not using a GET operation -kubectl exec deploy/sleep -- curl -s "http://productpage:9080/productpage" -X DELETE +kubectl exec deploy/curl -- curl -s "http://productpage:9080/productpage" -X DELETE } !
IFS=$'\n' read -r -d '' snip_enforce_layer_7_authorization_policy_4_out <<\ENDSNIP @@ -113,8 +113,8 @@ RBAC: access denied ENDSNIP snip_enforce_layer_7_authorization_policy_6() { -# This works as we're explicitly allowing GET requests from the sleep pod -kubectl exec deploy/sleep -- curl -s http://productpage:9080/productpage | grep -o ".*" +# This works as we're explicitly allowing GET requests from the curl pod +kubectl exec deploy/curl -- curl -s http://productpage:9080/productpage | grep -o ".*" } ! IFS=$'\n' read -r -d '' snip_enforce_layer_7_authorization_policy_6_out <<\ENDSNIP diff --git a/content/en/docs/ambient/getting-started/manage-traffic/index.md b/content/en/docs/ambient/getting-started/manage-traffic/index.md index 9eb0a58e846ee..bec7564f9a099 100644 --- a/content/en/docs/ambient/getting-started/manage-traffic/index.md +++ b/content/en/docs/ambient/getting-started/manage-traffic/index.md @@ -40,7 +40,7 @@ EOF To confirm that roughly 10% of the traffic from 100 requests goes to `reviews-v2`, you can run the following command: {{< text syntax=bash snip_id=test_traffic_split >}} -$ kubectl exec deploy/sleep -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done" +$ kubectl exec deploy/curl -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done" {{< /text >}} You'll notice the majority of requests go to `reviews-v1`. You can confirm the same if you open the Bookinfo application in your browser and refresh the page multiple times. Notice that requests from `reviews-v1` don't have any stars, while requests from `reviews-v2` have black stars. diff --git a/content/en/docs/ambient/getting-started/manage-traffic/snips.sh b/content/en/docs/ambient/getting-started/manage-traffic/snips.sh index a5d6fb976fb39..107adbe4a28ea 100644 --- a/content/en/docs/ambient/getting-started/manage-traffic/snips.sh +++ b/content/en/docs/ambient/getting-started/manage-traffic/snips.sh @@ -44,5 +44,5 @@ EOF } snip_test_traffic_split() { -kubectl exec deploy/sleep -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done" +kubectl exec deploy/curl -- sh -c "for i in \$(seq 1 100); do curl -s http://productpage:9080/productpage | grep reviews-v.-; done" } diff --git a/content/en/docs/ambient/getting-started/test.sh b/content/en/docs/ambient/getting-started/test.sh index c8444be337ed4..043d8f8819bce 100644 --- a/content/en/docs/ambient/getting-started/test.sh +++ b/content/en/docs/ambient/getting-started/test.sh @@ -39,8 +39,8 @@ _verify_like snip_deploy_and_configure_the_ingress_gateway_3 "$snip_deploy_and_c _verify_contains snip_add_bookinfo_to_the_mesh_1 "$snip_add_bookinfo_to_the_mesh_1_out" snip_deploy_l4_policy -snip_deploy_sleep -_wait_for_deployment default sleep +snip_deploy_curl +_wait_for_deployment default curl _verify_contains snip_enforce_layer_4_authorization_policy_3 "$snip_enforce_layer_4_authorization_policy_3_out" _verify_contains snip_deploy_waypoint "$snip_deploy_waypoint_out" diff --git a/content/en/docs/ambient/usage/extend-waypoint-wasm/index.md b/content/en/docs/ambient/usage/extend-waypoint-wasm/index.md index 363fa0d33b50e..0c97b4ec27a3b 100644 --- a/content/en/docs/ambient/usage/extend-waypoint-wasm/index.md +++ b/content/en/docs/ambient/usage/extend-waypoint-wasm/index.md @@ -18,10 +18,10 @@ One of the key advantages of Wasm extensibility is that extensions can be loaded 1.
Set up Istio by following the instructions in the [ambient mode Getting Started guide](/docs/ambient/getting-started). 1. Deploy the [Bookinfo sample application](/docs/ambient/getting-started/deploy-sample-app). 1. [Add the default namespace to the ambient mesh](/docs/ambient/getting-started/secure-and-visualize). -1. Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests. +1. Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests. {{< text syntax=bash >}} - $ kubectl apply -f @samples/sleep/sleep.yaml@ + $ kubectl apply -f @samples/curl/curl.yaml@ {{< /text >}} ## At a gateway @@ -73,14 +73,14 @@ The Istio agent will interpret the WasmPlugin configuration, download remote Was 1. Test `/productpage` without credentials: {{< text syntax=bash snip_id=test_gateway_productpage_without_credentials >}} - $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" + $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" 401 {{< /text >}} 1. Test `/productpage` with the credentials configured in the WasmPlugin resource: {{< text syntax=bash snip_id=test_gateway_productpage_with_credentials >}} - $ kubectl exec deploy/sleep -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" + $ kubectl exec deploy/curl -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" 200 {{< /text >}} @@ -99,7 +99,7 @@ $ istioctl waypoint apply --enroll-namespace --wait Verify traffic reaches the service: {{< text syntax=bash snip_id=verify_traffic >}} -$ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage +$ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage 200 {{< /text >}} @@ -153,14 +153,14 @@ basic-auth-at-waypoint 14m 1. Test internal `/productpage` without credentials: {{< text syntax=bash snip_id=test_waypoint_productpage_without_credentials >}} - $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage + $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage 401 {{< /text >}} 1. Test internal `/productpage` with credentials: {{< text syntax=bash snip_id=test_waypoint_productpage_with_credentials >}} - $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage + $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage 200 {{< /text >}} @@ -200,21 +200,21 @@ EOF 1. 
Test the internal `/productpage` with the credentials configured at the generic `waypoint` proxy: {{< text syntax=bash snip_id=test_waypoint_service_productpage_with_credentials >}} - $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage + $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage 200 {{< /text >}} 1. Test the internal `/reviews` with credentials configured at the specific `reviews-svc-waypoint` proxy: {{< text syntax=bash snip_id=test_waypoint_service_reviews_with_credentials >}} - $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1 + $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1 200 {{< /text >}} 1. Test internal `/reviews` without credentials: {{< text syntax=bash snip_id=test_waypoint_service_reviews_without_credentials >}} - $ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1 + $ kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1 401 {{< /text >}} diff --git a/content/en/docs/ambient/usage/extend-waypoint-wasm/snips.sh b/content/en/docs/ambient/usage/extend-waypoint-wasm/snips.sh index 490e8cd174f1d..8faea3f9238a3 100644 --- a/content/en/docs/ambient/usage/extend-waypoint-wasm/snips.sh +++ b/content/en/docs/ambient/usage/extend-waypoint-wasm/snips.sh @@ -21,7 +21,7 @@ #################################################################################################### snip_before_you_begin_1() { -kubectl apply -f samples/sleep/sleep.yaml +kubectl apply -f samples/curl/curl.yaml } snip_get_gateway() { @@ -59,7 +59,7 @@ EOF } snip_test_gateway_productpage_without_credentials() { -kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" +kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" } ! IFS=$'\n' read -r -d '' snip_test_gateway_productpage_without_credentials_out <<\ENDSNIP @@ -67,7 +67,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null "http://book ENDSNIP snip_test_gateway_productpage_with_credentials() { -kubectl exec deploy/sleep -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" +kubectl exec deploy/curl -- curl -s -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" -w "%{http_code}" "http://bookinfo-gateway-istio.default.svc.cluster.local/productpage" } ! IFS=$'\n' read -r -d '' snip_test_gateway_productpage_with_credentials_out <<\ENDSNIP @@ -79,7 +79,7 @@ istioctl waypoint apply --enroll-namespace --wait } snip_verify_traffic() { -kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage +kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage } ! 
IFS=$'\n' read -r -d '' snip_verify_traffic_out <<\ENDSNIP @@ -132,7 +132,7 @@ basic-auth-at-waypoint 14m ENDSNIP snip_test_waypoint_productpage_without_credentials() { -kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage +kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://productpage:9080/productpage } ! IFS=$'\n' read -r -d '' snip_test_waypoint_productpage_without_credentials_out <<\ENDSNIP @@ -140,7 +140,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://produ ENDSNIP snip_test_waypoint_productpage_with_credentials() { -kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage +kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage } ! IFS=$'\n' read -r -d '' snip_test_waypoint_productpage_with_credentials_out <<\ENDSNIP @@ -173,7 +173,7 @@ EOF } snip_test_waypoint_service_productpage_with_credentials() { -kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage +kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic YWRtaW4zOmFkbWluMw==" http://productpage:9080/productpage } ! IFS=$'\n' read -r -d '' snip_test_waypoint_service_productpage_with_credentials_out <<\ENDSNIP @@ -181,7 +181,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authoriz ENDSNIP snip_test_waypoint_service_reviews_with_credentials() { -kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1 +kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null -H "Authorization: Basic MXQtaW4zOmFkbWluMw==" http://reviews:9080/reviews/1 } ! IFS=$'\n' read -r -d '' snip_test_waypoint_service_reviews_with_credentials_out <<\ENDSNIP @@ -189,7 +189,7 @@ kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null -H "Authoriz ENDSNIP snip_test_waypoint_service_reviews_without_credentials() { -kubectl exec deploy/sleep -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1 +kubectl exec deploy/curl -- curl -s -w "%{http_code}" -o /dev/null http://reviews:9080/reviews/1 } ! 
IFS=$'\n' read -r -d '' snip_test_waypoint_service_reviews_without_credentials_out <<\ENDSNIP diff --git a/content/en/docs/ambient/usage/extend-waypoint-wasm/test.sh b/content/en/docs/ambient/usage/extend-waypoint-wasm/test.sh index 664a23bf119c1..d378a1913ce62 100644 --- a/content/en/docs/ambient/usage/extend-waypoint-wasm/test.sh +++ b/content/en/docs/ambient/usage/extend-waypoint-wasm/test.sh @@ -25,7 +25,7 @@ install_gateway_api_crds # deploy test application source "tests/util/samples.sh" startup_bookinfo_sample -startup_sleep_sample +startup_curl_sample # snip_annotate_bookinfo_gateway kubectl annotate gateway bookinfo-gateway networking.istio.io/service-type=ClusterIP --namespace=default @@ -78,6 +78,6 @@ kubectl label namespace default istio.io/use-waypoint- istioctl x waypoint delete --all -cleanup_sleep_sample +cleanup_curl_sample cleanup_bookinfo_sample remove_gateway_api_crds diff --git a/content/en/docs/ambient/usage/l4-policy/index.md b/content/en/docs/ambient/usage/l4-policy/index.md index 5d90fdb9d2f08..c690715311e02 100644 --- a/content/en/docs/ambient/usage/l4-policy/index.md +++ b/content/en/docs/ambient/usage/l4-policy/index.md @@ -20,7 +20,7 @@ A basic L4 authorization policy looks like this: apiVersion: security.istio.io/v1 kind: AuthorizationPolicy metadata: - name: allow-sleep-to-httpbin + name: allow-curl-to-httpbin spec: selector: matchLabels: @@ -30,12 +30,12 @@ spec: - from: - source: principals: - - cluster.local/ns/ambient-demo/sa/sleep + - cluster.local/ns/ambient-demo/sa/curl {{< /text >}} This policy can be used in both {{< gloss "sidecar" >}}sidecar mode{{< /gloss >}} and ambient mode. -The L4 (TCP) features of the Istio `AuthorizationPolicy` API have the same functional behavior in ambient mode as in sidecar mode. When there is no authorization policy provisioned, the default action is `ALLOW`. Once a policy is provisioned, pods targeted by the policy only permit traffic which is explicitly allowed. In the above example, pods with the label `app: httpbin` only permit traffic from sources with an identity principal of `cluster.local/ns/ambient-demo/sa/sleep`. Traffic from all other sources will be denied. +The L4 (TCP) features of the Istio `AuthorizationPolicy` API have the same functional behavior in ambient mode as in sidecar mode. When there is no authorization policy provisioned, the default action is `ALLOW`. Once a policy is provisioned, pods targeted by the policy only permit traffic which is explicitly allowed. In the above example, pods with the label `app: httpbin` only permit traffic from sources with an identity principal of `cluster.local/ns/ambient-demo/sa/curl`. Traffic from all other sources will be denied. 
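To see the policy take effect, you can compare a request from the allowed identity with one from any other workload. A sketch, assuming `httpbin` and the `curl` sample are deployed in the `ambient-demo` namespace used above, plus a hypothetical `other-client` deployment that runs under a different service account:

{{< text syntax=bash snip_id=none >}}
$ # Allowed: curl runs as the service account listed in the policy's principals
$ kubectl exec deploy/curl -n ambient-demo -- curl -s -o /dev/null -w "%{http_code}\n" http://httpbin:8000/get
200
$ # Denied: a client with any other identity has its connection closed at L4
$ kubectl exec deploy/other-client -n ambient-demo -- curl -s -o /dev/null -w "%{http_code}\n" http://httpbin:8000/get
command terminated with exit code 56
{{< /text >}}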
## Targeting policies @@ -71,7 +71,7 @@ This example adds a check for the HTTP GET method: apiVersion: security.istio.io/v1 kind: AuthorizationPolicy metadata: - name: allow-sleep-to-httpbin + name: allow-curl-to-httpbin spec: selector: matchLabels: @@ -81,7 +81,7 @@ spec: - from: - source: principals: - - cluster.local/ns/ambient-demo/sa/sleep + - cluster.local/ns/ambient-demo/sa/curl to: - operation: methods: ["GET"] diff --git a/content/en/docs/ambient/usage/troubleshoot-waypoint/index.md b/content/en/docs/ambient/usage/troubleshoot-waypoint/index.md index 029345107afe4..cd536ac312c68 100644 --- a/content/en/docs/ambient/usage/troubleshoot-waypoint/index.md +++ b/content/en/docs/ambient/usage/troubleshoot-waypoint/index.md @@ -10,17 +10,17 @@ This guide describes what to do if you have enrolled a namespace, service or wor ## Problems with traffic routing or security policy -To send some requests to the `reviews` service via the `productpage` service from the `sleep` pod: +To send some requests to the `reviews` service via the `productpage` service from the `curl` pod: {{< text bash >}} -$ kubectl exec deploy/sleep -- curl -s http://productpage:9080/productpage +$ kubectl exec deploy/curl -- curl -s http://productpage:9080/productpage {{< /text >}} -To send some requests to the `reviews` `v2` pod from the `sleep` pod: +To send some requests to the `reviews` `v2` pod from the `curl` pod: {{< text bash >}} $ export REVIEWS_V2_POD_IP=$(kubectl get pod -l version=v2,app=reviews -o jsonpath='{.items[0].status.podIP}') -$ kubectl exec deploy/sleep -- curl -s http://$REVIEWS_V2_POD_IP:9080/reviews/1 +$ kubectl exec deploy/curl -- curl -s http://$REVIEWS_V2_POD_IP:9080/reviews/1 {{< /text >}} Requests to the `reviews` service should be enforced by the `reviews-svc-waypoint` for any L7 policies. 
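If it is unclear whether requests are actually passing through the waypoint, its access log is a quick check; a sketch, assuming the waypoint deployment is named `reviews-svc-waypoint` in the `default` namespace:

{{< text syntax=bash snip_id=none >}}
$ # Requests enforced by the waypoint should show up in its access log
$ kubectl logs deploy/reviews-svc-waypoint -n default | tail
{{< /text >}}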
@@ -46,7 +46,6 @@ Requests to the `reviews` `v2` pod should be enforced by the `reviews-v2-pod-way default bookinfo-gateway-istio 10.43.164.194 waypoint default details 10.43.160.119 waypoint default kubernetes 10.43.0.1 waypoint - default notsleep 10.43.156.147 waypoint default productpage 10.43.172.254 waypoint default ratings 10.43.71.236 waypoint default reviews 10.43.162.105 reviews-svc-waypoint @@ -60,7 +59,6 @@ Requests to the `reviews` `v2` pod should be enforced by the `reviews-v2-pod-way NAMESPACE POD NAME IP NODE WAYPOINT PROTOCOL default bookinfo-gateway-istio-7c57fc4647-wjqvm 10.42.2.8 k3d-k3s-default-server-0 None TCP default details-v1-698d88b-wwsnv 10.42.2.4 k3d-k3s-default-server-0 None HBONE - default notsleep-685df55c6c-nwhs6 10.42.0.9 k3d-k3s-default-agent-0 None HBONE default productpage-v1-675fc69cf-fp65z 10.42.2.6 k3d-k3s-default-server-0 None HBONE default ratings-v1-6484c4d9bb-crjtt 10.42.0.4 k3d-k3s-default-agent-0 None HBONE default reviews-svc-waypoint-c49f9f569-b492t 10.42.2.10 k3d-k3s-default-server-0 None TCP diff --git a/content/en/docs/ambient/usage/troubleshoot-ztunnel/index.md b/content/en/docs/ambient/usage/troubleshoot-ztunnel/index.md index b56c43ba4053c..4b1c1c68330e3 100644 --- a/content/en/docs/ambient/usage/troubleshoot-ztunnel/index.md +++ b/content/en/docs/ambient/usage/troubleshoot-ztunnel/index.md @@ -21,13 +21,12 @@ $ istioctl ztunnel-config workloads NAMESPACE POD NAME IP NODE WAYPOINT PROTOCOL default bookinfo-gateway-istio-59dd7c96db-q9k6v 10.244.1.11 ambient-worker None TCP default details-v1-cf74bb974-5sqkp 10.244.1.5 ambient-worker None HBONE -default notsleep-5c785bc478-zpg7j 10.244.2.7 ambient-worker2 None HBONE default productpage-v1-87d54dd59-fn6vw 10.244.1.10 ambient-worker None HBONE default ratings-v1-7c4bbf97db-zvkdw 10.244.1.6 ambient-worker None HBONE default reviews-v1-5fd6d4f8f8-knbht 10.244.1.16 ambient-worker None HBONE default reviews-v2-6f9b55c5db-c94m2 10.244.1.17 ambient-worker None HBONE default reviews-v3-7d99fd7978-7rgtd 10.244.1.18 ambient-worker None HBONE -default sleep-7656cf8794-r7zb9 10.244.1.12 ambient-worker None HBONE +default curl-7656cf8794-r7zb9 10.244.1.12 ambient-worker None HBONE istio-system istiod-7ff4959459-qcpvp 10.244.2.5 ambient-worker2 None TCP istio-system ztunnel-6hvcw 10.244.1.4 ambient-worker None TCP istio-system ztunnel-mf476 10.244.2.6 ambient-worker2 None TCP @@ -51,8 +50,8 @@ spiffe://cluster.local/ns/default/sa/bookinfo-ratings Leaf Available spiffe://cluster.local/ns/default/sa/bookinfo-ratings Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z spiffe://cluster.local/ns/default/sa/bookinfo-reviews Leaf Available true 285697fb2cf806852d3293298e300c86 2024-05-05T09:17:47Z 2024-05-04T09:15:47Z spiffe://cluster.local/ns/default/sa/bookinfo-reviews Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z -spiffe://cluster.local/ns/default/sa/sleep Leaf Available true fa33bbb783553a1704866842586e4c0b 2024-05-05T09:25:49Z 2024-05-04T09:23:49Z -spiffe://cluster.local/ns/default/sa/sleep Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z +spiffe://cluster.local/ns/default/sa/curl Leaf Available true fa33bbb783553a1704866842586e4c0b 2024-05-05T09:25:49Z 2024-05-04T09:23:49Z +spiffe://cluster.local/ns/default/sa/curl Root Available true bad086c516cce777645363cb8d731277 2034-04-24T03:31:05Z 2024-04-26T03:31:05Z {{< /text >}} Using these commands, you can check that ztunnel 
proxies are configured with all the expected workloads and TLS certificates. Additionally, any information missing here is a useful starting point for troubleshooting networking errors. @@ -83,7 +82,7 @@ $ kubectl debug -it $ISTIOD -n istio-system --image=curlimages/curl -- curl loca ztunnel's traffic logs can be queried using the standard Kubernetes log facilities. {{< text bash >}} -$ kubectl -n default exec deploy/sleep -- sh -c 'for i in $(seq 1 10); do curl -s -I http://productpage:9080/; done' +$ kubectl -n default exec deploy/curl -- sh -c 'for i in $(seq 1 10); do curl -s -I http://productpage:9080/; done' HTTP/1.1 200 OK Server: Werkzeug/3.0.1 Python/3.12.1 --snip-- @@ -93,8 +92,8 @@ The response displayed confirms the client pod receives responses from the servi {{< text bash >}} $ kubectl -n istio-system logs -l app=ztunnel | grep -E "inbound|outbound" -2024-05-04T09:59:05.028709Z info access connection complete src.addr=10.244.1.12:60059 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.10:9080 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="inbound" bytes_sent=175 bytes_recv=80 duration="1ms" -2024-05-04T09:59:05.028771Z info access connection complete src.addr=10.244.1.12:58508 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.10:15008 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="outbound" bytes_sent=80 bytes_recv=175 duration="1ms" +2024-05-04T09:59:05.028709Z info access connection complete src.addr=10.244.1.12:60059 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.10:9080 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="inbound" bytes_sent=175 bytes_recv=80 duration="1ms" +2024-05-04T09:59:05.028771Z info access connection complete src.addr=10.244.1.12:58508 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.10:15008 dst.hbone_addr="10.244.1.10:9080" dst.service="productpage.default.svc.cluster.local" dst.workload="productpage-v1-87d54dd59-fn6vw" dst.namespace="productpage" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-productpage" direction="outbound" bytes_sent=80 bytes_recv=175 duration="1ms" --snip-- {{< /text >}} @@ -115,16 +114,16 @@ If the destination is a service with multiple instances or pods and there is no By calling a service with multiple backends, we can validate that client traffic is balanced across the service replicas.
{{< text bash >}} -$ kubectl -n default exec deploy/sleep -- sh -c 'for i in $(seq 1 10); do curl -s -I http://reviews:9080/; done' +$ kubectl -n default exec deploy/curl -- sh -c 'for i in $(seq 1 10); do curl -s -I http://reviews:9080/; done' {{< /text >}} {{< text bash >}} $ kubectl -n istio-system logs -l app=ztunnel | grep -E "outbound" --snip-- -2024-05-04T10:11:04.964851Z info access connection complete src.addr=10.244.1.12:35520 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" -2024-05-04T10:11:04.969578Z info access connection complete src.addr=10.244.1.12:35526 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" -2024-05-04T10:11:04.974720Z info access connection complete src.addr=10.244.1.12:35536 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.7:15008 dst.hbone_addr="10.244.1.7:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v1-5fd6d4f8f8-26j92" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" -2024-05-04T10:11:04.979462Z info access connection complete src.addr=10.244.1.12:35552 src.workload="sleep-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.244.1.8:15008 dst.hbone_addr="10.244.1.8:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v2-6f9b55c5db-c2dtw" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" +2024-05-04T10:11:04.964851Z info access connection complete src.addr=10.244.1.12:35520 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" +2024-05-04T10:11:04.969578Z info access connection complete src.addr=10.244.1.12:35526 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.9:15008 dst.hbone_addr="10.244.1.9:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v3-7d99fd7978-zznnq" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" +2024-05-04T10:11:04.974720Z info access connection complete src.addr=10.244.1.12:35536 
src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.7:15008 dst.hbone_addr="10.244.1.7:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v1-5fd6d4f8f8-26j92" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" +2024-05-04T10:11:04.979462Z info access connection complete src.addr=10.244.1.12:35552 src.workload="curl-7656cf8794-r7zb9" src.namespace="default" src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.244.1.8:15008 dst.hbone_addr="10.244.1.8:9080" dst.service="reviews.default.svc.cluster.local" dst.workload="reviews-v2-6f9b55c5db-c2dtw" dst.namespace="reviews" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-reviews" direction="outbound" bytes_sent=84 bytes_recv=169 duration="2ms" {{< /text >}} This is round-robin load balancing, independent of any load balancing algorithm that may be configured in a `DestinationRule`'s `trafficPolicy` field, since, as discussed previously, L7 API objects such as `VirtualService` and `DestinationRule` are instantiated on the waypoint proxies and not the ztunnel proxies. diff --git a/content/en/docs/ambient/usage/verify-mtls-enabled/index.md b/content/en/docs/ambient/usage/verify-mtls-enabled/index.md index df19f5ca35899..cc5db4305a7a0 100644 --- a/content/en/docs/ambient/usage/verify-mtls-enabled/index.md +++ b/content/en/docs/ambient/usage/verify-mtls-enabled/index.md @@ -45,8 +45,8 @@ istio_tcp_connections_opened_total{ reporter="source", request_protocol="tcp", response_flags="-", - source_app="sleep", - source_principal="spiffe://cluster.local/ns/default/sa/sleep",source_workload_namespace="default", + source_app="curl", + source_principal="spiffe://cluster.local/ns/default/sa/curl",source_workload_namespace="default", ...} {{< /text >}} @@ -54,11 +54,11 @@ Validate that the `connection_security_policy` value is set to `mutual_tls` alon ## Validate mTLS from logs -You can also view either the source or destination ztunnel log to confirm mTLS is enabled, along with peer identities. Below is an example of the source ztunnel's log for a request from the `sleep` service to the `details` service: +You can also view either the source or destination ztunnel log to confirm mTLS is enabled, along with peer identities.
Below is an example of the source ztunnel's log for a request from the `curl` service to the `details` service: {{< text syntax=plain >}} -2024-08-21T15:32:05.754291Z info access connection complete src.addr=10.42.0.9:33772 src.workload="sleep-7656cf8794-6lsm4" src.namespace="default" -src.identity="spiffe://cluster.local/ns/default/sa/sleep" dst.addr=10.42.0.5:15008 dst.hbone_addr=10.42.0.5:9080 dst.service="details.default.svc.cluster.local" +2024-08-21T15:32:05.754291Z info access connection complete src.addr=10.42.0.9:33772 src.workload="curl-7656cf8794-6lsm4" src.namespace="default" +src.identity="spiffe://cluster.local/ns/default/sa/curl" dst.addr=10.42.0.5:15008 dst.hbone_addr=10.42.0.5:9080 dst.service="details.default.svc.cluster.local" dst.workload="details-v1-857849f66-ft8wx" dst.namespace="default" dst.identity="spiffe://cluster.local/ns/default/sa/bookinfo-details" direction="outbound" bytes_sent=84 bytes_recv=358 duration="15ms" {{< /text >}} diff --git a/content/en/docs/concepts/security/index.md b/content/en/docs/concepts/security/index.md index 7c74642bcaca5..0ab4c8a34f239 100644 --- a/content/en/docs/concepts/security/index.md +++ b/content/en/docs/concepts/security/index.md @@ -570,7 +570,7 @@ An authorization policy includes a selector, an action, and a list of rules: - The `when` field specifies the conditions needed to apply the rule The following example shows an authorization policy that allows two sources, the -`cluster.local/ns/default/sa/sleep` service account and the `dev` namespace, to +`cluster.local/ns/default/sa/curl` service account and the `dev` namespace, to access the workloads with the `app: httpbin` and `version: v1` labels in the `foo` namespace when requests sent have a valid JWT token. @@ -589,7 +589,7 @@ spec: rules: - from: - source: - principals: ["cluster.local/ns/default/sa/sleep"] + principals: ["cluster.local/ns/default/sa/curl"] - source: namespaces: ["dev"] to: @@ -832,7 +832,7 @@ spec: rules: - from: - source: - principals: ["cluster.local/ns/default/sa/sleep"] + principals: ["cluster.local/ns/default/sa/curl"] to: - operation: methods: ["GET"] diff --git a/content/en/docs/examples/microservices-istio/add-istio/index.md b/content/en/docs/examples/microservices-istio/add-istio/index.md index a8e8bc8d785e8..02f43f0867d05 100644 --- a/content/en/docs/examples/microservices-istio/add-istio/index.md +++ b/content/en/docs/examples/microservices-istio/add-istio/index.md @@ -55,7 +55,7 @@ disrupt your application, it continues to run and serve user requests. reviews-v2-56f6855586-cnrjp 1/1 Running 0 7h reviews-v2-56f6855586-lxc49 1/1 Running 0 7h reviews-v2-56f6855586-qh84k 1/1 Running 0 7h - sleep-88ddbcfdd-cc85s 1/1 Running 0 7h + curl-88ddbcfdd-cc85s 1/1 Running 0 7h {{< /text >}} 1. Kubernetes replaced the original pods of `productpage` with the diff --git a/content/en/docs/examples/microservices-istio/add-new-microservice-version/index.md b/content/en/docs/examples/microservices-istio/add-new-microservice-version/index.md index 190386a1d66d9..7e27fec309921 100644 --- a/content/en/docs/examples/microservices-istio/add-new-microservice-version/index.md +++ b/content/en/docs/examples/microservices-istio/add-new-microservice-version/index.md @@ -44,14 +44,14 @@ tests, end-to-end tests and tests in a staging environment. 1. 
Send a request to the pod and see that it returns the correct result: {{< text bash >}} - $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -- curl -sS "$REVIEWS_V2_POD_IP:9080/reviews/7" + $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -- curl -sS "$REVIEWS_V2_POD_IP:9080/reviews/7" {"id": "7","reviews": [{ "reviewer": "Reviewer1", "text": "An extremely entertaining play by Shakespeare. The slapstick humour is refreshing!", "rating": {"stars": 5, "color": "black"}},{ "reviewer": "Reviewer2", "text": "Absolutely fun and entertaining. The play lacks thematic depth when compared to other plays by Shakespeare.", "rating": {"stars": 4, "color": "black"}}]} {{< /text >}} 1. Perform primitive load testing by sending a request 10 times in a row: {{< text bash >}} - $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -- sh -c "for i in 1 2 3 4 5 6 7 8 9 10; do curl -o /dev/null -s -w '%{http_code}\n' $REVIEWS_V2_POD_IP:9080/reviews/7; done" + $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -- sh -c "for i in 1 2 3 4 5 6 7 8 9 10; do curl -o /dev/null -s -w '%{http_code}\n' $REVIEWS_V2_POD_IP:9080/reviews/7; done" 200 200 ... diff --git a/content/en/docs/examples/microservices-istio/bookinfo-kubernetes/index.md b/content/en/docs/examples/microservices-istio/bookinfo-kubernetes/index.md index 4602e9bedf1c9..bca5bda8d78c7 100644 --- a/content/en/docs/examples/microservices-istio/bookinfo-kubernetes/index.md +++ b/content/en/docs/examples/microservices-istio/bookinfo-kubernetes/index.md @@ -86,18 +86,18 @@ microservice. {{< /text >}} 1. After the services achieve the `Running` status, deploy a testing pod, - [sleep]({{< github_tree >}}/samples/sleep), to use for sending requests + [curl]({{< github_tree >}}/samples/curl), to use for sending requests to your microservices: {{< text bash >}} - $ kubectl apply -f {{< github_file >}}/samples/sleep/sleep.yaml + $ kubectl apply -f {{< github_file >}}/samples/curl/curl.yaml {{< /text >}} 1. To confirm that the Bookinfo application is running, send a request to it with a curl command from your testing pod: {{< text bash >}} - $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -sS productpage:9080/productpage | grep -o ".*" + $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -c curl -- curl -sS productpage:9080/productpage | grep -o ".*" Simple Bookstore App {{< /text >}} diff --git a/content/en/docs/examples/microservices-istio/enable-istio-all-microservices/index.md b/content/en/docs/examples/microservices-istio/enable-istio-all-microservices/index.md index 1b3e7a6d6c96f..25ce06b064b6f 100644 --- a/content/en/docs/examples/microservices-istio/enable-istio-all-microservices/index.md +++ b/content/en/docs/examples/microservices-istio/enable-istio-all-microservices/index.md @@ -53,7 +53,7 @@ enable Istio on all the remaining microservices in one step. productpage-v1-59b4f9f8d5-d4prx 2/2 Running 0 2m ratings-v1-b7b7fbbc9-sggxf 2/2 Running 0 2m reviews-v2-dfbcf859c-27dvk 2/2 Running 0 2m - sleep-88ddbcfdd-cc85s 1/1 Running 0 7h + curl-88ddbcfdd-cc85s 1/1 Running 0 7h {{< /text >}} 1. 
Access the Istio dashboard using the custom URL you set in your `/etc/hosts` file diff --git a/content/en/docs/examples/microservices-istio/production-testing/index.md b/content/en/docs/examples/microservices-istio/production-testing/index.md index 9feda24fc7fba..2bd212c2929f8 100644 --- a/content/en/docs/examples/microservices-istio/production-testing/index.md +++ b/content/en/docs/examples/microservices-istio/production-testing/index.md @@ -17,7 +17,7 @@ Test your microservice, in production! 1. Issue an HTTP request from the testing pod to one of your services: {{< text bash >}} - $ kubectl exec $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -- curl -sS http://ratings:9080/ratings/7 + $ kubectl exec $(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') -- curl -sS http://ratings:9080/ratings/7 {{< /text >}} ## Chaos testing @@ -50,7 +50,7 @@ the pods' status with `kubectl get pods`. reviews-v1-77c65dc5c6-5wt8g 1/1 Running 0 47m reviews-v1-77c65dc5c6-kjvxs 1/1 Running 0 48m reviews-v1-77c65dc5c6-r55tl 1/1 Running 0 47m - sleep-88ddbcfdd-l9zq4 1/1 Running 0 47m + curl-88ddbcfdd-l9zq4 1/1 Running 0 47m {{< /text >}} Note that the first pod was restarted once. @@ -87,7 +87,7 @@ the pods' status with `kubectl get pods`. reviews-v1-77c65dc5c6-5wt8g 1/1 Running 0 48m reviews-v1-77c65dc5c6-kjvxs 1/1 Running 0 49m reviews-v1-77c65dc5c6-r55tl 1/1 Running 0 48m - sleep-88ddbcfdd-l9zq4 1/1 Running 0 48m + curl-88ddbcfdd-l9zq4 1/1 Running 0 48m {{< /text >}} The first pod restarted twice and two other `details` pods diff --git a/content/en/docs/examples/virtual-machines/test.sh b/content/en/docs/examples/virtual-machines/test.sh index c52650caffac1..0228ca1c8732c 100644 --- a/content/en/docs/examples/virtual-machines/test.sh +++ b/content/en/docs/examples/virtual-machines/test.sh @@ -83,7 +83,7 @@ kubectl label namespace bookinfo istio-injection=enabled --overwrite kubectl apply -n bookinfo -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -n bookinfo -f samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -n bookinfo -f samples/bookinfo/networking/destination-rule-all.yaml -startup_sleep_sample +startup_curl_sample for deploy in "productpage-v1" "details-v1" "ratings-v1" "reviews-v1" "reviews-v2" "reviews-v3"; do _wait_for_deployment bookinfo "$deploy" done @@ -105,5 +105,5 @@ _verify_elided run_curl "${snip_reaching_kubernetes_services_from_the_virtual_ma docker stop vm kubectl delete -f samples/multicluster/expose-istiod.yaml -n istio-system --ignore-not-found=true echo y | istioctl uninstall --revision=default -cleanup_sleep_sample +cleanup_curl_sample kubectl delete namespace istio-system vm bookinfo --ignore-not-found=true diff --git a/content/en/docs/ops/common-problems/injection/index.md b/content/en/docs/ops/common-problems/injection/index.md index 9515c3f8badb6..dbf8a42e8996a 100644 --- a/content/en/docs/ops/common-problems/injection/index.md +++ b/content/en/docs/ops/common-problems/injection/index.md @@ -115,11 +115,11 @@ of injected sidecar when it was. to force the sidecar to be injected: {{< text bash yaml >}} - $ kubectl get deployment sleep -o yaml | grep "sidecar.istio.io/inject:" -B4 + $ kubectl get deployment curl -o yaml | grep "sidecar.istio.io/inject:" -B4 template: metadata: labels: - app: sleep + app: curl sidecar.istio.io/inject: "true" {{< /text >}} @@ -169,10 +169,10 @@ will also appear in the events of the namespace associated with the deployment. 
For example, if the `istiod` control plane pod was not running when you tried to deploy your pod, the events would show the following error: {{< text bash >}} -$ kubectl get events -n sleep +$ kubectl get events -n curl ... -23m Normal SuccessfulCreate replicaset/sleep-9454cc476 Created pod: sleep-9454cc476-khp45 -22m Warning FailedCreate replicaset/sleep-9454cc476 Error creating: Internal error occurred: failed calling webhook "namespace.sidecar-injector.istio.io": failed to call webhook: Post "https://istiod.istio-system.svc:443/inject?timeout=10s": dial tcp 10.96.44.51:443: connect: connection refused +23m Normal SuccessfulCreate replicaset/curl-9454cc476 Created pod: curl-9454cc476-khp45 +22m Warning FailedCreate replicaset/curl-9454cc476 Error creating: Internal error occurred: failed calling webhook "namespace.sidecar-injector.istio.io": failed to call webhook: Post "https://istiod.istio-system.svc:443/inject?timeout=10s": dial tcp 10.96.44.51:443: connect: connection refused {{< /text >}} {{< text bash >}} diff --git a/content/en/docs/ops/common-problems/network-issues/index.md b/content/en/docs/ops/common-problems/network-issues/index.md index 5601a8ff915f5..70f63ce51e84e 100644 --- a/content/en/docs/ops/common-problems/network-issues/index.md +++ b/content/en/docs/ops/common-problems/network-issues/index.md @@ -272,12 +272,12 @@ spec: The port name `http-web` in the Service definition explicitly specifies the http protocol for that port. -Let us assume we have a [sleep]({{< github_tree >}}/samples/sleep) pod `Deployment` as well in the default namespace. -When `nginx` is accessed from this `sleep` pod using its Pod IP (this is one of the common ways to access a headless service), the request goes via the `PassthroughCluster` to the server-side, but the sidecar proxy on the server-side fails to find the route entry to `nginx` and fails with `HTTP 503 UC`. +Let us assume we have a [curl]({{< github_tree >}}/samples/curl) pod `Deployment` as well in the default namespace. +When `nginx` is accessed from this `curl` pod using its Pod IP (this is one of the common ways to access a headless service), the request goes via the `PassthroughCluster` to the server side, but the sidecar proxy on the server side fails to find the route entry to `nginx` and fails with `HTTP 503 UC`. {{< text bash >}} -$ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}') -$ kubectl exec -it $SOURCE_POD -c sleep -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}" +$ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}') +$ kubectl exec -it $SOURCE_POD -c curl -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}" 503 {{< /text >}} @@ -290,8 +290,8 @@ Here are some of the ways to avoid this 503 error: The Host header in the curl request above will be the Pod IP by default. Specifying the Host header as `nginx.default` in our request to `nginx` successfully returns `HTTP 200 OK`.
{{< text bash >}} - $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}') - $ kubectl exec -it $SOURCE_POD -c sleep -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}" + $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}') + $ kubectl exec -it $SOURCE_POD -c curl -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}" 200 {{< /text >}} @@ -304,13 +304,13 @@ Here are some of the ways to avoid this 503 error: This is useful in certain scenarios where a client may not be able to include header information in the request. {{< text bash >}} - $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}') - $ kubectl exec -it $SOURCE_POD -c sleep -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}" + $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}') + $ kubectl exec -it $SOURCE_POD -c curl -- curl 10.1.1.171 -s -o /dev/null -w "%{http_code}" 200 {{< /text >}} {{< text bash >}} - $ kubectl exec -it $SOURCE_POD -c sleep -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}" + $ kubectl exec -it $SOURCE_POD -c curl -- curl -H "Host: nginx.default" 10.1.1.171 -s -o /dev/null -w "%{http_code}" 200 {{< /text >}} @@ -319,8 +319,8 @@ Here are some of the ways to avoid this 503 error: A specific instance of a headless service can also be accessed using just the domain name. {{< text bash >}} - $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}') - $ kubectl exec -it $SOURCE_POD -c sleep -- curl web-0.nginx.default -s -o /dev/null -w "%{http_code}" + $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}') + $ kubectl exec -it $SOURCE_POD -c curl -- curl web-0.nginx.default -s -o /dev/null -w "%{http_code}" 200 {{< /text >}} diff --git a/content/en/docs/ops/common-problems/security-issues/index.md b/content/en/docs/ops/common-problems/security-issues/index.md index c77ae40f03ee5..da2e6176952a1 100644 --- a/content/en/docs/ops/common-problems/security-issues/index.md +++ b/content/en/docs/ops/common-problems/security-issues/index.md @@ -171,7 +171,7 @@ you ensure Istiod is working as expected: 2021-04-23T20:53:29.507641Z info ads XDS: Pushing:2021-04-23T20:53:29Z/23 Services:15 ConnectedEndpoints:2 Version:2021-04-23T20:53:29Z/23 2021-04-23T20:53:29.507911Z debug authorization Processed authorization policy for httpbin-74fb669cc6-lpscm.foo with details: * found 0 CUSTOM actions - 2021-04-23T20:53:29.508077Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details: + 2021-04-23T20:53:29.508077Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details: * found 0 CUSTOM actions 2021-04-23T20:53:29.508128Z debug authorization Processed authorization policy for httpbin-74fb669cc6-lpscm.foo with details: * found 1 DENY actions, 0 ALLOW actions, 0 AUDIT actions @@ -179,11 +179,11 @@ you ensure Istiod is working as expected: * built 1 HTTP filters for DENY action * added 1 HTTP filters to filter chain 0 * added 1 HTTP filters to filter chain 1 - 2021-04-23T20:53:29.508158Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details: + 2021-04-23T20:53:29.508158Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details: * found 0 DENY actions, 0 ALLOW actions, 0 AUDIT actions - 
2021-04-23T20:53:29.509097Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details: + 2021-04-23T20:53:29.509097Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details: * found 0 CUSTOM actions - 2021-04-23T20:53:29.509167Z debug authorization Processed authorization policy for sleep-557747455f-6dxbl.foo with details: + 2021-04-23T20:53:29.509167Z debug authorization Processed authorization policy for curl-557747455f-6dxbl.foo with details: * found 0 DENY actions, 0 ALLOW actions, 0 AUDIT actions 2021-04-23T20:53:29.509501Z debug authorization Processed authorization policy for httpbin-74fb669cc6-lpscm.foo with details: * found 0 CUSTOM actions @@ -198,7 +198,7 @@ you ensure Istiod is working as expected: * added 1 TCP filters to filter chain 2 * added 1 TCP filters to filter chain 3 * added 1 TCP filters to filter chain 4 - 2021-04-23T20:53:29.510903Z info ads LDS: PUSH for node:sleep-557747455f-6dxbl.foo resources:18 size:85.0kB + 2021-04-23T20:53:29.510903Z info ads LDS: PUSH for node:curl-557747455f-6dxbl.foo resources:18 size:85.0kB 2021-04-23T20:53:29.511487Z info ads LDS: PUSH for node:httpbin-74fb669cc6-lpscm.foo resources:18 size:86.4kB {{< /text >}} @@ -323,7 +323,7 @@ are not using `httpbin`. {{< text plain >}} ... - 2021-04-23T20:43:18.552857Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:46180, directRemoteIP: 10.44.3.13:46180, remoteIP: 10.44.3.13:46180,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/sleep, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000' + 2021-04-23T20:43:18.552857Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:46180, directRemoteIP: 10.44.3.13:46180, remoteIP: 10.44.3.13:46180,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/curl, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000' ':path', '/headers' ':method', 'GET' ':scheme', 'http' @@ -335,14 +335,14 @@ are not using `httpbin`. 'x-b3-traceid', '8a124905edf4291a21df326729b264e9' 'x-b3-spanid', '21df326729b264e9' 'x-b3-sampled', '0' - 'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/sleep' + 'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/curl' , dynamicMetadata: filter_metadata { key: "istio_authn" value { fields { key: "request.auth.principal" value { - string_value: "cluster.local/ns/foo/sa/sleep" + string_value: "cluster.local/ns/foo/sa/curl" } } fields { @@ -354,13 +354,13 @@ are not using `httpbin`. fields { key: "source.principal" value { - string_value: "cluster.local/ns/foo/sa/sleep" + string_value: "cluster.local/ns/foo/sa/curl" } } fields { key: "source.user" value { - string_value: "cluster.local/ns/foo/sa/sleep" + string_value: "cluster.local/ns/foo/sa/curl" } } } @@ -377,7 +377,7 @@ are not using `httpbin`. {{< text plain >}} ... 
- 2021-04-23T20:59:11.838468Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:49826, directRemoteIP: 10.44.3.13:49826, remoteIP: 10.44.3.13:49826,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/sleep, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000' + 2021-04-23T20:59:11.838468Z debug envoy rbac checking request: requestedServerName: outbound_.8000_._.httpbin.foo.svc.cluster.local, sourceIP: 10.44.3.13:49826, directRemoteIP: 10.44.3.13:49826, remoteIP: 10.44.3.13:49826,localAddress: 10.44.1.18:80, ssl: uriSanPeerCertificate: spiffe://cluster.local/ns/foo/sa/curl, dnsSanPeerCertificate: , subjectPeerCertificate: , headers: ':authority', 'httpbin:8000' ':path', '/headers' ':method', 'GET' ':scheme', 'http' @@ -389,14 +389,14 @@ are not using `httpbin`. 'x-b3-traceid', '696607fc4382b50017c1f7017054c751' 'x-b3-spanid', '17c1f7017054c751' 'x-b3-sampled', '0' - 'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/sleep' + 'x-forwarded-client-cert', 'By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=d64cd6750a3af8685defbbe4dd8c467ebe80f6be4bfe9ca718e81cd94129fc1d;Subject="";URI=spiffe://cluster.local/ns/foo/sa/curl' , dynamicMetadata: filter_metadata { key: "istio_authn" value { fields { key: "request.auth.principal" value { - string_value: "cluster.local/ns/foo/sa/sleep" + string_value: "cluster.local/ns/foo/sa/curl" } } fields { @@ -408,13 +408,13 @@ are not using `httpbin`. fields { key: "source.principal" value { - string_value: "cluster.local/ns/foo/sa/sleep" + string_value: "cluster.local/ns/foo/sa/curl" } } fields { key: "source.user" value { - string_value: "cluster.local/ns/foo/sa/sleep" + string_value: "cluster.local/ns/foo/sa/curl" } } } @@ -436,7 +436,7 @@ are not using `httpbin`. 
If you suspect that some of the keys and/or certificates used by Istio aren't correct, you can inspect the contents from any pod: {{< text bash >}} -$ istioctl proxy-config secret sleep-8f795f47d-4s4t7 +$ istioctl proxy-config secret curl-8f795f47d-4s4t7 RESOURCE NAME TYPE STATUS VALID CERT SERIAL NUMBER NOT AFTER NOT BEFORE default Cert Chain ACTIVE true 138092480869518152837211547060273851586 2020-11-11T16:39:48Z 2020-11-10T16:39:48Z ROOTCA CA ACTIVE true 288553090258624301170355571152070165215 2030-11-08T16:34:52Z 2020-11-10T16:34:52Z @@ -445,7 +445,7 @@ ROOTCA CA ACTIVE true 288553090258624301170 By passing the `-o json` flag, you can pass the full certificate content to `openssl` to analyze its contents: {{< text bash >}} -$ istioctl proxy-config secret sleep-8f795f47d-4s4t7 -o json | jq '[.dynamicActiveSecrets[] | select(.name == "default")][0].secret.tlsCertificate.certificateChain.inlineBytes' -r | base64 -d | openssl x509 -noout -text +$ istioctl proxy-config secret curl-8f795f47d-4s4t7 -o json | jq '[.dynamicActiveSecrets[] | select(.name == "default")][0].secret.tlsCertificate.certificateChain.inlineBytes' -r | base64 -d | openssl x509 -noout -text Certificate: Data: Version: 3 (0x2) diff --git a/content/en/docs/ops/configuration/telemetry/envoy-stats/test.sh b/content/en/docs/ops/configuration/telemetry/envoy-stats/test.sh index e07dd650a7326..783da66a77e30 100644 --- a/content/en/docs/ops/configuration/telemetry/envoy-stats/test.sh +++ b/content/en/docs/ops/configuration/telemetry/envoy-stats/test.sh @@ -64,23 +64,23 @@ kubectl label namespace default istio-injection=enabled --overwrite export IFS= echo "${snip_proxyIstioConfig}" > proxyConfig.yaml unset IFS -# yq m -d2 samples/sleep/sleep.yaml proxyConfig.yaml > sleep_istioconfig.yaml -yq 'select(document_index != 2)' samples/sleep/sleep.yaml > tmp1.yaml -yq 'select(document_index == 2)' samples/sleep/sleep.yaml > tmp2.yaml +# yq m -d2 samples/curl/curl.yaml proxyConfig.yaml > curl_istioconfig.yaml +yq 'select(document_index != 2)' samples/curl/curl.yaml > tmp1.yaml +yq 'select(document_index == 2)' samples/curl/curl.yaml > tmp2.yaml # shellcheck disable=SC2016 yq eval-all '. as $item ireduce ({}; . *+ $item)' tmp2.yaml proxyConfig.yaml > new2.yaml -yq . tmp1.yaml new2.yaml > sleep_istioconfig.yaml +yq . tmp1.yaml new2.yaml > curl_istioconfig.yaml -kubectl apply -f sleep_istioconfig.yaml -_wait_for_deployment default sleep -POD="$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" +kubectl apply -f curl_istioconfig.yaml +_wait_for_deployment default curl +POD="$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" export POD _verify_contains snip_get_stats "circuit_breakers" # @cleanup set +e cleanup_httpbin_sample -cleanup_sleep_sample +cleanup_curl_sample echo y | istioctl uninstall --revision=default kubectl delete ns istio-system kubectl label namespace default istio-injection- diff --git a/content/en/docs/ops/configuration/traffic-management/dns-proxy/index.md b/content/en/docs/ops/configuration/traffic-management/dns-proxy/index.md index 44125a05be704..bafd75f830b2d 100644 --- a/content/en/docs/ops/configuration/traffic-management/dns-proxy/index.md +++ b/content/en/docs/ops/configuration/traffic-management/dns-proxy/index.md @@ -38,7 +38,7 @@ This can also be enabled on a per-pod basis with the [`proxy.istio.io/config` an {{< text syntax=yaml snip_id=none >}} kind: Deployment metadata: -  name: sleep +  name: curl spec: ...   
template: @@ -81,13 +81,13 @@ Bring up a client application to initiate the DNS request: {{< text bash >}} $ kubectl label namespace default istio-injection=enabled --overwrite -$ kubectl apply -f @samples/sleep/sleep.yaml@ +$ kubectl apply -f @samples/curl/curl.yaml@ {{< /text >}} Without the DNS capture, a request to `address.internal` would likely fail to resolve. Once this is enabled, you should instead get a response back based on the configured `address`: {{< text bash >}} -$ kubectl exec deploy/sleep -- curl -sS -v address.internal +$ kubectl exec deploy/curl -- curl -sS -v address.internal * Trying 198.51.100.1:80... {{< /text >}} @@ -128,7 +128,7 @@ EOF Now, send a request: {{< text bash >}} -$ kubectl exec deploy/sleep -- curl -sS -v auto.internal +$ kubectl exec deploy/curl -- curl -sS -v auto.internal * Trying 240.240.0.1:80... {{< /text >}} @@ -214,7 +214,7 @@ A virtual IP address will be assigned to every service entry so that client side 1. Verify listeners are configured separately for each service at the client side: {{< text bash >}} - $ istioctl pc listener deploy/sleep | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}' + $ istioctl pc listener deploy/curl | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}' ADDRESS=240.240.105.94, DESTINATION=Cluster: outbound|9000||tcp-echo.external-2.svc.cluster.local ADDRESS=240.240.69.138, DESTINATION=Cluster: outbound|9000||tcp-echo.external-1.svc.cluster.local {{< /text >}} @@ -224,7 +224,7 @@ A virtual IP address will be assigned to every service entry so that client side {{< text bash >}} $ kubectl -n external-1 delete -f @samples/tcp-echo/tcp-echo.yaml@ $ kubectl -n external-2 delete -f @samples/tcp-echo/tcp-echo.yaml@ -$ kubectl delete -f @samples/sleep/sleep.yaml@ +$ kubectl delete -f @samples/curl/curl.yaml@ $ istioctl uninstall --purge -y $ kubectl delete ns istio-system external-1 external-2 $ kubectl label namespace default istio-injection- diff --git a/content/en/docs/ops/configuration/traffic-management/dns-proxy/snips.sh b/content/en/docs/ops/configuration/traffic-management/dns-proxy/snips.sh index 24513b1460510..00cf65d372640 100644 --- a/content/en/docs/ops/configuration/traffic-management/dns-proxy/snips.sh +++ b/content/en/docs/ops/configuration/traffic-management/dns-proxy/snips.sh @@ -55,11 +55,11 @@ EOF snip_dns_capture_in_action_2() { kubectl label namespace default istio-injection=enabled --overwrite -kubectl apply -f samples/sleep/sleep.yaml +kubectl apply -f samples/curl/curl.yaml } snip_dns_capture_in_action_3() { -kubectl exec deploy/sleep -- curl -sS -v address.internal +kubectl exec deploy/curl -- curl -sS -v address.internal } ! IFS=$'\n' read -r -d '' snip_dns_capture_in_action_3_out <<\ENDSNIP @@ -84,7 +84,7 @@ EOF } snip_address_auto_allocation_2() { -kubectl exec deploy/sleep -- curl -sS -v auto.internal +kubectl exec deploy/curl -- curl -sS -v auto.internal } ! IFS=$'\n' read -r -d '' snip_address_auto_allocation_2_out <<\ENDSNIP @@ -152,7 +152,7 @@ EOF } snip_external_tcp_services_without_vips_5() { -istioctl pc listener deploy/sleep | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}' +istioctl pc listener deploy/curl | grep tcp-echo | awk '{printf "ADDRESS=%s, DESTINATION=%s %s\n", $1, $4, $5}' } ! 
IFS=$'\n' read -r -d '' snip_external_tcp_services_without_vips_5_out <<\ENDSNIP @@ -163,7 +163,7 @@ ENDSNIP snip_cleanup_1() { kubectl -n external-1 delete -f samples/tcp-echo/tcp-echo.yaml kubectl -n external-2 delete -f samples/tcp-echo/tcp-echo.yaml -kubectl delete -f samples/sleep/sleep.yaml +kubectl delete -f samples/curl/curl.yaml istioctl uninstall --purge -y kubectl delete ns istio-system external-1 external-2 kubectl label namespace default istio-injection- diff --git a/content/en/docs/ops/configuration/traffic-management/manage-mesh-certificates/index.md b/content/en/docs/ops/configuration/traffic-management/manage-mesh-certificates/index.md index 5ab0df58b1e01..54342c37392e3 100644 --- a/content/en/docs/ops/configuration/traffic-management/manage-mesh-certificates/index.md +++ b/content/en/docs/ops/configuration/traffic-management/manage-mesh-certificates/index.md @@ -108,7 +108,7 @@ meshConfig: apiVersion: apps/v1 kind: Deployment metadata: - name: sleep + name: curl spec: ... template: diff --git a/content/en/docs/ops/diagnostic-tools/multicluster/index.md b/content/en/docs/ops/diagnostic-tools/multicluster/index.md index 7b0e2c800a08a..3c4a28c7d54bf 100644 --- a/content/en/docs/ops/diagnostic-tools/multicluster/index.md +++ b/content/en/docs/ops/diagnostic-tools/multicluster/index.md @@ -16,7 +16,7 @@ and read the [Deployment Models](/docs/ops/deployment/deployment-models/) guide. The most common, but also broad problem with multi-network installations is that cross-cluster load balancing doesn’t work. Usually this manifests itself as only seeing responses from the cluster-local instance of a Service: {{< text bash >}} -$ for i in $(seq 10); do kubectl --context=$CTX_CLUSTER1 -n sample exec sleep-dd98b5f48-djwdw -c sleep -- curl -s helloworld:5000/hello; done +$ for i in $(seq 10); do kubectl --context=$CTX_CLUSTER1 -n sample exec curl-dd98b5f48-djwdw -c curl -- curl -s helloworld:5000/hello; done Hello version: v1, instance: helloworld-v1-578dd69f69-j69pf Hello version: v1, instance: helloworld-v1-578dd69f69-j69pf Hello version: v1, instance: helloworld-v1-578dd69f69-j69pf @@ -65,9 +65,9 @@ $ kubectl apply --context="${CTX_CLUSTER2}" \ -f samples/helloworld/helloworld.yaml \ -l version=v2 -n uninjected-sample $ kubectl apply --context="${CTX_CLUSTER1}" \ - -f samples/sleep/sleep.yaml -n uninjected-sample + -f samples/curl/curl.yaml -n uninjected-sample $ kubectl apply --context="${CTX_CLUSTER2}" \ - -f samples/sleep/sleep.yaml -n uninjected-sample + -f samples/curl/curl.yaml -n uninjected-sample {{< /text >}} Verify that there is a helloworld pod running in `cluster2`, using the `-o wide` flag, so we can get the Pod IP: @@ -75,8 +75,8 @@ Verify that there is a helloworld pod running in `cluster2`, using the `-o wide` {{< text bash >}} $ kubectl --context="${CTX_CLUSTER2}" -n uninjected-sample get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +curl-557747455f-jdsd8 1/1 Running 0 41s 10.100.0.2 node-2 helloworld-v2-54df5f84b-z28p5 1/1 Running 0 43s 10.100.0.1 node-1 -sleep-557747455f-jdsd8 1/1 Running 0 41s 10.100.0.2 node-2 {{< /text >}} Take note of the `IP` column for `helloworld`. In this case, it is `10.100.0.1`: @@ -85,12 +85,12 @@ Take note of the `IP` column for `helloworld`. 
In this case, it is `10.100.0.1`: $ REMOTE_POD_IP=10.100.0.1 {{< /text >}} -Next, attempt to send traffic from the `sleep` pod in `cluster1` directly to this Pod IP: +Next, attempt to send traffic from the `curl` pod in `cluster1` directly to this Pod IP: {{< text bash >}} -$ kubectl exec --context="${CTX_CLUSTER1}" -n uninjected-sample -c sleep \ +$ kubectl exec --context="${CTX_CLUSTER1}" -n uninjected-sample -c curl \ "$(kubectl get pod --context="${CTX_CLUSTER1}" -n uninjected-sample -l \ - app=sleep -o jsonpath='{.items[0].metadata.name}')" \ + app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS $REMOTE_POD_IP:5000/hello Hello version: v2, instance: helloworld-v2-54df5f84b-z28p5 {{< /text >}} @@ -133,12 +133,12 @@ guide, ensuring to run the steps for every cluster. If you've gone through the sections above and are still having issues, then it's time to dig a little deeper. The following steps assume you're following the [HelloWorld verification](/docs/setup/install/multicluster/verify/). -Before continuing, make sure both `helloworld` and `sleep` are deployed in each cluster. +Before continuing, make sure both `helloworld` and `curl` are deployed in each cluster. -From each cluster, find the endpoints the `sleep` service has for `helloworld`: +From each cluster, find the endpoints the `curl` service has for `helloworld`: {{< text bash >}} -$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld +$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld {{< /text >}} Troubleshooting information differs based on the cluster that is the source of traffic: @@ -148,7 +148,7 @@ Troubleshooting information differs based on the cluster that is the source of t {{< tab name="Primary cluster" category-value="primary" >}} {{< text bash >}} -$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld +$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld 10.0.0.11:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local {{< /text >}} @@ -171,7 +171,7 @@ $ kubectl get secrets --context=$CTX_CLUSTER1 -n istio-system -l "istio/multiClu {{< tab name="Remote cluster" category-value="remote" >}} {{< text bash >}} -$ istioctl --context $CTX_CLUSTER2 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld +$ istioctl --context $CTX_CLUSTER2 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld 10.0.1.11:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local {{< /text >}} @@ -201,7 +201,7 @@ $ kubectl get secrets --context=$CTX_CLUSTER1 -n istio-system -l "istio/multiClu The steps for Primary and Remote clusters still apply for multi-network, although multi-network has an additional case: {{< text bash >}} -$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint sleep-dd98b5f48-djwdw.sample | grep helloworld +$ istioctl --context $CTX_CLUSTER1 proxy-config endpoint curl-dd98b5f48-djwdw.sample | grep helloworld 10.0.5.11:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local 10.0.6.13:5000 HEALTHY OK outbound|5000||helloworld.sample.svc.cluster.local {{< /text >}} @@ -234,7 +234,7 @@ value. If that is incorrect, reinstall the gateway and make sure to set the --ne On the source pod, check the proxy metadata. 
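If `$CURL_POD_NAME` is not already set, you can capture it first (a minimal sketch, assuming the `curl` pod from the verification steps is running in the `sample` namespace):

{{< text bash >}}
$ # assumes the curl sample pod from the verification steps, in the "sample" namespace
$ CURL_POD_NAME=$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')
{{< /text >}}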
{{< text bash >}} -$ kubectl get pod $SLEEP_POD_NAME \ +$ kubectl get pod $CURL_POD_NAME \ -o jsonpath="{.spec.containers[*].env[?(@.name=='ISTIO_META_NETWORK')].value}" {{< /text >}} diff --git a/content/en/docs/ops/diagnostic-tools/proxy-cmd/index.md b/content/en/docs/ops/diagnostic-tools/proxy-cmd/index.md index 74bd988633fdb..6ea2c723f8683 100644 --- a/content/en/docs/ops/diagnostic-tools/proxy-cmd/index.md +++ b/content/en/docs/ops/diagnostic-tools/proxy-cmd/index.md @@ -396,17 +396,17 @@ $ istioctl proxy-config bootstrap -n istio-system istio-ingressgateway-7d6874b48 Verifying connectivity to Istiod is a useful troubleshooting step. Every proxy container in the service mesh should be able to communicate with Istiod. This can be accomplished in a few simple steps: -1. Create a `sleep` pod: +1. Create a `curl` pod: {{< text bash >}} $ kubectl create namespace foo - $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo + $ kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo {{< /text >}} 1. Test connectivity to Istiod using `curl`. The following example invokes the v1 registration API using default Istiod configuration parameters and mutual TLS enabled: {{< text bash >}} - $ kubectl exec $(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name}) -c sleep -n foo -- curl -sS istiod.istio-system:15014/version + $ kubectl exec $(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name}) -c curl -n foo -- curl -sS istiod.istio-system:15014/version {{< /text >}} You should receive a response listing the version of Istiod. diff --git a/content/en/docs/ops/integrations/spire/automatic_registration_test.sh b/content/en/docs/ops/integrations/spire/automatic_registration_test.sh index a5f9bdb497afb..7f2c2e3dc406e 100644 --- a/content/en/docs/ops/integrations/spire/automatic_registration_test.sh +++ b/content/en/docs/ops/integrations/spire/automatic_registration_test.sh @@ -39,23 +39,23 @@ set -u # Exit on unset value _wait_for_deployment istio-system istiod _wait_for_deployment istio-system istio-ingressgateway -# Deploy sleep application with registration label -snip_apply_sleep -_wait_for_deployment default sleep +# Deploy curl application with registration label +snip_apply_curl +_wait_for_deployment default curl # Set spire-server pod variable snip_set_spire_server_pod_name_var -# Set sleep pod and pod uid variables -snip_set_sleep_pod_var +# Set curl pod and pod uid variables +snip_set_curl_pod_var -# Verify sleep workload identity was issued by SPIRE -snip_get_sleep_svid +# Verify curl workload identity was issued by SPIRE +snip_get_curl_svid _verify_contains snip_get_svid_subject "O = SPIRE" # @cleanup # -kubectl delete -f samples/security/spire/sleep-spire.yaml +kubectl delete -f samples/security/spire/curl-spire.yaml istioctl uninstall --purge --skip-confirmation kubectl delete ns istio-system snip_uninstall_spire diff --git a/content/en/docs/ops/integrations/spire/index.md b/content/en/docs/ops/integrations/spire/index.md index 79101e19d8288..146b0f68741c3 100644 --- a/content/en/docs/ops/integrations/spire/index.md +++ b/content/en/docs/ops/integrations/spire/index.md @@ -138,7 +138,7 @@ Below are the equivalent manual registrations based off the automatic registrati {{< text bash >}} $ kubectl exec -n spire "$SPIRE_SERVER_POD" -- \ /opt/spire/bin/spire-server entry create \ - -spiffeID spiffe://example.org/ns/default/sa/sleep \ + -spiffeID spiffe://example.org/ns/default/sa/curl \ -parentID 
spiffe://example.org/ns/spire/sa/spire-agent \ -selector k8s:ns:default \ -selector k8s:pod-label:spiffe.io/spire-managed-identity:true \ @@ -253,8 +253,8 @@ Below are the equivalent manual registrations based off the automatic registrati 1. Deploy an example workload: - {{< text syntax=bash snip_id=apply_sleep >}} - $ istioctl kube-inject --filename @samples/security/spire/sleep-spire.yaml@ | kubectl apply -f - + {{< text syntax=bash snip_id=apply_curl >}} + $ istioctl kube-inject --filename @samples/security/spire/curl-spire.yaml@ | kubectl apply -f - {{< /text >}} In addition to needing `spiffe.io/spire-managed-identity` label, the workload will need the SPIFFE CSI Driver volume to access the SPIRE Agent socket. To accomplish this, @@ -265,24 +265,24 @@ Below are the equivalent manual registrations based off the automatic registrati apiVersion: apps/v1 kind: Deployment metadata: - name: sleep + name: curl spec: replicas: 1 selector: matchLabels: - app: sleep + app: curl template: metadata: labels: - app: sleep + app: curl # Injects custom sidecar template annotations: inject.istio.io/templates: "sidecar,spire" spec: terminationGracePeriodSeconds: 0 - serviceAccountName: sleep + serviceAccountName: curl containers: - - name: sleep + - name: curl image: curlimages/curl command: ["/bin/sleep", "3650d"] imagePullPolicy: IfNotPresent @@ -322,7 +322,7 @@ JWT-SVID TTL : default Selector : k8s:pod-uid:88b71387-4641-4d9c-9a89-989c88f7509d Entry ID : af7b53dc-4cc9-40d3-aaeb-08abbddd8e54 -SPIFFE ID : spiffe://example.org/ns/default/sa/sleep +SPIFFE ID : spiffe://example.org/ns/default/sa/curl Parent ID : spiffe://example.org/spire/agent/k8s_psat/demo-cluster/bea19580-ae04-4679-a22e-472e18ca4687 Revision : 0 X509-SVID TTL : default @@ -345,14 +345,14 @@ After registering an entry for the Ingress-gateway pod, Envoy receives the ident 1. Get pod information: - {{< text syntax=bash snip_id=set_sleep_pod_var >}} - $ SLEEP_POD=$(kubectl get pod -l app=sleep -o jsonpath="{.items[0].metadata.name}") + {{< text syntax=bash snip_id=set_curl_pod_var >}} + $ CURL_POD=$(kubectl get pod -l app=curl -o jsonpath="{.items[0].metadata.name}") {{< /text >}} -1. Retrieve sleep's SVID identity document using the istioctl proxy-config secret command: +1. 
Retrieve curl's SVID identity document using the istioctl proxy-config secret command: - {{< text syntax=bash snip_id=get_sleep_svid >}} - $ istioctl proxy-config secret "$SLEEP_POD" -o json | jq -r \ + {{< text syntax=bash snip_id=get_curl_svid >}} + $ istioctl proxy-config secret "$CURL_POD" -o json | jq -r \ '.dynamicActiveSecrets[0].secret.tlsCertificate.certificateChain.inlineBytes' | base64 --decode > chain.pem {{< /text >}} @@ -360,7 +360,7 @@ After registering an entry for the Ingress-gateway pod, Envoy receives the ident {{< text syntax=bash snip_id=get_svid_subject >}} $ openssl x509 -in chain.pem -text | grep SPIRE - Subject: C = US, O = SPIRE, CN = sleep-5f4d47c948-njvpk + Subject: C = US, O = SPIRE, CN = curl-5f4d47c948-njvpk {{< /text >}} ## SPIFFE federation diff --git a/content/en/docs/ops/integrations/spire/snips.sh b/content/en/docs/ops/integrations/spire/snips.sh index 4812f155e912b..29684c8996490 100644 --- a/content/en/docs/ops/integrations/spire/snips.sh +++ b/content/en/docs/ops/integrations/spire/snips.sh @@ -86,7 +86,7 @@ ENDSNIP snip_option_2_manual_registration_3() { kubectl exec -n spire "$SPIRE_SERVER_POD" -- \ /opt/spire/bin/spire-server entry create \ - -spiffeID spiffe://example.org/ns/default/sa/sleep \ + -spiffeID spiffe://example.org/ns/default/sa/curl \ -parentID spiffe://example.org/ns/spire/sa/spire-agent \ -selector k8s:ns:default \ -selector k8s:pod-label:spiffe.io/spire-managed-identity:true \ @@ -178,16 +178,16 @@ snip_apply_istio_operator_configuration() { istioctl install --skip-confirmation -f ./istio.yaml } -snip_apply_sleep() { -istioctl kube-inject --filename samples/security/spire/sleep-spire.yaml | kubectl apply -f - +snip_apply_curl() { +istioctl kube-inject --filename samples/security/spire/curl-spire.yaml | kubectl apply -f - } -snip_set_sleep_pod_var() { -SLEEP_POD=$(kubectl get pod -l app=sleep -o jsonpath="{.items[0].metadata.name}") +snip_set_curl_pod_var() { +CURL_POD=$(kubectl get pod -l app=curl -o jsonpath="{.items[0].metadata.name}") } -snip_get_sleep_svid() { -istioctl proxy-config secret "$SLEEP_POD" -o json | jq -r \ +snip_get_curl_svid() { +istioctl proxy-config secret "$CURL_POD" -o json | jq -r \ '.dynamicActiveSecrets[0].secret.tlsCertificate.certificateChain.inlineBytes' | base64 --decode > chain.pem } @@ -196,7 +196,7 @@ openssl x509 -in chain.pem -text | grep SPIRE } ! 
IFS=$'\n' read -r -d '' snip_get_svid_subject_out <<\ENDSNIP - Subject: C = US, O = SPIRE, CN = sleep-5f4d47c948-njvpk + Subject: C = US, O = SPIRE, CN = curl-5f4d47c948-njvpk ENDSNIP snip_uninstall_spire() { diff --git a/content/en/docs/reference/config/analysis/ist0127/index.md b/content/en/docs/reference/config/analysis/ist0127/index.md index 8c5c8546f5415..d39656e62edce 100644 --- a/content/en/docs/reference/config/analysis/ist0127/index.md +++ b/content/en/docs/reference/config/analysis/ist0127/index.md @@ -31,7 +31,7 @@ spec: rules: - from: - source: - principals: ["cluster.local/ns/default/sa/sleep"] + principals: ["cluster.local/ns/default/sa/curl"] - source: namespaces: ["httpbin"] to: diff --git a/content/en/docs/setup/additional-setup/cni/test.sh b/content/en/docs/setup/additional-setup/cni/test.sh index ec3f46f39bb1b..19ef1ab00dad1 100755 --- a/content/en/docs/setup/additional-setup/cni/test.sh +++ b/content/en/docs/setup/additional-setup/cni/test.sh @@ -47,12 +47,12 @@ _wait_for_deployment istio-system istiod _wait_for_daemonset istio-system istio-cni-node startup_bookinfo_sample -startup_sleep_sample +startup_curl_sample _verify_contains get_productpage "glyphicon glyphicon-star" # @cleanup cleanup_bookinfo_sample -cleanup_sleep_sample +cleanup_curl_sample echo y | istioctl uninstall --revision=default kubectl delete ns istio-system diff --git a/content/en/docs/setup/additional-setup/dual-stack/index.md b/content/en/docs/setup/additional-setup/dual-stack/index.md index d8c0bf84a00bb..eba42b3cfebc6 100644 --- a/content/en/docs/setup/additional-setup/dual-stack/index.md +++ b/content/en/docs/setup/additional-setup/dual-stack/index.md @@ -109,30 +109,30 @@ values: $ kubectl apply --namespace ipv6 -f @samples/tcp-echo/tcp-echo-ipv6.yaml@ {{< /text >}} -1. Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests. +1. Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests. {{< text bash >}} - $ kubectl apply -f @samples/sleep/sleep.yaml@ + $ kubectl apply -f @samples/curl/curl.yaml@ {{< /text >}} 1. Verify the traffic reaches the dual-stack pods: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000" + $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000" hello dualstack {{< /text >}} 1. Verify the traffic reaches the IPv4 pods: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000" + $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000" hello ipv4 {{< /text >}} 1. Verify the traffic reaches the IPv6 pods: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000" + $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000" hello ipv6 {{< /text >}} @@ -193,7 +193,7 @@ values: 1. 
Verify envoy endpoints are configured to route to both IPv4 and IPv6: {{< text syntax=bash snip_id=none >}} - $ istioctl proxy-config endpoints "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" --port 9000 + $ istioctl proxy-config endpoints "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" --port 9000 ENDPOINT STATUS OUTLIER CHECK CLUSTER 10.244.0.19:9000 HEALTHY OK outbound|9000||tcp-echo.ipv4.svc.cluster.local 10.244.0.26:9000 HEALTHY OK outbound|9000||tcp-echo.dual-stack.svc.cluster.local @@ -208,6 +208,6 @@ Now you can experiment with dual-stack services in your environment! 1. Cleanup application namespaces and deployments {{< text bash >}} - $ kubectl delete -f @samples/sleep/sleep.yaml@ + $ kubectl delete -f @samples/curl/curl.yaml@ $ kubectl delete ns dual-stack ipv4 ipv6 {{< /text >}} diff --git a/content/en/docs/setup/additional-setup/dual-stack/snips.sh b/content/en/docs/setup/additional-setup/dual-stack/snips.sh index 5447df393c79c..71320aa1ab468 100755 --- a/content/en/docs/setup/additional-setup/dual-stack/snips.sh +++ b/content/en/docs/setup/additional-setup/dual-stack/snips.sh @@ -40,11 +40,11 @@ kubectl apply --namespace ipv6 -f samples/tcp-echo/tcp-echo-ipv6.yaml } snip_verification_4() { -kubectl apply -f samples/sleep/sleep.yaml +kubectl apply -f samples/curl/curl.yaml } snip_verification_5() { -kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000" +kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo dualstack | nc tcp-echo.dual-stack 9000" } ! IFS=$'\n' read -r -d '' snip_verification_5_out <<\ENDSNIP @@ -52,7 +52,7 @@ hello dualstack ENDSNIP snip_verification_6() { -kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000" +kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv4 | nc tcp-echo.ipv4 9000" } ! IFS=$'\n' read -r -d '' snip_verification_6_out <<\ENDSNIP @@ -60,7 +60,7 @@ hello ipv4 ENDSNIP snip_verification_7() { -kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000" +kubectl exec "$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')" -- sh -c "echo ipv6 | nc tcp-echo.ipv6 9000" } ! 
IFS=$'\n' read -r -d '' snip_verification_7_out <<\ENDSNIP
@@ -68,6 +68,6 @@ hello ipv6
ENDSNIP

snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
kubectl delete ns dual-stack ipv4 ipv6
}
diff --git a/content/en/docs/setup/additional-setup/dual-stack/test.sh b/content/en/docs/setup/additional-setup/dual-stack/test.sh
index 0678ac3d71f88..23dd9e20875f8 100755
--- a/content/en/docs/setup/additional-setup/dual-stack/test.sh
+++ b/content/en/docs/setup/additional-setup/dual-stack/test.sh
@@ -28,7 +28,7 @@ snip_verification_3
snip_verification_4

# wait for deployments to be up and running
-_wait_for_deployment default sleep
+_wait_for_deployment default curl
_wait_for_deployment dual-stack tcp-echo
_wait_for_deployment ipv4 tcp-echo
_wait_for_deployment ipv6 tcp-echo
diff --git a/content/en/docs/setup/additional-setup/getting-started-istio-apis/test.sh b/content/en/docs/setup/additional-setup/getting-started-istio-apis/test.sh
index e6568775c7bf3..054e14985ede5 100644
--- a/content/en/docs/setup/additional-setup/getting-started-istio-apis/test.sh
+++ b/content/en/docs/setup/additional-setup/getting-started-istio-apis/test.sh
@@ -35,8 +35,8 @@ kubectl label namespace default istio-injection-
snip_install_istio_2

# TODO: how to make sure previous tests cleaned up everything?
-# Cleanup sleep
-cleanup_sleep_sample
+# Cleanup curl
+cleanup_curl_sample

# Deploy the sample Application
snip_deploy_the_sample_application_1
diff --git a/content/en/docs/setup/additional-setup/sidecar-injection/index.md b/content/en/docs/setup/additional-setup/sidecar-injection/index.md
index 9b2e7ec7fb42e..9a0786281e1c9 100644
--- a/content/en/docs/setup/additional-setup/sidecar-injection/index.md
+++ b/content/en/docs/setup/additional-setup/sidecar-injection/index.md
@@ -40,19 +40,19 @@ Note that unlike manual injection, automatic injection occurs at the pod-level.

#### Deploying an app

-Deploy sleep app. Verify both deployment and pod have a single container.
+Deploy the `curl` app. Verify that both the deployment and the pod have a single container.

{{< text bash >}}
-$ kubectl apply -f @samples/sleep/sleep.yaml@
+$ kubectl apply -f @samples/curl/curl.yaml@
$ kubectl get deployment -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
-sleep 1/1 1 1 12s sleep curlimages/curl app=sleep
+curl 1/1 1 1 12s curl curlimages/curl app=curl
{{< /text >}}

{{< text bash >}}
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
-sleep-8f795f47d-hdcgs 1/1 Running 0 42s
+curl-8f795f47d-hdcgs 1/1 Running 0 42s
{{< /text >}}

Label the `default` namespace with `istio-injection=enabled`
@@ -68,18 +68,18 @@ default Active 5m9s enabled

Injection occurs at pod creation time. Kill the running pod and verify a new pod is created with the injected sidecar. The original pod has `1/1 READY` containers, and the pod with injected sidecar has `2/2 READY` containers.

{{< text bash >}}
-$ kubectl delete pod -l app=sleep
-$ kubectl get pod -l app=sleep
-pod "sleep-776b7bcdcd-7hpnk" deleted
+$ kubectl delete pod -l app=curl
+$ kubectl get pod -l app=curl
+pod "curl-776b7bcdcd-7hpnk" deleted
NAME READY STATUS RESTARTS AGE
-sleep-776b7bcdcd-7hpnk 1/1 Terminating 0 1m
-sleep-776b7bcdcd-bhn9m 2/2 Running 0 7s
+curl-776b7bcdcd-7hpnk 1/1 Terminating 0 1m
+curl-776b7bcdcd-bhn9m 2/2 Running 0 7s
{{< /text >}}

View detailed state of the injected pod. You should see the injected `istio-proxy` container and corresponding volumes.
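Before inspecting the full pod description below, a quick sanity check is to list only the container names (a sketch, assuming the default injection template, which appends the proxy after the application container):

{{< text bash >}}
$ # assumes default injection ordering; the output shown is illustrative
$ kubectl get pod -l app=curl -o jsonpath='{.items[0].spec.containers[*].name}'
curl istio-proxy
{{< /text >}}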
{{< text bash >}}
-$ kubectl describe pod -l app=sleep
+$ kubectl describe pod -l app=curl
...
Events:
Type Reason Age From Message
...
Normal Created 11s kubelet Created container istio-init
Normal Started 11s kubelet Started container istio-init
...
- Normal Created 10s kubelet Created container sleep
- Normal Started 10s kubelet Started container sleep
+ Normal Created 10s kubelet Created container curl
+ Normal Started 10s kubelet Started container curl
...
Normal Created 9s kubelet Created container istio-proxy
Normal Started 8s kubelet Started container istio-proxy
@@ -99,13 +99,13 @@ Disable injection for the `default` namespace and verify new pods are created wi

{{< text bash >}}
$ kubectl label namespace default istio-injection-
-$ kubectl delete pod -l app=sleep
+$ kubectl delete pod -l app=curl
$ kubectl get pod
namespace/default labeled
-pod "sleep-776b7bcdcd-bhn9m" deleted
+pod "curl-776b7bcdcd-bhn9m" deleted
NAME READY STATUS RESTARTS AGE
-sleep-776b7bcdcd-bhn9m 2/2 Terminating 0 2m
-sleep-776b7bcdcd-gmvnr 1/1 Running 0 2s
+curl-776b7bcdcd-bhn9m 2/2 Terminating 0 2m
+curl-776b7bcdcd-gmvnr 1/1 Running 0 2s
{{< /text >}}

#### Controlling the injection policy
@@ -140,10 +140,10 @@ The injector is configured with the following logic:
To manually inject a deployment, use [`istioctl kube-inject`](/docs/reference/commands/istioctl/#istioctl-kube-inject):

{{< text bash >}}
-$ istioctl kube-inject -f @samples/sleep/sleep.yaml@ | kubectl apply -f -
-serviceaccount/sleep created
-service/sleep created
-deployment.apps/sleep created
+$ istioctl kube-inject -f @samples/curl/curl.yaml@ | kubectl apply -f -
+serviceaccount/curl created
+service/curl created
+deployment.apps/curl created
{{< /text >}}

By default, this will use the in-cluster configuration. Alternatively, injection can be done using local copies of the configuration.
@@ -161,19 +161,19 @@ $ istioctl kube-inject \
--injectConfigFile inject-config.yaml \
--meshConfigFile mesh-config.yaml \
--valuesFile inject-values.yaml \
- --filename @samples/sleep/sleep.yaml@ \
+ --filename @samples/curl/curl.yaml@ \
| kubectl apply -f -
-serviceaccount/sleep created
-service/sleep created
-deployment.apps/sleep created
+serviceaccount/curl created
+service/curl created
+deployment.apps/curl created
{{< /text >}}

-Verify that the sidecar has been injected into the sleep pod with `2/2` under the READY column.
+Verify that the sidecar has been injected into the `curl` pod with `2/2` under the READY column.

{{< text bash >}}
-$ kubectl get pod -l app=sleep
+$ kubectl get pod -l app=curl
NAME READY STATUS RESTARTS AGE
-sleep-64c6f57bc8-f5n4x 2/2 Running 0 24s
+curl-64c6f57bc8-f5n4x 2/2 Running 0 24s
{{< /text >}}

## Customizing injection
@@ -206,7 +206,7 @@ spec:
lifecycle:
preStop:
exec:
command: ["sleep", "10"]
volumes:
- name: certs
secret:
diff --git a/content/en/docs/setup/install/external-controlplane/index.md b/content/en/docs/setup/install/external-controlplane/index.md
index 106cbad952cc8..72fd1108dc242 100644
--- a/content/en/docs/setup/install/external-controlplane/index.md
+++ b/content/en/docs/setup/install/external-controlplane/index.md
@@ -499,28 +499,28 @@ See the [Istioctl-proxy Ecosystem project](https://github.com/istio-ecosystem/is
$ kubectl label --context="${CTX_REMOTE_CLUSTER}" namespace sample istio-injection=enabled
{{< /text >}}

-1. Deploy the `helloworld` (`v1`) and `sleep` samples:
+1. 
Deploy the `helloworld` (`v1`) and `curl` samples: {{< text bash >}} $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l service=helloworld -n sample --context="${CTX_REMOTE_CLUSTER}" $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l version=v1 -n sample --context="${CTX_REMOTE_CLUSTER}" - $ kubectl apply -f @samples/sleep/sleep.yaml@ -n sample --context="${CTX_REMOTE_CLUSTER}" + $ kubectl apply -f @samples/curl/curl.yaml@ -n sample --context="${CTX_REMOTE_CLUSTER}" {{< /text >}} -1. Wait a few seconds for the `helloworld` and `sleep` pods to be running with sidecars injected: +1. Wait a few seconds for the `helloworld` and `curl` pods to be running with sidecars injected: {{< text bash >}} $ kubectl get pod -n sample --context="${CTX_REMOTE_CLUSTER}" NAME READY STATUS RESTARTS AGE + curl-64d7d56698-wqjnm 2/2 Running 0 9s helloworld-v1-776f57d5f6-s7zfc 2/2 Running 0 10s - sleep-64d7d56698-wqjnm 2/2 Running 0 9s {{< /text >}} -1. Send a request from the `sleep` pod to the `helloworld` service: +1. Send a request from the `curl` pod to the `helloworld` service: {{< text bash >}} - $ kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c sleep \ - "$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \ + $ kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c curl \ + "$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS helloworld.sample:5000/hello Hello version: v1, instance: helloworld-v1-776f57d5f6-s7zfc {{< /text >}} @@ -855,28 +855,28 @@ $ export SECOND_CLUSTER_NAME= $ kubectl label --context="${CTX_SECOND_CLUSTER}" namespace sample istio-injection=enabled {{< /text >}} -1. Deploy the `helloworld` (`v2`) and `sleep` samples: +1. Deploy the `helloworld` (`v2`) and `curl` samples: {{< text bash >}} $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l service=helloworld -n sample --context="${CTX_SECOND_CLUSTER}" $ kubectl apply -f @samples/helloworld/helloworld.yaml@ -l version=v2 -n sample --context="${CTX_SECOND_CLUSTER}" - $ kubectl apply -f @samples/sleep/sleep.yaml@ -n sample --context="${CTX_SECOND_CLUSTER}" + $ kubectl apply -f @samples/curl/curl.yaml@ -n sample --context="${CTX_SECOND_CLUSTER}" {{< /text >}} -1. Wait a few seconds for the `helloworld` and `sleep` pods to be running with sidecars injected: +1. Wait a few seconds for the `helloworld` and `curl` pods to be running with sidecars injected: {{< text bash >}} $ kubectl get pod -n sample --context="${CTX_SECOND_CLUSTER}" NAME READY STATUS RESTARTS AGE + curl-557747455f-wtdbr 2/2 Running 0 9s helloworld-v2-54df5f84b-9hxgw 2/2 Running 0 10s - sleep-557747455f-wtdbr 2/2 Running 0 9s {{< /text >}} -1. Send a request from the `sleep` pod to the `helloworld` service: +1. 
Send a request from the `curl` pod to the `helloworld` service: {{< text bash >}} - $ kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c sleep \ - "$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \ + $ kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c curl \ + "$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS helloworld.sample:5000/hello Hello version: v2, instance: helloworld-v2-54df5f84b-9hxgw {{< /text >}} diff --git a/content/en/docs/setup/install/external-controlplane/snips.sh b/content/en/docs/setup/install/external-controlplane/snips.sh index e6ab8cf742cab..88610bb914717 100644 --- a/content/en/docs/setup/install/external-controlplane/snips.sh +++ b/content/en/docs/setup/install/external-controlplane/snips.sh @@ -334,7 +334,7 @@ kubectl label --context="${CTX_REMOTE_CLUSTER}" namespace sample istio-injection snip_deploy_a_sample_application_2() { kubectl apply -f samples/helloworld/helloworld.yaml -l service=helloworld -n sample --context="${CTX_REMOTE_CLUSTER}" kubectl apply -f samples/helloworld/helloworld.yaml -l version=v1 -n sample --context="${CTX_REMOTE_CLUSTER}" -kubectl apply -f samples/sleep/sleep.yaml -n sample --context="${CTX_REMOTE_CLUSTER}" +kubectl apply -f samples/curl/curl.yaml -n sample --context="${CTX_REMOTE_CLUSTER}" } snip_deploy_a_sample_application_3() { @@ -343,13 +343,13 @@ kubectl get pod -n sample --context="${CTX_REMOTE_CLUSTER}" ! IFS=$'\n' read -r -d '' snip_deploy_a_sample_application_3_out <<\ENDSNIP NAME READY STATUS RESTARTS AGE +curl-64d7d56698-wqjnm 2/2 Running 0 9s helloworld-v1-776f57d5f6-s7zfc 2/2 Running 0 10s -sleep-64d7d56698-wqjnm 2/2 Running 0 9s ENDSNIP snip_deploy_a_sample_application_4() { -kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c sleep \ - "$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \ +kubectl exec --context="${CTX_REMOTE_CLUSTER}" -n sample -c curl \ + "$(kubectl get pod --context="${CTX_REMOTE_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS helloworld.sample:5000/hello } @@ -549,7 +549,7 @@ kubectl label --context="${CTX_SECOND_CLUSTER}" namespace sample istio-injection snip_validate_the_installation_2() { kubectl apply -f samples/helloworld/helloworld.yaml -l service=helloworld -n sample --context="${CTX_SECOND_CLUSTER}" kubectl apply -f samples/helloworld/helloworld.yaml -l version=v2 -n sample --context="${CTX_SECOND_CLUSTER}" -kubectl apply -f samples/sleep/sleep.yaml -n sample --context="${CTX_SECOND_CLUSTER}" +kubectl apply -f samples/curl/curl.yaml -n sample --context="${CTX_SECOND_CLUSTER}" } snip_validate_the_installation_3() { @@ -558,13 +558,13 @@ kubectl get pod -n sample --context="${CTX_SECOND_CLUSTER}" ! 
IFS=$'\n' read -r -d '' snip_validate_the_installation_3_out <<\ENDSNIP NAME READY STATUS RESTARTS AGE +curl-557747455f-wtdbr 2/2 Running 0 9s helloworld-v2-54df5f84b-9hxgw 2/2 Running 0 10s -sleep-557747455f-wtdbr 2/2 Running 0 9s ENDSNIP snip_validate_the_installation_4() { -kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c sleep \ - "$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=sleep -o jsonpath='{.items[0].metadata.name}')" \ +kubectl exec --context="${CTX_SECOND_CLUSTER}" -n sample -c curl \ + "$(kubectl get pod --context="${CTX_SECOND_CLUSTER}" -n sample -l app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS helloworld.sample:5000/hello } diff --git a/content/en/docs/setup/install/multicluster/common.sh b/content/en/docs/setup/install/multicluster/common.sh index 364022d3e4da1..8fdb38ccae2c1 100644 --- a/content/en/docs/setup/install/multicluster/common.sh +++ b/content/en/docs/setup/install/multicluster/common.sh @@ -110,14 +110,14 @@ function verify_load_balancing snip_deploy_helloworld_v1_1 snip_deploy_helloworld_v2_1 - # Deploy Sleep - snip_deploy_sleep_1 + # Deploy curl + snip_deploy_curl_1 # Wait for all the deployments. _wait_for_deployment sample helloworld-v1 "${CTX_CLUSTER1}" - _wait_for_deployment sample sleep "${CTX_CLUSTER1}" + _wait_for_deployment sample curl "${CTX_CLUSTER1}" _wait_for_deployment sample helloworld-v2 "${CTX_CLUSTER2}" - _wait_for_deployment sample sleep "${CTX_CLUSTER2}" + _wait_for_deployment sample curl "${CTX_CLUSTER2}" # Verify everything is deployed as expected. VERIFY_TIMEOUT=0 # Don't retry. @@ -125,10 +125,10 @@ function verify_load_balancing _verify_like snip_deploy_helloworld_v1_2 "$snip_deploy_helloworld_v1_2_out" echo "Verifying helloworld v2 deployment" _verify_like snip_deploy_helloworld_v2_2 "$snip_deploy_helloworld_v2_2_out" - echo "Verifying sleep deployment in ${CTX_CLUSTER1}" - _verify_like snip_deploy_sleep_2 "$snip_deploy_sleep_2_out" - echo "Verifying sleep deployment in ${CTX_CLUSTER2}" - _verify_like snip_deploy_sleep_3 "$snip_deploy_sleep_3_out" + echo "Verifying curl deployment in ${CTX_CLUSTER1}" + _verify_like snip_deploy_curl_2 "$snip_deploy_curl_2_out" + echo "Verifying curl deployment in ${CTX_CLUSTER2}" + _verify_like snip_deploy_curl_3 "$snip_deploy_curl_3_out" unset VERIFY_TIMEOUT # Restore default local EXPECTED_RESPONSE_FROM_CLUSTER1="Hello version: v1, instance:" diff --git a/content/en/docs/setup/install/multicluster/verify/index.md b/content/en/docs/setup/install/multicluster/verify/index.md index 602941da5c37e..4a3a719588116 100644 --- a/content/en/docs/setup/install/multicluster/verify/index.md +++ b/content/en/docs/setup/install/multicluster/verify/index.md @@ -17,7 +17,7 @@ In this guide, we will deploy the `HelloWorld` application `V1` to `cluster1` and `V2` to `cluster2`. Upon receiving a request, `HelloWorld` will include its version in its response. -We will also deploy the `Sleep` container to both clusters. We will use these +We will also deploy the `curl` container to both clusters. We will use these pods as the source of requests to the `HelloWorld` service, simulating in-mesh traffic. Finally, after generating traffic, we will observe which cluster received the requests. @@ -97,50 +97,50 @@ helloworld-v2-758dd55874-6x4t8 2/2 Running 0 40s Wait until the status of `helloworld-v2` is `Running`. 
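If you are scripting these checks, one alternative to polling by eye (a sketch, assuming the standard `kubectl wait` subcommand and the `sample` namespace used here) is to block until the deployment reports available:

{{< text bash >}}
$ # illustrative sketch; adjust the context and timeout to your environment
$ kubectl --context="${CTX_CLUSTER2}" -n sample wait deployment/helloworld-v2 --for=condition=Available --timeout=120s
{{< /text >}}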
-## Deploy `Sleep`
+## Deploy `curl`

-Deploy the `Sleep` application to both clusters:
+Deploy the `curl` application to both clusters:

{{< text bash >}}
$ kubectl apply --context="${CTX_CLUSTER1}" \
- -f @samples/sleep/sleep.yaml@ -n sample
+ -f @samples/curl/curl.yaml@ -n sample
$ kubectl apply --context="${CTX_CLUSTER2}" \
- -f @samples/sleep/sleep.yaml@ -n sample
+ -f @samples/curl/curl.yaml@ -n sample
{{< /text >}}

-Confirm the status `Sleep` pod on `cluster1`:
+Confirm the status of the `curl` pod on `cluster1`:

{{< text bash >}}
-$ kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=sleep
+$ kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=curl
NAME READY STATUS RESTARTS AGE
-sleep-754684654f-n6bzf 2/2 Running 0 5s
+curl-754684654f-n6bzf 2/2 Running 0 5s
{{< /text >}}

-Wait until the status of the `Sleep` pod is `Running`.
+Wait until the status of the `curl` pod is `Running`.

-Confirm the status of the `Sleep` pod on `cluster2`:
+Confirm the status of the `curl` pod on `cluster2`:

{{< text bash >}}
-$ kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=sleep
+$ kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=curl
NAME READY STATUS RESTARTS AGE
-sleep-754684654f-dzl9j 2/2 Running 0 5s
+curl-754684654f-dzl9j 2/2 Running 0 5s
{{< /text >}}

-Wait until the status of the `Sleep` pod is `Running`.
+Wait until the status of the `curl` pod is `Running`.

## Verifying Cross-Cluster Traffic

To verify that cross-cluster load balancing works as expected, call the
-`HelloWorld` service several times using the `Sleep` pod. To ensure load
+`HelloWorld` service several times using the `curl` pod. To ensure load
balancing is working properly, call the `HelloWorld` service from all
clusters in your deployment.

-Send one request from the `Sleep` pod on `cluster1` to the `HelloWorld` service:
+Send one request from the `curl` pod on `cluster1` to the `HelloWorld` service:

{{< text bash >}}
-$ kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_CLUSTER1}" -n sample -c curl \
"$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
- app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+ app=curl -o jsonpath='{.items[0].metadata.name}')" \
-- curl -sS helloworld.sample:5000/hello
{{< /text >}}

@@ -153,12 +153,12 @@ Hello version: v1, instance: helloworld-v1-86f77cd7bd-cpxhv
... 
{{< /text >}} -Now repeat this process from the `Sleep` pod on `cluster2`: +Now repeat this process from the `curl` pod on `cluster2`: {{< text bash >}} -$ kubectl exec --context="${CTX_CLUSTER2}" -n sample -c sleep \ +$ kubectl exec --context="${CTX_CLUSTER2}" -n sample -c curl \ "$(kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l \ - app=sleep -o jsonpath='{.items[0].metadata.name}')" \ + app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS helloworld.sample:5000/hello {{< /text >}} diff --git a/content/en/docs/setup/install/multicluster/verify/snips.sh b/content/en/docs/setup/install/multicluster/verify/snips.sh index 14f6f27a1e53d..9ec98bf2049b8 100644 --- a/content/en/docs/setup/install/multicluster/verify/snips.sh +++ b/content/en/docs/setup/install/multicluster/verify/snips.sh @@ -71,35 +71,35 @@ NAME READY STATUS RESTARTS AGE helloworld-v2-758dd55874-6x4t8 2/2 Running 0 40s ENDSNIP -snip_deploy_sleep_1() { +snip_deploy_curl_1() { kubectl apply --context="${CTX_CLUSTER1}" \ - -f samples/sleep/sleep.yaml -n sample + -f samples/curl/curl.yaml -n sample kubectl apply --context="${CTX_CLUSTER2}" \ - -f samples/sleep/sleep.yaml -n sample + -f samples/curl/curl.yaml -n sample } -snip_deploy_sleep_2() { -kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=sleep +snip_deploy_curl_2() { +kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l app=curl } -! IFS=$'\n' read -r -d '' snip_deploy_sleep_2_out <<\ENDSNIP +! IFS=$'\n' read -r -d '' snip_deploy_curl_2_out <<\ENDSNIP NAME READY STATUS RESTARTS AGE -sleep-754684654f-n6bzf 2/2 Running 0 5s +curl-754684654f-n6bzf 2/2 Running 0 5s ENDSNIP -snip_deploy_sleep_3() { -kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=sleep +snip_deploy_curl_3() { +kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l app=curl } -! IFS=$'\n' read -r -d '' snip_deploy_sleep_3_out <<\ENDSNIP +! IFS=$'\n' read -r -d '' snip_deploy_curl_3_out <<\ENDSNIP NAME READY STATUS RESTARTS AGE -sleep-754684654f-dzl9j 2/2 Running 0 5s +curl-754684654f-dzl9j 2/2 Running 0 5s ENDSNIP snip_verifying_crosscluster_traffic_1() { -kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep \ +kubectl exec --context="${CTX_CLUSTER1}" -n sample -c curl \ "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \ - app=sleep -o jsonpath='{.items[0].metadata.name}')" \ + app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS helloworld.sample:5000/hello } @@ -110,9 +110,9 @@ Hello version: v1, instance: helloworld-v1-86f77cd7bd-cpxhv ENDSNIP snip_verifying_crosscluster_traffic_3() { -kubectl exec --context="${CTX_CLUSTER2}" -n sample -c sleep \ +kubectl exec --context="${CTX_CLUSTER2}" -n sample -c curl \ "$(kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l \ - app=sleep -o jsonpath='{.items[0].metadata.name}')" \ + app=curl -o jsonpath='{.items[0].metadata.name}')" \ -- curl -sS helloworld.sample:5000/hello } diff --git a/content/en/docs/setup/install/multiple-controlplanes/index.md b/content/en/docs/setup/install/multiple-controlplanes/index.md index d46e2967025af..158162316834d 100644 --- a/content/en/docs/setup/install/multiple-controlplanes/index.md +++ b/content/en/docs/setup/install/multiple-controlplanes/index.md @@ -173,38 +173,38 @@ Istio revisions and `discoverySelectors` are then used to scope the resources an $ kubectl label ns app-ns-3 usergroup=usergroup-2 istio.io/rev=usergroup-2 {{< /text >}} -1. Deploy one `sleep` and `httpbin` application per namespace: +1. 
Deploy one `curl` and `httpbin` application per namespace: {{< text bash >}} - $ kubectl -n app-ns-1 apply -f samples/sleep/sleep.yaml + $ kubectl -n app-ns-1 apply -f samples/curl/curl.yaml $ kubectl -n app-ns-1 apply -f samples/httpbin/httpbin.yaml - $ kubectl -n app-ns-2 apply -f samples/sleep/sleep.yaml + $ kubectl -n app-ns-2 apply -f samples/curl/curl.yaml $ kubectl -n app-ns-2 apply -f samples/httpbin/httpbin.yaml - $ kubectl -n app-ns-3 apply -f samples/sleep/sleep.yaml + $ kubectl -n app-ns-3 apply -f samples/curl/curl.yaml $ kubectl -n app-ns-3 apply -f samples/httpbin/httpbin.yaml {{< /text >}} -1. Wait a few seconds for the `httpbin` and `sleep` pods to be running with sidecars injected: +1. Wait a few seconds for the `httpbin` and `curl` pods to be running with sidecars injected: {{< text bash >}} $ kubectl get pods -n app-ns-1 NAME READY STATUS RESTARTS AGE httpbin-9dbd644c7-zc2v4 2/2 Running 0 115m - sleep-78ff5975c6-fml7c 2/2 Running 0 115m + curl-78ff5975c6-fml7c 2/2 Running 0 115m {{< /text >}} {{< text bash >}} $ kubectl get pods -n app-ns-2 NAME READY STATUS RESTARTS AGE httpbin-9dbd644c7-sd9ln 2/2 Running 0 115m - sleep-78ff5975c6-sz728 2/2 Running 0 115m + curl-78ff5975c6-sz728 2/2 Running 0 115m {{< /text >}} {{< text bash >}} $ kubectl get pods -n app-ns-3 NAME READY STATUS RESTARTS AGE httpbin-9dbd644c7-8ll27 2/2 Running 0 115m - sleep-78ff5975c6-sg4tq 2/2 Running 0 115m + curl-78ff5975c6-sg4tq 2/2 Running 0 115m {{< /text >}} ### Verify the application to control plane mapping @@ -215,7 +215,7 @@ Now that the applications are deployed, you can use the `istioctl ps` command to $ istioctl ps -i usergroup-1 NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION httpbin-9dbd644c7-hccpf.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 -sleep-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 +curl-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 {{< /text >}} {{< text bash >}} @@ -223,16 +223,16 @@ $ istioctl ps -i usergroup-2 NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION httpbin-9dbd644c7-vvcqj.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 httpbin-9dbd644c7-xzgfm.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 -sleep-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 -sleep-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 +curl-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 +curl-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 {{< /text >}} ### Verify the application connectivity is ONLY within the respective usergroup -1. 
Send a request from the `sleep` pod in `app-ns-1` in `usergroup-1` to the `httpbin` service in `app-ns-2` in `usergroup-2`. The communication should fail: +1. Send a request from the `curl` pod in `app-ns-1` in `usergroup-1` to the `httpbin` service in `app-ns-2` in `usergroup-2`. The communication should fail: {{< text bash >}} - $ kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000 + $ kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000 HTTP/1.1 503 Service Unavailable content-length: 95 content-type: text/plain @@ -240,10 +240,10 @@ sleep-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED server: envoy {{< /text >}} -1. Send a request from the `sleep` pod in `app-ns-2` in `usergroup-2` to the `httpbin` service in `app-ns-3` in `usergroup-2`. The communication should work: +1. Send a request from the `curl` pod in `app-ns-2` in `usergroup-2` to the `httpbin` service in `app-ns-3` in `usergroup-2`. The communication should work: {{< text bash >}} - $ kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000 + $ kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000 HTTP/1.1 200 OK server: envoy date: Thu, 22 Dec 2022 15:01:36 GMT diff --git a/content/en/docs/setup/install/multiple-controlplanes/snips.sh b/content/en/docs/setup/install/multiple-controlplanes/snips.sh index 4e73136c8cac3..f2b4a3b70e7fa 100644 --- a/content/en/docs/setup/install/multiple-controlplanes/snips.sh +++ b/content/en/docs/setup/install/multiple-controlplanes/snips.sh @@ -151,11 +151,11 @@ kubectl label ns app-ns-3 usergroup=usergroup-2 istio.io/rev=usergroup-2 } snip_deploy_application_workloads_per_usergroup_3() { -kubectl -n app-ns-1 apply -f samples/sleep/sleep.yaml +kubectl -n app-ns-1 apply -f samples/curl/curl.yaml kubectl -n app-ns-1 apply -f samples/httpbin/httpbin.yaml -kubectl -n app-ns-2 apply -f samples/sleep/sleep.yaml +kubectl -n app-ns-2 apply -f samples/curl/curl.yaml kubectl -n app-ns-2 apply -f samples/httpbin/httpbin.yaml -kubectl -n app-ns-3 apply -f samples/sleep/sleep.yaml +kubectl -n app-ns-3 apply -f samples/curl/curl.yaml kubectl -n app-ns-3 apply -f samples/httpbin/httpbin.yaml } @@ -166,7 +166,7 @@ kubectl get pods -n app-ns-1 ! IFS=$'\n' read -r -d '' snip_deploy_application_workloads_per_usergroup_4_out <<\ENDSNIP NAME READY STATUS RESTARTS AGE httpbin-9dbd644c7-zc2v4 2/2 Running 0 115m -sleep-78ff5975c6-fml7c 2/2 Running 0 115m +curl-78ff5975c6-fml7c 2/2 Running 0 115m ENDSNIP snip_deploy_application_workloads_per_usergroup_5() { @@ -176,7 +176,7 @@ kubectl get pods -n app-ns-2 ! IFS=$'\n' read -r -d '' snip_deploy_application_workloads_per_usergroup_5_out <<\ENDSNIP NAME READY STATUS RESTARTS AGE httpbin-9dbd644c7-sd9ln 2/2 Running 0 115m -sleep-78ff5975c6-sz728 2/2 Running 0 115m +curl-78ff5975c6-sz728 2/2 Running 0 115m ENDSNIP snip_deploy_application_workloads_per_usergroup_6() { @@ -186,7 +186,7 @@ kubectl get pods -n app-ns-3 ! 
IFS=$'\n' read -r -d '' snip_deploy_application_workloads_per_usergroup_6_out <<\ENDSNIP NAME READY STATUS RESTARTS AGE httpbin-9dbd644c7-8ll27 2/2 Running 0 115m -sleep-78ff5975c6-sg4tq 2/2 Running 0 115m +curl-78ff5975c6-sg4tq 2/2 Running 0 115m ENDSNIP snip_verify_the_application_to_control_plane_mapping_1() { @@ -196,7 +196,7 @@ istioctl ps -i usergroup-1 ! IFS=$'\n' read -r -d '' snip_verify_the_application_to_control_plane_mapping_1_out <<\ENDSNIP NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION httpbin-9dbd644c7-hccpf.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 -sleep-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 +curl-78ff5975c6-9zb77.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-1-5ccc849b5f-wnqd6 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 ENDSNIP snip_verify_the_application_to_control_plane_mapping_2() { @@ -207,12 +207,12 @@ istioctl ps -i usergroup-2 NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION httpbin-9dbd644c7-vvcqj.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 httpbin-9dbd644c7-xzgfm.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 -sleep-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 -sleep-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 +curl-78ff5975c6-fthmt.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 +curl-78ff5975c6-nxtth.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-usergroup-2-658d6458f7-slpd9 1.17-alpha.f5212a6f7df61fd8156f3585154bed2f003c4117 ENDSNIP snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_1() { -kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000 +kubectl -n app-ns-1 exec "$(kubectl -n app-ns-1 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-2.svc.cluster.local:8000 } ! IFS=$'\n' read -r -d '' snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_1_out <<\ENDSNIP @@ -224,7 +224,7 @@ server: envoy ENDSNIP snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_2() { -kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000 +kubectl -n app-ns-2 exec "$(kubectl -n app-ns-2 get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sIL http://httpbin.app-ns-3.svc.cluster.local:8000 } ! 
IFS=$'\n' read -r -d '' snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_2_out <<\ENDSNIP diff --git a/content/en/docs/setup/install/multiple-controlplanes/test.sh b/content/en/docs/setup/install/multiple-controlplanes/test.sh index 46081b324eeeb..ee14bfe73251e 100644 --- a/content/en/docs/setup/install/multiple-controlplanes/test.sh +++ b/content/en/docs/setup/install/multiple-controlplanes/test.sh @@ -41,11 +41,11 @@ snip_deploy_application_workloads_per_usergroup_2 snip_deploy_application_workloads_per_usergroup_3 _wait_for_deployment app-ns-1 httpbin -_wait_for_deployment app-ns-1 sleep +_wait_for_deployment app-ns-1 curl _wait_for_deployment app-ns-2 httpbin -_wait_for_deployment app-ns-2 sleep +_wait_for_deployment app-ns-2 curl _wait_for_deployment app-ns-3 httpbin -_wait_for_deployment app-ns-3 sleep +_wait_for_deployment app-ns-3 curl # verification of connectivity _verify_first_line snip_verify_the_application_connectivity_is_only_within_the_respective_usergroup_1 "HTTP/1.1 503 Service Unavailable" diff --git a/content/en/docs/setup/upgrade/canary/canary_upgrade_test.sh b/content/en/docs/setup/upgrade/canary/canary_upgrade_test.sh index 2bb39c5e5caac..fdc1a978c6e64 100644 --- a/content/en/docs/setup/upgrade/canary/canary_upgrade_test.sh +++ b/content/en/docs/setup/upgrade/canary/canary_upgrade_test.sh @@ -32,7 +32,7 @@ istioctl install --set profile=default --revision="$previousVersionRevision1" -y snip_data_plane_2 snip_data_plane_3 snip_data_plane_4 -_wait_for_deployment test-ns sleep +_wait_for_deployment test-ns curl # precheck before upgrade _verify_lines snip_before_you_upgrade_1 "$snip_before_you_upgrade_1_out" diff --git a/content/en/docs/setup/upgrade/canary/index.md b/content/en/docs/setup/upgrade/canary/index.md index 82c46d246327c..1a3100e97070f 100644 --- a/content/en/docs/setup/upgrade/canary/index.md +++ b/content/en/docs/setup/upgrade/canary/index.md @@ -88,7 +88,7 @@ However, simply installing the new revision has no impact on the existing sideca you must configure them to point to the new `istiod-canary` control plane. This is controlled during sidecar injection based on the namespace label `istio.io/rev`. -Create a namespace `test-ns` with `istio-injection` enabled. In the `test-ns` namespace, deploy a sample sleep pod: +Create a namespace `test-ns` with `istio-injection` enabled. In the `test-ns` namespace, deploy a sample curl pod: 1. Create a namespace `test-ns`. @@ -102,10 +102,10 @@ Create a namespace `test-ns` with `istio-injection` enabled. In the `test-ns` na $ kubectl label namespace test-ns istio-injection=enabled {{< /text >}} -1. Bring up a sample sleep pod in `test-ns` namespace. +1. Bring up a sample curl pod in `test-ns` namespace. {{< text bash >}} - $ kubectl apply -n test-ns -f samples/sleep/sleep.yaml + $ kubectl apply -n test-ns -f samples/curl/curl.yaml {{< /text >}} To upgrade the namespace `test-ns`, remove the `istio-injection` label, and add the `istio.io/rev` label to point to the `canary` revision. The `istio-injection` label must be removed because it takes precedence over the `istio.io/rev` label for backward compatibility. @@ -166,12 +166,12 @@ If you're using Helm, refer to the [Helm upgrade documentation](/docs/setup/upgr $ kubectl label ns app-ns-3 istio.io/rev=prod-canary {{< /text >}} -1. Bring up a sample sleep pod in each namespace: +1. 
Bring up a sample curl pod in each namespace:

    {{< text bash >}}
-    $ kubectl apply -n app-ns-1 -f samples/sleep/sleep.yaml
-    $ kubectl apply -n app-ns-2 -f samples/sleep/sleep.yaml
-    $ kubectl apply -n app-ns-3 -f samples/sleep/sleep.yaml
+    $ kubectl apply -n app-ns-1 -f samples/curl/curl.yaml
+    $ kubectl apply -n app-ns-2 -f samples/curl/curl.yaml
+    $ kubectl apply -n app-ns-3 -f samples/curl/curl.yaml
    {{< /text >}}

1. Verify application to control plane mapping using `istioctl proxy-status` command:

    {{< text bash >}}
    $ istioctl ps
    NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-    sleep-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-s8zfg {{< istio_full_version >}}
-    sleep-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version >}}.1
-    sleep-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version_revision >}}.1
+    curl-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-s8zfg {{< istio_full_version >}}
+    curl-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version >}}.1
+    curl-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_previous_version_revision >}}-1-bdf5948d5-n72r2 {{< istio_previous_version >}}.1
    {{< /text >}}

{{< boilerplate revision-tags-middle >}}

@@ -202,9 +202,9 @@ Verify the application to control plane mapping using `istioctl proxy-status` co

{{< text bash >}}
$ istioctl ps
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-sleep-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
-sleep-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
-sleep-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
+curl-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
+curl-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
+curl-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-{{< istio_full_version_revision >}}-7f6fc6cfd6-jsktb {{< istio_full_version >}}
{{< /text >}}

### Default tag
diff --git a/content/en/docs/setup/upgrade/canary/revision_tags_test.sh b/content/en/docs/setup/upgrade/canary/revision_tags_test.sh
index 1d7354e26e774..832e4b58297ec 100644
--- a/content/en/docs/setup/upgrade/canary/revision_tags_test.sh
+++ b/content/en/docs/setup/upgrade/canary/revision_tags_test.sh
@@ -36,9 +36,9 @@ snip_usage_2
# deploy app namespaces and label them
snip_usage_3
snip_usage_4
-_wait_for_deployment app-ns-1 sleep
-_wait_for_deployment app-ns-2 sleep
-_wait_for_deployment app-ns-3 sleep
+_wait_for_deployment app-ns-1 curl
+_wait_for_deployment app-ns-2 curl
+_wait_for_deployment app-ns-3 curl

# verify both the revisions are managing workloads
_verify_contains snip_usage_5 "istiod-$previousVersionRevision1"
diff --git a/content/en/docs/setup/upgrade/canary/snips.sh b/content/en/docs/setup/upgrade/canary/snips.sh
index 77ea1b6a10bb0..c5f4de7cf6ffc 100644
--- a/content/en/docs/setup/upgrade/canary/snips.sh
+++ b/content/en/docs/setup/upgrade/canary/snips.sh
@@ -82,7 +82,7 @@ kubectl label namespace test-ns istio-injection=enabled
}

snip_data_plane_4() {
-kubectl apply -n test-ns -f samples/sleep/sleep.yaml
+kubectl apply -n test-ns -f samples/curl/curl.yaml
}

snip_data_plane_5() {
@@ -117,9 +117,9 @@ kubectl label ns app-ns-3 istio.io/rev=prod-canary
}

snip_usage_4() {
-kubectl apply -n app-ns-1 -f samples/sleep/sleep.yaml
-kubectl apply -n app-ns-2 -f samples/sleep/sleep.yaml
-kubectl apply -n app-ns-3 -f samples/sleep/sleep.yaml
+kubectl apply -n app-ns-1 -f samples/curl/curl.yaml
+kubectl apply -n app-ns-2 -f samples/curl/curl.yaml
+kubectl apply -n app-ns-3 -f samples/curl/curl.yaml
}

snip_usage_5() {
@@ -128,9 +128,9 @@ istioctl ps

! IFS=$'\n' read -r -d '' snip_usage_5_out <<\ENDSNIP
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-sleep-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-s8zfg 1.24.0
-sleep-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1.23.1
-sleep-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1-23.1
+curl-78ff5975c6-62pzf.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-s8zfg 1.24.0
+curl-78ff5975c6-8kxpl.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1.23.1
+curl-78ff5975c6-8q7m6.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-23-1-bdf5948d5-n72r2 1.23.1
ENDSNIP

snip_usage_6() {
@@ -148,9 +148,9 @@ istioctl ps

! IFS=$'\n' read -r -d '' snip_usage_8_out <<\ENDSNIP
NAME CLUSTER CDS LDS EDS RDS ECDS ISTIOD VERSION
-sleep-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
-sleep-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
-sleep-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
+curl-5984f48bc7-kmj6x.app-ns-1 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
+curl-78ff5975c6-jldk4.app-ns-3 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
+curl-7cdd8dccb9-5bq5n.app-ns-2 Kubernetes SYNCED SYNCED SYNCED SYNCED NOT SENT istiod-1-24-0-7f6fc6cfd6-jsktb 1.24.0
ENDSNIP

snip_default_tag_1() {
diff --git a/content/en/docs/tasks/observability/distributed-tracing/mesh-and-proxy-config/index.md b/content/en/docs/tasks/observability/distributed-tracing/mesh-and-proxy-config/index.md
index 7133f36c0dea4..81fe0d0c547bc 100644
--- a/content/en/docs/tasks/observability/distributed-tracing/mesh-and-proxy-config/index.md
+++ b/content/en/docs/tasks/observability/distributed-tracing/mesh-and-proxy-config/index.md
@@ -115,14 +115,14 @@ EOF

You can add the `proxy.istio.io/config` annotation to your Pod metadata specification to override any mesh-wide tracing settings.
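For a quick experiment, the same kind of override can also be patched onto a live deployment instead of editing the sample manifest shown next. This is a sketch only, assuming the `curl` sample is already deployed in the current namespace; the `sampling: 100.0` value is illustrative and not taken from this guide:

{{< text bash >}}
$ # Add the proxy.istio.io/config annotation to the pod template;
$ # the resulting rollout recreates the pod with the override applied.
$ kubectl patch deployment curl --type merge \
    -p '{"spec":{"template":{"metadata":{"annotations":{"proxy.istio.io/config":"tracing:\n  sampling: 100.0\n"}}}}}'
{{< /text >}}

Editing the deployment YAML, as shown next, achieves the same result and keeps the override in source control.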
-For instance, to modify the `sleep` deployment shipped with Istio you would add -the following to `samples/sleep/sleep.yaml`: +For instance, to modify the `curl` deployment shipped with Istio you would add +the following to `samples/curl/curl.yaml`: {{< text yaml >}} apiVersion: apps/v1 kind: Deployment metadata: - name: sleep + name: curl spec: ... template: diff --git a/content/en/docs/tasks/observability/distributed-tracing/sampling/index.md b/content/en/docs/tasks/observability/distributed-tracing/sampling/index.md index f0c116fddf7e0..560731af3bfb3 100644 --- a/content/en/docs/tasks/observability/distributed-tracing/sampling/index.md +++ b/content/en/docs/tasks/observability/distributed-tracing/sampling/index.md @@ -87,7 +87,7 @@ For instance, to override the mesh-wide sampling above, you would add the follow apiVersion: apps/v1 kind: Deployment metadata: - name: sleep + name: curl spec: ... template: diff --git a/content/en/docs/tasks/observability/logs/access-log/index.md b/content/en/docs/tasks/observability/logs/access-log/index.md index 30eb65a9d15c2..a47bab08eaf36 100644 --- a/content/en/docs/tasks/observability/logs/access-log/index.md +++ b/content/en/docs/tasks/observability/logs/access-log/index.md @@ -83,10 +83,10 @@ Istio will use the following default access log format if `accessLogFormat` is n \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" %UPSTREAM_CLUSTER% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME% %ROUTE_NAME%\n {{< /text >}} -The following table shows an example using the default access log format for a request sent from `sleep` to `httpbin`: +The following table shows an example using the default access log format for a request sent from `curl` to `httpbin`: -| Log operator | access log in sleep | access log in httpbin | -|--------------|---------------------|-----------------------| +| Log operator | access log in curl | access log in httpbin | +|--------------|--------------------|-----------------------| | `[%START_TIME%]` | `[2020-11-25T21:26:18.409Z]` | `[2020-11-25T21:26:18.409Z]` | `\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\"` | `"GET /status/418 HTTP/1.1"` | `"GET /status/418 HTTP/1.1"` | `%RESPONSE_CODE%` | `418` | `418` @@ -112,10 +112,10 @@ The following table shows an example using the default access log format for a r ## Test the access log -1. Send a request from `sleep` to `httpbin`: +1. Send a request from `curl` to `httpbin`: {{< text bash >}} - $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418 + $ kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v httpbin:8000/status/418 ... < HTTP/1.1 418 Unknown ... @@ -125,10 +125,10 @@ The following table shows an example using the default access log format for a r ... {{< /text >}} -1. Check `sleep`'s log: +1. 
Check `curl`'s log: {{< text bash >}} - $ kubectl logs -l app=sleep -c istio-proxy + $ kubectl logs -l app=curl -c istio-proxy [2020-11-25T21:26:18.409Z] "GET /status/418 HTTP/1.1" 418 - via_upstream - "-" 0 135 4 4 "-" "curl/7.73.0-DEV" "84961386-6d84-929d-98bd-c5aee93b5c88" "httpbin:8000" "10.44.1.27:80" outbound|8000||httpbin.foo.svc.cluster.local 10.44.1.23:37652 10.0.45.184:8000 10.44.1.23:46520 - default {{< /text >}} @@ -139,14 +139,14 @@ The following table shows an example using the default access log format for a r [2020-11-25T21:26:18.409Z] "GET /status/418 HTTP/1.1" 418 - via_upstream - "-" 0 135 3 1 "-" "curl/7.73.0-DEV" "84961386-6d84-929d-98bd-c5aee93b5c88" "httpbin:8000" "127.0.0.1:80" inbound|8000|| 127.0.0.1:41854 10.44.1.27:80 10.44.1.23:37652 outbound_.8000_._.httpbin.foo.svc.cluster.local default {{< /text >}} -Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `sleep` and `httpbin`, respectively. You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules). +Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `curl` and `httpbin`, respectively. You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules). ## Cleanup -Shutdown the [sleep]({{< github_tree >}}/samples/sleep) and [httpbin]({{< github_tree >}}/samples/httpbin) services: +Shutdown the [curl]({{< github_tree >}}/samples/curl) and [httpbin]({{< github_tree >}}/samples/httpbin) services: {{< text bash >}} -$ kubectl delete -f @samples/sleep/sleep.yaml@ +$ kubectl delete -f @samples/curl/curl.yaml@ $ kubectl delete -f @samples/httpbin/httpbin.yaml@ {{< /text >}} diff --git a/content/en/docs/tasks/observability/logs/access-log/snips.sh b/content/en/docs/tasks/observability/logs/access-log/snips.sh index 32dd86bb349f0..8ed840897c28e 100644 --- a/content/en/docs/tasks/observability/logs/access-log/snips.sh +++ b/content/en/docs/tasks/observability/logs/access-log/snips.sh @@ -47,7 +47,7 @@ ENDSNIP ENDSNIP snip_test_the_access_log_1() { -kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418 +kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v httpbin:8000/status/418 } ! IFS=$'\n' read -r -d '' snip_test_the_access_log_1_out <<\ENDSNIP @@ -61,7 +61,7 @@ I'm a teapot! ENDSNIP snip_test_the_access_log_2() { -kubectl logs -l app=sleep -c istio-proxy +kubectl logs -l app=curl -c istio-proxy } ! 
IFS=$'\n' read -r -d '' snip_test_the_access_log_2_out <<\ENDSNIP @@ -77,7 +77,7 @@ kubectl logs -l app=httpbin -c istio-proxy ENDSNIP snip_cleanup_1() { -kubectl delete -f samples/sleep/sleep.yaml +kubectl delete -f samples/curl/curl.yaml kubectl delete -f samples/httpbin/httpbin.yaml } diff --git a/content/en/docs/tasks/observability/logs/access-log/test.sh b/content/en/docs/tasks/observability/logs/access-log/test.sh index 2fc39b28b9e10..12e1411df8090 100644 --- a/content/en/docs/tasks/observability/logs/access-log/test.sh +++ b/content/en/docs/tasks/observability/logs/access-log/test.sh @@ -34,9 +34,9 @@ source "tests/util/samples.sh" kubectl label namespace default istio-injection=enabled --overwrite -# Start the sleep sample -startup_sleep_sample -export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') +# Start the curl sample +startup_curl_sample +export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}') # Start the httpbin sample startup_httpbin_sample diff --git a/content/en/docs/tasks/observability/logs/otel-provider/index.md b/content/en/docs/tasks/observability/logs/otel-provider/index.md index 9627530ea94b4..65ac7edbe639f 100644 --- a/content/en/docs/tasks/observability/logs/otel-provider/index.md +++ b/content/en/docs/tasks/observability/logs/otel-provider/index.md @@ -66,11 +66,11 @@ $ cat <}} -The following table shows an example using the default access log format for a request sent from `sleep` to `httpbin`: +The following table shows an example using the default access log format for a request sent from `curl` to `httpbin`: -| Log operator | access log in sleep | access log in httpbin | -|--------------|---------------------|-----------------------| +| Log operator | access log in curl | access log in httpbin | +|--------------|--------------------|-----------------------| | `[%START_TIME%]` | `[2020-11-25T21:26:18.409Z]` | `[2020-11-25T21:26:18.409Z]` | `\"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\"` | `"GET /status/418 HTTP/1.1"` | `"GET /status/418 HTTP/1.1"` | `%RESPONSE_CODE%` | `418` | `418` @@ -147,10 +147,10 @@ The following table shows an example using the default access log format for a r ## Test the access log -1. Send a request from `sleep` to `httpbin`: +1. Send a request from `curl` to `httpbin`: {{< text bash >}} - $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418 + $ kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v httpbin:8000/status/418 ... < HTTP/1.1 418 Unknown ... @@ -167,15 +167,15 @@ The following table shows an example using the default access log format for a r [2020-11-25T21:26:18.409Z] "GET /status/418 HTTP/1.1" 418 - via_upstream - "-" 0 135 3 1 "-" "curl/7.73.0-DEV" "84961386-6d84-929d-98bd-c5aee93b5c88" "httpbin:8000" "127.0.0.1:80" inbound|8000|| 127.0.0.1:41854 10.44.1.27:80 10.44.1.23:37652 outbound_.8000_._.httpbin.foo.svc.cluster.local default {{< /text >}} -Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `sleep` and `httpbin`, respectively. You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules). +Note that the messages corresponding to the request appear in logs of the Istio proxies of both the source and the destination, `curl` and `httpbin`, respectively. 
You can see in the log the HTTP verb (`GET`), the HTTP path (`/status/418`), the response code (`418`) and other [request-related information](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#format-rules). ## Cleanup -Shutdown the [sleep]({{< github_tree >}}/samples/sleep) and [httpbin]({{< github_tree >}}/samples/httpbin) services: +Shutdown the [curl]({{< github_tree >}}/samples/curl) and [httpbin]({{< github_tree >}}/samples/httpbin) services: {{< text bash >}} -$ kubectl delete telemetry sleep-logging -$ kubectl delete -f @samples/sleep/sleep.yaml@ +$ kubectl delete telemetry curl-logging +$ kubectl delete -f @samples/curl/curl.yaml@ $ kubectl delete -f @samples/httpbin/httpbin.yaml@ $ kubectl delete -f @samples/open-telemetry/otel.yaml@ -n istio-system $ kubectl delete namespace observability diff --git a/content/en/docs/tasks/observability/logs/otel-provider/snips.sh b/content/en/docs/tasks/observability/logs/otel-provider/snips.sh index 8a7936be28587..222a7aa41c3ea 100644 --- a/content/en/docs/tasks/observability/logs/otel-provider/snips.sh +++ b/content/en/docs/tasks/observability/logs/otel-provider/snips.sh @@ -62,11 +62,11 @@ cat <}} $ cat < /dev/null + kubectl exec deploy/curl -- curl -sS "http://httpbin:8000/$request_path" > /dev/null done } @@ -78,8 +78,8 @@ function count_by_pod() { curl -G -s "http://$loki_address:3100/loki/api/v1/query_range" --data-urlencode "query={namespace=\"$namespace\", pod=\"$name\"}" | jq '.data.result[0].values | length' } -count_sleep_pod() { - local pod=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) +count_curl_pod() { + local pod=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name}) count_by_pod default $pod } @@ -89,16 +89,16 @@ count_httpbin_pod() { } rollout_restart_pods() { - kubectl rollout restart deploy/sleep + kubectl rollout restart deploy/curl kubectl rollout restart deploy/httpbin - _wait_for_deployment default sleep + _wait_for_deployment default curl _wait_for_deployment default httpbin } send_httpbin_requests "status/200" # no logs are sent to loki -_verify_same count_sleep_pod "0" +_verify_same count_curl_pod "0" _verify_same count_httpbin_pod "0" # enable access log via Telemetry API @@ -107,17 +107,17 @@ rollout_restart_pods send_httpbin_requests "status/200" -_verify_same count_sleep_pod "10" +_verify_same count_curl_pod "10" _verify_same count_httpbin_pod "10" -# disable access log for sleep pod +# disable access log for curl pod snip_get_started_with_telemetry_api_2 rollout_restart_pods send_httpbin_requests "status/200" -# sleep pod logs are not sent to loki -_verify_same count_sleep_pod "0" +# curl pod logs are not sent to loki +_verify_same count_curl_pod "0" _verify_same count_httpbin_pod "10" # disable httpbin @@ -126,24 +126,24 @@ rollout_restart_pods send_httpbin_requests "status/200" -_verify_same count_sleep_pod "0" +_verify_same count_curl_pod "0" # httpbin pod logs are not sent to loki _verify_same count_httpbin_pod "0" -# filter sleep logs +# filter curl logs kubectl delete telemetry --all -n default snip_get_started_with_telemetry_api_4 rollout_restart_pods # only 5xx logs are sent to loki send_httpbin_requests "status/200" -_verify_same count_sleep_pod "0" +_verify_same count_curl_pod "0" send_httpbin_requests "status/500" -_verify_same count_sleep_pod "10" +_verify_same count_curl_pod "10" # @cleanup -cleanup_sleep_sample +cleanup_curl_sample cleanup_httpbin_sample snip_cleanup_1 diff --git 
a/content/en/docs/tasks/security/authentication/authn-policy/index.md b/content/en/docs/tasks/security/authentication/authn-policy/index.md index 7a9758de2eba7..892cfa2d2ea59 100644 --- a/content/en/docs/tasks/security/authentication/authn-policy/index.md +++ b/content/en/docs/tasks/security/authentication/authn-policy/index.md @@ -27,45 +27,45 @@ $ istioctl install --set profile=default ### Setup -Our examples use two namespaces `foo` and `bar`, with two services, `httpbin` and `sleep`, both running with an Envoy proxy. We also use second -instances of `httpbin` and `sleep` running without the sidecar in the `legacy` namespace. If you’d like to use the same examples when trying the tasks, +Our examples use two namespaces `foo` and `bar`, with two services, `httpbin` and `curl`, both running with an Envoy proxy. We also use second +instances of `httpbin` and `curl` running without the sidecar in the `legacy` namespace. If you’d like to use the same examples when trying the tasks, run the following: {{< text bash >}} $ kubectl create ns foo $ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo -$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo +$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo $ kubectl create ns bar $ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n bar -$ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n bar +$ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n bar $ kubectl create ns legacy $ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n legacy -$ kubectl apply -f @samples/sleep/sleep.yaml@ -n legacy +$ kubectl apply -f @samples/curl/curl.yaml@ -n legacy {{< /text >}} -You can verify setup by sending an HTTP request with `curl` from any `sleep` pod in the namespace `foo`, `bar` or `legacy` to either `httpbin.foo`, +You can verify setup by sending an HTTP request with `curl` from any `curl` pod in the namespace `foo`, `bar` or `legacy` to either `httpbin.foo`, `httpbin.bar` or `httpbin.legacy`. All requests should succeed with HTTP code 200. 
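Before probing reachability, it can help to confirm which pods actually carry a sidecar. The sketch below assumes the three namespaces above; the `curl` pods in `foo` and `bar` should list an extra `istio-proxy` container, while the one in `legacy` should not:

{{< text bash >}}
$ # Print the container names of each curl pod; injected pods should
$ # show "curl istio-proxy", the legacy pod only "curl".
$ for ns in foo bar legacy; do
    echo -n "${ns}: "
    kubectl get pod -l app=curl -n "${ns}" -o jsonpath='{.items[0].spec.containers[*].name}'
    echo
  done
{{< /text >}}

With the sidecars confirmed, the reachability checks follow.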
-For example, here is a command to check `sleep.bar` to `httpbin.foo` reachability: +For example, here is a command to check `curl.bar` to `httpbin.foo` reachability: {{< text bash >}} -$ kubectl exec "$(kubectl get pod -l app=sleep -n bar -o jsonpath={.items..metadata.name})" -c sleep -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" +$ kubectl exec "$(kubectl get pod -l app=curl -n bar -o jsonpath={.items..metadata.name})" -c curl -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} This one-liner command conveniently iterates through all reachability combinations: {{< text bash >}} -$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.foo to httpbin.legacy: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.bar to httpbin.legacy: 200 -sleep.legacy to httpbin.foo: 200 -sleep.legacy to httpbin.bar: 200 -sleep.legacy to httpbin.legacy: 200 +$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.foo to httpbin.legacy: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.bar to httpbin.legacy: 200 +curl.legacy to httpbin.foo: 200 +curl.legacy to httpbin.bar: 200 +curl.legacy to httpbin.legacy: 200 {{< /text >}} Verify there is no peer authentication policy in the system with the following command: @@ -99,14 +99,14 @@ upstream request to the backend. That header's presence is evidence that mutual used. For example: {{< text bash >}} -$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=;/' - "By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep" +$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=;/' + "By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl" {{< /text >}} When the server doesn't have sidecar, the `X-Forwarded-Client-Cert` header is not there, which implies requests are in plain text. 
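If you want to see both cases side by side before running the check below, this sketch (reusing the `foo` and `legacy` namespaces from the setup) prints whether the header arrives at a sidecar and a non-sidecar destination:

{{< text bash >}}
$ # The header should appear for httpbin.foo (sidecar) and be absent
$ # for httpbin.legacy (no sidecar); "|| true" keeps the loop going
$ # when grep matches nothing.
$ for dest in httpbin.foo httpbin.legacy; do
    echo "== ${dest} =="
    kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- \
      curl -s "http://${dest}:8000/headers" | grep -i x-forwarded-client-cert || true
  done
{{< /text >}}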
{{< text bash >}}
-$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert
+$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert
{{< /text >}}

@@ -140,21 +140,21 @@ Since it doesn't specify a value for the `selector` field, the policy applies to

Run the test command again:

{{< text bash >}}
-$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
 command terminated with exit code 56
-sleep.legacy to httpbin.bar: 000
+curl.legacy to httpbin.bar: 000
 command terminated with exit code 56
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.legacy: 200
{{< /text >}}

-You see requests still succeed, except for those from the client that doesn't have proxy, `sleep.legacy`, to the server with a proxy, `httpbin.foo` or `httpbin.bar`. This is expected because mutual TLS is now strictly required, but the workload without sidecar cannot comply.
+You see requests still succeed, except for those from the client that doesn't have a proxy, `curl.legacy`, to the servers with a proxy, `httpbin.foo` or `httpbin.bar`. This is expected because mutual TLS is now strictly required, but the workload without a sidecar cannot comply.

### Cleanup part 1

@@ -183,20 +183,20 @@ spec:
 EOF
 {{< /text >}}

-As this policy is applied on workloads in namespace `foo` only, you should see only request from client-without-sidecar (`sleep.legacy`) to `httpbin.foo` start to fail.
+As this policy is applied to workloads in namespace `foo` only, you should see only requests from the client without a sidecar (`curl.legacy`) to `httpbin.foo` start to fail.
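Before re-running the full probe below, you can spot-check just that pair; a minimal sketch using the same pods:

{{< text bash >}}
$ # Expected to fail now that foo requires mutual TLS: the legacy
$ # client has no sidecar and cannot present a client certificate.
$ kubectl exec "$(kubectl get pod -l app=curl -n legacy -o jsonpath={.items..metadata.name})" -c curl -n legacy -- \
    curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n"
{{< /text >}}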
{{< text bash >}}
-$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done
-sleep.foo to httpbin.foo: 200
-sleep.foo to httpbin.bar: 200
-sleep.foo to httpbin.legacy: 200
-sleep.bar to httpbin.foo: 200
-sleep.bar to httpbin.bar: 200
-sleep.bar to httpbin.legacy: 200
-sleep.legacy to httpbin.foo: 000
+$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done
+curl.foo to httpbin.foo: 200
+curl.foo to httpbin.bar: 200
+curl.foo to httpbin.legacy: 200
+curl.bar to httpbin.foo: 200
+curl.bar to httpbin.bar: 200
+curl.bar to httpbin.legacy: 200
+curl.legacy to httpbin.foo: 000
 command terminated with exit code 56
-sleep.legacy to httpbin.bar: 200
-sleep.legacy to httpbin.legacy: 200
+curl.legacy to httpbin.bar: 200
+curl.legacy to httpbin.legacy: 200
{{< /text >}}

### Enable mutual TLS per workload

@@ -219,26 +219,26 @@ spec:
 EOF
 {{< /text >}}

-Again, run the probing command. As expected, request from `sleep.legacy` to `httpbin.bar` starts failing with the same reasons.
+Again, run the probing command. As expected, requests from `curl.legacy` to `httpbin.bar` start failing for the same reason.
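If the probe output below differs from what you expect, listing every peer authentication policy currently in effect is a quick way to spot a leftover or misplaced one:

{{< text bash >}}
$ # Should show the namespace-wide policy in foo and the
$ # workload-specific policy in bar created in the steps above.
$ kubectl get peerauthentication --all-namespaces
{{< /text >}}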
{{< text bash >}} -$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.foo to httpbin.legacy: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.bar to httpbin.legacy: 200 -sleep.legacy to httpbin.foo: 000 +$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.foo to httpbin.legacy: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.bar to httpbin.legacy: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 200 -sleep.legacy to httpbin.legacy: 200 +curl.legacy to httpbin.bar: 200 +curl.legacy to httpbin.legacy: 200 {{< /text >}} ### Policy precedence A workload-specific peer authentication policy takes precedence over a namespace-wide policy. You can test this behavior if you add a policy to disable mutual TLS for the `httpbin.foo` workload, for example. Note that you've already created a namespace-wide policy that enables mutual TLS for all services in namespace `foo` and observe that requests from -`sleep.legacy` to `httpbin.foo` are failing (see above). +`curl.legacy` to `httpbin.foo` are failing (see above). {{< text bash >}} $ cat <}} -Re-running the request from `sleep.legacy`, you should see a success return code again (200), confirming service-specific policy overrides the namespace-wide policy. +Re-running the request from `curl.legacy`, you should see a success return code again (200), confirming service-specific policy overrides the namespace-wide policy. 
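Before the re-run shown below, you can also ask Istio which policy the workload is actually subject to. This is a sketch using `istioctl experimental describe`; the exact output varies by Istio version:

{{< text bash >}}
$ # Reports, among other things, the effective peer authentication
$ # mode for the httpbin pod in foo.
$ istioctl experimental describe pod "$(kubectl get pod -l app=httpbin -n foo -o jsonpath={.items..metadata.name})" -n foo
{{< /text >}}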
{{< text bash >}} -$ kubectl exec "$(kubectl get pod -l app=sleep -n legacy -o jsonpath={.items..metadata.name})" -c sleep -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" +$ kubectl exec "$(kubectl get pod -l app=curl -n legacy -o jsonpath={.items..metadata.name})" -c curl -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} diff --git a/content/en/docs/tasks/security/authentication/authn-policy/snips.sh b/content/en/docs/tasks/security/authentication/authn-policy/snips.sh index 048b38b5f3777..e79ddca7974e3 100644 --- a/content/en/docs/tasks/security/authentication/authn-policy/snips.sh +++ b/content/en/docs/tasks/security/authentication/authn-policy/snips.sh @@ -28,17 +28,17 @@ istioctl install --set profile=default snip_setup_1() { kubectl create ns foo kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo kubectl create ns bar kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n bar -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n bar +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n bar kubectl create ns legacy kubectl apply -f samples/httpbin/httpbin.yaml -n legacy -kubectl apply -f samples/sleep/sleep.yaml -n legacy +kubectl apply -f samples/curl/curl.yaml -n legacy } snip_setup_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n bar -o jsonpath={.items..metadata.name})" -c sleep -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n bar -o jsonpath={.items..metadata.name})" -c curl -n bar -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_setup_2_out <<\ENDSNIP @@ -46,19 +46,19 @@ kubectl exec "$(kubectl get pod -l app=sleep -n bar -o jsonpath={.items..metadat ENDSNIP snip_setup_3() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl -s "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } ! 
IFS=$'\n' read -r -d '' snip_setup_3_out <<\ENDSNIP -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.foo to httpbin.legacy: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.bar to httpbin.legacy: 200 -sleep.legacy to httpbin.foo: 200 -sleep.legacy to httpbin.bar: 200 -sleep.legacy to httpbin.legacy: 200 +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.foo to httpbin.legacy: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.bar to httpbin.legacy: 200 +curl.legacy to httpbin.foo: 200 +curl.legacy to httpbin.bar: 200 +curl.legacy to httpbin.legacy: 200 ENDSNIP snip_setup_4() { @@ -78,15 +78,15 @@ kubectl get destinationrules.networking.istio.io --all-namespaces -o yaml | grep ENDSNIP snip_auto_mutual_tls_1() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=;/' +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl -s http://httpbin.foo:8000/headers -s | jq '.headers["X-Forwarded-Client-Cert"][0]' | sed 's/Hash=[a-z0-9]*;/Hash=;/' } ! IFS=$'\n' read -r -d '' snip_auto_mutual_tls_1_out <<\ENDSNIP - "By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep" + "By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl" ENDSNIP snip_auto_mutual_tls_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.legacy:8000/headers -s | grep X-Forwarded-Client-Cert } ! IFS=$'\n' read -r -d '' snip_auto_mutual_tls_2_out <<\ENDSNIP @@ -107,21 +107,21 @@ EOF } snip_globally_enabling_istio_mutual_tls_in_strict_mode_2() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } ! 
IFS=$'\n' read -r -d '' snip_globally_enabling_istio_mutual_tls_in_strict_mode_2_out <<\ENDSNIP -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.foo to httpbin.legacy: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.bar to httpbin.legacy: 200 -sleep.legacy to httpbin.foo: 000 +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.foo to httpbin.legacy: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.bar to httpbin.legacy: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 000 +curl.legacy to httpbin.bar: 000 command terminated with exit code 56 -sleep.legacy to httpbin.legacy: 200 +curl.legacy to httpbin.legacy: 200 ENDSNIP snip_cleanup_part_1_1() { @@ -142,20 +142,20 @@ EOF } snip_namespacewide_policy_2() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } ! IFS=$'\n' read -r -d '' snip_namespacewide_policy_2_out <<\ENDSNIP -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.foo to httpbin.legacy: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.bar to httpbin.legacy: 200 -sleep.legacy to httpbin.foo: 000 +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.foo to httpbin.legacy: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.bar to httpbin.legacy: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 200 -sleep.legacy to httpbin.legacy: 200 +curl.legacy to httpbin.bar: 200 +curl.legacy to httpbin.legacy: 200 ENDSNIP snip_enable_mutual_tls_per_workload_1() { @@ -175,26 +175,26 @@ EOF } snip_enable_mutual_tls_per_workload_2() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } ! 
IFS=$'\n' read -r -d '' snip_enable_mutual_tls_per_workload_2_out <<\ENDSNIP -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.foo to httpbin.legacy: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.bar to httpbin.legacy: 200 -sleep.legacy to httpbin.foo: 000 +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.foo to httpbin.legacy: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.bar to httpbin.legacy: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 000 +curl.legacy to httpbin.bar: 000 command terminated with exit code 56 -sleep.legacy to httpbin.legacy: 200 +curl.legacy to httpbin.legacy: 200 ENDSNIP ! IFS=$'\n' read -r -d '' snip_enable_mutual_tls_per_workload_3 <<\ENDSNIP ... -sleep.legacy to httpbin.bar: 000 +curl.legacy to httpbin.bar: 000 command terminated with exit code 56 ENDSNIP @@ -218,20 +218,20 @@ EOF } snip_enable_mutual_tls_per_workload_5() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar" "legacy"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } ! IFS=$'\n' read -r -d '' snip_enable_mutual_tls_per_workload_5_out <<\ENDSNIP -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.foo to httpbin.legacy: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.bar to httpbin.legacy: 200 -sleep.legacy to httpbin.foo: 000 +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.foo to httpbin.legacy: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.bar to httpbin.legacy: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 200 -sleep.legacy to httpbin.legacy: 200 +curl.legacy to httpbin.bar: 200 +curl.legacy to httpbin.legacy: 200 ENDSNIP snip_policy_precedence_1() { @@ -251,7 +251,7 @@ EOF } snip_policy_precedence_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n legacy -o jsonpath={.items..metadata.name})" -c sleep -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n legacy -o jsonpath={.items..metadata.name})" -c curl -n legacy -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" } ! 
IFS=$'\n' read -r -d '' snip_policy_precedence_2_out <<\ENDSNIP diff --git a/content/en/docs/tasks/security/authentication/authn-policy/test.sh b/content/en/docs/tasks/security/authentication/authn-policy/test.sh index 13925e08c277a..42211dc274609 100644 --- a/content/en/docs/tasks/security/authentication/authn-policy/test.sh +++ b/content/en/docs/tasks/security/authentication/authn-policy/test.sh @@ -27,11 +27,11 @@ _wait_for_deployment istio-system istiod snip_setup_1 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl _wait_for_deployment bar httpbin -_wait_for_deployment bar sleep +_wait_for_deployment bar curl _wait_for_deployment legacy httpbin -_wait_for_deployment legacy sleep +_wait_for_deployment legacy curl _verify_same snip_setup_2 "$snip_setup_2_out" _verify_same snip_setup_3 "$snip_setup_3_out" diff --git a/content/en/docs/tasks/security/authentication/claim-to-header/index.md b/content/en/docs/tasks/security/authentication/claim-to-header/index.md index c1e082fe7234c..7c09c39c3e3c9 100644 --- a/content/en/docs/tasks/security/authentication/claim-to-header/index.md +++ b/content/en/docs/tasks/security/authentication/claim-to-header/index.md @@ -27,20 +27,20 @@ Before you begin this task, do the following: * Install Istio using [Istio installation guide](/docs/setup/install/istioctl/). -* Deploy `httpbin` and `sleep` workloads in namespace `foo` with sidecar injection enabled. +* Deploy `httpbin` and `curl` workloads in namespace `foo` with sidecar injection enabled. Deploy the example namespace and workloads using these commands: {{< text bash >}} $ kubectl create ns foo $ kubectl label namespace foo istio-injection=enabled $ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n foo - $ kubectl apply -f @samples/sleep/sleep.yaml@ -n foo + $ kubectl apply -f @samples/curl/curl.yaml@ -n foo {{< /text >}} -* Verify that `sleep` successfully communicates with `httpbin` using this command: +* Verify that `curl` successfully communicates with `httpbin` using this command: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -78,7 +78,7 @@ Before you begin this task, do the following: 1. Verify that a request with an invalid JWT is denied: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n" 401 {{< /text >}} @@ -92,14 +92,14 @@ Before you begin this task, do the following: 1. 
Verify that a request with a valid JWT is allowed: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" 200 {{< /text >}} 1. Verify that a request contains a valid HTTP header with JWT claim value: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]' + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]' "bar" {{< /text >}} diff --git a/content/en/docs/tasks/security/authentication/claim-to-header/snips.sh b/content/en/docs/tasks/security/authentication/claim-to-header/snips.sh index ce442c88b6f7d..08a6c3f09c59f 100644 --- a/content/en/docs/tasks/security/authentication/claim-to-header/snips.sh +++ b/content/en/docs/tasks/security/authentication/claim-to-header/snips.sh @@ -24,11 +24,11 @@ snip_before_you_begin_1() { kubectl create ns foo kubectl label namespace foo istio-injection=enabled kubectl apply -f samples/httpbin/httpbin.yaml -n foo -kubectl apply -f samples/sleep/sleep.yaml -n foo +kubectl apply -f samples/curl/curl.yaml -n foo } snip_before_you_begin_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP @@ -56,7 +56,7 @@ EOF } snip_allow_requests_with_valid_jwt_and_listtyped_claims_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_2_out <<\ENDSNIP @@ -72,7 +72,7 @@ TOKEN=$(curl https://raw.githubusercontent.com/istio/istio/master/security/tools ENDSNIP snip_allow_requests_with_valid_jwt_and_listtyped_claims_4() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" } ! 
IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_4_out <<\ENDSNIP @@ -80,7 +80,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_allow_requests_with_valid_jwt_and_listtyped_claims_5() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]' +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -H "Authorization: Bearer $TOKEN" | jq '.headers["X-Jwt-Claim-Foo"][0]' } ! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_5_out <<\ENDSNIP diff --git a/content/en/docs/tasks/security/authentication/claim-to-header/test.sh b/content/en/docs/tasks/security/authentication/claim-to-header/test.sh index a78f9ce6a62fe..9206fa93dfa5a 100644 --- a/content/en/docs/tasks/security/authentication/claim-to-header/test.sh +++ b/content/en/docs/tasks/security/authentication/claim-to-header/test.sh @@ -27,7 +27,7 @@ export VERIFY_TIMEOUT=300 snip_before_you_begin_1 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl # Pull the Istio branch from the docs configuration file. ISTIO_BRANCH=$(yq '.source_branch_name' "${REPO_ROOT}"/data/args.yml) diff --git a/content/en/docs/tasks/security/authentication/mtls-migration/index.md b/content/en/docs/tasks/security/authentication/mtls-migration/index.md index 35de89f135e91..34eaab6e64679 100644 --- a/content/en/docs/tasks/security/authentication/mtls-migration/index.md +++ b/content/en/docs/tasks/security/authentication/mtls-migration/index.md @@ -36,35 +36,35 @@ the policies to enforce STRICT mutual TLS between the workloads. ## Set up the cluster -* Create two namespaces, `foo` and `bar`, and deploy [httpbin]({{< github_tree >}}/samples/httpbin) and [sleep]({{< github_tree >}}/samples/sleep) with sidecars on both of them: +* Create two namespaces, `foo` and `bar`, and deploy [httpbin]({{< github_tree >}}/samples/httpbin) and [curl]({{< github_tree >}}/samples/curl) with sidecars on both of them: {{< text bash >}} $ kubectl create ns foo $ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo - $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo + $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo $ kubectl create ns bar $ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n bar - $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n bar + $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n bar {{< /text >}} -* Create another namespace, `legacy`, and deploy [sleep]({{< github_tree >}}/samples/sleep) without a sidecar: +* Create another namespace, `legacy`, and deploy [curl]({{< github_tree >}}/samples/curl) without a sidecar: {{< text bash >}} $ kubectl create ns legacy - $ kubectl apply -f @samples/sleep/sleep.yaml@ -n legacy + $ kubectl apply -f @samples/curl/curl.yaml@ -n legacy {{< /text >}} -* Verify the setup by sending http requests (using curl) from the sleep pods, in namespaces `foo`, `bar` and `legacy`, to `httpbin.foo` and `httpbin.bar`. +* Verify the setup by sending http requests (using curl) from the curl pods, in namespaces `foo`, `bar` and `legacy`, to `httpbin.foo` and `httpbin.bar`. 
All requests should succeed with return code 200. {{< text bash >}} - $ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done - sleep.foo to httpbin.foo: 200 - sleep.foo to httpbin.bar: 200 - sleep.bar to httpbin.foo: 200 - sleep.bar to httpbin.bar: 200 - sleep.legacy to httpbin.foo: 200 - sleep.legacy to httpbin.bar: 200 + $ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done + curl.foo to httpbin.foo: 200 + curl.foo to httpbin.bar: 200 + curl.bar to httpbin.foo: 200 + curl.bar to httpbin.bar: 200 + curl.legacy to httpbin.foo: 200 + curl.legacy to httpbin.bar: 200 {{< /text >}} {{< tip >}} @@ -100,17 +100,17 @@ spec: EOF {{< /text >}} -Now, you should see the request from `sleep.legacy` to `httpbin.foo` failing. +Now, you should see the request from `curl.legacy` to `httpbin.foo` failing. {{< text bash >}} -$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.legacy to httpbin.foo: 000 +$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 200 +curl.legacy to httpbin.bar: 200 {{< /text >}} If you installed Istio with `values.global.proxy.privileged=true`, you can use `tcpdump` to verify @@ -122,7 +122,7 @@ tcpdump: verbose output suppressed, use -v or -vv for full protocol decode listening on eth0, link-type EN10MB (Ethernet), capture size 262144 bytes {{< /text >}} -You will see plain text and encrypted text in the output when requests are sent from `sleep.legacy` and `sleep.foo` +You will see plain text and encrypted text in the output when requests are sent from `curl.legacy` and `curl.foo` respectively. If you can't migrate all your services to Istio (i.e., inject Envoy sidecar in all of them), you will need to continue to use `PERMISSIVE` mode. @@ -145,11 +145,11 @@ spec: EOF {{< /text >}} -Now, both the `foo` and `bar` namespaces enforce mutual TLS only traffic, so you should see requests from `sleep.legacy` +Now, both the `foo` and `bar` namespaces enforce mutual TLS only traffic, so you should see requests from `curl.legacy` failing for both. 
{{< text bash >}} -$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +$ for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done {{< /text >}} ## Clean up the example diff --git a/content/en/docs/tasks/security/authentication/mtls-migration/snips.sh b/content/en/docs/tasks/security/authentication/mtls-migration/snips.sh index ed5dc50ae7403..4f3fffb83efec 100644 --- a/content/en/docs/tasks/security/authentication/mtls-migration/snips.sh +++ b/content/en/docs/tasks/security/authentication/mtls-migration/snips.sh @@ -23,28 +23,28 @@ snip_set_up_the_cluster_1() { kubectl create ns foo kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo kubectl create ns bar kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n bar -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n bar +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n bar } snip_set_up_the_cluster_2() { kubectl create ns legacy -kubectl apply -f samples/sleep/sleep.yaml -n legacy +kubectl apply -f samples/curl/curl.yaml -n legacy } snip_set_up_the_cluster_3() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } ! IFS=$'\n' read -r -d '' snip_set_up_the_cluster_3_out <<\ENDSNIP -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.legacy to httpbin.foo: 200 -sleep.legacy to httpbin.bar: 200 +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.legacy to httpbin.foo: 200 +curl.legacy to httpbin.bar: 200 ENDSNIP snip_set_up_the_cluster_4() { @@ -76,17 +76,17 @@ EOF } snip_lock_down_to_mutual_tls_by_namespace_2() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } ! 
IFS=$'\n' read -r -d '' snip_lock_down_to_mutual_tls_by_namespace_2_out <<\ENDSNIP -sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.legacy to httpbin.foo: 000 +curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 200 +curl.legacy to httpbin.bar: 200 ENDSNIP snip_lock_down_to_mutual_tls_by_namespace_3() { @@ -111,7 +111,7 @@ EOF } snip_lock_down_mutual_tls_for_the_entire_mesh_2() { -for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name})" -c sleep -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"; done; done +for from in "foo" "bar" "legacy"; do for to in "foo" "bar"; do kubectl exec "$(kubectl get pod -l app=curl -n ${from} -o jsonpath={.items..metadata.name})" -c curl -n ${from} -- curl http://httpbin.${to}:8000/ip -s -o /dev/null -w "curl.${from} to httpbin.${to}: %{http_code}\n"; done; done } snip_clean_up_the_example_1() { diff --git a/content/en/docs/tasks/security/authentication/mtls-migration/test.sh b/content/en/docs/tasks/security/authentication/mtls-migration/test.sh index 2a0753eb2844c..2b89a457da460 100644 --- a/content/en/docs/tasks/security/authentication/mtls-migration/test.sh +++ b/content/en/docs/tasks/security/authentication/mtls-migration/test.sh @@ -26,10 +26,10 @@ snip_set_up_the_cluster_1 snip_set_up_the_cluster_2 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl _wait_for_deployment bar httpbin -_wait_for_deployment bar sleep -_wait_for_deployment legacy sleep +_wait_for_deployment bar curl +_wait_for_deployment legacy curl # curl_foo_bar_legacy _verify_same snip_set_up_the_cluster_3 "$snip_set_up_the_cluster_3_out" @@ -66,13 +66,13 @@ set +e set +o pipefail # curl_foo_bar_legacy_httpbin_foo_mtls -expected="sleep.foo to httpbin.foo: 200 -sleep.foo to httpbin.bar: 200 -sleep.bar to httpbin.foo: 200 -sleep.bar to httpbin.bar: 200 -sleep.legacy to httpbin.foo: 000 +expected="curl.foo to httpbin.foo: 200 +curl.foo to httpbin.bar: 200 +curl.bar to httpbin.foo: 200 +curl.bar to httpbin.bar: 200 +curl.legacy to httpbin.foo: 000 command terminated with exit code 56 -sleep.legacy to httpbin.bar: 000 +curl.legacy to httpbin.bar: 000 command terminated with exit code 56" _verify_same snip_lock_down_mutual_tls_for_the_entire_mesh_2 "$expected" diff --git a/content/en/docs/tasks/security/authorization/authz-custom/index.md b/content/en/docs/tasks/security/authorization/authz-custom/index.md index a53aa9bb35549..87c338a6b7edb 100644 --- a/content/en/docs/tasks/security/authorization/authz-custom/index.md +++ b/content/en/docs/tasks/security/authorization/authz-custom/index.md @@ -21,7 +21,7 @@ Before you begin this task, do the following: * Deploy test workloads: - This task uses two workloads, `httpbin` and `sleep`, both deployed in namespace `foo`. + This task uses two workloads, `httpbin` and `curl`, both deployed in namespace `foo`. Both workloads run with an Envoy proxy sidecar. 
Deploy the `foo` namespace and workloads with the following command: @@ -29,13 +29,13 @@ Before you begin this task, do the following: $ kubectl create ns foo $ kubectl label ns foo istio-injection=enabled $ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n foo - $ kubectl apply -f @samples/sleep/sleep.yaml@ -n foo + $ kubectl apply -f @samples/curl/curl.yaml@ -n foo {{< /text >}} -* Verify that `sleep` can access `httpbin` with the following command: +* Verify that `curl` can access `httpbin` with the following command: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -185,14 +185,14 @@ The external authorizer is now ready to be used by the authorization policy. 1. Verify a request to path `/headers` with header `x-ext-authz: deny` is denied by the sample `ext_authz` server: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s denied by ext_authz for not found header `x-ext-authz: allow` in the request {{< /text >}} 1. Verify a request to path `/headers` with header `x-ext-authz: allow` is allowed by the sample `ext_authz` server: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers' + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers' ... "X-Ext-Authz-Check-Result": [ "allowed" @@ -203,7 +203,7 @@ The external authorizer is now ready to be used by the authorization policy. 1. Verify a request to path `/ip` is allowed and does not trigger the external authorization: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -213,12 +213,12 @@ The external authorizer is now ready to be used by the authorization policy. 
$ kubectl logs "$(kubectl get pod -l app=ext-authz -n foo -o jsonpath={.items..metadata.name})" -n foo -c ext-authz 2021/01/07 22:55:47 Starting HTTP server at [::]:8000 2021/01/07 22:55:47 Starting gRPC server at [::]:9000 - 2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} - 2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} + 2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" 
value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} + 2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} {{< /text >}} You can also tell from the log that mTLS is enabled for the connection between the `ext-authz` filter and the - sample `ext-authz` server because the source principal is populated with the value `spiffe://cluster.local/ns/foo/sa/sleep`. + sample `ext-authz` server because the source principal is populated with the value `spiffe://cluster.local/ns/foo/sa/curl`. You can now apply another authorization policy for the sample `ext-authz` server to control who is allowed to access it. 
diff --git a/content/en/docs/tasks/security/authorization/authz-custom/snips.sh b/content/en/docs/tasks/security/authorization/authz-custom/snips.sh index 052083ef43b6a..292f86c455609 100644 --- a/content/en/docs/tasks/security/authorization/authz-custom/snips.sh +++ b/content/en/docs/tasks/security/authorization/authz-custom/snips.sh @@ -24,11 +24,11 @@ snip_before_you_begin_1() { kubectl create ns foo kubectl label ns foo istio-injection=enabled kubectl apply -f samples/httpbin/httpbin.yaml -n foo -kubectl apply -f samples/sleep/sleep.yaml -n foo +kubectl apply -f samples/curl/curl.yaml -n foo } snip_before_you_begin_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP @@ -128,7 +128,7 @@ EOF } snip_enable_with_external_authorization_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: deny" -s } ! IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_2_out <<\ENDSNIP @@ -136,7 +136,7 @@ denied by ext_authz for not found header `x-ext-authz: allow` in the request ENDSNIP snip_enable_with_external_authorization_3() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers' +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -H "x-ext-authz: allow" -s | jq '.headers' } ! IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_3_out <<\ENDSNIP @@ -148,7 +148,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_enable_with_external_authorization_4() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -s -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_4_out <<\ENDSNIP @@ -162,8 +162,8 @@ kubectl logs "$(kubectl get pod -l app=ext-authz -n foo -o jsonpath={.items..met ! 
IFS=$'\n' read -r -d '' snip_enable_with_external_authorization_5_out <<\ENDSNIP 2021/01/07 22:55:47 Starting HTTP server at [::]:8000 2021/01/07 22:55:47 Starting gRPC server at [::]:9000 -2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} -2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/sleep"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/sleep"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} +2021/01/08 03:25:00 [gRPCv3][denied]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52088}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076306 nanos:473835000} http:{id:"13869142855783664817" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" 
value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"377ba0cdc2334270"} headers:{key:"x-b3-traceid" value:"635187cb20d92f62377ba0cdc2334270"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"deny"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"9609691a-4e9b-9545-ac71-3889bc2dffb0"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} +2021/01/08 03:25:06 [gRPCv3][allowed]: httpbin.foo:8000/headers, attributes: source:{address:{socket_address:{address:"10.44.0.22" port_value:52184}} principal:"spiffe://cluster.local/ns/foo/sa/curl"} destination:{address:{socket_address:{address:"10.44.3.30" port_value:80}} principal:"spiffe://cluster.local/ns/foo/sa/httpbin"} request:{time:{seconds:1610076300 nanos:925912000} http:{id:"17995949296433813435" method:"GET" headers:{key:":authority" value:"httpbin.foo:8000"} headers:{key:":method" value:"GET"} headers:{key:":path" value:"/headers"} headers:{key:"accept" value:"*/*"} headers:{key:"content-length" value:"0"} headers:{key:"user-agent" value:"curl/7.74.0-DEV"} headers:{key:"x-b3-sampled" value:"1"} headers:{key:"x-b3-spanid" value:"a66b5470e922fa80"} headers:{key:"x-b3-traceid" value:"300c2f2b90a618c8a66b5470e922fa80"} headers:{key:"x-envoy-attempt-count" value:"1"} headers:{key:"x-ext-authz" value:"allow"} headers:{key:"x-forwarded-client-cert" value:"By=spiffe://cluster.local/ns/foo/sa/httpbin;Hash=dd14782fa2f439724d271dbed846ef843ff40d3932b615da650d028db655fc8d;Subject=\"\";URI=spiffe://cluster.local/ns/foo/sa/curl"} headers:{key:"x-forwarded-proto" value:"http"} headers:{key:"x-request-id" value:"2b62daf1-00b9-97d9-91b8-ba6194ef58a4"} path:"/headers" host:"httpbin.foo:8000" protocol:"HTTP/1.1"}} metadata_context:{} ENDSNIP snip_clean_up_1() { diff --git a/content/en/docs/tasks/security/authorization/authz-custom/test.sh b/content/en/docs/tasks/security/authorization/authz-custom/test.sh index e3d38847b3f59..fca522770ef38 100644 --- a/content/en/docs/tasks/security/authorization/authz-custom/test.sh +++ b/content/en/docs/tasks/security/authorization/authz-custom/test.sh @@ -28,7 +28,7 @@ export VERIFY_TIMEOUT=300 snip_before_you_begin_1 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl _verify_same snip_before_you_begin_2 "$snip_before_you_begin_2_out" diff --git a/content/en/docs/tasks/security/authorization/authz-deny/index.md b/content/en/docs/tasks/security/authorization/authz-deny/index.md index 7072253a45beb..9b66d73df31c8 100644 --- a/content/en/docs/tasks/security/authorization/authz-deny/index.md +++ b/content/en/docs/tasks/security/authorization/authz-deny/index.md @@ -21,20 +21,20 @@ Before you begin this task, do the following: * Deploy workloads: - This task uses two workloads, `httpbin` and `sleep`, deployed on one namespace, `foo`. + This task uses two workloads, `httpbin` and `curl`, deployed on one namespace, `foo`. Both workloads run with an Envoy proxy in front of each. 
Deploy the example namespace and workloads with the following command: {{< text bash >}} $ kubectl create ns foo $ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo - $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo + $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo {{< /text >}} -* Verify that `sleep` talks to `httpbin` with the following command: +* Verify that `curl` talks to `httpbin` with the following command: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -72,14 +72,14 @@ Caching and propagation overhead can cause some delay. 1. Verify that `GET` requests are denied: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n" 403 {{< /text >}} 1. Verify that `POST` requests are allowed: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -113,14 +113,14 @@ Caching and propagation overhead can cause some delay. 1. Verify that `GET` requests with the HTTP header `x-token: admin` are allowed: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} 1. Verify that GET requests with the HTTP header `x-token: guest` are denied: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n" 403 {{< /text >}} @@ -151,7 +151,7 @@ Caching and propagation overhead can cause some delay. by the `deny-method-get` policy. 
Deny policies take precedence over the allow policies: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n" 403 {{< /text >}} @@ -159,7 +159,7 @@ Caching and propagation overhead can cause some delay. allowed by the `allow-path-ip` policy: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -167,7 +167,7 @@ Caching and propagation overhead can cause some delay. denied because they don’t match the `allow-path-ip` policy: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" 403 {{< /text >}} diff --git a/content/en/docs/tasks/security/authorization/authz-deny/snips.sh b/content/en/docs/tasks/security/authorization/authz-deny/snips.sh index a0ed31c6077c0..577c401e00615 100644 --- a/content/en/docs/tasks/security/authorization/authz-deny/snips.sh +++ b/content/en/docs/tasks/security/authorization/authz-deny/snips.sh @@ -23,11 +23,11 @@ snip_before_you_begin_1() { kubectl create ns foo kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo } snip_before_you_begin_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP @@ -54,7 +54,7 @@ EOF } snip_explicitly_deny_a_request_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -sS -o /dev/null -w "%{http_code}\n" } !
IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_2_out <<\ENDSNIP @@ -62,7 +62,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_explicitly_deny_a_request_3() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/post" -X POST -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_3_out <<\ENDSNIP @@ -92,7 +92,7 @@ EOF } snip_explicitly_deny_a_request_5() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_5_out <<\ENDSNIP @@ -100,7 +100,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_explicitly_deny_a_request_6() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: guest" -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_6_out <<\ENDSNIP @@ -127,7 +127,7 @@ EOF } snip_explicitly_deny_a_request_8() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: guest" -s -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_8_out <<\ENDSNIP @@ -135,7 +135,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_explicitly_deny_a_request_9() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/ip" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" } ! 
IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_9_out <<\ENDSNIP @@ -143,7 +143,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_explicitly_deny_a_request_10() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/get" -X GET -H "x-token: admin" -s -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_explicitly_deny_a_request_10_out <<\ENDSNIP diff --git a/content/en/docs/tasks/security/authorization/authz-deny/test.sh b/content/en/docs/tasks/security/authorization/authz-deny/test.sh index 9231be080bbd4..73c8ac3fa8ded 100644 --- a/content/en/docs/tasks/security/authorization/authz-deny/test.sh +++ b/content/en/docs/tasks/security/authorization/authz-deny/test.sh @@ -28,7 +28,7 @@ export VERIFY_TIMEOUT=300 snip_before_you_begin_1 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl _verify_same snip_before_you_begin_2 "$snip_before_you_begin_2_out" diff --git a/content/en/docs/tasks/security/authorization/authz-dry-run/index.md b/content/en/docs/tasks/security/authorization/authz-dry-run/index.md index 81d1a7d89cb25..402f04bc41f84 100644 --- a/content/en/docs/tasks/security/authorization/authz-dry-run/index.md +++ b/content/en/docs/tasks/security/authorization/authz-dry-run/index.md @@ -32,14 +32,14 @@ Before you begin this task, do the following: * Deploy test workloads: - This task uses two workloads, `httpbin` and `sleep`, both deployed in namespace `foo`. + This task uses two workloads, `httpbin` and `curl`, both deployed in namespace `foo`. Both workloads run with an Envoy proxy sidecar. Create the `foo` namespace and deploy the workloads with the following command: {{< text bash >}} $ kubectl create ns foo $ kubectl label ns foo istio-injection=enabled $ kubectl apply -f @samples/httpbin/httpbin.yaml@ -n foo - $ kubectl apply -f @samples/sleep/sleep.yaml@ -n foo + $ kubectl apply -f @samples/curl/curl.yaml@ -n foo {{< /text >}} * Enable proxy debug level log for checking dry-run logging results: @@ -49,10 +49,10 @@ Before you begin this task, do the following: rbac: debug {{< /text >}} -* Verify that `sleep` can access `httpbin` with the following command: +* Verify that `curl` can access `httpbin` with the following command: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -92,10 +92,10 @@ Caching and propagation overhead can cause some delay. {{< /text >}} 1. 
Verify a request to path `/headers` is allowed because the policy is created in dry-run mode, run the following command - to send 20 requests from `sleep` to `httpbin`, the request includes the header `X-B3-Sampled: 1` to always trigger the Zipkin tracing: + to send 20 requests from `curl` to `httpbin`; each request includes the header `X-B3-Sampled: 1` to always trigger Zipkin tracing: {{< text bash >}} - $ for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done + $ for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done 200 200 200 @@ -154,7 +154,7 @@ Also see the [troubleshooting guide](/docs/ops/common-problems/security-issues/# $ istioctl dashboard zipkin {{< /text >}} -1. Find the trace result for the request from `sleep` to `httpbin`. Try to send some more requests if you do see the trace +1. Find the trace result for the request from `curl` to `httpbin`. Try to send some more requests if you do not see the trace result due to the delay in the Zipkin. 1. In the trace result, you should find the following custom tags indicating the request is rejected by the dry-run policy diff --git a/content/en/docs/tasks/security/authorization/authz-dry-run/snips.sh b/content/en/docs/tasks/security/authorization/authz-dry-run/snips.sh index 5617eeaa8a18f..bab048a9fed86 100644 --- a/content/en/docs/tasks/security/authorization/authz-dry-run/snips.sh +++ b/content/en/docs/tasks/security/authorization/authz-dry-run/snips.sh @@ -24,7 +24,7 @@ snip_before_you_begin_1() { kubectl create ns foo kubectl label ns foo istio-injection=enabled kubectl apply -f samples/httpbin/httpbin.yaml -n foo -kubectl apply -f samples/sleep/sleep.yaml -n foo +kubectl apply -f samples/curl/curl.yaml -n foo } snip_before_you_begin_2() { @@ -36,7 +36,7 @@ rbac: debug ENDSNIP snip_before_you_begin_3() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -s -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_before_you_begin_3_out <<\ENDSNIP @@ -68,7 +68,7 @@ kubectl annotate --overwrite authorizationpolicies deny-path-headers -n foo isti } snip_create_dryrun_policy_3() { -for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done +for i in {1..20}; do kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s -o /dev/null -w "%{http_code}\n"; done } !
IFS=$'\n' read -r -d '' snip_create_dryrun_policy_3_out <<\ENDSNIP diff --git a/content/en/docs/tasks/security/authorization/authz-dry-run/test.sh b/content/en/docs/tasks/security/authorization/authz-dry-run/test.sh index 08fb661eb0f35..637bdc3ec2eb3 100644 --- a/content/en/docs/tasks/security/authorization/authz-dry-run/test.sh +++ b/content/en/docs/tasks/security/authorization/authz-dry-run/test.sh @@ -32,35 +32,35 @@ _deploy_and_wait_for_addons prometheus zipkin snip_enable_tracing_for_mesh_1 snip_customizing_trace_sampling_1 -# Install sleep and httpbin +# Install curl and httpbin snip_before_you_begin_1 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl # Enable RBAC debug logging on httpbin _verify_contains snip_before_you_begin_2 "$snip_before_you_begin_2_out" -# Send request from sleep to httpbin +# Send request from curl to httpbin _verify_contains snip_before_you_begin_3 "$snip_before_you_begin_3_out" # Create authorization policy in dry-run mode snip_create_dryrun_policy_1 snip_create_dryrun_policy_2 -# Send requests from sleep to httpbin +# Send requests from curl to httpbin _verify_elided snip_create_dryrun_policy_3 "$snip_create_dryrun_policy_3_out" # Verify Envoy logs for the dry-run result function check_logs() { # Send more requests in case the log is not showing - kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -s -o /dev/null -w "%{http_code}\n" + kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -s -o /dev/null -w "%{http_code}\n" snip_check_dryrun_result_in_proxy_log_1 } _verify_contains check_logs "ns[foo]-policy[deny-path-headers]-rule[0]" function query_prometheus() { # Send more requests in case the metric is not showing - kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s + kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s curl -sg "http://localhost:9090/api/v1/query?query=$snip_check_dryrun_result_in_metric_using_prometheus_2" | jq '.data.result[0].value[1]' } @@ -71,7 +71,7 @@ pgrep istioctl | xargs kill function query_zipkin() { # Send more requests in case the trace is not showing - kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s + kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/headers -H "X-B3-Sampled: 1" -s curl -s 'http://localhost:9411/zipkin/api/v2/traces?serviceName=httpbin.foo' } diff --git a/content/en/docs/tasks/security/authorization/authz-http/test.sh b/content/en/docs/tasks/security/authorization/authz-http/test.sh index 610beb6a5a327..ab2f3c238eb86 100644 --- a/content/en/docs/tasks/security/authorization/authz-http/test.sh +++ b/content/en/docs/tasks/security/authorization/authz-http/test.sh @@ -44,11 +44,11 @@ function verify { goodResponse=0 ingress_url="http://istio-ingressgateway.istio-system/productpage" - sleep_pod=$(kubectl get pod -l app=sleep -n default -o 'jsonpath={.items..metadata.name}') + curl_pod=$(kubectl get pod -l app=curl -n default -o 
'jsonpath={.items..metadata.name}') for ((i=1; i<="$REPEAT"; i++)); do set +e - response=$(kubectl exec "${sleep_pod}" -c sleep -n "default" -- curl "${ingress_url}" -sS -w "\n%{http_code}\n") + response=$(kubectl exec "${curl_pod}" -c curl -n "default" -- curl "${ingress_url}" -sS -w "\n%{http_code}\n") set -e mapfile -t respArray <<< "$response" code=${respArray[-1]} @@ -83,7 +83,7 @@ function verify { } kubectl label namespace default istio-injection=enabled --overwrite -startup_sleep_sample # needed for sending test requests with curl +startup_curl_sample # needed for sending test requests with curl # launch the bookinfo app startup_bookinfo_sample @@ -123,6 +123,6 @@ verify 200 "William Shakespeare" "Book Details" "Book Reviews" snip_clean_up_1 # remaining cleanup (undocumented). cleanup_bookinfo_sample -cleanup_sleep_sample +cleanup_curl_sample kubectl delete -f samples/bookinfo/networking/virtual-service-reviews-v3.yaml kubectl label namespace default istio-injection- diff --git a/content/en/docs/tasks/security/authorization/authz-jwt/index.md b/content/en/docs/tasks/security/authorization/authz-jwt/index.md index 962cd6348920f..a02b7799a43de 100644 --- a/content/en/docs/tasks/security/authorization/authz-jwt/index.md +++ b/content/en/docs/tasks/security/authorization/authz-jwt/index.md @@ -24,20 +24,20 @@ Before you begin this task, do the following: * Install Istio using [Istio installation guide](/docs/setup/install/istioctl/). -* Deploy two workloads: `httpbin` and `sleep`. Deploy these in one namespace, +* Deploy two workloads: `httpbin` and `curl`. Deploy these in one namespace, for example `foo`. Both workloads run with an Envoy proxy in front of each. Deploy the example namespace and workloads using these commands: {{< text bash >}} $ kubectl create ns foo $ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo - $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo + $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo {{< /text >}} -* Verify that `sleep` successfully communicates with `httpbin` using this command: +* Verify that `curl` successfully communicates with `httpbin` using this command: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -72,14 +72,14 @@ Caching and propagation can cause a delay. 1. Verify that a request with an invalid JWT is denied: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n" 401 {{< /text >}} 1. 
Verify that a request without a JWT is allowed because there is no authorization policy: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -119,14 +119,14 @@ Caching and propagation can cause a delay. 1. Verify that a request with a valid JWT is allowed: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" 200 {{< /text >}} 1. Verify that a request without a JWT is denied: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n" 403 {{< /text >}} @@ -169,14 +169,14 @@ Caching and propagation can cause a delay. 1. Verify that a request with the JWT that includes `group1` in the `groups` claim is allowed: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n" 200 {{< /text >}} 1. 
Verify that a request with a JWT that doesn’t have the `groups` claim is rejected:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
    403
    {{< /text >}}
diff --git a/content/en/docs/tasks/security/authorization/authz-jwt/snips.sh b/content/en/docs/tasks/security/authorization/authz-jwt/snips.sh
index 57bf94db44e34..d65cf482e0af1 100644
--- a/content/en/docs/tasks/security/authorization/authz-jwt/snips.sh
+++ b/content/en/docs/tasks/security/authorization/authz-jwt/snips.sh
@@ -23,11 +23,11 @@ snip_before_you_begin_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
}
snip_before_you_begin_2() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n"
}
! IFS=$'\n' read -r -d '' snip_before_you_begin_2_out <<\ENDSNIP
@@ -52,7 +52,7 @@ EOF
}
snip_allow_requests_with_valid_jwt_and_listtyped_claims_2() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer invalidToken" -w "%{http_code}\n"
}
! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_2_out <<\ENDSNIP
@@ -60,7 +60,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat
ENDSNIP
snip_allow_requests_with_valid_jwt_and_listtyped_claims_3() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n"
}
! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_3_out <<\ENDSNIP
@@ -95,7 +95,7 @@ TOKEN=$(curl https://raw.githubusercontent.com/istio/istio/master/security/tools
ENDSNIP
snip_allow_requests_with_valid_jwt_and_listtyped_claims_6() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n"
}
!
IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_6_out <<\ENDSNIP @@ -103,7 +103,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_allow_requests_with_valid_jwt_and_listtyped_claims_7() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_7_out <<\ENDSNIP @@ -141,7 +141,7 @@ TOKEN_GROUP=$(curl https://raw.githubusercontent.com/istio/istio/master/security ENDSNIP snip_allow_requests_with_valid_jwt_and_listtyped_claims_10() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN_GROUP" -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_10_out <<\ENDSNIP @@ -149,7 +149,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_allow_requests_with_valid_jwt_and_listtyped_claims_11() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl "http://httpbin.foo:8000/headers" -sS -o /dev/null -H "Authorization: Bearer $TOKEN" -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_allow_requests_with_valid_jwt_and_listtyped_claims_11_out <<\ENDSNIP diff --git a/content/en/docs/tasks/security/authorization/authz-jwt/test.sh b/content/en/docs/tasks/security/authorization/authz-jwt/test.sh index 8132904f72345..8c5d9e1e2b962 100644 --- a/content/en/docs/tasks/security/authorization/authz-jwt/test.sh +++ b/content/en/docs/tasks/security/authorization/authz-jwt/test.sh @@ -27,7 +27,7 @@ export VERIFY_TIMEOUT=300 snip_before_you_begin_1 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl # Pull the Istio branch from the docs configuration file. ISTIO_BRANCH=$(yq '.source_branch_name' "${REPO_ROOT}"/data/args.yml) diff --git a/content/en/docs/tasks/security/authorization/authz-tcp/index.md b/content/en/docs/tasks/security/authorization/authz-tcp/index.md index 0e08dfa972df5..df6ba0f239f4e 100644 --- a/content/en/docs/tasks/security/authorization/authz-tcp/index.md +++ b/content/en/docs/tasks/security/authorization/authz-tcp/index.md @@ -19,7 +19,7 @@ Before you begin this task, do the following: * Install Istio using the [Istio installation guide](/docs/setup/install/istioctl/). -* Deploy two workloads named `sleep` and `tcp-echo` together in a namespace, for example `foo`. +* Deploy two workloads named `curl` and `tcp-echo` together in a namespace, for example `foo`. Both workloads run with an Envoy proxy in front of each. 
The `tcp-echo` workload listens on ports 9000, 9001, and 9002 and echoes back any traffic it receives with a `hello` prefix.
For example, if you send "world" to `tcp-echo`, it will reply with `hello world`.
@@ -30,37 +30,37 @@ Before you begin this task, do the following:
    {{< text bash >}}
    $ kubectl create ns foo
    $ kubectl apply -f <(istioctl kube-inject -f @samples/tcp-echo/tcp-echo.yaml@) -n foo
-    $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo
+    $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo
    {{< /text >}}
-* Verify that `sleep` successfully communicates with `tcp-echo` on ports 9000 and 9001
+* Verify that `curl` successfully communicates with `tcp-echo` on ports 9000 and 9001
  using the following command:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    hello port 9000
    connection succeeded
    {{< /text >}}
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    hello port 9001
    connection succeeded
    {{< /text >}}
-* Verify that `sleep` successfully communicates with `tcp-echo` on port 9002.
+* Verify that `curl` successfully communicates with `tcp-echo` on port 9002.
  You need to send the traffic directly to the pod IP of `tcp-echo` because port 9002 is not defined
  in the Kubernetes service object of `tcp-echo`. Get the pod IP address and send the request with the
  following command:
    {{< text bash >}}
    $ TCP_ECHO_IP=$(kubectl get pod "$(kubectl get pod -l app=tcp-echo -n foo -o jsonpath={.items..metadata.name})" -n foo -o jsonpath="{.status.podIP}")
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    "echo \"port 9002\" | nc $TCP_ECHO_IP 9002" | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    hello port 9002
    connection succeeded
@@ -97,8 +97,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p
1. Verify that requests to port 9000 are allowed using the following command:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    hello port 9000
    connection succeeded
@@ -107,8 +107,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p
1.
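Before checking the remaining ports, you can optionally list the authorization policies currently in effect (a quick check; it assumes the ALLOW policy from the previous step was applied in the `foo` namespace):

    {{< text bash >}}
    $ kubectl get authorizationpolicy -n foo
    {{< /text >}}

1.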
Verify that requests to port 9001 are allowed using the following command:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    hello port 9001
    connection succeeded
@@ -119,8 +119,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p
  explicitly in the `tcp-echo` Kubernetes service object. Run the following command and verify the output:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    "echo \"port 9002\" | nc $TCP_ECHO_IP 9002" | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    connection rejected
    {{< /text >}}
@@ -153,8 +153,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p
  Run the following command and verify the output:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    connection rejected
    {{< /text >}}
@@ -163,8 +163,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p
  ALLOW rules. Run the following command and verify the output:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    connection rejected
    {{< /text >}}
@@ -196,8 +196,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p
  HTTP-only fields while creating a DENY rule for a TCP port; due to its restrictive nature, it denies
  all the traffic to the TCP ports:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    connection rejected
    {{< /text >}}
@@ -205,8 +205,8 @@ If you don’t see the expected output, retry after a few seconds. Caching and p
1. Verify that requests to port 9001 are denied, for the same reason as above.
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    connection rejected
    {{< /text >}}
@@ -236,8 +236,8 @@
1. Verify that requests to port 9000 are denied. This occurs because the request matches the `ports` in the above-mentioned deny policy.
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    connection rejected
    {{< /text >}}
@@ -246,8 +246,8 @@
  the `ports` in the DENY policy:
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-    -c sleep -n foo -- sh -c \
+    $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+    -c curl -n foo -- sh -c \
    'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
    hello port 9001
    connection succeeded
diff --git a/content/en/docs/tasks/security/authorization/authz-tcp/snips.sh b/content/en/docs/tasks/security/authorization/authz-tcp/snips.sh
index 1987fc16bd78f..f26db3526205a 100644
--- a/content/en/docs/tasks/security/authorization/authz-tcp/snips.sh
+++ b/content/en/docs/tasks/security/authorization/authz-tcp/snips.sh
@@ -23,12 +23,12 @@ snip_before_you_begin_1() {
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/tcp-echo/tcp-echo.yaml) -n foo
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo
}
snip_before_you_begin_2() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-  -c sleep -n foo -- sh -c \
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+  -c curl -n foo -- sh -c \
  'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}
@@ -38,8 +38,8 @@ connection succeeded
ENDSNIP
snip_before_you_begin_3() {
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-  -c sleep -n foo -- sh -c \
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+  -c curl -n foo -- sh -c \
  'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected'
}
@@ -50,8 +50,8 @@ ENDSNIP
snip_before_you_begin_4() {
TCP_ECHO_IP=$(kubectl get pod "$(kubectl get pod -l app=tcp-echo -n foo -o jsonpath={.items..metadata.name})" -n foo -o jsonpath="{.status.podIP}")
-kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \
-  -c sleep -n foo -- sh -c \
+kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \
+  -c curl -n foo -- sh -c \
  "echo \"port 9002\" | nc $TCP_ECHO_IP 9002" |
grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -80,8 +80,8 @@ EOF } snip_configure_allow_authorization_policy_for_a_tcp_workload_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -91,8 +91,8 @@ connection succeeded ENDSNIP snip_configure_allow_authorization_policy_for_a_tcp_workload_3() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -102,8 +102,8 @@ connection succeeded ENDSNIP snip_configure_allow_authorization_policy_for_a_tcp_workload_4() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ "echo \"port 9002\" | nc $TCP_ECHO_IP 9002" | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -132,8 +132,8 @@ EOF } snip_configure_allow_authorization_policy_for_a_tcp_workload_6() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -142,8 +142,8 @@ connection rejected ENDSNIP snip_configure_allow_authorization_policy_for_a_tcp_workload_7() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -171,8 +171,8 @@ EOF } snip_configure_deny_authorization_policy_for_a_tcp_workload_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -181,8 +181,8 @@ connection rejected ENDSNIP snip_configure_deny_authorization_policy_for_a_tcp_workload_3() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -211,8 +211,8 @@ EOF } snip_configure_deny_authorization_policy_for_a_tcp_workload_5() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec 
"$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9000" | nc tcp-echo 9000' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } @@ -221,8 +221,8 @@ connection rejected ENDSNIP snip_configure_deny_authorization_policy_for_a_tcp_workload_6() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" \ - -c sleep -n foo -- sh -c \ +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" \ + -c curl -n foo -- sh -c \ 'echo "port 9001" | nc tcp-echo 9001' | grep "hello" && echo 'connection succeeded' || echo 'connection rejected' } diff --git a/content/en/docs/tasks/security/authorization/authz-tcp/test.sh b/content/en/docs/tasks/security/authorization/authz-tcp/test.sh index abf1bf88ed446..3fff072ee5e00 100644 --- a/content/en/docs/tasks/security/authorization/authz-tcp/test.sh +++ b/content/en/docs/tasks/security/authorization/authz-tcp/test.sh @@ -24,7 +24,7 @@ set -o pipefail snip_before_you_begin_1 _wait_for_deployment foo tcp-echo -_wait_for_deployment foo sleep +_wait_for_deployment foo curl # shellcheck disable=SC2155 export TCP_ECHO_IP=$(kubectl get pod "$(kubectl get pod -l app=tcp-echo -n foo -o jsonpath={.items..metadata.name})" -n foo -o "jsonpath={.status.podIP}") diff --git a/content/en/docs/tasks/security/authorization/authz-td-migration/index.md b/content/en/docs/tasks/security/authorization/authz-td-migration/index.md index e7d1cb8533f8c..dd98db8d0ee26 100644 --- a/content/en/docs/tasks/security/authorization/authz-td-migration/index.md +++ b/content/en/docs/tasks/security/authorization/authz-td-migration/index.md @@ -28,18 +28,18 @@ Before you begin this task, do the following: {{< /text >}} 1. Deploy the [httpbin]({{< github_tree >}}/samples/httpbin) sample in the `default` namespace - and the [sleep]({{< github_tree >}}/samples/sleep) sample in the `default` and `sleep-allow` namespaces: + and the [curl]({{< github_tree >}}/samples/curl) sample in the `default` and `curl-allow` namespaces: {{< text bash >}} $ kubectl label namespace default istio-injection=enabled $ kubectl apply -f @samples/httpbin/httpbin.yaml@ - $ kubectl apply -f @samples/sleep/sleep.yaml@ - $ kubectl create namespace sleep-allow - $ kubectl label namespace sleep-allow istio-injection=enabled - $ kubectl apply -f @samples/sleep/sleep.yaml@ -n sleep-allow + $ kubectl apply -f @samples/curl/curl.yaml@ + $ kubectl create namespace curl-allow + $ kubectl label namespace curl-allow istio-injection=enabled + $ kubectl apply -f @samples/curl/curl.yaml@ -n curl-allow {{< /text >}} -1. Apply the authorization policy below to deny all requests to `httpbin` except from `sleep` in the `sleep-allow` namespace. +1. Apply the authorization policy below to deny all requests to `httpbin` except from `curl` in the `curl-allow` namespace. {{< text bash >}} $ kubectl apply -f - <}} - $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" 403 {{< /text >}} - * `sleep` in the `sleep-allow` namespace are allowed. + * `curl` in the `curl-allow` namespace are allowed. 
    {{< text bash >}}
-    $ kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
+    $ kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
    200
    {{< /text >}}
@@ -99,32 +99,32 @@
  Istio mesh is now running with a new trust domain, `new-td`.
-1. Redeploy the `httpbin` and `sleep` applications to pick up changes from the new Istio control plane.
+1. Redeploy the `httpbin` and `curl` applications to pick up changes from the new Istio control plane.
    {{< text bash >}}
    $ kubectl delete pod --all
    {{< /text >}}
    {{< text bash >}}
-    $ kubectl delete pod --all -n sleep-allow
+    $ kubectl delete pod --all -n curl-allow
    {{< /text >}}
-1. Verify that requests to `httpbin` from both `sleep` in `default` namespace and `sleep-allow` namespace are denied.
+1. Verify that requests to `httpbin` from `curl` in both the `default` and `curl-allow` namespaces are denied.
    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
+    $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
    403
    {{< /text >}}
    {{< text bash >}}
-    $ kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
+    $ kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n"
    403
    {{< /text >}}
  This is because we specified an authorization policy that denies all requests to `httpbin`, except the ones from
-  the `old-td/ns/sleep-allow/sa/sleep` identity, which is the old identity of the `sleep` application in `sleep-allow` namespace.
-  When we migrated to a new trust domain above, i.e. `new-td`, the identity of this `sleep` application is now `new-td/ns/sleep-allow/sa/sleep`,
-  which is not the same as `old-td/ns/sleep-allow/sa/sleep`. Therefore, requests from the `sleep` application in `sleep-allow` namespace
+  the `old-td/ns/curl-allow/sa/curl` identity, which is the old identity of the `curl` application in `curl-allow` namespace.
+  When we migrated to a new trust domain above, i.e. `new-td`, the identity of this `curl` application is now `new-td/ns/curl-allow/sa/curl`,
+  which is not the same as `old-td/ns/curl-allow/sa/curl`. Therefore, requests from the `curl` application in `curl-allow` namespace
  to `httpbin`, which were allowed before, are now being denied.
  Prior to Istio 1.4, the only way to make this work was to change the authorization policy manually. In Istio 1.4, we introduced an easier way, as shown below.
@@ -147,24 +147,24 @@
1. Without changing the authorization policy, verify that requests to `httpbin` from:
-    * `sleep` in the `default` namespace are denied.
+    * `curl` in the `default` namespace are denied.
{{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" 403 {{< /text >}} - * `sleep` in the `sleep-allow` namespace are allowed. + * `curl` in the `curl-allow` namespace are allowed. {{< text bash >}} - $ kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} ## Best practices Starting from Istio 1.4, when writing authorization policy, you should consider using the value `cluster.local` as the -trust domain part in the policy. For example, instead of `old-td/ns/sleep-allow/sa/sleep`, it should be `cluster.local/ns/sleep-allow/sa/sleep`. +trust domain part in the policy. For example, instead of `old-td/ns/curl-allow/sa/curl`, it should be `cluster.local/ns/curl-allow/sa/curl`. Notice that in this case, `cluster.local` is not the Istio mesh trust domain (the trust domain is still `old-td`). However, in authorization policy, `cluster.local` is a pointer that points to the current trust domain, i.e. `old-td` (and later `new-td`), as well as its aliases. By using `cluster.local` in the authorization policy, when you migrate to a new trust domain, Istio will detect this and treat the new trust domain @@ -175,8 +175,8 @@ as the old trust domain without you having to include the aliases. 
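For example, the rule from this task's policy could be written with a `cluster.local` principal instead (a sketch; the `from`/`to` rule mirrors the snippet shown earlier, while the policy name and any workload selector should match whatever you created in the Before you begin section):

{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: service-httpbin.default.svc.cluster.local
  namespace: default
spec:
  rules:
  - from:
    - source:
        principals:
        - cluster.local/ns/curl-allow/sa/curl
    to:
    - operation:
        methods:
        - GET
EOF
{{< /text >}}

A policy written this way keeps working across a trust domain migration without any further edits.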
{{< text bash >}} $ kubectl delete authorizationpolicy service-httpbin.default.svc.cluster.local $ kubectl delete deploy httpbin; kubectl delete service httpbin; kubectl delete serviceaccount httpbin -$ kubectl delete deploy sleep; kubectl delete service sleep; kubectl delete serviceaccount sleep +$ kubectl delete deploy curl; kubectl delete service curl; kubectl delete serviceaccount curl $ istioctl uninstall --purge -y -$ kubectl delete namespace sleep-allow istio-system +$ kubectl delete namespace curl-allow istio-system $ rm ./td-installation.yaml {{< /text >}} diff --git a/content/en/docs/tasks/security/authorization/authz-td-migration/snips.sh b/content/en/docs/tasks/security/authorization/authz-td-migration/snips.sh index 1674e7bf60324..0d6380961f3dd 100644 --- a/content/en/docs/tasks/security/authorization/authz-td-migration/snips.sh +++ b/content/en/docs/tasks/security/authorization/authz-td-migration/snips.sh @@ -27,10 +27,10 @@ istioctl install --set profile=demo --set meshConfig.trustDomain=old-td snip_before_you_begin_2() { kubectl label namespace default istio-injection=enabled kubectl apply -f samples/httpbin/httpbin.yaml -kubectl apply -f samples/sleep/sleep.yaml -kubectl create namespace sleep-allow -kubectl label namespace sleep-allow istio-injection=enabled -kubectl apply -f samples/sleep/sleep.yaml -n sleep-allow +kubectl apply -f samples/curl/curl.yaml +kubectl create namespace curl-allow +kubectl label namespace curl-allow istio-injection=enabled +kubectl apply -f samples/curl/curl.yaml -n curl-allow } snip_before_you_begin_3() { @@ -45,7 +45,7 @@ spec: - from: - source: principals: - - old-td/ns/sleep-allow/sa/sleep + - old-td/ns/curl-allow/sa/curl to: - operation: methods: @@ -58,7 +58,7 @@ EOF } snip_before_you_begin_4() { -kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_before_you_begin_4_out <<\ENDSNIP @@ -66,7 +66,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name} ENDSNIP snip_before_you_begin_5() { -kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_before_you_begin_5_out <<\ENDSNIP @@ -86,11 +86,11 @@ kubectl delete pod --all } snip_migrate_trust_domain_without_trust_domain_aliases_4() { -kubectl delete pod --all -n sleep-allow +kubectl delete pod --all -n curl-allow } snip_migrate_trust_domain_without_trust_domain_aliases_5() { -kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! 
IFS=$'\n' read -r -d '' snip_migrate_trust_domain_without_trust_domain_aliases_5_out <<\ENDSNIP @@ -98,7 +98,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name} ENDSNIP snip_migrate_trust_domain_without_trust_domain_aliases_6() { -kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_migrate_trust_domain_without_trust_domain_aliases_6_out <<\ENDSNIP @@ -119,7 +119,7 @@ istioctl install --set profile=demo -f td-installation.yaml -y } snip_migrate_trust_domain_with_trust_domain_aliases_2() { -kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_migrate_trust_domain_with_trust_domain_aliases_2_out <<\ENDSNIP @@ -127,7 +127,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name} ENDSNIP snip_migrate_trust_domain_with_trust_domain_aliases_3() { -kubectl exec "$(kubectl -n sleep-allow get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -n sleep-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl -n curl-allow get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -n curl-allow -- curl http://httpbin.default:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! 
IFS=$'\n' read -r -d '' snip_migrate_trust_domain_with_trust_domain_aliases_3_out <<\ENDSNIP
@@ -137,8 +137,8 @@ ENDSNIP
snip_clean_up_1() {
kubectl delete authorizationpolicy service-httpbin.default.svc.cluster.local
kubectl delete deploy httpbin; kubectl delete service httpbin; kubectl delete serviceaccount httpbin
-kubectl delete deploy sleep; kubectl delete service sleep; kubectl delete serviceaccount sleep
+kubectl delete deploy curl; kubectl delete service curl; kubectl delete serviceaccount curl
istioctl uninstall --purge -y
-kubectl delete namespace sleep-allow istio-system
+kubectl delete namespace curl-allow istio-system
rm ./td-installation.yaml
}
diff --git a/content/en/docs/tasks/security/authorization/authz-td-migration/test.sh b/content/en/docs/tasks/security/authorization/authz-td-migration/test.sh
index a24790cd99599..72f7a59390f42 100644
--- a/content/en/docs/tasks/security/authorization/authz-td-migration/test.sh
+++ b/content/en/docs/tasks/security/authorization/authz-td-migration/test.sh
@@ -33,9 +33,9 @@ _wait_for_deployment istio-system istiod
snip_before_you_begin_2
-_wait_for_deployment default sleep
+_wait_for_deployment default curl
_wait_for_deployment default httpbin
-_wait_for_deployment sleep-allow sleep
+_wait_for_deployment curl-allow curl
snip_before_you_begin_3
diff --git a/content/en/docs/tasks/security/cert-management/custom-ca-k8s/index.md b/content/en/docs/tasks/security/cert-management/custom-ca-k8s/index.md
index d4ac7edc67384..fe9246067cf46 100644
--- a/content/en/docs/tasks/security/cert-management/custom-ca-k8s/index.md
+++ b/content/en/docs/tasks/security/cert-management/custom-ca-k8s/index.md
@@ -246,30 +246,30 @@ $ export BARCA=$(kubectl get clusterissuers bar -o jsonpath='{.spec.ca.secretNam
    $ kubectl apply -f ./proxyconfig-foo.yaml
    {{< /text >}}
-1. Deploy the `httpbin` and `sleep` sample applications in the `foo` and `bar` namespaces.
+1. Deploy the `httpbin` and `curl` sample applications in the `foo` and `bar` namespaces.
    {{< text bash >}}
    $ kubectl label ns foo istio-injection=enabled
    $ kubectl label ns bar istio-injection=enabled
    $ kubectl apply -f samples/httpbin/httpbin.yaml -n foo
-    $ kubectl apply -f samples/sleep/sleep.yaml -n foo
+    $ kubectl apply -f samples/curl/curl.yaml -n foo
    $ kubectl apply -f samples/httpbin/httpbin.yaml -n bar
    {{< /text >}}
-## Verify the network connectivity between `httpbin` and `sleep` within the same namespace
+## Verify the network connectivity between `httpbin` and `curl` within the same namespace
When the workloads are deployed, they send CSRs with the related signer info. Istiod forwards the CSR to the custom CA for signing. The custom CA uses the correct cluster issuer to sign the certificate. Workloads in the `foo` namespace use the `foo` cluster issuer, while workloads in the `bar` namespace use the `bar` cluster issuer. To verify that the certificates were indeed signed by the correct cluster issuers, we can check that workloads in the same namespace can communicate, while workloads in different namespaces cannot.
-1. Set the `SLEEP_POD_FOO` environment variable to the name of `sleep` pod.
+1. Set the `CURL_POD_FOO` environment variable to the name of the `curl` pod.
    {{< text bash >}}
-    $ export SLEEP_POD_FOO=$(kubectl get pod -n foo -l app=sleep -o jsonpath={.items..metadata.name})
+    $ export CURL_POD_FOO=$(kubectl get pod -n foo -l app=curl -o jsonpath={.items..metadata.name})
    {{< /text >}}
-1.
Check network connectivity between service `sleep` and `httpbin` in the `foo` namespace. +1. Check network connectivity between service `curl` and `httpbin` in the `foo` namespace. {{< text bash >}} - $ kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.foo:8000/html + $ kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.foo:8000/html @@ -285,10 +285,10 @@ When the workloads are deployed, they send CSR requests with related signer info {{< /text >}} -1. Check network connectivity between service `sleep` in the `foo` namespace and `httpbin` in the `bar` namespace. +1. Check network connectivity between service `curl` in the `foo` namespace and `httpbin` in the `bar` namespace. {{< text bash >}} - $ kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.bar:8000/html + $ kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.bar:8000/html upstream connect error or disconnect/reset before headers. reset reason: connection failure, transport failure reason: TLS error: 268435581:SSL routines:OPENSSL_internal:CERTIFICATE_VERIFY_FAILED {{< /text >}} diff --git a/content/en/docs/tasks/security/cert-management/custom-ca-k8s/snips.sh b/content/en/docs/tasks/security/cert-management/custom-ca-k8s/snips.sh index ef4478c233480..2ed5896ef69d7 100644 --- a/content/en/docs/tasks/security/cert-management/custom-ca-k8s/snips.sh +++ b/content/en/docs/tasks/security/cert-management/custom-ca-k8s/snips.sh @@ -226,19 +226,19 @@ snip_deploy_istio_with_default_certsigner_info_5() { kubectl label ns foo istio-injection=enabled kubectl label ns bar istio-injection=enabled kubectl apply -f samples/httpbin/httpbin.yaml -n foo -kubectl apply -f samples/sleep/sleep.yaml -n foo +kubectl apply -f samples/curl/curl.yaml -n foo kubectl apply -f samples/httpbin/httpbin.yaml -n bar } -snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_1() { -export SLEEP_POD_FOO=$(kubectl get pod -n foo -l app=sleep -o jsonpath={.items..metadata.name}) +snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_1() { +export CURL_POD_FOO=$(kubectl get pod -n foo -l app=curl -o jsonpath={.items..metadata.name}) } -snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_2() { -kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.foo:8000/html +snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_2() { +kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.foo:8000/html } -! IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_2_out <<\ENDSNIP +! IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_2_out <<\ENDSNIP @@ -254,11 +254,11 @@ kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.foo:8000/ht ENDSNIP -snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_3() { -kubectl exec "$SLEEP_POD_FOO" -n foo -c sleep -- curl http://httpbin.bar:8000/html +snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_3() { +kubectl exec "$CURL_POD_FOO" -n foo -c curl -- curl http://httpbin.bar:8000/html } -! IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_3_out <<\ENDSNIP +! 
IFS=$'\n' read -r -d '' snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_3_out <<\ENDSNIP upstream connect error or disconnect/reset before headers. reset reason: connection failure, transport failure reason: TLS error: 268435581:SSL routines:OPENSSL_internal:CERTIFICATE_VERIFY_FAILED ENDSNIP diff --git a/content/en/docs/tasks/security/cert-management/custom-ca-k8s/test.sh b/content/en/docs/tasks/security/cert-management/custom-ca-k8s/test.sh index 0f4761a08d976..ee36d8e5420dd 100644 --- a/content/en/docs/tasks/security/cert-management/custom-ca-k8s/test.sh +++ b/content/en/docs/tasks/security/cert-management/custom-ca-k8s/test.sh @@ -36,14 +36,14 @@ snip_deploy_istio_with_default_certsigner_info_4 # deploy test application snip_deploy_istio_with_default_certsigner_info_5 -_wait_for_deployment foo sleep +_wait_for_deployment foo curl _wait_for_deployment foo httpbin _wait_for_deployment bar httpbin -snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_1 -_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_2 "Herman Melville - Moby-Dick" -_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_sleep_within_the_same_namespace_3 "upstream connect error" +snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_1 +_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_2 "Herman Melville - Moby-Dick" +_verify_contains snip_verify_the_network_connectivity_between_httpbin_and_curl_within_the_same_namespace_3 "upstream connect error" # @cleanup diff --git a/content/en/docs/tasks/security/cert-management/plugin-ca-cert/index.md b/content/en/docs/tasks/security/cert-management/plugin-ca-cert/index.md index 6d68cff9f9d45..2e54d651a80f6 100644 --- a/content/en/docs/tasks/security/cert-management/plugin-ca-cert/index.md +++ b/content/en/docs/tasks/security/cert-management/plugin-ca-cert/index.md @@ -112,12 +112,12 @@ Support for SHA-1 signatures is [disabled by default in Go 1.18](https://github. ## Deploying example services -1. Deploy the `httpbin` and `sleep` sample services. +1. Deploy the `httpbin` and `curl` sample services. {{< text bash >}} $ kubectl create ns foo $ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo - $ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo + $ kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo {{< /text >}} 1. Deploy a policy for workloads in the `foo` namespace to only accept mutual TLS traffic. @@ -145,7 +145,7 @@ the `verify error:num=19:self signed certificate in certificate chain` error ret openssl command is expected. {{< text bash >}} - $ sleep 20; kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt + $ sleep 20; kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt {{< /text >}} 1. Parse the certificates on the certificate chain. @@ -200,10 +200,10 @@ openssl command is expected. 
$ kubectl delete peerauthentication -n foo default {{< /text >}} -* Remove the sample applications `sleep` and `httpbin`: +* Remove the sample applications `curl` and `httpbin`: {{< text bash >}} - $ kubectl delete -f samples/sleep/sleep.yaml -n foo + $ kubectl delete -f samples/curl/curl.yaml -n foo $ kubectl delete -f samples/httpbin/httpbin.yaml -n foo {{< /text >}} diff --git a/content/en/docs/tasks/security/cert-management/plugin-ca-cert/snips.sh b/content/en/docs/tasks/security/cert-management/plugin-ca-cert/snips.sh index 307db62febf2f..b526b0a84114b 100644 --- a/content/en/docs/tasks/security/cert-management/plugin-ca-cert/snips.sh +++ b/content/en/docs/tasks/security/cert-management/plugin-ca-cert/snips.sh @@ -53,7 +53,7 @@ istioctl install --set profile=demo snip_deploying_example_services_1() { kubectl create ns foo kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo } snip_deploying_example_services_2() { @@ -69,7 +69,7 @@ EOF } snip_verifying_the_certificates_1() { -sleep 20; kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt +sleep 20; kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt } snip_verifying_the_certificates_2() { @@ -118,7 +118,7 @@ kubectl delete peerauthentication -n foo default } snip_cleanup_4() { -kubectl delete -f samples/sleep/sleep.yaml -n foo +kubectl delete -f samples/curl/curl.yaml -n foo kubectl delete -f samples/httpbin/httpbin.yaml -n foo } diff --git a/content/en/docs/tasks/security/cert-management/plugin-ca-cert/test.sh b/content/en/docs/tasks/security/cert-management/plugin-ca-cert/test.sh index 187ee4c7b6107..3909c57353df4 100644 --- a/content/en/docs/tasks/security/cert-management/plugin-ca-cert/test.sh +++ b/content/en/docs/tasks/security/cert-management/plugin-ca-cert/test.sh @@ -30,12 +30,12 @@ snip_plug_in_certificates_and_key_into_the_cluster_5 echo y | snip_deploy_istio_1 _wait_for_deployment istio-system istiod -# create_ns_foo_with_httpbin_sleep +# create_ns_foo_with_httpbin_curl snip_deploying_example_services_1 snip_deploying_example_services_2 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl # Disable errors, since the next command is expected to return an error. set +e diff --git a/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/index.md b/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/index.md index 54390c107baf0..3fba35021b2a1 100644 --- a/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/index.md +++ b/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/index.md @@ -38,19 +38,19 @@ The maximum TLS version for Istio workloads is 1.3. After configuring the minimum TLS version of Istio workloads, you can verify that the minimum TLS version was configured and works as expected. -* Deploy two workloads: `httpbin` and `sleep`. Deploy these into a single namespace, +* Deploy two workloads: `httpbin` and `curl`. Deploy these into a single namespace, for example `foo`. Both workloads run with an Envoy proxy in front of each. 
{{< text bash >}} $ kubectl create ns foo $ kubectl apply -f <(istioctl kube-inject -f @samples/httpbin/httpbin.yaml@) -n foo - $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) -n foo + $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) -n foo {{< /text >}} -* Verify that `sleep` successfully communicates with `httpbin` using this command: +* Verify that `curl` successfully communicates with `httpbin` using this command: {{< text bash >}} - $ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" + $ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" 200 {{< /text >}} @@ -63,7 +63,7 @@ In the example, the minimum TLS version was configured to be 1.3. To check that TLS 1.3 is allowed, you can run the following command: {{< text bash >}} -$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_3 -connect httpbin.foo:8000 | grep "TLSv1.3" +$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_3 -connect httpbin.foo:8000 | grep "TLSv1.3" {{< /text >}} The text output should include: @@ -75,7 +75,7 @@ TLSv1.3 To check that TLS 1.2 is not allowed, you can run the following command: {{< text bash >}} -$ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_2 -connect httpbin.foo:8000 | grep "Cipher is (NONE)" +$ kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_2 -connect httpbin.foo:8000 | grep "Cipher is (NONE)" {{< /text >}} The text output should include: @@ -86,11 +86,11 @@ Cipher is (NONE) ## Cleanup -Delete sample applications `sleep` and `httpbin` from the `foo` namespace: +Delete sample applications `curl` and `httpbin` from the `foo` namespace: {{< text bash >}} $ kubectl delete -f samples/httpbin/httpbin.yaml -n foo -$ kubectl delete -f samples/sleep/sleep.yaml -n foo +$ kubectl delete -f samples/curl/curl.yaml -n foo {{< /text >}} Uninstall Istio from the cluster: diff --git a/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/snips.sh b/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/snips.sh index 2c9fc89460f23..351719d58da79 100644 --- a/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/snips.sh +++ b/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/snips.sh @@ -35,11 +35,11 @@ istioctl install -f ./istio.yaml snip_check_the_tls_configuration_of_istio_workloads_1() { kubectl create ns foo kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo -kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo +kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml) -n foo } snip_check_the_tls_configuration_of_istio_workloads_2() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c sleep -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o 
jsonpath={.items..metadata.name})" -c curl -n foo -- curl http://httpbin.foo:8000/ip -sS -o /dev/null -w "%{http_code}\n" } ! IFS=$'\n' read -r -d '' snip_check_the_tls_configuration_of_istio_workloads_2_out <<\ENDSNIP @@ -47,7 +47,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadat ENDSNIP snip_check_the_tls_configuration_of_istio_workloads_3() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_3 -connect httpbin.foo:8000 | grep "TLSv1.3" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_3 -connect httpbin.foo:8000 | grep "TLSv1.3" } ! IFS=$'\n' read -r -d '' snip_check_the_tls_configuration_of_istio_workloads_4 <<\ENDSNIP @@ -55,7 +55,7 @@ TLSv1.3 ENDSNIP snip_check_the_tls_configuration_of_istio_workloads_5() { -kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_2 -connect httpbin.foo:8000 | grep "Cipher is (NONE)" +kubectl exec "$(kubectl get pod -l app=curl -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -alpn istio -tls1_2 -connect httpbin.foo:8000 | grep "Cipher is (NONE)" } ! IFS=$'\n' read -r -d '' snip_check_the_tls_configuration_of_istio_workloads_6 <<\ENDSNIP @@ -64,7 +64,7 @@ ENDSNIP snip_cleanup_1() { kubectl delete -f samples/httpbin/httpbin.yaml -n foo -kubectl delete -f samples/sleep/sleep.yaml -n foo +kubectl delete -f samples/curl/curl.yaml -n foo } snip_cleanup_2() { diff --git a/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/test.sh b/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/test.sh index a5b6024776946..46dcaf049b1aa 100644 --- a/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/test.sh +++ b/content/en/docs/tasks/security/tls-configuration/workload-min-tls-version/test.sh @@ -30,9 +30,9 @@ _wait_for_deployment istio-system istiod snip_check_the_tls_configuration_of_istio_workloads_1 _wait_for_deployment foo httpbin -_wait_for_deployment foo sleep +_wait_for_deployment foo curl -# Send request from sleep to httpbin +# Send request from curl to httpbin _verify_contains snip_check_the_tls_configuration_of_istio_workloads_2 "$snip_check_the_tls_configuration_of_istio_workloads_2_out" _verify_contains snip_check_the_tls_configuration_of_istio_workloads_3 "$snip_check_the_tls_configuration_of_istio_workloads_4" diff --git a/content/en/docs/tasks/traffic-management/egress/egress-control/gtwapi_test.sh b/content/en/docs/tasks/traffic-management/egress/egress-control/gtwapi_test.sh index fd7751a0cda47..0c50c9aaf4b2e 100644 --- a/content/en/docs/tasks/traffic-management/egress/egress-control/gtwapi_test.sh +++ b/content/en/docs/tasks/traffic-management/egress/egress-control/gtwapi_test.sh @@ -23,7 +23,7 @@ install_gateway_api_crds kubectl label namespace default istio-injection=enabled --overwrite snip_before_you_begin_1 -_wait_for_deployment default sleep +_wait_for_deployment default curl snip_before_you_begin_3 snip_access_an_external_http_service_1 diff --git a/content/en/docs/tasks/traffic-management/egress/egress-control/index.md b/content/en/docs/tasks/traffic-management/egress/egress-control/index.md index 5d0ff30aa08f7..ab89293d5c7c2 100644 --- a/content/en/docs/tasks/traffic-management/egress/egress-control/index.md 
+++ b/content/en/docs/tasks/traffic-management/egress/egress-control/index.md @@ -28,19 +28,19 @@ This task shows you how to access external services in three different ways: Use the `demo` [configuration profile](/docs/setup/additional-setup/config-profiles/) or otherwise [enable Envoy’s access logging](/docs/tasks/observability/logs/access-log/#enable-envoy-s-access-logging). -* Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests. +* Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests. If you have [automatic sidecar injection](/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection) enabled, run the following command to deploy the sample app: {{< text bash >}} - $ kubectl apply -f @samples/sleep/sleep.yaml@ + $ kubectl apply -f @samples/curl/curl.yaml@ {{< /text >}} - Otherwise, manually inject the sidecar before deploying the `sleep` application with the following command: + Otherwise, manually inject the sidecar before deploying the `curl` application with the following command: {{< text bash >}} - $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@) + $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@) {{< /text >}} {{< tip >}} @@ -50,7 +50,7 @@ This task shows you how to access external services in three different ways: * Set the `SOURCE_POD` environment variable to the name of your source pod: {{< text bash >}} - $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}') + $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}') {{< /text >}} ## Envoy passthrough to external services @@ -92,7 +92,7 @@ You can then decide to [configure access to external services](#controlled-acces successful `200` responses: {{< text bash >}} - $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sSI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c sleep -- curl -sI https://edition.cnn.com | grep "HTTP/" + $ kubectl exec "$SOURCE_POD" -c curl -- curl -sSI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c curl -- curl -sI https://edition.cnn.com | grep "HTTP/" HTTP/2 200 HTTP/2 200 {{< /text >}} @@ -143,7 +143,7 @@ any other unintentional accesses. 1. Make a couple of requests to external HTTPS services from `SOURCE_POD` to verify that they are now blocked: {{< text bash >}} - $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c sleep -- curl -sI https://edition.cnn.com | grep "HTTP/" + $ kubectl exec "$SOURCE_POD" -c curl -- curl -sI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c curl -- curl -sI https://edition.cnn.com | grep "HTTP/" command terminated with exit code 35 command terminated with exit code 35 {{< /text >}} @@ -190,7 +190,7 @@ any other unintentional accesses. 1. Make a request to the external HTTP service from `SOURCE_POD`: {{< text bash >}} - $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS http://httpbin.org/headers + $ kubectl exec "$SOURCE_POD" -c curl -- curl -sS http://httpbin.org/headers { "headers": { "Accept": "*/*", @@ -238,7 +238,7 @@ any other unintentional accesses. 1. 
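At this point you can also list the service entries you have created so far, to confirm which external hosts the mesh knows about (an optional check; it assumes the entries were created in the `default` namespace used by this task):

    {{< text bash >}}
    $ kubectl get serviceentry
    {{< /text >}}

1.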

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sSI https://www.google.com | grep "HTTP/"
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sSI https://www.google.com | grep "HTTP/"
    HTTP/2 200
    {{< /text >}}
@@ -263,7 +263,7 @@ In this example, you set a timeout rule on calls to the `httpbin.org` service.
    httpbin.org external service:

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
+   $ kubectl exec "$SOURCE_POD" -c curl -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
    200
    real    0m5.024s
    user    0m0.003s
@@ -331,7 +331,7 @@ EOF
 3) Wait a few seconds, then make the _curl_ request again:

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
+   $ kubectl exec "$SOURCE_POD" -c curl -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
    504
    real    0m3.149s
    user    0m0.004s
@@ -515,16 +515,16 @@ add `--set values.global.proxy.includeIPRanges="10.0.0.1/24"`.

 ### Access the external services

-Because the bypass configuration only affects new deployments, you need to terminate and then redeploy the `sleep`
+Because the bypass configuration only affects new deployments, you need to terminate and then redeploy the `curl`
 application as described in the [Before you begin](#before-you-begin) section.

-After updating the `istio-sidecar-injector` configmap and redeploying the `sleep` application,
+After updating the `istio-sidecar-injector` configmap and redeploying the `curl` application,
 the Istio sidecar will only intercept and manage internal requests
 within the cluster. Any external request bypasses the sidecar and
 goes straight to its intended destination. For example:

 {{< text bash >}}
-$ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS http://httpbin.org/headers
+$ kubectl exec "$SOURCE_POD" -c curl -- curl -sS http://httpbin.org/headers
 {
   "headers": {
     "Accept": "*/*",
@@ -588,8 +588,8 @@ section.

 ## Cleanup

-Shutdown the [sleep]({{< github_tree >}}/samples/sleep) service:
+Shut down the [curl]({{< github_tree >}}/samples/curl) service:

 {{< text bash >}}
-$ kubectl delete -f @samples/sleep/sleep.yaml@
+$ kubectl delete -f @samples/curl/curl.yaml@
 {{< /text >}}
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-control/snips.sh b/content/en/docs/tasks/traffic-management/egress/egress-control/snips.sh
index e8a4d51399dae..bf4960fc8010b 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-control/snips.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-control/snips.sh
@@ -22,15 +22,15 @@ source "content/en/boilerplates/snips/gateway-api-support.sh"

 snip_before_you_begin_1() {
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
 }

 snip_before_you_begin_2() {
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml)
 }

 snip_before_you_begin_3() {
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items..metadata.name}')
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items..metadata.name}')
 }

 snip_envoy_passthrough_to_external_services_1() {
@@ -38,7 +38,7 @@ kubectl get configmap istio -n istio-system -o yaml
 }

 snip_envoy_passthrough_to_external_services_3() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sSI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c sleep -- curl -sI https://edition.cnn.com | grep "HTTP/"
+kubectl exec "$SOURCE_POD" -c curl -- curl -sSI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c curl -- curl -sI https://edition.cnn.com | grep "HTTP/"
 }

 ! IFS=$'\n' read -r -d '' snip_envoy_passthrough_to_external_services_3_out <<\ENDSNIP
@@ -54,7 +54,7 @@ spec:
 ENDSNIP

 snip_change_to_the_blockingbydefault_policy_3() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c sleep -- curl -sI https://edition.cnn.com | grep "HTTP/"
+kubectl exec "$SOURCE_POD" -c curl -- curl -sI https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c curl -- curl -sI https://edition.cnn.com | grep "HTTP/"
 }

 ! IFS=$'\n' read -r -d '' snip_change_to_the_blockingbydefault_policy_3_out <<\ENDSNIP
@@ -81,7 +81,7 @@ EOF
 }

 snip_access_an_external_http_service_2() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS http://httpbin.org/headers
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS http://httpbin.org/headers
 }

 ! IFS=$'\n' read -r -d '' snip_access_an_external_http_service_2_out <<\ENDSNIP
@@ -123,7 +123,7 @@ EOF
 }

 snip_access_an_external_https_service_2() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sSI https://www.google.com | grep "HTTP/"
+kubectl exec "$SOURCE_POD" -c curl -- curl -sSI https://www.google.com | grep "HTTP/"
 }

 ! IFS=$'\n' read -r -d '' snip_access_an_external_https_service_2_out <<\ENDSNIP
@@ -139,7 +139,7 @@ kubectl logs "$SOURCE_POD" -c istio-proxy | tail
 ENDSNIP

 snip_manage_traffic_to_external_services_1() {
-kubectl exec "$SOURCE_POD" -c sleep -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
+kubectl exec "$SOURCE_POD" -c curl -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
 }

 ! IFS=$'\n' read -r -d '' snip_manage_traffic_to_external_services_1_out <<\ENDSNIP
@@ -192,7 +192,7 @@ EOF
 }

 snip_manage_traffic_to_external_services_4() {
-kubectl exec "$SOURCE_POD" -c sleep -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
+kubectl exec "$SOURCE_POD" -c curl -- time curl -o /dev/null -sS -w "%{http_code}\n" http://httpbin.org/delay/5
 }

 ! IFS=$'\n' read -r -d '' snip_manage_traffic_to_external_services_4_out <<\ENDSNIP
@@ -281,7 +281,7 @@ kubectl describe pod kube-apiserver -n kube-system | grep 'service-cluster-ip-ra
 ENDSNIP

 snip_access_the_external_services_1() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS http://httpbin.org/headers
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS http://httpbin.org/headers
 }

 ! IFS=$'\n' read -r -d '' snip_access_the_external_services_1_out <<\ENDSNIP
@@ -295,5 +295,5 @@ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS http://httpbin.org/headers
 ENDSNIP

 snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
 }
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-control/test.sh b/content/en/docs/tasks/traffic-management/egress/egress-control/test.sh
index 89fc0a110b74b..0db6e9985d102 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-control/test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-control/test.sh
@@ -27,7 +27,7 @@ _wait_for_deployment istio-system istiod
 kubectl label namespace default istio-injection=enabled --overwrite

 snip_before_you_begin_1
-_wait_for_deployment default sleep
+_wait_for_deployment default curl
 snip_before_you_begin_3

 _verify_not_contains snip_envoy_passthrough_to_external_services_1 "REGISTRY_ONLY"
@@ -59,13 +59,13 @@ _wait_for_deployment istio-system istiod

 _verify_contains snip_envoy_passthrough_to_external_services_1 "REGISTRY_ONLY"

-# Restart the sleep service
+# Restart the curl service
 snip_cleanup_1
 snip_before_you_begin_1
 start=$(date +%s)
 kubectl wait --for=delete "pod/$SOURCE_POD" --timeout=180s
 echo "Wait for termination duration: $(($(date +%s)-start)) seconds"
-_wait_for_deployment default sleep
+_wait_for_deployment default curl

 kubectl get po

@@ -73,7 +73,7 @@ snip_before_you_begin_3

 echo "SOURCE_POD: $SOURCE_POD"

-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v http://httpbin.org/headers
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v http://httpbin.org/headers
 #_verify_elided snip_access_the_external_services_1 "$snip_access_the_external_services_1_out"
 # TODO This doesn't work, curl returns 502.

diff --git a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/index.md b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/index.md
index 4308304bc8b4d..c365dd0afe46c 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/index.md
+++ b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/index.md
@@ -25,28 +25,28 @@ traffic to external services.

 * Setup Istio by following the instructions in the [Installation guide](/docs/setup/).

-* Start the [sleep]({{< github_tree >}}/samples/sleep) sample
+* Start the [curl]({{< github_tree >}}/samples/curl) sample
   which will be used as a test source for external calls.

   If you have enabled
   [automatic sidecar injection](/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection), do

   {{< text bash >}}
-  $ kubectl apply -f @samples/sleep/sleep.yaml@
+  $ kubectl apply -f @samples/curl/curl.yaml@
   {{< /text >}}

-  otherwise, you have to manually inject the sidecar before deploying the `sleep` application:
+  otherwise, you have to manually inject the sidecar before deploying the `curl` application:

   {{< text bash >}}
-  $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@)
+  $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@)
   {{< /text >}}

   Note that any pod that you can `exec` and `curl` from would do.

 * Create a shell variable to hold the name of the source pod for sending requests to external services.
-  If you used the [sleep]({{< github_tree >}}/samples/sleep) sample, run:
+  If you used the [curl]({{< github_tree >}}/samples/curl) sample, run:

   {{< text bash >}}
-  $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+  $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
   {{< /text >}}

 * For macOS users, verify that you are using `openssl` version 1.1 or later:
@@ -102,7 +102,7 @@ be done by the egress gateway, as opposed to by the sidecar in the previous exam
 1. Verify that your `ServiceEntry` was applied correctly by sending a request to [http://edition.cnn.com/politics](https://edition.cnn.com/politics).

    {{< text bash >}}
-   $ kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+   $ kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
    HTTP/1.1 301 Moved Permanently
    ...
    location: https://edition.cnn.com/politics
@@ -315,7 +315,7 @@ EOF
 6) Send an HTTP request to [http://edition.cnn.com/politics](https://edition.cnn.com/politics).

    {{< text bash >}}
-   $ kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+   $ kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
    HTTP/1.1 200 OK
    ...
    {{< /text >}}
@@ -933,7 +933,7 @@ kubernetes://client-credential-cacert     Cert Chain     ACTIVE     true
 6) Send an HTTP request to `http://my-nginx.mesh-external.svc.cluster.local`:

    {{< text bash >}}
-   $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
+   $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
@@ -1039,8 +1039,8 @@ $ kubectl delete referencegrant my-nginx-reference-grant -n mesh-external

 ## Cleanup

-Delete the `sleep` service and deployment:
+Delete the `curl` service and deployment:

 {{< text bash >}}
-$ kubectl delete -f @samples/sleep/sleep.yaml@
+$ kubectl delete -f @samples/curl/curl.yaml@
 {{< /text >}}
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/mtls_test.sh b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/mtls_test.sh
index 874047827547e..974b41440a3b3 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/mtls_test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/mtls_test.sh
@@ -26,9 +26,9 @@ GATEWAY_API="${GATEWAY_API:-false}"
 # Make sure automatic sidecar injection is enabled
 kubectl label namespace default istio-injection=enabled || true

-# Deploy sleep sample
+# Deploy curl sample
 snip_before_you_begin_1
-_wait_for_deployment default sleep
+_wait_for_deployment default curl

 # Generate Certificates for service outside the mesh to use for mTLS
 set +e # suppress harmless "No such file or directory:../crypto/bio/bss_file.c:72:fopen('1_root/index.txt.attr','r')" error
@@ -60,7 +60,7 @@ else
     _wait_for_istio destinationrule default egressgateway-for-nginx
 fi

-# Configure routing from sleep to egress gateway to nginx
+# Configure routing from curl to egress gateway to nginx
 if [ "$GATEWAY_API" == "true" ]; then
     snip_configure_mutual_tls_origination_for_egress_traffic_6
 else
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/snips.sh b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/snips.sh
index 904ce3a9571e1..5105948ca562a 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/snips.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/snips.sh
@@ -22,15 +22,15 @@ source "content/en/boilerplates/snips/gateway-api-support.sh"

 snip_before_you_begin_1() {
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
 }

 snip_before_you_begin_2() {
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml)
 }

 snip_before_you_begin_3() {
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
 }

 snip_before_you_begin_4() {
@@ -66,7 +66,7 @@ EOF
 }

 snip_perform_tls_origination_with_an_egress_gateway_2() {
-kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_perform_tls_origination_with_an_egress_gateway_2_out <<\ENDSNIP
@@ -247,7 +247,7 @@ EOF
 }

 snip_perform_tls_origination_with_an_egress_gateway_8() {
-kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_perform_tls_origination_with_an_egress_gateway_8_out <<\ENDSNIP
@@ -668,7 +668,7 @@ kubernetes://client-credential-cacert     Cert Chain     ACTIVE     true
 ENDSNIP

 snip_configure_mutual_tls_origination_for_egress_traffic_11() {
-kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
+kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
 }

 ! IFS=$'\n' read -r -d '' snip_configure_mutual_tls_origination_for_egress_traffic_11_out <<\ENDSNIP
@@ -728,5 +728,5 @@ rm ./nginx.conf
 }

 snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
 }
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/tls_test.sh b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/tls_test.sh
index 1b8b55b1394c3..9951865a02d95 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/tls_test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-gateway-tls-origination/tls_test.sh
@@ -26,9 +26,9 @@ GATEWAY_API="${GATEWAY_API:-false}"
 # Make sure automatic sidecar injection is enabled
 kubectl label namespace default istio-injection=enabled || true

-# Deploy sleep sample and set up variable pointing to it
+# Deploy curl sample and set up variable pointing to it
 snip_before_you_begin_1
-_wait_for_deployment default sleep
+_wait_for_deployment default curl
 snip_before_you_begin_3

 # Apply ServiceEntry for external workload and verify 301
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-gateway/index.md b/content/en/docs/tasks/traffic-management/egress/egress-gateway/index.md
index f82e1b1ff7116..c3123d51eceb8 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-gateway/index.md
+++ b/content/en/docs/tasks/traffic-management/egress/egress-gateway/index.md
@@ -48,10 +48,10 @@ controlled way.
   [configuration profile](/docs/setup/additional-setup/config-profiles/).
   {{< /tip >}}

-* Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests.
+* Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests.

   {{< text bash >}}
-  $ kubectl apply -f @samples/sleep/sleep.yaml@
+  $ kubectl apply -f @samples/curl/curl.yaml@
   {{< /text >}}

   {{< tip >}}
@@ -61,7 +61,7 @@ controlled way.
 * Set the `SOURCE_POD` environment variable to the name of your source pod:

   {{< text bash >}}
-  $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+  $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
   {{< /text >}}

 {{< warning >}}
@@ -152,7 +152,7 @@ First create a `ServiceEntry` to allow direct traffic to an external service.

 1. Verify that your `ServiceEntry` was applied correctly by sending an HTTP request to [http://edition.cnn.com/politics](http://edition.cnn.com/politics).

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
    ...
    HTTP/1.1 301 Moved Permanently
    ...
@@ -325,7 +325,7 @@ EOF
 5) Resend the HTTP request to [http://edition.cnn.com/politics](https://edition.cnn.com/politics).

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
    ...
    HTTP/1.1 301 Moved Permanently
    ...
@@ -459,7 +459,7 @@ You need to specify port 443 with protocol `TLS` in a corresponding `ServiceEntr
 1. Verify that your `ServiceEntry` was applied correctly by sending an HTTPS request to [https://edition.cnn.com/politics](https://edition.cnn.com/politics).

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
    ...
    HTTP/2 200
    Content-Type: text/html; charset=utf-8
@@ -609,7 +609,7 @@ EOF
    The output should be the same as before.

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
    ...
    HTTP/2 200
    Content-Type: text/html; charset=utf-8
@@ -708,7 +708,7 @@ the egress gateways.
 This section shows you how to create a
 [Kubernetes network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to prevent
 bypassing of the egress gateway. To test the network policy, you create a namespace, `test-egress`, deploy
-the [sleep]({{< github_tree >}}/samples/sleep) sample to it, and then attempt to send requests to a gateway-secured
+the [curl]({{< github_tree >}}/samples/curl) sample to it, and then attempt to send requests to a gateway-secured
 external service.

 1) Follow the steps in the
@@ -720,25 +720,25 @@ external service.
    $ kubectl create namespace test-egress
    {{< /text >}}

-3) Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample to the `test-egress` namespace.
+3) Deploy the [curl]({{< github_tree >}}/samples/curl) sample to the `test-egress` namespace.

    {{< text bash >}}
-   $ kubectl apply -n test-egress -f @samples/sleep/sleep.yaml@
+   $ kubectl apply -n test-egress -f @samples/curl/curl.yaml@
    {{< /text >}}

 4) Check that the deployed pod has a single container with no Istio sidecar attached:

    {{< text bash >}}
-   $ kubectl get pod "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress
+   $ kubectl get pod "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress
    NAME                     READY   STATUS    RESTARTS   AGE
-   sleep-776b7bcdcd-z7mc4   1/1     Running   0          18m
+   curl-776b7bcdcd-z7mc4    1/1     Running   0          18m
    {{< /text >}}

-5) Send an HTTPS request to [https://edition.cnn.com/politics](https://edition.cnn.com/politics) from the `sleep` pod in
+5) Send an HTTPS request to [https://edition.cnn.com/politics](https://edition.cnn.com/politics) from the `curl` pod in
    the `test-egress` namespace. The request will succeed since you did not define any restrictive policies yet.

    {{< text bash >}}
-   $ kubectl exec "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -c sleep -- curl -s -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics
+   $ kubectl exec "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -c curl -- curl -s -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics
    200
    {{< /text >}}
@@ -849,13 +849,13 @@ EOF
 {{< /tabset >}}

 9) Resend the previous HTTPS request to [https://edition.cnn.com/politics](https://edition.cnn.com/politics). Now it
-   should fail since the traffic is blocked by the network policy. Note that the `sleep` pod cannot bypass
+   should fail since the traffic is blocked by the network policy. Note that the `curl` pod cannot bypass
    the egress gateway. The only way it can access `edition.cnn.com` is by using an Istio sidecar proxy and by
    directing the traffic to the egress gateway. This setting demonstrates that even if some malicious pod manages to
    bypass its sidecar proxy, it will not be able to access external sites and will be blocked by the network policy.

    {{< text bash >}}
-   $ kubectl exec "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -c sleep -- curl -v -sS https://edition.cnn.com/politics
+   $ kubectl exec "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -c curl -- curl -v -sS https://edition.cnn.com/politics
    Hostname was NOT found in DNS cache
      Trying 151.101.65.67...
      Trying 2a04:4e42:200::323...
@@ -869,18 +869,18 @@ EOF
    connect to 151.101.65.67 port 443 failed: Connection timed out
    {{< /text >}}

-10) Now inject an Istio sidecar proxy into the `sleep` pod in the `test-egress` namespace by first enabling
+10) Now inject an Istio sidecar proxy into the `curl` pod in the `test-egress` namespace by first enabling
     automatic sidecar proxy injection in the `test-egress` namespace:

    {{< text bash >}}
    $ kubectl label namespace test-egress istio-injection=enabled
    {{< /text >}}

-11) Then redeploy the `sleep` deployment:
+11) Then redeploy the `curl` deployment:

    {{< text bash >}}
-   $ kubectl delete deployment sleep -n test-egress
-   $ kubectl apply -f @samples/sleep/sleep.yaml@ -n test-egress
+   $ kubectl delete deployment curl -n test-egress
+   $ kubectl apply -f @samples/curl/curl.yaml@ -n test-egress
    {{< /text >}}

 12) Check that the deployed pod has two containers, including the Istio sidecar proxy (`istio-proxy`):

@@ -890,11 +890,11 @@ EOF
 {{< tab name="Istio APIs" category-value="istio-apis" >}}

 {{< text bash >}}
-$ kubectl get pod "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}'
-sleep istio-proxy
+$ kubectl get pod "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}'
+curl istio-proxy
jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}' -sleep istio-proxy +$ kubectl get pod "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}' +curl istio-proxy {{< /text >}} {{< /tab >}} @@ -928,7 +928,7 @@ sleep istio-proxy Network Policy you defined. The gateway then forwards the traffic to `edition.cnn.com`. {{< text bash >}} - $ kubectl exec "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -c sleep -- curl -sS -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics + $ kubectl exec "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -c curl -- curl -sS -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics 200 {{< /text >}} @@ -979,7 +979,7 @@ You should see a line similar to the following: {{< tab name="Istio APIs" category-value="istio-apis" >}} {{< text bash >}} -$ kubectl delete -f @samples/sleep/sleep.yaml@ -n test-egress +$ kubectl delete -f @samples/curl/curl.yaml@ -n test-egress $ kubectl delete destinationrule egressgateway-for-cnn -n test-egress $ kubectl delete networkpolicy allow-egress-to-istio-system-and-kube-dns -n test-egress $ kubectl label namespace kube-system kube-system- @@ -992,7 +992,7 @@ $ kubectl delete namespace test-egress {{< tab name="Gateway API" category-value="gateway-api" >}} {{< text bash >}} -$ kubectl delete -f @samples/sleep/sleep.yaml@ -n test-egress +$ kubectl delete -f @samples/curl/curl.yaml@ -n test-egress $ kubectl delete networkpolicy allow-egress-to-istio-system-and-kube-dns -n test-egress $ kubectl label namespace kube-system kube-system- $ kubectl label namespace istio-system istio- @@ -1008,8 +1008,8 @@ $ kubectl delete namespace test-egress ## Cleanup -Shutdown the [sleep]({{< github_tree >}}/samples/sleep) service: +Shutdown the [curl]({{< github_tree >}}/samples/curl) service: {{< text bash >}} -$ kubectl delete -f @samples/sleep/sleep.yaml@ +$ kubectl delete -f @samples/curl/curl.yaml@ {{< /text >}} diff --git a/content/en/docs/tasks/traffic-management/egress/egress-gateway/snips.sh b/content/en/docs/tasks/traffic-management/egress/egress-gateway/snips.sh index 9d459bdff4471..423b7356ddf3e 100644 --- a/content/en/docs/tasks/traffic-management/egress/egress-gateway/snips.sh +++ b/content/en/docs/tasks/traffic-management/egress/egress-gateway/snips.sh @@ -22,11 +22,11 @@ source "content/en/boilerplates/snips/gateway-api-gamma-experimental.sh" snip_before_you_begin_1() { -kubectl apply -f samples/sleep/sleep.yaml +kubectl apply -f samples/curl/curl.yaml } snip_before_you_begin_2() { -export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) +export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name}) } ! IFS=$'\n' read -r -d '' snip_before_you_begin_3 <<\ENDSNIP @@ -66,7 +66,7 @@ EOF } snip_egress_gateway_for_http_traffic_2() { -kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics +kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics } ! 
@@ -202,7 +202,7 @@ EOF
 }

 snip_egress_gateway_for_http_traffic_7() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_egress_gateway_for_http_traffic_7_out <<\ENDSNIP
@@ -283,7 +283,7 @@ EOF
 }

 snip_egress_gateway_for_https_traffic_2() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
+kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_egress_gateway_for_https_traffic_2_out <<\ENDSNIP
@@ -413,7 +413,7 @@ EOF
 }

 snip_egress_gateway_for_https_traffic_5() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
+kubectl exec "$SOURCE_POD" -c curl -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_egress_gateway_for_https_traffic_5_out <<\ENDSNIP
@@ -458,20 +458,20 @@ kubectl create namespace test-egress
 }

 snip_apply_kubernetes_network_policies_2() {
-kubectl apply -n test-egress -f samples/sleep/sleep.yaml
+kubectl apply -n test-egress -f samples/curl/curl.yaml
 }

 snip_apply_kubernetes_network_policies_3() {
-kubectl get pod "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress
+kubectl get pod "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress
 }

 ! IFS=$'\n' read -r -d '' snip_apply_kubernetes_network_policies_3_out <<\ENDSNIP
 NAME                     READY   STATUS    RESTARTS   AGE
-sleep-776b7bcdcd-z7mc4   1/1     Running   0          18m
+curl-776b7bcdcd-z7mc4    1/1     Running   0          18m
 ENDSNIP

 snip_apply_kubernetes_network_policies_4() {
-kubectl exec "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -c sleep -- curl -s -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics
+kubectl exec "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -c curl -- curl -s -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_apply_kubernetes_network_policies_4_out <<\ENDSNIP
@@ -546,7 +546,7 @@ EOF
 }

 snip_apply_kubernetes_network_policies_10() {
-kubectl exec "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -c sleep -- curl -v -sS https://edition.cnn.com/politics
+kubectl exec "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -c curl -- curl -v -sS https://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_apply_kubernetes_network_policies_10_out <<\ENDSNIP
@@ -568,16 +568,16 @@ kubectl label namespace test-egress istio-injection=enabled
 }

 snip_apply_kubernetes_network_policies_12() {
-kubectl delete deployment sleep -n test-egress
-kubectl apply -f samples/sleep/sleep.yaml -n test-egress
+kubectl delete deployment curl -n test-egress
+kubectl apply -f samples/curl/curl.yaml -n test-egress
 }

 snip_apply_kubernetes_network_policies_13() {
-kubectl get pod "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}'
+kubectl get pod "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}'
 }

 ! IFS=$'\n' read -r -d '' snip_apply_kubernetes_network_policies_13_out <<\ENDSNIP
-sleep istio-proxy
+curl istio-proxy
 ENDSNIP

 snip_apply_kubernetes_network_policies_14() {
@@ -594,15 +594,15 @@ EOF
 }

 snip_apply_kubernetes_network_policies_15() {
-kubectl get pod "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}'
+kubectl get pod "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -o jsonpath='{.spec.containers[*].name}'
 }

 ! IFS=$'\n' read -r -d '' snip_apply_kubernetes_network_policies_15_out <<\ENDSNIP
-sleep istio-proxy
+curl istio-proxy
 ENDSNIP

 snip_apply_kubernetes_network_policies_16() {
-kubectl exec "$(kubectl get pod -n test-egress -l app=sleep -o jsonpath={.items..metadata.name})" -n test-egress -c sleep -- curl -sS -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics
+kubectl exec "$(kubectl get pod -n test-egress -l app=curl -o jsonpath={.items..metadata.name})" -n test-egress -c curl -- curl -sS -o /dev/null -w "%{http_code}\n" https://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_apply_kubernetes_network_policies_16_out <<\ENDSNIP
@@ -626,7 +626,7 @@ kubectl logs -l gateway.networking.k8s.io/gateway-name=cnn-egress-gateway -c ist
 ENDSNIP

 snip_cleanup_network_policies_1() {
-kubectl delete -f samples/sleep/sleep.yaml -n test-egress
+kubectl delete -f samples/curl/curl.yaml -n test-egress
 kubectl delete destinationrule egressgateway-for-cnn -n test-egress
 kubectl delete networkpolicy allow-egress-to-istio-system-and-kube-dns -n test-egress
 kubectl label namespace kube-system kube-system-
@@ -635,7 +635,7 @@ kubectl delete namespace test-egress
 }

 snip_cleanup_network_policies_2() {
-kubectl delete -f samples/sleep/sleep.yaml -n test-egress
+kubectl delete -f samples/curl/curl.yaml -n test-egress
 kubectl delete networkpolicy allow-egress-to-istio-system-and-kube-dns -n test-egress
 kubectl label namespace kube-system kube-system-
 kubectl label namespace istio-system istio-
@@ -644,5 +644,5 @@ kubectl delete namespace test-egress
 }

 snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
 }
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-gateway/test.sh b/content/en/docs/tasks/traffic-management/egress/egress-gateway/test.sh
index f63d32e6ba0cc..e230c1b829aae 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-gateway/test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-gateway/test.sh
@@ -28,10 +28,10 @@ source "tests/util/samples.sh"
 # Make sure default namespace is injected
 kubectl label namespace default istio-injection=enabled || true

-# Deploy sleep sample and set up variable pointing to it
-# Start the sleep sample
-startup_sleep_sample
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')
+# Deploy curl sample and set up variable pointing to it
+# Start the curl sample
+startup_curl_sample
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')

 # Create ServiceEntry
 snip_egress_gateway_for_http_traffic_1
@@ -112,7 +112,7 @@ fi
 # Create namespace
 snip_apply_kubernetes_network_policies_1

-# Deploy sleep
+# Deploy curl
 snip_apply_kubernetes_network_policies_2

 # Verify 200 response
@@ -141,16 +141,16 @@ fi
 # Enable sidecar injection
 snip_apply_kubernetes_network_policies_11

-# Delete older sleep and reapply
+# Delete older curl and reapply
 snip_apply_kubernetes_network_policies_12
-_wait_for_deployment test-egress sleep
+_wait_for_deployment test-egress curl

 if [ "$GATEWAY_API" == "true" ]; then
     # verify containers
-    _verify_contains snip_apply_kubernetes_network_policies_15 "sleep istio-proxy"
+    _verify_contains snip_apply_kubernetes_network_policies_15 "curl istio-proxy"
 else
     # verify containers
-    _verify_contains snip_apply_kubernetes_network_policies_13 "sleep istio-proxy"
+    _verify_contains snip_apply_kubernetes_network_policies_13 "curl istio-proxy"

     # configure DR
     snip_apply_kubernetes_network_policies_14
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/index.md b/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/index.md
index 2002e5cb00d60..520dd492bc884 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/index.md
+++ b/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/index.md
@@ -40,17 +40,17 @@ Kubernetes Services for egress traffic work with other protocols as well.
   $ kubectl create namespace without-istio
   {{< /text >}}

-* Start the [sleep]({{< github_tree >}}/samples/sleep) sample in the `without-istio` namespace.
+* Start the [curl]({{< github_tree >}}/samples/curl) sample in the `without-istio` namespace.

   {{< text bash >}}
-  $ kubectl apply -f @samples/sleep/sleep.yaml@ -n without-istio
+  $ kubectl apply -f @samples/curl/curl.yaml@ -n without-istio
   {{< /text >}}

 * To send requests, create the `SOURCE_POD_WITHOUT_ISTIO` environment variable to store the name of the source pod:

   {{< text bash >}}
-  $ export SOURCE_POD_WITHOUT_ISTIO="$(kubectl get pod -n without-istio -l app=sleep -o jsonpath={.items..metadata.name})"
+  $ export SOURCE_POD_WITHOUT_ISTIO="$(kubectl get pod -n without-istio -l app=curl -o jsonpath={.items..metadata.name})"
   {{< /text >}}

 * Verify that the Istio sidecar was not injected, that is the pod has one container:
@@ -58,7 +58,7 @@ Kubernetes Services for egress traffic work with other protocols as well.
   {{< text bash >}}
   $ kubectl get pod "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio
   NAME                     READY   STATUS    RESTARTS   AGE
-  sleep-66c8d79ff5-8tqrl   1/1     Running   0          32s
+  curl-66c8d79ff5-8tqrl    1/1     Running   0          32s
   {{< /text >}}

 ## Kubernetes ExternalName service to access an external service
@@ -95,7 +95,7 @@ Kubernetes Services for egress traffic work with other protocols as well.
    Note that the _curl_ command below uses the [Kubernetes DNS format for services](https://v1-13.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/#a-records): `<service name>.<namespace>.svc.cluster.local`.

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c sleep -- curl -sS my-httpbin.default.svc.cluster.local/headers
+   $ kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c curl -- curl -sS my-httpbin.default.svc.cluster.local/headers
    {
      "headers": {
        "Accept": "*/*",
@@ -128,7 +128,7 @@ Kubernetes Services for egress traffic work with other protocols as well.
    the `Host` header equals to your service's hostname.

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS my-httpbin.default.svc.cluster.local/headers
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sS my-httpbin.default.svc.cluster.local/headers
    {
      "headers": {
        "Accept": "*/*",
@@ -139,7 +139,7 @@ Kubernetes Services for egress traffic work with other protocols as well.
        "X-B3-Spanid": "5795fab599dca0b8",
        "X-B3-Traceid": "5079ad3a4af418915795fab599dca0b8",
        "X-Envoy-Peer-Metadata": "...",
-       "X-Envoy-Peer-Metadata-Id": "sidecar~10.28.1.74~sleep-6bdb595bcb-drr45.default~default.svc.cluster.local"
+       "X-Envoy-Peer-Metadata-Id": "sidecar~10.28.1.74~curl-6bdb595bcb-drr45.default~default.svc.cluster.local"
      }
    }
    {{< /text >}}
@@ -200,7 +200,7 @@ $ kubectl delete service my-httpbin
    Use the `--resolve` option of `curl` to access `wikipedia.org` by the cluster IP:

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c sleep -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
+   $ kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c curl -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
    <title>Wikipedia, the free encyclopedia</title>
    {{< /text >}}
@@ -224,7 +224,7 @@ $ kubectl delete service my-httpbin

 1. Access `wikipedia.org` by your Kubernetes service's cluster IP from the source pod with Istio sidecar:

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
    <title>Wikipedia, the free encyclopedia</title>
    {{< /text >}}
@@ -233,7 +233,7 @@ $ kubectl delete service my-httpbin
    in the output of your service as the cluster IP.

    {{< text bash >}}
-   $ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page -o /dev/null
+   $ kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page -o /dev/null
    * Added en.wikipedia.org:443:172.21.156.230 to DNS cache
    * Hostname en.wikipedia.org was found in DNS cache
    *   Trying 172.21.156.230...
@@ -252,16 +252,16 @@ $ kubectl delete service my-wikipedia

 ## Cleanup

-1. Shutdown the [sleep]({{< github_tree >}}/samples/sleep) service:
+1. Shut down the [curl]({{< github_tree >}}/samples/curl) service:

    {{< text bash >}}
-   $ kubectl delete -f @samples/sleep/sleep.yaml@
+   $ kubectl delete -f @samples/curl/curl.yaml@
    {{< /text >}}

-1. Shutdown the [sleep]({{< github_tree >}}/samples/sleep) service in the `without-istio` namespace:
+1. Shut down the [curl]({{< github_tree >}}/samples/curl) service in the `without-istio` namespace:

    {{< text bash >}}
-   $ kubectl delete -f @samples/sleep/sleep.yaml@ -n without-istio
+   $ kubectl delete -f @samples/curl/curl.yaml@ -n without-istio
    {{< /text >}}

 1. Delete `without-istio` namespace:
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/snips.sh b/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/snips.sh
index def458fd81b80..8320aa4da57ec 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/snips.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/snips.sh
@@ -26,11 +26,11 @@ kubectl create namespace without-istio
 }

 snip__2() {
-kubectl apply -f samples/sleep/sleep.yaml -n without-istio
+kubectl apply -f samples/curl/curl.yaml -n without-istio
 }

 snip__3() {
-export SOURCE_POD_WITHOUT_ISTIO="$(kubectl get pod -n without-istio -l app=sleep -o jsonpath={.items..metadata.name})"
+export SOURCE_POD_WITHOUT_ISTIO="$(kubectl get pod -n without-istio -l app=curl -o jsonpath={.items..metadata.name})"
 }

 snip__4() {
@@ -39,7 +39,7 @@ kubectl get pod "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio

 ! IFS=$'\n' read -r -d '' snip__4_out <<\ENDSNIP
 NAME                     READY   STATUS    RESTARTS   AGE
-sleep-66c8d79ff5-8tqrl   1/1     Running   0          32s
+curl-66c8d79ff5-8tqrl    1/1     Running   0          32s
 ENDSNIP

 snip_kubernetes_externalname_service_to_access_an_external_service_1() {
@@ -68,7 +68,7 @@ my-httpbin   ExternalName   <none>   httpbin.org   80/TCP   4s
 ENDSNIP

 snip_kubernetes_externalname_service_to_access_an_external_service_3() {
-kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c sleep -- curl -sS my-httpbin.default.svc.cluster.local/headers
+kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c curl -- curl -sS my-httpbin.default.svc.cluster.local/headers
 }

 ! IFS=$'\n' read -r -d '' snip_kubernetes_externalname_service_to_access_an_external_service_3_out <<\ENDSNIP
@@ -96,7 +96,7 @@ EOF
 }

 snip_kubernetes_externalname_service_to_access_an_external_service_5() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS my-httpbin.default.svc.cluster.local/headers
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS my-httpbin.default.svc.cluster.local/headers
 }

 ! IFS=$'\n' read -r -d '' snip_kubernetes_externalname_service_to_access_an_external_service_5_out <<\ENDSNIP
@@ -110,7 +110,7 @@ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS my-httpbin.default.svc.cluster.l
     "X-B3-Spanid": "5795fab599dca0b8",
     "X-B3-Traceid": "5079ad3a4af418915795fab599dca0b8",
     "X-Envoy-Peer-Metadata": "...",
-    "X-Envoy-Peer-Metadata-Id": "sidecar~10.28.1.74~sleep-6bdb595bcb-drr45.default~default.svc.cluster.local"
+    "X-Envoy-Peer-Metadata-Id": "sidecar~10.28.1.74~curl-6bdb595bcb-drr45.default~default.svc.cluster.local"
   }
 }
 ENDSNIP
@@ -160,7 +160,7 @@ my-wikipedia   ClusterIP   172.21.156.230   <none>   443/TCP   21h
 ENDSNIP

 snip_use_a_kubernetes_service_with_endpoints_to_access_an_external_service_4() {
-kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c sleep -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
+kubectl exec "$SOURCE_POD_WITHOUT_ISTIO" -n without-istio -c curl -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
 }

 ! IFS=$'\n' read -r -d '' snip_use_a_kubernetes_service_with_endpoints_to_access_an_external_service_4_out <<\ENDSNIP
@@ -182,7 +182,7 @@ EOF
 }

 snip_use_a_kubernetes_service_with_endpoints_to_access_an_external_service_6() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"
 }

 ! IFS=$'\n' read -r -d '' snip_use_a_kubernetes_service_with_endpoints_to_access_an_external_service_6_out <<\ENDSNIP
@@ -190,7 +190,7 @@ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS --resolve en.wikipedia.org:443:"
 ENDSNIP

 snip_use_a_kubernetes_service_with_endpoints_to_access_an_external_service_7() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page -o /dev/null
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS -v --resolve en.wikipedia.org:443:"$(kubectl get service my-wikipedia -o jsonpath='{.spec.clusterIP}')" https://en.wikipedia.org/wiki/Main_Page -o /dev/null
 }

 ! IFS=$'\n' read -r -d '' snip_use_a_kubernetes_service_with_endpoints_to_access_an_external_service_7_out <<\ENDSNIP
@@ -209,11 +209,11 @@ kubectl delete service my-wikipedia
 }

 snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
 }

 snip_cleanup_2() {
-kubectl delete -f samples/sleep/sleep.yaml -n without-istio
+kubectl delete -f samples/curl/curl.yaml -n without-istio
 }

 snip_cleanup_3() {
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/test.sh b/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/test.sh
index a8e9f312c47de..fba4a92c9a725 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-kubernetes-services/test.sh
@@ -26,17 +26,17 @@ source "tests/util/samples.sh"
 # Make sure default namespace is injected
 kubectl label namespace default istio-injection=enabled || true

-# Deploy sleep sample and set up variable pointing to it
-# Start the sleep sample
-startup_sleep_sample
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')
+# Deploy curl sample and set up variable pointing to it
+# Start the curl sample
+startup_curl_sample
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')

 # create namespace without istio
 snip__1

-# deploy sleep in without-istio namespace
+# deploy curl in without-istio namespace
 snip__2
-_wait_for_deployment without-istio sleep
+_wait_for_deployment without-istio curl
 snip__3

 # Create secret
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/index.md b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/index.md
index 400bbffcb65c3..d88a43193d2be 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/index.md
+++ b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/index.md
@@ -31,27 +31,27 @@ is that Istio can produce better telemetry and provide more routing control for

 * Setup Istio by following the instructions in the [Installation guide](/docs/setup/).

-* Start the [sleep]({{< github_tree >}}/samples/sleep) sample which will be used as a test source for external calls.
+* Start the [curl]({{< github_tree >}}/samples/curl) sample which will be used as a test source for external calls.
-  If you have enabled [automatic sidecar injection](/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection), deploy the `sleep` application:
+  If you have enabled [automatic sidecar injection](/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection), deploy the `curl` application:

   {{< text bash >}}
-  $ kubectl apply -f @samples/sleep/sleep.yaml@
+  $ kubectl apply -f @samples/curl/curl.yaml@
   {{< /text >}}

-  Otherwise, you have to manually inject the sidecar before deploying the `sleep` application:
+  Otherwise, you have to manually inject the sidecar before deploying the `curl` application:

   {{< text bash >}}
-  $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@)
+  $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@)
   {{< /text >}}

   Note that any pod that you can `exec` and `curl` from will do for the procedures below.

 * Create a shell variable to hold the name of the source pod for sending requests to external services.
-  If you used the [sleep]({{< github_tree >}}/samples/sleep) sample, run:
+  If you used the [curl]({{< github_tree >}}/samples/curl) sample, run:

   {{< text bash >}}
-  $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+  $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
   {{< /text >}}

 ## Configuring access to an external service
@@ -85,7 +85,7 @@ This time, however, use a single `ServiceEntry` to enable both HTTP and HTTPS ac
 1. Make a request to the external HTTP service:

    {{< text syntax=bash snip_id=curl_simple >}}
-   $ kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+   $ kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
    HTTP/1.1 301 Moved Permanently
    ...
    location: https://edition.cnn.com/politics
@@ -157,7 +157,7 @@ Both of these issues can be resolved by configuring Istio to perform TLS origina
 1. Send an HTTP request to `http://edition.cnn.com/politics`, as in the previous section:

    {{< text syntax=bash snip_id=curl_origination_http >}}
-   $ kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+   $ kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
    HTTP/1.1 200 OK
    ...
    {{< /text >}}
@@ -175,7 +175,7 @@ Both of these issues can be resolved by configuring Istio to perform TLS origina
 1. Note that the applications that used HTTPS to access the external service continue to work as before:

    {{< text syntax=bash snip_id=curl_origination_https >}}
-   $ kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
+   $ kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
    HTTP/2 200
    ...
    {{< /text >}}
@@ -210,7 +210,7 @@ service that requires mutual TLS. This example is considerably more involved bec
 1. Generate client and server certificates
 1. Deploy an external service that supports the mutual TLS protocol
-1. Configure the client (sleep pod) to use the credentials created in Step 1
+1. Configure the client (curl pod) to use the credentials created in Step 1

 Once this setup is complete, you can then configure the external traffic to go through the sidecar which will perform
 TLS origination.
@@ -386,7 +386,7 @@ to hold the configuration of the NGINX server:
    EOF
    {{< /text >}}

-### Configure the client (sleep pod)
+### Configure the client (curl pod)

 1. Create Kubernetes [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/)
    to hold the client's certificates:
@@ -401,11 +401,11 @@ to hold the configuration of the NGINX server:
    {{< boilerplate crl-tip >}}
    {{< /tip >}}

-1. Create required `RBAC` to make sure the secret created in the above step is accessible to the client pod, which is `sleep` in this case.
+1. Create required `RBAC` to make sure the secret created in the above step is accessible to the client pod, which is `curl` in this case.

    {{< text bash >}}
    $ kubectl create role client-credential-role --resource=secret --verb=list
-   $ kubectl create rolebinding client-credential-role-binding --role=client-credential-role --serviceaccount=default:sleep
+   $ kubectl create rolebinding client-credential-role-binding --role=client-credential-role --serviceaccount=default:curl
    {{< /text >}}

 ### Configure mutual TLS origination for egress traffic at sidecar
@@ -438,7 +438,7 @@ to hold the configuration of the NGINX server:
    spec:
      workloadSelector:
        matchLabels:
-         app: sleep
+         app: curl
      host: my-nginx.mesh-external.svc.cluster.local
      trafficPolicy:
        loadBalancer:
@@ -459,7 +459,7 @@ to hold the configuration of the NGINX server:
 1. Verify that the credential is supplied to the sidecar and active.

    {{< text bash >}}
-   $ istioctl proxy-config secret deploy/sleep | grep client-credential
+   $ istioctl proxy-config secret deploy/curl | grep client-credential
    kubernetes://client-credential            Cert Chain     ACTIVE     true           1                        2024-06-04T12:15:20Z     2023-06-05T12:15:20Z
    kubernetes://client-credential-cacert     Cert Chain     ACTIVE     true           10792363984292733914     2024-06-04T12:15:19Z     2023-06-05T12:15:19Z
    {{< /text >}}
@@ -467,7 +467,7 @@ to hold the configuration of the NGINX server:
 1. Send an HTTP request to `http://my-nginx.mesh-external.svc.cluster.local`:

    {{< text bash >}}
-   $ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
+   $ kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
@@ -475,10 +475,10 @@ to hold the configuration of the NGINX server:
    ...
    {{< /text >}}

-1. Check the log of the `sleep` pod for a line corresponding to our request.
+1. Check the log of the `curl` pod for a line corresponding to our request.

    {{< text bash >}}
-   $ kubectl logs -l app=sleep -c istio-proxy | grep 'my-nginx.mesh-external.svc.cluster.local'
+   $ kubectl logs -l app=curl -c istio-proxy | grep 'my-nginx.mesh-external.svc.cluster.local'
    {{< /text >}}

    You should see a line similar to the following:
@@ -518,9 +518,9 @@ to hold the configuration of the NGINX server:

 ## Cleanup common configuration

-Delete the `sleep` service and deployment:
+Delete the `curl` service and deployment:

 {{< text bash >}}
-$ kubectl delete service sleep
-$ kubectl delete deployment sleep
+$ kubectl delete service curl
+$ kubectl delete deployment curl
 {{< /text >}}
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/mtls_test.sh b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/mtls_test.sh
index 404c8ac8ab28b..fb2ab57ff1955 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/mtls_test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/mtls_test.sh
@@ -26,9 +26,9 @@ source "tests/util/samples.sh"
 # Make sure automatic sidecar injection is enabled
 kubectl label namespace default istio-injection=enabled || true

-# Deploy sleep sample
+# Deploy curl sample
 # Deploy sample and set up variable pointing to it
-startup_sleep_sample
+startup_curl_sample
 snip_before_you_begin_3

 # Generate Certificates for service outside the mesh to use for mTLS
@@ -50,9 +50,9 @@ snip_deploy_a_mutual_tls_server_5
 # Wait for nginx
 _wait_for_deployment mesh-external my-nginx

-# Configure sleep pod
-snip_configure_the_client_sleep_pod_1
-snip_configure_the_client_sleep_pod_2
+# Configure curl pod
+snip_configure_the_client_curl_pod_1
+snip_configure_the_client_curl_pod_2

 # Configure mTLS for egress traffic from sidecar to external service
 snip_configure_mutual_tls_origination_for_egress_traffic_at_sidecar_1
@@ -72,5 +72,5 @@ kubectl label namespace default istio-injection-
 snip_cleanup_the_mutual_tls_origination_configuration_1
 snip_cleanup_the_mutual_tls_origination_configuration_2
 snip_cleanup_the_mutual_tls_origination_configuration_3
-cleanup_sleep_sample
+cleanup_curl_sample
 kubectl label namespace default istio-injection-
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/snips.sh b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/snips.sh
index 7123f0d342157..260d428d92e31 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/snips.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/snips.sh
@@ -21,15 +21,15 @@
 ####################################################################################################

 snip_before_you_begin_1() {
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
 }

 snip_before_you_begin_2() {
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml)
 }

 snip_before_you_begin_3() {
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
 }

 snip_apply_simple() {
@@ -53,7 +53,7 @@ EOF
 }

 snip_curl_simple() {
-kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
 }

 ! IFS=$'\n' read -r -d '' snip_curl_simple_out <<\ENDSNIP

! IFS=$'\n' read -r -d '' snip_curl_simple_out <<\ENDSNIP
@@ -101,7 +101,7 @@ EOF
}

snip_curl_origination_http() {
-kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
+kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - http://edition.cnn.com/politics
}

! IFS=$'\n' read -r -d '' snip_curl_origination_http_out <<\ENDSNIP
@@ -110,7 +110,7 @@ HTTP/1.1 200 OK
ENDSNIP

snip_curl_origination_https() {
-kubectl exec "${SOURCE_POD}" -c sleep -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
+kubectl exec "${SOURCE_POD}" -c curl -- curl -sSL -o /dev/null -D - https://edition.cnn.com/politics
}

! IFS=$'\n' read -r -d '' snip_curl_origination_https_out <<\ENDSNIP
@@ -239,14 +239,14 @@ spec:
EOF
}

-snip_configure_the_client_sleep_pod_1() {
+snip_configure_the_client_curl_pod_1() {
kubectl create secret generic client-credential --from-file=tls.key=client.example.com.key \
--from-file=tls.crt=client.example.com.crt --from-file=ca.crt=example.com.crt
}

-snip_configure_the_client_sleep_pod_2() {
+snip_configure_the_client_curl_pod_2() {
kubectl create role client-credential-role --resource=secret --verb=list
-kubectl create rolebinding client-credential-role-binding --role=client-credential-role --serviceaccount=default:sleep
+kubectl create rolebinding client-credential-role-binding --role=client-credential-role --serviceaccount=default:curl
}

snip_configure_mutual_tls_origination_for_egress_traffic_at_sidecar_1() {
@@ -275,7 +275,7 @@ metadata:
spec:
  workloadSelector:
    matchLabels:
-      app: sleep
+      app: curl
  host: my-nginx.mesh-external.svc.cluster.local
  trafficPolicy:
    loadBalancer:
@@ -291,7 +291,7 @@ EOF
}

snip_configure_mutual_tls_origination_for_egress_traffic_at_sidecar_2() {
-istioctl proxy-config secret deploy/sleep | grep client-credential
+istioctl proxy-config secret deploy/curl | grep client-credential
}

! IFS=$'\n' read -r -d '' snip_configure_mutual_tls_origination_for_egress_traffic_at_sidecar_2_out <<\ENDSNIP
@@ -300,7 +300,7 @@ kubernetes://client-credential-cacert Cert Chain ACTIVE true
ENDSNIP

snip_configure_mutual_tls_origination_for_egress_traffic_at_sidecar_3() {
-kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})" -c sleep -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
+kubectl exec "$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})" -c curl -- curl -sS http://my-nginx.mesh-external.svc.cluster.local
}

! IFS=$'\n' read -r -d '' snip_configure_mutual_tls_origination_for_egress_traffic_at_sidecar_3_out <<\ENDSNIP
@@ -312,7 +312,7 @@ kubectl exec "$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}
ENDSNIP

snip_configure_mutual_tls_origination_for_egress_traffic_at_sidecar_4() {
-kubectl logs -l app=sleep -c istio-proxy | grep 'my-nginx.mesh-external.svc.cluster.local'
+kubectl logs -l app=curl -c istio-proxy | grep 'my-nginx.mesh-external.svc.cluster.local'
}

snip_cleanup_the_mutual_tls_origination_configuration_1() {
@@ -337,6 +337,6 @@ rm ./nginx.conf
}

snip_cleanup_common_configuration_1() {
-kubectl delete service sleep
-kubectl delete deployment sleep
+kubectl delete service curl
+kubectl delete deployment curl
}
diff --git a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/tls_test.sh b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/tls_test.sh
index 53c9b2385a09a..3bea8a3f80c68 100644
--- a/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/tls_test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/egress-tls-origination/tls_test.sh
@@ -27,7 +27,7 @@ source "tests/util/samples.sh"
kubectl label namespace default istio-injection=enabled || true

# Deploy sample and set up variable pointing to it
-startup_sleep_sample
+startup_curl_sample
snip_before_you_begin_3

# Confirm we can access plain HTTP
@@ -48,5 +48,5 @@ _verify_elided snip_curl_origination_https "$snip_curl_origination_https_out"

# @cleanup
snip_cleanup_the_tls_origination_configuration_1
-cleanup_sleep_sample
+cleanup_curl_sample
kubectl label namespace default istio-injection-
diff --git a/content/en/docs/tasks/traffic-management/egress/http-proxy/index.md b/content/en/docs/tasks/traffic-management/egress/http-proxy/index.md
index 72b009169f87f..a3778efe36b86 100644
--- a/content/en/docs/tasks/traffic-management/egress/http-proxy/index.md
+++ b/content/en/docs/tasks/traffic-management/egress/http-proxy/index.md
@@ -96,11 +96,11 @@ This example uses [Squid](http://www.squid-cache.org) but you can use any HTTPS
    EOF
    {{< /text >}}

-1. Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample in the `external` namespace to test traffic to the
+1. Deploy the [curl]({{< github_tree >}}/samples/curl) sample in the `external` namespace to test traffic to the
    proxy without Istio traffic control.

    {{< text bash >}}
-    $ kubectl apply -n external -f @samples/sleep/sleep.yaml@
+    $ kubectl apply -n external -f @samples/curl/curl.yaml@
    {{< /text >}}

1. Obtain the IP address of the proxy pod and define the `PROXY_IP` environment variable to store it:
@@ -116,10 +116,10 @@ This example uses [Squid](http://www.squid-cache.org) but you can use any HTTPS
    $ export PROXY_PORT=3128
    {{< /text >}}
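
    If `PROXY_IP` comes back empty, the Squid pod is probably not ready yet. Waiting for it explicitly (an illustrative extra step, not part of the original task) avoids a confusing failure in the next command:

    {{< text bash >}}
    $ kubectl -n external wait --for=condition=ready pod -l app=squid --timeout=60s
    {{< /text >}}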

-1. Send a request from the `sleep` pod in the `external` namespace to an external service via the proxy:
+1. Send a request from the `curl` pod in the `external` namespace to an external service via the proxy:

    {{< text bash >}}
-    $ kubectl exec "$(kubectl get pod -n external -l app=sleep -o jsonpath={.items..metadata.name})" -n external -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
+    $ kubectl exec "$(kubectl get pod -n external -l app=curl -o jsonpath={.items..metadata.name})" -n external -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
    <title>Wikipedia, the free encyclopedia</title>
    {{< /text >}}

@@ -163,11 +163,11 @@ Next, you must configure the traffic from the Istio-enabled pods to use the HTTP
    EOF
    {{< /text >}}

-1. Send a request from the `sleep` pod in the `default` namespace. Because the `sleep` pod has a sidecar,
+1. Send a request from the `curl` pod in the `default` namespace. Because the `curl` pod has a sidecar,
    Istio controls its traffic.

    {{< text bash >}}
-    $ kubectl exec "$SOURCE_POD" -c sleep -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
+    $ kubectl exec "$SOURCE_POD" -c curl -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
    <title>Wikipedia, the free encyclopedia</title>
    {{< /text >}}

@@ -198,16 +198,16 @@ not aware of the fact that the external proxy forwards the requests further.

## Cleanup

-1. Shutdown the [sleep]({{< github_tree >}}/samples/sleep) service:
+1. Shutdown the [curl]({{< github_tree >}}/samples/curl) service:

    {{< text bash >}}
-    $ kubectl delete -f @samples/sleep/sleep.yaml@
+    $ kubectl delete -f @samples/curl/curl.yaml@
    {{< /text >}}

-1. Shutdown the [sleep]({{< github_tree >}}/samples/sleep) service in the `external` namespace:
+1. Shutdown the [curl]({{< github_tree >}}/samples/curl) service in the `external` namespace:

    {{< text bash >}}
-    $ kubectl delete -f @samples/sleep/sleep.yaml@ -n external
+    $ kubectl delete -f @samples/curl/curl.yaml@ -n external
    {{< /text >}}

1. Shutdown the Squid proxy, remove the `ConfigMap` and the configuration file:
diff --git a/content/en/docs/tasks/traffic-management/egress/http-proxy/snips.sh b/content/en/docs/tasks/traffic-management/egress/http-proxy/snips.sh
index 66f5abeb8e90b..ea5547fc0fd24 100644
--- a/content/en/docs/tasks/traffic-management/egress/http-proxy/snips.sh
+++ b/content/en/docs/tasks/traffic-management/egress/http-proxy/snips.sh
@@ -78,7 +78,7 @@ EOF
}

snip_deploy_an_https_proxy_5() {
-kubectl apply -n external -f samples/sleep/sleep.yaml
+kubectl apply -n external -f samples/curl/curl.yaml
}

snip_deploy_an_https_proxy_6() {
@@ -90,7 +90,7 @@ export PROXY_PORT=3128
}

snip_deploy_an_https_proxy_8() {
-kubectl exec "$(kubectl get pod -n external -l app=sleep -o jsonpath={.items..metadata.name})" -n external -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
+kubectl exec "$(kubectl get pod -n external -l app=curl -o jsonpath={.items..metadata.name})" -n external -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
}

! IFS=$'\n' read -r -d '' snip_deploy_an_https_proxy_8_out <<\ENDSNIP
@@ -126,7 +126,7 @@ EOF
}

snip_configure_traffic_to_external_https_proxy_2() {
-kubectl exec "$SOURCE_POD" -c sleep -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
+kubectl exec "$SOURCE_POD" -c curl -- sh -c "HTTPS_PROXY=$PROXY_IP:$PROXY_PORT curl https://en.wikipedia.org/wiki/Main_Page" | grep -o "<title>.*</title>"
}

! IFS=$'\n' read -r -d '' snip_configure_traffic_to_external_https_proxy_2_out <<\ENDSNIP
@@ -150,11 +150,11 @@ kubectl exec "$(kubectl get pod -n external -l app=squid -o jsonpath={.items..me
ENDSNIP

snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
}

snip_cleanup_2() {
-kubectl delete -f samples/sleep/sleep.yaml -n external
+kubectl delete -f samples/curl/curl.yaml -n external
}

snip_cleanup_3() {
diff --git a/content/en/docs/tasks/traffic-management/egress/http-proxy/test.sh b/content/en/docs/tasks/traffic-management/egress/http-proxy/test.sh
index 59c640e7b02b4..2dea13ac1031c 100644
--- a/content/en/docs/tasks/traffic-management/egress/http-proxy/test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/http-proxy/test.sh
@@ -26,10 +26,10 @@ source "tests/util/samples.sh"
# Make sure default namespace is injected
kubectl label namespace default istio-injection=enabled || true

-# Deploy sleep sample and set up variable pointing to it
-# Start the sleep sample
-startup_sleep_sample
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}')
+# Deploy curl sample and set up variable pointing to it
+# Start the curl sample
+startup_curl_sample
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath='{.items[0].metadata.name}')

# create external ns
snip_deploy_an_https_proxy_1
@@ -42,9 +42,9 @@ snip_deploy_an_https_proxy_3
snip_deploy_an_https_proxy_4
_wait_for_deployment external squid

-# create sleep
+# create curl
snip_deploy_an_https_proxy_5
-_wait_for_deployment external sleep
+_wait_for_deployment external curl

snip_deploy_an_https_proxy_6
snip_deploy_an_https_proxy_7
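
A useful extra check for the HTTPS proxy task (illustrative only; the log path assumes the stock Squid image configuration): the proxied request should show up in Squid's access log, whichever client pod sent it.

{{< text bash >}}
$ kubectl exec "$(kubectl get pod -n external -l app=squid -o jsonpath={.items..metadata.name})" -n external -- tail /var/log/squid/access.log
{{< /text >}}

The output should include a `CONNECT en.wikipedia.org:443` entry.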

diff --git a/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/index.md b/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/index.md
index d5429c309aa78..1fa052680f96a 100644
--- a/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/index.md
+++ b/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/index.md
@@ -60,19 +60,19 @@ $ istioctl install --set profile=minimal -y \

{{< /tabset >}}

-* Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests.
+* Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests.
  If you have [automatic sidecar injection](/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection) enabled,
  run the following command to deploy the sample app:

    {{< text bash >}}
-    $ kubectl apply -f @samples/sleep/sleep.yaml@
+    $ kubectl apply -f @samples/curl/curl.yaml@
    {{< /text >}}

-  Otherwise, manually inject the sidecar before deploying the `sleep` application with the following command:
+  Otherwise, manually inject the sidecar before deploying the `curl` application with the following command:

    {{< text bash >}}
-    $ kubectl apply -f <(istioctl kube-inject -f @samples/sleep/sleep.yaml@)
+    $ kubectl apply -f <(istioctl kube-inject -f @samples/curl/curl.yaml@)
    {{< /text >}}

    {{< tip >}}
@@ -82,7 +82,7 @@ $ istioctl install --set profile=minimal -y \
* Set the `SOURCE_POD` environment variable to the name of your source pod:

    {{< text bash >}}
-    $ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+    $ export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
    {{< /text >}}

## Configure direct traffic to a wildcard host
@@ -125,7 +125,7 @@ the default) is used in the service entry below.
    [https://en.wikipedia.org](https://en.wikipedia.org) and [https://de.wikipedia.org](https://de.wikipedia.org):

    {{< text bash >}}
-    $ kubectl exec "$SOURCE_POD" -c sleep -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
+    $ kubectl exec "$SOURCE_POD" -c curl -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
    <title>Wikipedia, the free encyclopedia</title>
    <title>Wikipedia – Die freie Enzyklopädie</title>
    {{< /text >}}
@@ -314,7 +314,7 @@ EOF
    [https://en.wikipedia.org](https://en.wikipedia.org) and [https://de.wikipedia.org](https://de.wikipedia.org):

    {{< text bash >}}
-    $ kubectl exec "$SOURCE_POD" -c sleep -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
+    $ kubectl exec "$SOURCE_POD" -c curl -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
    <title>Wikipedia, the free encyclopedia</title>
    <title>Wikipedia – Die freie Enzyklopädie</title>
    {{< /text >}}
@@ -395,10 +395,10 @@ found in [routing egress traffic to wildcard destinations](/blog/2023/egress-sni

## Cleanup

-* Shutdown the [sleep]({{< github_tree >}}/samples/sleep) service:
+* Shutdown the [curl]({{< github_tree >}}/samples/curl) service:

    {{< text bash >}}
-    $ kubectl delete -f @samples/sleep/sleep.yaml@
+    $ kubectl delete -f @samples/curl/curl.yaml@
    {{< /text >}}

* Uninstall Istio from your cluster:
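
If the Wikipedia requests in this task fail, it can be worth confirming that the `ServiceEntry` created in the configuration step exists before suspecting anything else (an illustrative check, not part of the task):

{{< text bash >}}
$ kubectl get serviceentry
{{< /text >}}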

diff --git a/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/snips.sh b/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/snips.sh
index d4e90fc019659..8314a86d670fe 100644
--- a/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/snips.sh
+++ b/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/snips.sh
@@ -33,15 +33,15 @@ istioctl install --set profile=minimal -y \
}

snip_before_you_begin_3() {
-kubectl apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
}

snip_before_you_begin_4() {
-kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/curl/curl.yaml)
}

snip_before_you_begin_5() {
-export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+export SOURCE_POD=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
}

snip_configure_direct_traffic_to_a_wildcard_host_1() {
@@ -61,7 +61,7 @@ EOF
}

snip_configure_direct_traffic_to_a_wildcard_host_2() {
-kubectl exec "$SOURCE_POD" -c sleep -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
+kubectl exec "$SOURCE_POD" -c curl -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
}

! IFS=$'\n' read -r -d '' snip_configure_direct_traffic_to_a_wildcard_host_2_out <<\ENDSNIP
@@ -223,7 +223,7 @@ EOF
}

snip_configure_egress_gateway_traffic_to_a_wildcard_host_4() {
-kubectl exec "$SOURCE_POD" -c sleep -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
+kubectl exec "$SOURCE_POD" -c curl -- sh -c 'curl -s https://en.wikipedia.org/wiki/Main_Page | grep -o "<title>.*</title>"; curl -s https://de.wikipedia.org/wiki/Wikipedia:Hauptseite | grep -o "<title>.*</title>"'
}

! IFS=$'\n' read -r -d '' snip_configure_egress_gateway_traffic_to_a_wildcard_host_4_out <<\ENDSNIP
@@ -263,7 +263,7 @@ kubectl delete tlsroute forward-wikipedia-from-egress-gateway
}

snip_cleanup_1() {
-kubectl delete -f samples/sleep/sleep.yaml
+kubectl delete -f samples/curl/curl.yaml
}

snip_cleanup_2() {
diff --git a/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/test.sh b/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/test.sh
index 80b9ceef514c6..db69fe019392a 100755
--- a/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/test.sh
+++ b/content/en/docs/tasks/traffic-management/egress/wildcard-egress-hosts/test.sh
@@ -33,11 +33,11 @@ _wait_for_deployment istio-system istiod
kubectl label namespace default istio-injection=enabled --overwrite

snip_before_you_begin_3
-_wait_for_deployment default sleep
+_wait_for_deployment default curl
snip_before_you_begin_5

confirm_blocking() {
-kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -I https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -I https://edition.cnn.com | grep "HTTP/"
+kubectl exec "$SOURCE_POD" -c curl -- curl -sS -I https://www.google.com | grep "HTTP/"; kubectl exec "$SOURCE_POD" -c curl -- curl -sS -I https://edition.cnn.com | grep "HTTP/"
}

_verify_contains confirm_blocking "command terminated with exit code 35"
diff --git a/content/en/docs/tasks/traffic-management/fault-injection/test.sh b/content/en/docs/tasks/traffic-management/fault-injection/test.sh
index ff7ab8ae7aee6..0ede04c3cc24a 100644
--- a/content/en/docs/tasks/traffic-management/fault-injection/test.sh
+++ b/content/en/docs/tasks/traffic-management/fault-injection/test.sh
@@ -32,7 +32,7 @@ get_bookinfo_productpage_jason() {
}

kubectl label namespace default istio-injection=enabled --overwrite
-startup_sleep_sample # needed for sending test requests with curl
+startup_curl_sample # needed for sending test requests with curl

# launch the bookinfo app
startup_bookinfo_sample
@@ -71,5 +71,5 @@ _verify_not_contains get_bookinfo_productpage "Ratings service is currently unav
# @cleanup
snip_cleanup_1
cleanup_bookinfo_sample
-cleanup_sleep_sample
+cleanup_curl_sample
kubectl label namespace default istio-injection-
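
Because a rename like this touches dozens of files, a repo-wide search after applying the patch is a cheap way to confirm no stale references remain (a sketch, assuming the directory layout used above):

{{< text bash >}}
$ grep -rn 'samples/sleep\|app=sleep' content/ tests/ || echo "no stale references"
{{< /text >}}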

diff --git a/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/index.md b/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/index.md
index 7da16e47edf4c..f4e1ddd57c431 100644
--- a/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/index.md
+++ b/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/index.md
@@ -199,12 +199,12 @@ EOF
## Verification

Now that the httpbin server is deployed and configured, bring up two clients to test the end to end connectivity from both inside and outside of the mesh:
-1. An internal client (sleep) in the same namespace (test) as the httpbin service, with sidecar injected.
-1. An external client (sleep) in the default namespace (i.e., outside of the service mesh).
+1. An internal client (curl) in the same namespace (test) as the httpbin service, with sidecar injected.
+1. An external client (curl) in the default namespace (i.e., outside of the service mesh).

{{< text bash >}}
-$ kubectl apply -f samples/sleep/sleep.yaml
-$ kubectl -n test apply -f samples/sleep/sleep.yaml
+$ kubectl apply -f samples/curl/curl.yaml
+$ kubectl -n test apply -f samples/curl/curl.yaml
{{< /text >}}

Run the following commands to verify that everything is up and running, and configured correctly.
@@ -212,21 +212,21 @@ Run the following commands to verify that everything is up and running, and conf
{{< text bash >}}
$ kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
-sleep-557747455f-xx88g   1/1     Running   0          4m14s
+curl-557747455f-xx88g    1/1     Running   0          4m14s
{{< /text >}}

{{< text bash >}}
$ kubectl get pods -n test
NAME                       READY   STATUS    RESTARTS   AGE
httpbin-5bbdbd6588-z9vbs   2/2     Running   0          8m44s
-sleep-557747455f-brzf6     2/2     Running   0          6m57s
+curl-557747455f-brzf6      2/2     Running   0          6m57s
{{< /text >}}

{{< text bash >}}
$ kubectl get svc -n test
NAME      TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE
httpbin   ClusterIP   10.100.78.113   <none>        8443/TCP,8080/TCP   10m
-sleep     ClusterIP   10.110.35.153   <none>        80/TCP              8m49s
+curl      ClusterIP   10.110.35.153   <none>        80/TCP              8m49s
{{< /text >}}

In the following command, replace `httpbin-5bbdbd6588-z9vbs` with the name of your httpbin pod.
@@ -243,8 +243,8 @@ file-root:/etc/istio/tls-ca-certs/ca.crt Cert Cha

### Verify internal mesh connectivity on port 8080

{{< text bash >}}
-$ export INTERNAL_CLIENT=$(kubectl -n test get pod -l app=sleep -o jsonpath={.items..metadata.name})
-$ kubectl -n test exec "${INTERNAL_CLIENT}" -c sleep -- curl -IsS "http://httpbin:8080/status/200"
+$ export INTERNAL_CLIENT=$(kubectl -n test get pod -l app=curl -o jsonpath={.items..metadata.name})
+$ kubectl -n test exec "${INTERNAL_CLIENT}" -c curl -- curl -IsS "http://httpbin:8080/status/200"
HTTP/1.1 200 OK
server: envoy
date: Mon, 24 Oct 2022 09:04:52 GMT
@@ -257,19 +257,19 @@ x-envoy-upstream-service-time: 5

### Verify external to internal mesh connectivity on port 8443

-To verify mTLS traffic from an external client, first copy the CA certificate and client certificate/key to the sleep client running in the default namespace.
+To verify mTLS traffic from an external client, first copy the CA certificate and client certificate/key to the curl client running in the default namespace.

{{< text bash >}}
-$ export EXTERNAL_CLIENT=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+$ export EXTERNAL_CLIENT=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
$ kubectl cp client.test.svc.cluster.local.key default/"${EXTERNAL_CLIENT}":/tmp/
$ kubectl cp client.test.svc.cluster.local.crt default/"${EXTERNAL_CLIENT}":/tmp/
$ kubectl cp example.com.crt default/"${EXTERNAL_CLIENT}":/tmp/ca.crt
{{< /text >}}

-Now that the certificates are available for the external sleep client, you can verify connectivity from it to the internal httpbin service using the following command.
+Now that the certificates are available for the external curl client, you can verify connectivity from it to the internal httpbin service using the following command.

{{< text bash >}}
-$ kubectl exec "${EXTERNAL_CLIENT}" -c sleep -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "https://httpbin.test.svc.cluster.local:8443/status/200"
+$ kubectl exec "${EXTERNAL_CLIENT}" -c curl -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "https://httpbin.test.svc.cluster.local:8443/status/200"
server: istio-envoy
date: Mon, 24 Oct 2022 09:05:31 GMT
content-type: text/html; charset=utf-8
@@ -283,7 +283,7 @@ x-envoy-decorator-operation: ingress-sidecar.test:9080/*

In addition to verifying external mTLS connectivity via the ingress port 8443, it is also important to verify that port 8080 does not accept any external mTLS traffic.

{{< text bash >}}
-$ kubectl exec "${EXTERNAL_CLIENT}" -c sleep -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "http://httpbin.test.svc.cluster.local:8080/status/200"
+$ kubectl exec "${EXTERNAL_CLIENT}" -c curl -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "http://httpbin.test.svc.cluster.local:8080/status/200"
curl: (56) Recv failure: Connection reset by peer
command terminated with exit code 56
{{< /text >}}
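
Exit code 56 (connection reset during receive) is the expected proof that the plain port rejects external traffic. In a script it can help to make that expectation explicit; appending a fallback message (an illustrative tweak, not part of the original doc) does so:

{{< text bash >}}
$ kubectl exec "${EXTERNAL_CLIENT}" -c curl -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "http://httpbin.test.svc.cluster.local:8080/status/200" || echo "rejected as expected"
{{< /text >}}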

@@ -294,11 +294,11 @@
    {{< text bash >}}
    $ kubectl delete secret httpbin-mtls-termination httpbin-mtls-termination-cacert -n test
-    $ kubectl delete service httpbin sleep -n test
-    $ kubectl delete deployment httpbin sleep -n test
+    $ kubectl delete service httpbin curl -n test
+    $ kubectl delete deployment httpbin curl -n test
    $ kubectl delete namespace test
-    $ kubectl delete service sleep
-    $ kubectl delete deployment sleep
+    $ kubectl delete service curl
+    $ kubectl delete deployment curl
    {{< /text >}}

1. Delete the certificates and private keys:
diff --git a/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/snips.sh b/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/snips.sh
index f60428f82684c..8cdc0d17fb17d 100644
--- a/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/snips.sh
+++ b/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/snips.sh
@@ -167,8 +167,8 @@ EOF
}

snip_verification_1() {
-kubectl apply -f samples/sleep/sleep.yaml
-kubectl -n test apply -f samples/sleep/sleep.yaml
+kubectl apply -f samples/curl/curl.yaml
+kubectl -n test apply -f samples/curl/curl.yaml
}

snip_verification_2() {
@@ -177,7 +177,7 @@ kubectl get pods
! IFS=$'\n' read -r -d '' snip_verification_2_out <<\ENDSNIP
NAME                     READY   STATUS    RESTARTS   AGE
-sleep-557747455f-xx88g   1/1     Running   0          4m14s
+curl-557747455f-xx88g    1/1     Running   0          4m14s
ENDSNIP

snip_verification_3() {
@@ -187,7 +187,7 @@ kubectl get pods -n test
! IFS=$'\n' read -r -d '' snip_verification_3_out <<\ENDSNIP
NAME                       READY   STATUS    RESTARTS   AGE
httpbin-5bbdbd6588-z9vbs   2/2     Running   0          8m44s
-sleep-557747455f-brzf6     2/2     Running   0          6m57s
+curl-557747455f-brzf6      2/2     Running   0          6m57s
ENDSNIP

snip_verification_4() {
@@ -197,7 +197,7 @@ kubectl get svc -n test
! IFS=$'\n' read -r -d '' snip_verification_4_out <<\ENDSNIP
NAME      TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE
httpbin   ClusterIP   10.100.78.113   <none>        8443/TCP,8080/TCP   10m
-sleep     ClusterIP   10.110.35.153   <none>        80/TCP              8m49s
+curl      ClusterIP   10.110.35.153   <none>        80/TCP              8m49s
ENDSNIP

snip_verification_5() {
@@ -213,8 +213,8 @@ file-root:/etc/istio/tls-ca-certs/ca.crt Cert Cha
ENDSNIP

snip_verify_internal_mesh_connectivity_on_port_8080_1() {
-export INTERNAL_CLIENT=$(kubectl -n test get pod -l app=sleep -o jsonpath={.items..metadata.name})
-kubectl -n test exec "${INTERNAL_CLIENT}" -c sleep -- curl -IsS "http://httpbin:8080/status/200"
+export INTERNAL_CLIENT=$(kubectl -n test get pod -l app=curl -o jsonpath={.items..metadata.name})
+kubectl -n test exec "${INTERNAL_CLIENT}" -c curl -- curl -IsS "http://httpbin:8080/status/200"
}

! IFS=$'\n' read -r -d '' snip_verify_internal_mesh_connectivity_on_port_8080_1_out <<\ENDSNIP
@@ -229,14 +229,14 @@ x-envoy-upstream-service-time: 5
ENDSNIP

snip_verify_external_to_internal_mesh_connectivity_on_port_8443_1() {
-export EXTERNAL_CLIENT=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+export EXTERNAL_CLIENT=$(kubectl get pod -l app=curl -o jsonpath={.items..metadata.name})
kubectl cp client.test.svc.cluster.local.key default/"${EXTERNAL_CLIENT}":/tmp/
kubectl cp client.test.svc.cluster.local.crt default/"${EXTERNAL_CLIENT}":/tmp/
kubectl cp example.com.crt default/"${EXTERNAL_CLIENT}":/tmp/ca.crt
}

snip_verify_external_to_internal_mesh_connectivity_on_port_8443_2() {
-kubectl exec "${EXTERNAL_CLIENT}" -c sleep -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "https://httpbin.test.svc.cluster.local:8443/status/200"
+kubectl exec "${EXTERNAL_CLIENT}" -c curl -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "https://httpbin.test.svc.cluster.local:8443/status/200"
}

! IFS=$'\n' read -r -d '' snip_verify_external_to_internal_mesh_connectivity_on_port_8443_2_out <<\ENDSNIP
@@ -251,7 +251,7 @@ x-envoy-decorator-operation: ingress-sidecar.test:9080/*
ENDSNIP

snip_verify_external_to_internal_mesh_connectivity_on_port_8443_3() {
-kubectl exec "${EXTERNAL_CLIENT}" -c sleep -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "http://httpbin.test.svc.cluster.local:8080/status/200"
+kubectl exec "${EXTERNAL_CLIENT}" -c curl -- curl -IsS --cacert /tmp/ca.crt --key /tmp/client.test.svc.cluster.local.key --cert /tmp/client.test.svc.cluster.local.crt -HHost:httpbin.test.svc.cluster.local "http://httpbin.test.svc.cluster.local:8080/status/200"
}

! IFS=$'\n' read -r -d '' snip_verify_external_to_internal_mesh_connectivity_on_port_8443_3_out <<\ENDSNIP
@@ -261,11 +261,11 @@ ENDSNIP

snip_cleanup_the_mutual_tls_termination_example_1() {
kubectl delete secret httpbin-mtls-termination httpbin-mtls-termination-cacert -n test
-kubectl delete service httpbin sleep -n test
-kubectl delete deployment httpbin sleep -n test
+kubectl delete service httpbin curl -n test
+kubectl delete deployment httpbin curl -n test
kubectl delete namespace test
-kubectl delete service sleep
-kubectl delete deployment sleep
+kubectl delete service curl
+kubectl delete deployment curl
}

snip_cleanup_the_mutual_tls_termination_example_2() {
diff --git a/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/test.sh b/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/test.sh
index cd1719630fde8..6217fa05ca1ee 100644
--- a/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/test.sh
+++ b/content/en/docs/tasks/traffic-management/ingress/ingress-sidecar-tls-termination/test.sh
@@ -44,8 +44,8 @@ _wait_for_deployment test httpbin

# deploy test applications
snip_verification_1
-_wait_for_deployment test sleep
-_wait_for_deployment default sleep
+_wait_for_deployment test curl
+_wait_for_deployment default curl

# verification
_verify_first_line snip_verify_internal_mesh_connectivity_on_port_8080_1 "HTTP/1.1 200 OK"
diff --git a/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/index.md b/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/index.md
index eb97e8417d7cb..945d56060d168 100644
--- a/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/index.md
+++ b/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/index.md
@@ -130,13 +130,13 @@ $ kubectl apply --context="${CTX_R3_Z4}" -n sample \
    -f helloworld-region3.zone4.yaml
{{< /text >}}

-## Deploy `Sleep`
+## Deploy `curl`

-Deploy the `Sleep` application to `region1` `zone1`:
+Deploy the `curl` application to `region1` `zone1`:

{{< text bash >}}
$ kubectl apply --context="${CTX_R1_Z1}" \
-    -f @samples/sleep/sleep.yaml@ -n sample
+    -f @samples/curl/curl.yaml@ -n sample
{{< /text >}}

## Wait for `HelloWorld` pods
diff --git a/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/snips.sh b/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/snips.sh
index 45536d2d89fae..9516d2dc4e089 100644
--- a/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/snips.sh
+++ b/content/en/docs/tasks/traffic-management/locality-load-balancing/before-you-begin/snips.sh
@@ -66,9 +66,9 @@ kubectl apply --context="${CTX_R3_Z4}" -n sample \
-f helloworld-region3.zone4.yaml
}

-snip_deploy_sleep_1() {
+snip_deploy_curl_1() {
kubectl apply --context="${CTX_R1_Z1}" \
-    -f samples/sleep/sleep.yaml -n sample
+    -f samples/curl/curl.yaml -n sample
}

snip_wait_for_helloworld_pods_1() {
diff --git a/content/en/docs/tasks/traffic-management/locality-load-balancing/common.sh b/content/en/docs/tasks/traffic-management/locality-load-balancing/common.sh
index 5ea45ee9fa502..9752cc65649a7 100644
--- a/content/en/docs/tasks/traffic-management/locality-load-balancing/common.sh
+++ b/content/en/docs/tasks/traffic-management/locality-load-balancing/common.sh
@@ -58,15 +58,15 @@ function deploy_services
    snip_deploy_helloworld_4
    snip_deploy_helloworld_5

-    echo "Deploying Sleep"
-    # Make a copy of sleep.yaml.
-    cp "samples/sleep/sleep.yaml" "samples/sleep/sleep.yaml.original"
-    # Add the locality label to sleep.yaml
-    add_locality_label "samples/sleep/sleep.yaml" "region1.zone1"
-    # Deploy sleep
-    snip_deploy_sleep_1
+    echo "Deploying curl"
+    # Make a copy of curl.yaml.
+    cp "samples/curl/curl.yaml" "samples/curl/curl.yaml.original"
+    # Add the locality label to curl.yaml
+    add_locality_label "samples/curl/curl.yaml" "region1.zone1"
+    # Deploy curl
+    snip_deploy_curl_1
    # Restore the original file.
-    mv -f "samples/sleep/sleep.yaml.original" "samples/sleep/sleep.yaml"
+    mv -f "samples/curl/curl.yaml.original" "samples/curl/curl.yaml"

    echo "Waiting for HelloWorld pods"
    _verify_like snip_wait_for_helloworld_pods_1 "$snip_wait_for_helloworld_pods_1_out"
diff --git a/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/index.md b/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/index.md
index 878c7586cd265..aaae8ea21e6ec 100644
--- a/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/index.md
+++ b/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/index.md
@@ -11,7 +11,7 @@ Follow this guide to configure the distribution of traffic across localities.
Before proceeding, be sure to complete the steps under
[before you begin](/docs/tasks/traffic-management/locality-load-balancing/before-you-begin).

-In this task, you will use the `Sleep` pod in `region1` `zone1` as the source of
+In this task, you will use the `curl` pod in `region1` `zone1` as the source of
requests to the `HelloWorld` service.
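
Locality load balancing relies on the standard Kubernetes topology labels. If the observed behavior does not match the configured distribution, it can be worth confirming the nodes actually carry those labels (an illustrative check, not part of this task):

{{< text bash >}}
$ kubectl get nodes -L topology.kubernetes.io/region -L topology.kubernetes.io/zone
{{< /text >}}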

You will configure Istio with the following distribution across localities:
@@ -61,12 +61,12 @@ EOF

## Verify the distribution

-Call the `HelloWorld` service from the `Sleep` pod:
+Call the `HelloWorld` service from the `curl` pod:

{{< text bash >}}
-$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
{{< /text >}}
diff --git a/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/snips.sh b/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/snips.sh
index f39f0c8290c6b..997109500cc62 100644
--- a/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/snips.sh
+++ b/content/en/docs/tasks/traffic-management/locality-load-balancing/distribute/snips.sh
@@ -46,8 +46,8 @@ EOF
}

snip_verify_the_distribution_1() {
-kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
}
diff --git a/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/index.md b/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/index.md
index a3128bf7e986c..9f1e6e637f274 100644
--- a/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/index.md
+++ b/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/index.md
@@ -11,7 +11,7 @@ Follow this guide to configure your mesh for locality failover.
Before proceeding, be sure to complete the steps under
[before you begin](/docs/tasks/traffic-management/locality-load-balancing/before-you-begin).

-In this task, you will use the `Sleep` pod in `region1.zone1` as the source of
+In this task, you will use the `curl` pod in `region1.zone1` as the source of
requests to the `HelloWorld` service. You will then trigger failures that will
cause failover between localities in the following sequence:
@@ -22,7 +22,7 @@ cause failover between localities in the following sequence:

Internally, [Envoy priorities](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/priority.html)
are used to control failover. These priorities will be assigned as follows for
-traffic originating from the `Sleep` pod (in `region1` `zone1`):
+traffic originating from the `curl` pod (in `region1` `zone1`):

Priority | Locality | Details
-------- | -------- | -------
@@ -81,12 +81,12 @@ EOF

## Verify traffic stays in `region1.zone1`

-Call the `HelloWorld` service from the `Sleep` pod:
+Call the `HelloWorld` service from the `curl` pod:

{{< text bash >}}
-$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
Hello version: region1.zone1, instance: helloworld-region1.zone1-86f77cd7b-cpxhv
{{< /text >}}
@@ -108,12 +108,12 @@ $ kubectl --context="${CTX_R1_Z1}" exec \
    -n sample -c istio-proxy -- curl -sSL -X POST 127.0.0.1:15000/drain_listeners
{{< /text >}}

-Call the `HelloWorld` service from the `Sleep` pod:
+Call the `HelloWorld` service from the `curl` pod:

{{< text bash >}}
-$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
Hello version: region1.zone2, instance: helloworld-region1.zone2-86f77cd7b-cpxhv
{{< /text >}}
@@ -134,12 +134,12 @@ $ kubectl --context="${CTX_R1_Z2}" exec \
    -n sample -c istio-proxy -- curl -sSL -X POST 127.0.0.1:15000/drain_listeners
{{< /text >}}

-Call the `HelloWorld` service from the `Sleep` pod:
+Call the `HelloWorld` service from the `curl` pod:

{{< text bash >}}
-$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
Hello version: region2.zone3, instance: helloworld-region2.zone3-86f77cd7b-cpxhv
{{< /text >}}
@@ -160,12 +160,12 @@ $ kubectl --context="${CTX_R2_Z3}" exec \
    -n sample -c istio-proxy -- curl -sSL -X POST 127.0.0.1:15000/drain_listeners
{{< /text >}}

-Call the `HelloWorld` service from the `Sleep` pod:
+Call the `HelloWorld` service from the `curl` pod:

{{< text bash >}}
-$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+$ kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
Hello version: region3.zone4, instance: helloworld-region3.zone4-86f77cd7b-cpxhv
{{< /text >}}
diff --git a/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/snips.sh b/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/snips.sh
index cae0a71d42c9e..75665d877feb3 100644
--- a/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/snips.sh
+++ b/content/en/docs/tasks/traffic-management/locality-load-balancing/failover/snips.sh
@@ -47,9 +47,9 @@ EOF
}

snip_verify_traffic_stays_in_region1zone1_1() {
-kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
}

@@ -65,9 +65,9 @@ kubectl --context="${CTX_R1_Z1}" exec \
}

snip_failover_to_region1zone2_2() {
-kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
}

@@ -83,9 +83,9 @@ kubectl --context="${CTX_R1_Z2}" exec \
}

snip_failover_to_region2zone3_2() {
-kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
}

@@ -101,9 +101,9 @@ kubectl --context="${CTX_R2_Z3}" exec \
}

snip_failover_to_region3zone4_2() {
-kubectl exec --context="${CTX_R1_Z1}" -n sample -c sleep \
+kubectl exec --context="${CTX_R1_Z1}" -n sample -c curl \
    "$(kubectl get pod --context="${CTX_R1_Z1}" -n sample -l \
-    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
+    app=curl -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sSL helloworld.sample:5000/hello
}
diff --git a/content/en/docs/tasks/traffic-management/mirroring/index.md b/content/en/docs/tasks/traffic-management/mirroring/index.md
index 65fdfe308f0b3..4414bc630a52f 100644
--- a/content/en/docs/tasks/traffic-management/mirroring/index.md
+++ b/content/en/docs/tasks/traffic-management/mirroring/index.md
@@ -104,26 +104,26 @@ you will apply a rule to mirror a portion of traffic to `v2`.
    EOF
    {{< /text >}}

-1. Deploy the `sleep` workload you'll use to send requests to the `httpbin` service:
+1. Deploy the `curl` workload you'll use to send requests to the `httpbin` service:

    {{< text bash >}}
    $ cat <<EOF ...
...
    {{< text bash >}}
-    $ kubectl exec deploy/sleep -c sleep -- curl -sS http://httpbin:8000/headers
+    $ kubectl exec deploy/curl -c curl -- curl -sS http://httpbin:8000/headers
    {
      "headers": {
        "Accept": "*/*",
@@ -344,7 +344,7 @@ In this step, you will change that behavior so that all traffic goes to `v1`.
1. Send the traffic:

    {{< text bash >}}
-    $ kubectl exec deploy/sleep -c sleep -- curl -sS http://httpbin:8000/headers
+    $ kubectl exec deploy/curl -c curl -- curl -sS http://httpbin:8000/headers
    {{< /text >}}

    Now, you should see access logging for both `v1` and `v2`. The access logs
@@ -387,9 +387,9 @@ In this step, you will change that behavior so that all traffic goes to `v1`.

{{< /tabset >}}

-1. Delete `httpbin` and `sleep` deployments and `httpbin` service:
+1. Delete `httpbin` and `curl` deployments and `httpbin` service:

    {{< text bash >}}
-    $ kubectl delete deploy httpbin-v1 httpbin-v2 sleep
+    $ kubectl delete deploy httpbin-v1 httpbin-v2 curl
    $ kubectl delete svc httpbin
    {{< /text >}}
diff --git a/content/en/docs/tasks/traffic-management/mirroring/snips.sh b/content/en/docs/tasks/traffic-management/mirroring/snips.sh
index b5fa7e556f3b3..37af0798e1f2a 100644
--- a/content/en/docs/tasks/traffic-management/mirroring/snips.sh
+++ b/content/en/docs/tasks/traffic-management/mirroring/snips.sh
@@ -100,19 +100,19 @@ cat <<EOF ...
...
diff --git a/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/index.md b/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/index.md
...
    {{< /text >}}

-1. Deploy the [sleep]({{< github_tree >}}/samples/sleep) sample app to use as a test source for sending requests.
+1. Deploy the [curl]({{< github_tree >}}/samples/curl) sample app to use as a test source for sending requests.

    {{< text bash >}}
-    $ kubectl apply -f @samples/sleep/sleep.yaml@ -n istio-io-tcp-traffic-shifting
+    $ kubectl apply -f @samples/curl/curl.yaml@ -n istio-io-tcp-traffic-shifting
    {{< /text >}}

1. Deploy the `v1` and `v2` versions of the `tcp-echo` microservice.
@@ -100,9 +100,9 @@ $ export TCP_INGRESS_PORT=$(kubectl get gtw tcp-echo-gateway -n istio-io-tcp-tra
3) Confirm that the `tcp-echo` service is up and running by sending some TCP traffic.

    {{< text bash >}}
-    $ export SLEEP=$(kubectl get pod -l app=sleep -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
+    $ export CURL=$(kubectl get pod -l app=curl -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
    $ for i in {1..20}; do \
-    kubectl exec "$SLEEP" -c sleep -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
+    kubectl exec "$CURL" -c curl -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
    done
    one Mon Nov 12 23:24:57 UTC 2022
    one Mon Nov 12 23:25:00 UTC 2022
@@ -210,9 +210,9 @@ spec:
6) Send some more TCP traffic to the `tcp-echo` microservice.

    {{< text bash >}}
-    $ export SLEEP=$(kubectl get pod -l app=sleep -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
+    $ export CURL=$(kubectl get pod -l app=curl -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
    $ for i in {1..20}; do \
-    kubectl exec "$SLEEP" -c sleep -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
+    kubectl exec "$CURL" -c curl -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
    done
    one Mon Nov 12 23:38:45 UTC 2022
    two Mon Nov 12 23:38:47 UTC 2022
@@ -269,10 +269,10 @@ $ kubectl delete -f @samples/tcp-echo/gateway-api/tcp-echo-all-v1.yaml@ -n istio

{{< /tabset >}}

-2) Remove the `sleep` sample, `tcp-echo` application and test namespace:
+2) Remove the `curl` sample, `tcp-echo` application and test namespace:

    {{< text bash >}}
-    $ kubectl delete -f @samples/sleep/sleep.yaml@ -n istio-io-tcp-traffic-shifting
+    $ kubectl delete -f @samples/curl/curl.yaml@ -n istio-io-tcp-traffic-shifting
    $ kubectl delete -f @samples/tcp-echo/tcp-echo-services.yaml@ -n istio-io-tcp-traffic-shifting
    $ kubectl delete namespace istio-io-tcp-traffic-shifting
    {{< /text >}}
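
To see how the 20 requests split between `v1` and `v2` at a glance, the same loop can be piped through a counter (a convenience wrapper, not part of the original task; it relies on each response line starting with the echo prefix `one` or `two`):

{{< text bash >}}
$ for i in {1..20}; do \
kubectl exec "$CURL" -c curl -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
done | cut -d' ' -f1 | sort | uniq -c
{{< /text >}}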

diff --git a/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/snips.sh b/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/snips.sh
index 9467327931c5e..391cdddf14b0a 100644
--- a/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/snips.sh
+++ b/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/snips.sh
@@ -26,7 +26,7 @@ kubectl create namespace istio-io-tcp-traffic-shifting
}

snip_set_up_the_test_environment_2() {
-kubectl apply -f samples/sleep/sleep.yaml -n istio-io-tcp-traffic-shifting
+kubectl apply -f samples/curl/curl.yaml -n istio-io-tcp-traffic-shifting
}

snip_set_up_the_test_environment_3() {
@@ -48,9 +48,9 @@ export TCP_INGRESS_PORT=$(kubectl get gtw tcp-echo-gateway -n istio-io-tcp-traff
}

snip_apply_weightbased_tcp_routing_4() {
-export SLEEP=$(kubectl get pod -l app=sleep -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
+export CURL=$(kubectl get pod -l app=curl -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
for i in {1..20}; do \
-kubectl exec "$SLEEP" -c sleep -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
+kubectl exec "$CURL" -c curl -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
done
}

@@ -134,9 +134,9 @@ spec:
ENDSNIP

snip_apply_weightbased_tcp_routing_9() {
-export SLEEP=$(kubectl get pod -l app=sleep -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
+export CURL=$(kubectl get pod -l app=curl -n istio-io-tcp-traffic-shifting -o jsonpath={.items..metadata.name})
for i in {1..20}; do \
-kubectl exec "$SLEEP" -c sleep -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
+kubectl exec "$CURL" -c curl -n istio-io-tcp-traffic-shifting -- sh -c "(date; sleep 1) | nc $INGRESS_HOST $TCP_INGRESS_PORT"; \
done
}

@@ -163,7 +163,7 @@ kubectl delete -f samples/tcp-echo/gateway-api/tcp-echo-all-v1.yaml -n istio-io-
}

snip_cleanup_3() {
-kubectl delete -f samples/sleep/sleep.yaml -n istio-io-tcp-traffic-shifting
+kubectl delete -f samples/curl/curl.yaml -n istio-io-tcp-traffic-shifting
kubectl delete -f samples/tcp-echo/tcp-echo-services.yaml -n istio-io-tcp-traffic-shifting
kubectl delete namespace istio-io-tcp-traffic-shifting
}
diff --git a/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/test.sh b/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/test.sh
index c18bed84d7482..098c3d30fd2df 100644
--- a/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/test.sh
+++ b/content/en/docs/tasks/traffic-management/tcp-traffic-shifting/test.sh
@@ -26,7 +26,7 @@ GATEWAY_API="${GATEWAY_API:-false}"
# create a new namespace for testing purposes
snip_set_up_the_test_environment_1

-# start the sleep sample
+# start the curl sample
snip_set_up_the_test_environment_2

# start the v1 and v2 versions of the echo service
@@ -35,7 +35,7 @@ snip_set_up_the_test_environment_3
# wait for deployments to start
_wait_for_deployment istio-io-tcp-traffic-shifting tcp-echo-v1
_wait_for_deployment istio-io-tcp-traffic-shifting tcp-echo-v2
-_wait_for_deployment istio-io-tcp-traffic-shifting sleep
+_wait_for_deployment istio-io-tcp-traffic-shifting curl

# Route all traffic to echo v1
if [ "$GATEWAY_API" == "true" ]; then
diff --git a/content/en/docs/tasks/traffic-management/traffic-shifting/gtwapi_test.sh b/content/en/docs/tasks/traffic-management/traffic-shifting/gtwapi_test.sh
index 4781f63b45e1e..3723e2a2fba5c 100644
--- a/content/en/docs/tasks/traffic-management/traffic-shifting/gtwapi_test.sh
+++ b/content/en/docs/tasks/traffic-management/traffic-shifting/gtwapi_test.sh
@@ -25,6 +25,6 @@ source "content/en/docs/tasks/traffic-management/traffic-shifting/test.sh"
source "tests/util/samples.sh"
snip_gtw_cleanup
cleanup_bookinfo_sample
-cleanup_sleep_sample
+cleanup_curl_sample
kubectl label namespace default istio-injection-
remove_gateway_api_crds
diff --git a/content/en/docs/tasks/traffic-management/traffic-shifting/test.sh b/content/en/docs/tasks/traffic-management/traffic-shifting/test.sh
index 433ab6b2484d1..63a824b03212f 100644
--- a/content/en/docs/tasks/traffic-management/traffic-shifting/test.sh
+++ b/content/en/docs/tasks/traffic-management/traffic-shifting/test.sh
@@ -26,8 +26,8 @@ source "tests/util/samples.sh"
# @setup profile=default
kubectl label namespace default istio-injection=enabled --overwrite
-startup_sleep_sample # needed for sending test requests with curl
-SLEEP_POD=$(kubectl get pod -l app=sleep -n default -o 'jsonpath={.items..metadata.name}')
+startup_curl_sample # needed for sending test requests with curl
+CURL_POD=$(kubectl get pod -l app=curl -n default -o 'jsonpath={.items..metadata.name}')

# launch the bookinfo app
startup_bookinfo_sample
@@ -43,7 +43,7 @@ function reviews_v3_traffic_percentage() {
    local v3_count=0
    local v3_search_string="glyphicon glyphicon-star" # search string present in reviews_v3 response html
    for ((i = 1; i <= total_request_count; i++)); do
-        if (kubectl exec "${SLEEP_POD}" -c sleep -n "default" -- curl -sS http://productpage:9080/productpage | grep -q "$v3_search_string"); then
+        if (kubectl exec "${CURL_POD}" -c curl -n "default" -- curl -sS http://productpage:9080/productpage | grep -q "$v3_search_string"); then
            v3_count=$((v3_count + 1))
        fi
    done
@@ -127,6 +127,6 @@ _verify_same reviews_v3_traffic_percentage 100
if [ "$GATEWAY_API" != "true" ]; then
    snip_cleanup
    cleanup_bookinfo_sample
-    cleanup_sleep_sample
+    cleanup_curl_sample
    kubectl label namespace default istio-injection-
fi
diff --git a/tests/util/samples.sh b/tests/util/samples.sh
index 3f06e5dc750e7..1df5dde6022e8 100644
--- a/tests/util/samples.sh
+++ b/tests/util/samples.sh
@@ -46,13 +46,13 @@ cleanup_bookinfo_sample() {
    fi
}

-startup_sleep_sample() {
-    kubectl apply -f samples/sleep/sleep.yaml -n default
-    _wait_for_deployment default sleep
+startup_curl_sample() {
+    kubectl apply -f samples/curl/curl.yaml -n default
+    _wait_for_deployment default curl
}

-cleanup_sleep_sample() {
-    kubectl delete -f samples/sleep/sleep.yaml -n default || true
+cleanup_curl_sample() {
+    kubectl delete -f samples/curl/curl.yaml -n default || true
}

startup_httpbin_sample() {
@@ -78,7 +78,7 @@ sample_http_request() {
    fi

    local ingress_url
-    local sleep_pod
+    local curl_pod
    local response

    if [ "$GATEWAY_API" == "true" ]; then
@@ -87,21 +87,21 @@ sample_http_request() {
        ingress_url="http://istio-ingressgateway.istio-system"
    fi

-    sleep_pod=$(kubectl get pod -l app=sleep -n default -o 'jsonpath={.items..metadata.name}')
+    curl_pod=$(kubectl get pod -l app=curl -n default -o 'jsonpath={.items..metadata.name}')

    local args=""
    if [[ -n "$user" ]]; then
        # make request as logged in user
-        kubectl exec "$sleep_pod" -c sleep -n "default" -- curl -c /tmp/sample.cookies "$ingress_url/login" --data "username=$user&passwd=password"
+        kubectl exec "$curl_pod" -c curl -n "default" -- curl -c /tmp/sample.cookies "$ingress_url/login" --data "username=$user&passwd=password"
        args="-b /tmp/sample.cookies"
    fi

    # shellcheck disable=SC2086
-    response=$(kubectl exec "$sleep_pod" -c sleep -n "default" -- \
+    response=$(kubectl exec "$curl_pod" -c curl -n "default" -- \
        curl "$ingress_url$path" $args -s --retry 3 --retry-connrefused --retry-delay 5)

    if [[ -n "$user" ]]; then
        # shellcheck disable=SC2086
-        kubectl exec "$sleep_pod" -c sleep -n "default" -- curl $args "$ingress_url/logout"
+        kubectl exec "$curl_pod" -c curl -n "default" -- curl $args "$ingress_url/logout"
    fi

    echo "$response"
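
A possible follow-up (purely illustrative, not part of this change): parameterizing these helpers by app name would keep the next sample rename from having to touch every call site.

{{< text bash >}}
# Hypothetical refactor of the helpers above, not in the actual repo:
startup_sample() {
    local app="$1"    # e.g. "curl"
    kubectl apply -f "samples/${app}/${app}.yaml" -n default
    _wait_for_deployment default "${app}"
}

cleanup_sample() {
    local app="$1"
    kubectl delete -f "samples/${app}/${app}.yaml" -n default || true
}
{{< /text >}}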