# launch a test client pod
kubectl run whereami-test --image=us-docker.pkg.dev/google-samples/containers/gke/whereami:v1.2.22
# open a shell in the pod
kubectl exec --stdin --tty whereami-test -- /bin/sh
# from inside the pod, generate a steady stream of requests to the frontend
while true; do curl http://whereami-frontend.frontend; sleep 0.01; done
create strict mTLS policy
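The manifests in peer-auth/ aren't shown here; a minimal sketch of a mesh-wide strict mTLS PeerAuthentication (resource name assumed, actual files may differ):

apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: default
  namespace: istio-system   # root namespace = mesh-wide scope
spec:
  mtls:
    mode: STRICT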
# apply the strict mTLS policies to both clusters
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f peer-auth/
done
# remove them to roll back
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT delete -f peer-auth/
done
authz journey
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f authz/asm-ingress.yaml
kubectl --context=$CONTEXT apply -f authz/frontend.yaml
kubectl --context=$CONTEXT apply -f authz/allow-none.yaml
done
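allow-none.yaml presumably resembles Istio's standard "allow-nothing" policy: an ALLOW policy with no rules in the root namespace matches no requests, so all traffic is denied until explicitly permitted. A sketch:

apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: allow-nothing
  namespace: istio-system
spec: {}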
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f authz/backend.yaml
done
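backend.yaml then likely grants the frontend access to the backend; a hedged sketch, with the labels and service-account principal assumed:

apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: whereami-backend
  namespace: backend
spec:
  selector:
    matchLabels:
      app: whereami-backend
  action: ALLOW
  rules:
  - from:
    - source:
        # trust domain / namespace / KSA assumed
        principals: ["PROJECT.svc.id.goog/ns/frontend/sa/whereami-frontend"]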
apply locality failover settings
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f locality/
done
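locality/ likely contains a DestinationRule enabling locality-aware load balancing; failover only engages when outlier detection is configured, so a sketch (host and thresholds assumed) would be:

apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: whereami-backend
  namespace: backend
spec:
  host: whereami-backend.backend.svc.cluster.local
  trafficPolicy:
    loadBalancer:
      localityLbSetting:
        enabled: true
    outlierDetection:            # required for locality failover to trigger
      consecutive5xxErrors: 1
      interval: 1s
      baseEjectionTime: 1m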
test backend service failover
# scale cluster 1's backend to zero to force failover to cluster 2
kubectl --context=${CLUSTER_1_NAME} -n backend scale --replicas=0 deployment/whereami-backend
# scale back up to restore local serving
kubectl --context=${CLUSTER_1_NAME} -n backend scale --replicas=3 deployment/whereami-backend
egress demo setup
# create team-x and team-y namespaces
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/namespaces
done
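each namespace manifest presumably carries the mesh revision label (matching the one used for istio-egress below) so the team workloads get sidecars; a sketch for team-x:

apiVersion: v1
kind: Namespace
metadata:
  name: team-x
  labels:
    istio.io/rev: asm-managed-rapid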
# bind the whereami-tracer GSA to the team KSAs via Workload Identity
gcloud iam service-accounts add-iam-policy-binding whereami-tracer@${PROJECT}.iam.gserviceaccount.com \
--role roles/iam.workloadIdentityUser \
--member "serviceAccount:${PROJECT}.svc.id.goog[team-x/app-x]"
gcloud iam service-accounts add-iam-policy-binding whereami-tracer@${PROJECT}.iam.gserviceaccount.com \
--role roles/iam.workloadIdentityUser \
--member "serviceAccount:${PROJECT}.svc.id.goog[team-y/app-y]"
# create team-x and team-y workloads
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/workloads
done
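for Workload Identity to work end to end, the KSAs referenced in the bindings above also need the matching GSA annotation; the workload manifests presumably include something like:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-x
  namespace: team-x
  annotations:
    # substitute your project ID
    iam.gke.io/gcp-service-account: whereami-tracer@PROJECT.iam.gserviceaccount.com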
# create and label the istio-egress namespace, then deploy the egress gateway
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT create namespace istio-egress
kubectl --context=$CONTEXT label namespace istio-egress istio.io/rev=asm-managed-rapid
done
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply --recursive --filename egressgateway/
done
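egressgateway/ isn't shown; with managed ASM, an egress gateway is typically a plain Deployment that opts into gateway injection (image: auto is replaced by the injector), plus a Service and the istio=egressgateway label that the log command further below relies on. A trimmed sketch:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: istio-egressgateway
  namespace: istio-egress
spec:
  selector:
    matchLabels:
      istio: egressgateway
  template:
    metadata:
      annotations:
        inject.istio.io/templates: gateway   # use the gateway injection template
      labels:
        istio: egressgateway
    spec:
      containers:
      - name: istio-proxy
        image: auto   # rewritten by the sidecar injector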
# apply locality settings for the egress gateway
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/locality
done
testing out egress
kubectl -n team-x exec -it \
$(kubectl -n team-x get pod -l app=app-x -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://example.com
kubectl -n team-x exec -it \
$(kubectl -n team-x get pod -l app=app-x -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://httpbin.org/json
kubectl -n team-y exec -it \
$(kubectl -n team-y get pod -l app=app-y -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://example.com # fails (Bad Gateway)
kubectl -n team-y exec -it \
$(kubectl -n team-y get pod -l app=app-y -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://httpbin.org/json
# implement Sidecar resources to lock down outbound traffic
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/sidecar
done
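the Sidecar resources likely flip each team namespace's outbound traffic policy to REGISTRY_ONLY, so only hosts in the mesh's service registry are reachable:

apiVersion: networking.istio.io/v1beta1
kind: Sidecar
metadata:
  name: default
  namespace: team-x
spec:
  outboundTrafficPolicy:
    mode: REGISTRY_ONLY   # block anything not in the service registry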
# and now add the external services to the service registry
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/service-entry
done
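a ServiceEntry for example.com might look like the following (one per external host; namespace and port assumed):

apiVersion: networking.istio.io/v1beta1
kind: ServiceEntry
metadata:
  name: example-com
  namespace: team-x
spec:
  hosts:
  - example.com
  ports:
  - number: 80
    name: http
    protocol: HTTP
  resolution: DNS
  location: MESH_EXTERNAL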
# so are we done? well... no - the workload can still reach the Envoy proxy's admin interface:
kubectl -n team-x exec -it \
$(kubectl -n team-x get pod -l app=app-x -o jsonpath={.items..metadata.name}) \
-c whereami -- curl http://127.0.0.1:15000
# a workload can still bypass the sidecar - e.g. by opting out of sidecar
# injection, or by manipulating the proxy via that admin interface
# so we need to force traffic through egress gateways... how? via Gateway,
# DestinationRule, and VirtualService resources
# set up gateway and destination rules
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/gw-dr
done
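the Gateway binds a listener on the egress gateway, and the DestinationRule half of gw-dr presumably points traffic at it; a sketch of the Gateway (hosts and port assumed):

apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: egress-gateway
  namespace: istio-egress
spec:
  selector:
    istio: egressgateway
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - example.com
    - httpbin.org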
# route the external hosts through the egress gateway with VirtualServices
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/virtual-service
done
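each VirtualService hairpins sidecar traffic for an external host to the egress gateway and then out; note the exportTo list, which is why team-y's request to example.com fails later. A sketch for example.com (names and exportTo values assumed):

apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: example-com-egress
  namespace: istio-egress
spec:
  hosts:
  - example.com
  exportTo:
  - "."        # this namespace
  - "team-x"   # deliberately not team-y
  gateways:
  - mesh
  - egress-gateway
  http:
  - match:
    - gateways:
      - mesh
      port: 80
    route:
    - destination:   # sidecar -> egress gateway
        host: istio-egressgateway.istio-egress.svc.cluster.local
        port:
          number: 80
  - match:
    - gateways:
      - egress-gateway
      port: 80
    route:
    - destination:   # egress gateway -> external host
        host: example.com
        port:
          number: 80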
# still doesn't work... why?
# check & stream logs
kubectl -n istio-egress logs -f $(kubectl -n istio-egress get pod -l istio=egressgateway \
-o jsonpath="{.items[0].metadata.name}") istio-proxy
# an authorization issue - remember, we applied an allow-nothing policy earlier, so traffic flows require explicit permission
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/authz
done
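the authz manifests presumably allow the team namespaces to call the egress gateway, e.g.:

apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: allow-egress-from-teams
  namespace: istio-egress
spec:
  selector:
    matchLabels:
      istio: egressgateway
  action: ALLOW
  rules:
  - from:
    - source:
        namespaces: ["team-x", "team-y"]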
kubectl -n team-x exec -it \
$(kubectl -n team-x get pod -l app=app-x -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://example.com
kubectl -n team-x exec -it \
$(kubectl -n team-x get pod -l app=app-x -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://httpbin.org/json
kubectl -n team-y exec -it \
$(kubectl -n team-y get pod -l app=app-y -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://example.com # fails (Bad Gateway) because the VirtualService is not exported to this namespace (team-y)
kubectl -n team-y exec -it \
$(kubectl -n team-y get pod -l app=app-y -o jsonpath={.items..metadata.name}) \
-c whereami -- curl -v http://httpbin.org/json
# implement TLS origination for example.com
# show logs + talk about tracing / observability
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT apply -f egress/tls-origination
done
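TLS origination is usually a DestinationRule on the egress gateway's outbound hop that upgrades plaintext HTTP to HTTPS toward the origin, paired with a gateway-side route targeting port 443; a sketch (names and ports assumed):

apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: example-com-tls-origination
  namespace: istio-egress
spec:
  host: example.com
  trafficPolicy:
    portLevelSettings:
    - port:
        number: 443
      tls:
        mode: SIMPLE   # originate TLS to the external host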