# Project and environment setup for the mesh demo.
export PROJECT=mesh-demo-01
export PROJECT_NUMBER=$(gcloud projects describe "${PROJECT}" --format="value(projectNumber)")
gcloud config set project "${PROJECT}"
cd "${HOME}/csmdemo"
# $(pwd) over legacy backticks; quoted in case the path ever contains spaces.
export WORKDIR=$(pwd)
# Keep demo credentials isolated from the user's default kubeconfig.
export KUBECONFIG=${WORKDIR}/csmdemo_kubeconfig
# Make gcloud output easier to read during the demo.
gcloud config set accessibility/screen_reader false
export CLUSTER_1_NAME=edge-to-mesh-01
export CLUSTER_2_NAME=edge-to-mesh-02
export CLUSTER_1_REGION=us-central1
export CLUSTER_2_REGION=us-east4
# Public DNS name served via Cloud Endpoints.
export PUBLIC_ENDPOINT=frontend.endpoints.${PROJECT}.cloud.goog
# You should see responses from both regions, but only from the frontend service.
# Poll the public endpoint every 100ms and pretty-print the JSON response.
# ${PROJECT} is inside single quotes on purpose: watch runs the string via
# `sh -c`, and PROJECT is exported, so it expands in that subshell.
watch -n 0.1 'curl -s https://frontend.endpoints.${PROJECT}.cloud.goog | jq'
# Note how requests are bounced across regions — this typically isn't ideal because it increases latency and cost, especially once another service is added.
# Note: in the background, a GCE VM in us-central1 is running the following command:
# Background load generator: ~40 req/s total (2 concurrent workers x 20 qps),
# effectively unbounded request count.
# NOTE(review): project id is hardcoded here, presumably because this runs on
# a separate GCE VM where ${PROJECT} is not set — confirm it matches ${PROJECT}.
hey -n 99999999999999 -c 2 -q 20 https://frontend.endpoints.mesh-demo-01.cloud.goog
# - Architecture schematic.
# - Describe the gateway / show it in the console.
# Inspect the Gateway resource ("gtw" is the short name for the Gateway API kind).
# NOTE(review): no --context is given, so this hits whichever kubeconfig context
# is current — confirm that is the intended cluster.
kubectl describe gtw external-http -n asm-ingress
# Show the HTTPRoute that forwards traffic to the frontend service.
cat gateway/default-httproute.yaml
# Show the HTTPRoute that redirects HTTP to HTTPS.
cat gateway/default-httproute-redirect.yaml
# - Describe the relationship between the managed load balancer and the ingress gateways.
# - Describe TLS termination (managed cert via Certificate Manager at the load balancer, plus additional TLS on the hop between the LB and the ingress gateway to enable HTTP/2).
# In a browser, navigate to http://frontend.endpoints.mesh-demo-01.cloud.goog
# and notice that the browser is redirected to https://frontend.endpoints.mesh-demo-01.cloud.goog.
# Deploy the v1 backend (kustomize variant) to both clusters.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" apply -k "${WORKDIR}/whereami-backend/variant-v1"
done
# Hmm, but it isn't working... why? Because we have a default ALLOW-NONE AuthorizationPolicy.
# List every AuthorizationPolicy across all namespaces in the current context.
kubectl get authorizationpolicy --all-namespaces
# Explicitly allow traffic to the backend on both clusters.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" apply -f "${WORKDIR}/authz/backend.yaml"
done
# Pull up the mesh topology graph and select a service (frontend is ideal) — show basic telemetry and verify mTLS is enabled for both inbound and outbound calls.
# - Show the KSA in the namespace.
# Check the trace console (or the mesh console, which also includes traces) to verify that traces are present — also note that latency is inconsistent due to the lack of locality-aware routing.
# Also point out how the gce_service_account reflects a GSA that has tracing access (the Trace Agent role, specifically).
# Apply DestinationRules that enable locality-aware load balancing.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" apply -f "${WORKDIR}/locality/"
done
# Note: after some time, demonstrate the delta in trace latency.
# Return to the trace console to see that latency has been reduced.
# Scale cluster 1's ingress gateway to zero to demonstrate failover to the
# other region (original single-item loops inlined to direct commands).
kubectl --context="${CLUSTER_1_NAME}" -n asm-ingress scale --replicas=0 deployment/asm-ingressgateway
# Then scale back up to restore ingress gateways in the local region.
kubectl --context="${CLUSTER_1_NAME}" -n asm-ingress scale --replicas=3 deployment/asm-ingressgateway
# Set up the subset DestinationRule plus a VirtualService sending 100% to v1.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" -n backend apply -f "${WORKDIR}/traffic-splitting/subsets-dr.yaml"
  kubectl --context="${ctx}" -n backend apply -f "${WORKDIR}/traffic-splitting/vs-0.yaml"
done
# Deploy v2 of the backend service alongside v1 on both clusters.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" apply -f "${WORKDIR}/whereami-backend/v2"
done
# Sample the backend metadata ten times to confirm traffic is still all v1.
for (( sample = 1; sample <= 10; sample++ )); do
  curl -s "https://frontend.endpoints.${PROJECT}.cloud.goog" | jq '.backend_result.metadata'
done
# Shift the backend VirtualService to a 50/50 split between v1 and v2.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" -n backend apply -f "${WORKDIR}/traffic-splitting/vs-50.yaml"
done
# Sample again; responses should now be roughly 50/50 between v1 and v2.
for (( sample = 1; sample <= 10; sample++ )); do
  curl -s "https://frontend.endpoints.${PROJECT}.cloud.goog" | jq '.backend_result.metadata'
done
# Move 100% of traffic to v2, then verify by sampling the metadata.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" -n backend apply -f "${WORKDIR}/traffic-splitting/vs-100.yaml"
done
for (( sample = 1; sample <= 10; sample++ )); do
  curl -s "https://frontend.endpoints.${PROJECT}.cloud.goog" | jq '.backend_result.metadata'
done
# Tear down the traffic-splitting demo on both clusters.
# Fix: delete the VirtualService BEFORE the DestinationRule it references;
# removing the DR first leaves a window where routes still point at subsets
# that no longer exist, causing transient routing errors.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" -n backend delete -f "${WORKDIR}/traffic-splitting/vs-100.yaml"
  kubectl --context="${ctx}" -n backend delete -f "${WORKDIR}/traffic-splitting/subsets-dr.yaml"
  kubectl --context="${ctx}" -n backend delete -f "${WORKDIR}/whereami-backend/v2"
  kubectl --context="${ctx}" delete -f "${WORKDIR}/locality/"
  kubectl --context="${ctx}" delete -f "${WORKDIR}/authz/backend.yaml"
  kubectl --context="${ctx}" delete -k "${WORKDIR}/whereami-backend/variant-v1"
done
# --- Environment bootstrap (idempotent; safe to re-run) ---
export PROJECT=mesh-demo-01
export PROJECT_NUMBER=$(gcloud projects describe "${PROJECT}" --format="value(projectNumber)")
gcloud config set project "${PROJECT}"
# set up kubeconfig
cd "${HOME}/csmdemo"
# $(pwd) over legacy backticks; quoted against spaces in the path.
export WORKDIR=$(pwd)
# touch csmdemo_kubeconfig # only need this once
export KUBECONFIG=${WORKDIR}/csmdemo_kubeconfig
# make stuff easier to read
gcloud config set accessibility/screen_reader false
# make sure APIs are enabled
gcloud services enable \
container.googleapis.com \
mesh.googleapis.com \
gkehub.googleapis.com \
multiclusterservicediscovery.googleapis.com \
multiclusteringress.googleapis.com \
trafficdirector.googleapis.com \
certificatemanager.googleapis.com \
cloudtrace.googleapis.com \
anthos.googleapis.com \
servicenetworking.googleapis.com
export CLUSTER_1_NAME=edge-to-mesh-01
export CLUSTER_2_NAME=edge-to-mesh-02
export CLUSTER_1_REGION=us-central1
export CLUSTER_2_REGION=us-east4
# Public DNS name served via Cloud Endpoints.
export PUBLIC_ENDPOINT=frontend.endpoints.${PROJECT}.cloud.goog
# using Argolis, so need to create default VPC
gcloud compute networks create default --project=${PROJECT} --subnet-mode=auto --mtu=1460 --bgp-routing-mode=regional
# create clusters
gcloud container clusters create-auto --async \
${CLUSTER_1_NAME} --region ${CLUSTER_1_REGION} \
--release-channel rapid --labels mesh_id=proj-${PROJECT_NUMBER} \
--enable-private-nodes --enable-fleet
gcloud container clusters create-auto \
${CLUSTER_2_NAME} --region ${CLUSTER_2_REGION} \
--release-channel rapid --labels mesh_id=proj-${PROJECT_NUMBER} \
--enable-private-nodes --enable-fleet
# Fetch credentials for BOTH clusters. Fix: the fetch for cluster 2 was
# missing, so the rename-context for cluster 2 below operated on a context
# that was never created and failed.
# NOTE(review): cluster 1 is created with --async, so its credentials fetch
# can race cluster creation — confirm the cluster is RUNNING before this step.
gcloud container clusters get-credentials ${CLUSTER_1_NAME} \
--region ${CLUSTER_1_REGION}
gcloud container clusters get-credentials ${CLUSTER_2_NAME} \
--region ${CLUSTER_2_REGION}
# Shorten the auto-generated context names to the bare cluster names.
kubectl config rename-context gke_${PROJECT}_${CLUSTER_1_REGION}_${CLUSTER_1_NAME} ${CLUSTER_1_NAME}
kubectl config rename-context gke_${PROJECT}_${CLUSTER_2_REGION}_${CLUSTER_2_NAME} ${CLUSTER_2_NAME}
# Enables the GKE nodes to write metrics to Cloud Monitoring.
# Grant the default compute service account permission to write monitoring metrics.
gcloud projects add-iam-policy-binding "${PROJECT}" \
  --member="serviceAccount:${PROJECT_NUMBER}-compute@developer.gserviceaccount.com" \
  --role=roles/monitoring.metricWriter
# Enable the managed service mesh on the fleet and both cluster memberships.
gcloud container fleet mesh enable
gcloud container fleet mesh update \
  --management automatic \
  --memberships "${CLUSTER_1_NAME},${CLUSTER_2_NAME}"
# Create the ingress namespace on each cluster and enable sidecar injection.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" create namespace asm-ingress
  kubectl --context="${ctx}" label namespace asm-ingress istio-injection=enabled
done
# Generate a self-signed certificate (one year, 4096-bit RSA, no passphrase)
# for the public hostname; used on the hop behind the load balancer.
openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
  -subj "/CN=frontend.endpoints.${PROJECT}.cloud.goog/O=Edge2Mesh Inc" \
  -keyout "${WORKDIR}/frontend.endpoints.${PROJECT}.cloud.goog.key" \
  -out "${WORKDIR}/frontend.endpoints.${PROJECT}.cloud.goog.crt"
# Store the keypair as a TLS secret and deploy the ingress gateway on each cluster.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context "${ctx}" -n asm-ingress create secret tls \
    edge2mesh-credential \
    --key="${WORKDIR}/frontend.endpoints.${PROJECT}.cloud.goog.key" \
    --cert="${WORKDIR}/frontend.endpoints.${PROJECT}.cloud.goog.crt"
  kubectl --context "${ctx}" apply -k "${WORKDIR}/asm-ig/variant"
done
# Enable multi-cluster Services (MCS) on the fleet.
gcloud container fleet multi-cluster-services enable
# The MCS importer runs under Workload Identity and needs to read VPC network data.
gcloud projects add-iam-policy-binding ${PROJECT} \
--member "serviceAccount:${PROJECT}.svc.id.goog[gke-mcs/gke-mcs-importer]" \
--role "roles/compute.networkViewer"
# Export the service from both clusters so MCS can import it fleet-wide.
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context $CONTEXT apply -f ${WORKDIR}/mcs/svc_export.yaml
done
# Reserve a global static IP for the multi-cluster gateway and capture it.
gcloud compute addresses create mcg-ip --global
export MCG_IP=$(gcloud compute addresses describe mcg-ip --global --format "value(address)")
echo ${MCG_IP}
# Publish the public hostname via Cloud Endpoints (dns-spec.yaml presumably
# maps the hostname to ${MCG_IP} — confirm against the spec file).
gcloud endpoints services deploy ${WORKDIR}/endpoints/dns-spec.yaml
# Provision a Google-managed certificate for the public hostname.
gcloud certificate-manager certificates create mcg-cert \
--domains="frontend.endpoints.${PROJECT}.cloud.goog"
# Certificate maps associate hostnames with certificates for the load balancer.
gcloud certificate-manager maps create mcg-cert-map
gcloud certificate-manager maps entries create mcg-cert-map-entry \
--map="mcg-cert-map" \
--certificates="mcg-cert" \
--hostname="frontend.endpoints.${PROJECT}.cloud.goog"
# Cloud Armor security policy that blocks XSS attacks at the edge.
gcloud compute security-policies create edge-fw-policy \
--description "Block XSS attacks"
# Rule 1000: deny requests matching the preconfigured XSS signature set.
gcloud compute security-policies rules create 1000 \
--security-policy edge-fw-policy \
--expression "evaluatePreconfiguredExpr('xss-stable')" \
--action "deny-403" \
--description "XSS attack filtering"
# Apply cluster-side policy resources.
# NOTE(review): contents of ${WORKDIR}/policies/ are not visible here —
# presumably they reference the Cloud Armor policy above; confirm.
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context $CONTEXT apply -f ${WORKDIR}/policies/
done
# Enable multi-cluster ingress/gateway, with cluster 1 as the config membership.
gcloud container fleet ingress enable \
--config-membership=${CLUSTER_1_NAME} \
--location=${CLUSTER_1_REGION}
# The multi-cluster ingress service agent needs cluster admin to reconcile gateways.
gcloud projects add-iam-policy-binding ${PROJECT} \
--member "serviceAccount:service-${PROJECT_NUMBER}@gcp-sa-multiclusteringress.iam.gserviceaccount.com" \
--role "roles/container.admin"
# Deploy the Gateway/HTTPRoute resources on both clusters.
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context $CONTEXT apply -f ${WORKDIR}/gateway/
done
# this specific section only seems to enable access logging, not tracing, as this requires allow-listing
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context $CONTEXT apply -f ${WORKDIR}/observability/enable.yaml
done
# so we use the telemetry API instead
# note: at CSM launch, tracing only samples @ 1%
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context $CONTEXT apply -f ${WORKDIR}/observability/telemetry-for-tracing.yaml
done
# create GSA for writing traces
gcloud iam service-accounts create whereami-tracer \
--project=${PROJECT}
# Grant the GSA permission to write spans to Cloud Trace.
gcloud projects add-iam-policy-binding ${PROJECT} \
--member "serviceAccount:whereami-tracer@${PROJECT}.iam.gserviceaccount.com" \
--role "roles/cloudtrace.agent"
# map to KSAs
# Workload Identity: allow the frontend and backend KSAs to impersonate the tracer GSA.
gcloud iam service-accounts add-iam-policy-binding whereami-tracer@${PROJECT}.iam.gserviceaccount.com \
--role roles/iam.workloadIdentityUser \
--member "serviceAccount:${PROJECT}.svc.id.goog[frontend/whereami-frontend]"
gcloud iam service-accounts add-iam-policy-binding whereami-tracer@${PROJECT}.iam.gserviceaccount.com \
--role roles/iam.workloadIdentityUser \
--member "serviceAccount:${PROJECT}.svc.id.goog[backend/whereami-backend]"
# Default-deny posture: apply the ALLOW-NONE policy, then explicitly allow
# traffic into the ingress gateways, on each cluster in turn.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" apply -f "${WORKDIR}/authz/allow-none.yaml"
  kubectl --context="${ctx}" apply -f "${WORKDIR}/authz/asm-ingress.yaml"
done
# Create injected namespaces, deploy the frontend, and allow traffic to it.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" create ns backend
  kubectl --context="${ctx}" label namespace backend istio-injection=enabled
  kubectl --context="${ctx}" create ns frontend
  kubectl --context="${ctx}" label namespace frontend istio-injection=enabled
  kubectl --context="${ctx}" apply -k "${WORKDIR}/whereami-frontend/variant"
  kubectl --context="${ctx}" apply -f "${WORKDIR}/authz/frontend.yaml"
done
# Route requests for the public hostname to the frontend service.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" apply -f "${WORKDIR}/whereami-frontend/frontend-vs.yaml"
done
# The idea is to create additional headroom in the clusters to reduce provisioning time for the demo pods.
# See this link for more details:  # TODO: add the missing link
# Deploy the balloon pods to both clusters.
# NOTE(review): presumably low-priority placeholder pods that reserve node
# capacity — confirm against the manifests in ${WORKDIR}/balloon-pods/.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" apply -f "${WORKDIR}/balloon-pods/"
done
# Restart the ingress gateway pods.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" -n asm-ingress rollout restart deployment asm-ingressgateway
done
# Restart the backend pods.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" -n backend rollout restart deployment whereami-backend
done
# Restart the frontend pods.
for ctx in "${CLUSTER_1_NAME}" "${CLUSTER_2_NAME}"; do
  kubectl --context="${ctx}" -n frontend rollout restart deployment whereami-frontend
done
# remove backend service
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT delete -k ${WORKDIR}/whereami-backend/variant-v1
done
# remove PeerAuthentication policy
# NOTE(review): the mtls/ resources are deleted here and re-applied just below
# — presumably a demo reset (tear down, then re-arm for the next run); confirm
# the intended ordering before reorganizing.
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT -n frontend delete -f ${WORKDIR}/mtls/
kubectl --context=$CONTEXT -n backend delete -f ${WORKDIR}/mtls/
done
# remove locality
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT delete -f ${WORKDIR}/locality/
done
# set strict peer auth (mTLS) policy
for CONTEXT in ${CLUSTER_1_NAME} ${CLUSTER_2_NAME}
do
kubectl --context=$CONTEXT -n frontend apply -f ${WORKDIR}/mtls/
kubectl --context=$CONTEXT -n backend apply -f ${WORKDIR}/mtls/
done