- docker
- kind
- kubectl
- doctl
- GitHub Account
- Fork this repository on GitHub: https://github.com/cod-r/bdw-workshop
- Go to https://github.com
- Search for
bdw-workshop
- Click
cod-r/bdw-workshop
- Click Fork -> Create Fork
- Add your GitHub username to an environment variable
export GH_USERNAME=<your-gh-username>
- Clone the forked repo
echo $GH_USERNAME
git clone https://github.com/${GH_USERNAME}/bdw-workshop.git
kind create cluster --config kind-config.yaml
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
- Create
argocd
directory
mkdir argocd
- Create ConfigMap
cat > argocd/argocd-cm.yaml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd
name: argocd-cm
namespace: argocd
data:
timeout.reconciliation: 10s
exec.enabled: "true"
EOF
- Apply ConfigMap
kubectl apply -f argocd/argocd-cm.yaml
- Redeploy
argocd-application-controller
for changes to take effect
kubectl -n argocd rollout restart statefulset argocd-application-controller
kubectl port-forward svc/argocd-server -n argocd 8080:443
Username: admin
Password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d && echo
- Create the main Application.
This app will apply all manifests found in the argocd/applications
directory in this repository.
- Create directories
mkdir -p argocd/applications
- Create
main-app.yaml
file
cat > argocd/applications/main-app.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: main-argocd-app
namespace: argocd
spec:
project: default
source:
repoURL: https://github.com/${GH_USERNAME}/bdw-workshop.git
path: argocd
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
EOF
- Apply the manifest
kubectl apply -f argocd/applications/main-app.yaml
- Commit and push
git add .
git commit -m "a gitops test"
git push
After applying the manifest we can add other manifests in argocd/applications
and Argo CD will apply them automatically.
- Create a simple secret manifest
cat > argocd/applications/test.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
name: test-secret
stringData:
my-key: my-value
EOF
- Commit and push
- Verify
kubectl get secrets
Day 2 Operations.
- Deploy ingress-nginx
cat > argocd/applications/ingress-nginx.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress-nginx
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://github.com/kubernetes/ingress-nginx.git
path: deploy/static/provider/kind
destination:
server: https://kubernetes.default.svc
namespace: ingress-nginx
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
EOF
- Commit and push
- Create ingress for Argo CD
cat > argocd/argocd-ingress.yaml <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: argocd
name: argocd
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
spec:
rules:
- host: localhost
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 443
EOF
- Commit and push
- Access the UI via ingress https://localhost/
Username: admin
Password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d && echo
- Deploy kube-prometheus-stack CRDs
cat > argocd/applications/kube-prometheus-stack-crds.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-prometheus-stack-crds
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "-1"
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
destination:
server: "https://kubernetes.default.svc"
namespace: monitoring
source:
repoURL: https://github.com/prometheus-community/helm-charts.git
path: charts/kube-prometheus-stack/crds/
targetRevision: kube-prometheus-stack-40.3.1
directory:
recurse: true
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
- Replace=true
EOF
- Deploy kube-prometheus-stack
cat > argocd/applications/kube-prometheus-stack.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-prometheus-stack
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://prometheus-community.github.io/helm-charts
targetRevision: 40.3.1
chart: kube-prometheus-stack
helm:
skipCrds: true
values: |
prometheus:
prometheusSpec:
retention: 7d
grafana:
grafana.ini:
server:
root_url: http://localhost/grafana
domain: localhost
serve_from_sub_path: true
ingress:
enabled: true
hosts:
- localhost
path: /grafana
additionalDataSources:
- name: loki
type: loki
url: http://loki-stack.monitoring.svc.cluster.local:3100
destination:
server: https://kubernetes.default.svc
namespace: monitoring
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
EOF
- Deploy loki-stack
cat > argocd/applications/loki-stack.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: loki-stack
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://grafana.github.io/helm-charts
targetRevision: 2.8.3
chart: loki-stack
helm:
values: |
loki:
enabled: true
destination:
server: https://kubernetes.default.svc
namespace: monitoring
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
EOF
- Commit and push
- Access Grafana UI https://localhost/grafana
Username: admin
Password: prom-operator
- Delete kind cluster
kind delete cluster
- Recreate the cluster
- Install Argo CD in the new cluster
- Cluster recovered
- Deploy crossplane
cat > argocd/applications/crossplane.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "-1"
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://charts.crossplane.io/stable
targetRevision: 1.9.1
chart: crossplane
destination:
server: https://kubernetes.default.svc
namespace: crossplane-system
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
EOF
- Create Application for DigitalOcean provider manifests
cat > argocd/applications/crossplane-do.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane-do-resources
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://github.com/${GH_USERNAME}/bdw-workshop.git
path: crossplane-do
destination:
server: https://kubernetes.default.svc
namespace: crossplane-do
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
EOF
- Create secret containing the token from DigitalOcean
- Create token env var
export DO_TOKEN=<your-do-token>
- Create secret
kubectl apply -f -<<EOF
apiVersion: v1
kind: Secret
metadata:
namespace: crossplane-do
name: provider-do-secret
type: Opaque
stringData:
  token: ${DO_TOKEN}
EOF
- Create droplet
cat > crossplane-do/droplet.yaml <<EOF
apiVersion: compute.do.crossplane.io/v1alpha1
kind: Droplet
metadata:
name: crossplane-droplet
annotations:
crossplane.io/external-name: crossplane-droplet
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
forProvider:
region: fra1
size: s-1vcpu-1gb
image: ubuntu-20-04-x64
providerConfigRef:
name: do-config
EOF
doctl auth init
doctl compute droplet list
doctl compute droplet delete crossplane-droplet
Wait for droplet to be recreated by Crossplane
- Create k8s cluster
cat > crossplane-do/k8s-cluster.yaml <<EOF
apiVersion: kubernetes.do.crossplane.io/v1alpha1
kind: DOKubernetesCluster
metadata:
name: k8s-cluster
annotations:
argocd.argoproj.io/sync-wave: "3"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
providerConfigRef:
name: do-config
forProvider:
region: fra1
version: 1.24.4-do.0
nodePools:
- size: s-1vcpu-2gb
count: 1
name: worker-pool
maintenancePolicy:
startTime: "00:00"
day: wednesday
autoUpgrade: true
surgeUpgrade: false
highlyAvailable: false
EOF
- Get kubeconfig
doctl kubernetes cluster kubeconfig save k8s-cluster
- Get kubeconfig
gcloud container clusters get-credentials gke-cluster
- create service account and clusterrolebinding on the destination cluster:
kubectl apply -f -<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: argocd
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: argocd
namespace: kube-system
EOF
- get server address
kubectl cluster-info
- get certificate and the token
TOKEN_SECRET=$(kubectl -n kube-system get sa argocd \
-o go-template='{{range .secrets}}{{.name}}{{"\n"}}{{end}}')
CA_CRT=$(kubectl -n kube-system get secrets ${TOKEN_SECRET} -o go-template='{{index .data "ca.crt"}}')
TOKEN=$(kubectl -n kube-system get secrets ${TOKEN_SECRET} -o go-template='{{.data.token}}' | base64 -d)
- create cluster secret in kind cluster
kubectl apply -f -<<EOF
apiVersion: v1
kind: Secret
metadata:
name: gke-cluster-conn-secret
namespace: argocd
labels:
argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
name: gke-cluster
server: https://34.118.116.43
config: |
{
"bearerToken": "${TOKEN}",
"tlsClientConfig": {
"insecure": false,
"caData": "${CA_CRT}"
}
}
EOF