-
Create a k8s-control instance with an Ubuntu image
-
Create Role
- Go to iam
- Click role
- CLick add role
- give a tag to your role (optional)
- add permissions (EC2 full, Route53 full, S3 full, IAM full, VPC full)
- attach the role to the instance
- go to the EC2 dashboard
- select your instance
- click Actions
- Instance settings
- Modify IAM role
- select your role
- click Save
- Create a Route 53 hosted zone
- go to Route 53
- click Hosted zones
- click Create hosted zone
- input the domain name
- select your instance's region
- select your instance's VPC ID
- click Create hosted zone
- SSH to the instance and install the AWS CLI
# Install AWS CLI v2 (bundles its own Python runtime, so no python package is
# needed; the old "python" apt package does not exist on Ubuntu 20.04+).
# -f makes curl fail on HTTP errors instead of saving an error page.
curl -f "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
sudo apt-get update
sudo apt-get install -y unzip
unzip awscliv2.zip
sudo ./aws/install
- install kubectl
# Download the latest stable kubectl release. -f makes curl fail on HTTP
# errors instead of writing an error page to disk as the "binary".
curl -fLO "https://dl.k8s.io/release/$(curl -fL -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
- install kops
# Download the latest kops release (version resolved via the GitHub API).
# -f makes curl fail on HTTP errors instead of saving an error page; the URL
# is quoted so the command substitution result is never word-split.
curl -fLO "https://github.com/kubernetes/kops/releases/download/$(curl -fs https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64"
chmod +x kops-linux-amd64
sudo mv kops-linux-amd64 /usr/local/bin/kops
# To update kops, remove the old binary (-f is sufficient, it is a single file)...
sudo rm -f /usr/local/bin/kops
# ...then repeat the download/install steps above.
- config aws cli
# Interactively set credentials/region. NOTE(review): with the IAM role
# attached to the instance, credentials are usually not needed — region only.
aws configure
- create s3 bucket
# Create the kops state-store bucket (us-east-1 needs no LocationConstraint)
aws s3api create-bucket \
--bucket k8s-sample-store \
--region us-east-1
# For a region other than us-east-1 you must add a LocationConstraint, e.g.:
aws s3api create-bucket \
--bucket k8s-example-store \
--region us-west-1 \
--create-bucket-configuration LocationConstraint=us-west-1
# or (shorthand; uses the CLI's configured default region)
aws s3 mb s3://k8s-example-store
# Note: We STRONGLY recommend versioning your S3 bucket in case you ever need to revert or recover a previous state store.
aws s3api put-bucket-versioning --bucket k8s-example-store --versioning-configuration Status=Enabled
# Delete bucket
aws s3 rm s3://k8s-example-store --recursive # empty the bucket before deleting it
aws s3api delete-bucket --bucket k8s-example-store --region us-west-1
- add to .bashrc
# NAME is the cluster/DNS-zone name (replace with your real domain);
# KOPS_STATE_STORE points kops at the S3 bucket created above.
export NAME=yourdomain.com
export KOPS_STATE_STORE=s3://k8s-sample-store
- generate an SSH key without a passphrase
# Generate an RSA key pair non-interactively with an empty passphrase
# (matches the "no password" intent; plain ssh-keygen would prompt).
# The public key ~/.ssh/id_rsa.pub is used by "kops create secret" below.
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
- list availability zones
# List AZs for the configured region (pick one for --zones below)
aws ec2 describe-availability-zones
- create cluster with kops
# Example with a hard-coded name/zone:
# kops create cluster --cloud=aws --zones=us-east-1a --name=k8s.devops.com --dns-zone=k8s.devops.com --dns private
# Generate the cluster spec in the S3 state store (nothing is provisioned
# until "kops update cluster ... --yes"). $NAME is quoted so the command
# survives unexpected whitespace in the variable.
kops create cluster --cloud=aws --zones=us-east-1a --name="$NAME" --node-size=t2.small --master-size=t2.small --dns-zone="$NAME" --dns private
- edit configuration
# Optionally tweak the cluster spec and instance groups before applying.
# Expansions are quoted per shell best practice.
kops edit cluster "$NAME"
kops edit ig --name="$NAME" nodes-us-east-1a
kops edit ig --name="$NAME" master-us-east-1a
- set ssh and create cluster
# Register the SSH public key so you can log in to cluster nodes
kops create secret --name "$NAME" sshpublickey admin -i ~/.ssh/id_rsa.pub
# Apply the spec: actually provision AWS resources; --admin exports an
# admin kubeconfig for kubectl
kops update cluster --name "$NAME" --yes --admin
- check cluster
# Block up to 10 minutes until the cluster reports healthy
kops validate cluster --wait 10m
# or run a one-shot check
kops validate cluster
# Confirm nodes joined and inspect their labels
kubectl get nodes --show-labels
- ssh to master node
# api.$NAME resolves to the control-plane endpoint in the Route 53 zone
ssh -i ~/.ssh/id_rsa ubuntu@api.$NAME
# same command with a concrete example domain
ssh -i ~/.ssh/id_rsa ubuntu@api.sunsummit.net
- delete cluster
# Tear down all AWS resources for the cluster; variables quoted defensively.
# WARNING: --yes makes this immediate and irreversible.
kops delete cluster --name="$NAME" --state="$KOPS_STATE_STORE" --yes
-
set up Helm as described in the docs: https://helm.sh/docs/intro/install/
-
install nginx ingress https://hub.kubeapps.com/charts/ingress-nginx/ingress-nginx
# Register the official ingress-nginx chart repository and install it
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install nginx-ingress ingress-nginx/ingress-nginx
# After install, verify an AWS load balancer was created for the ingress
# controller's Service (NOTE(review): on kops/AWS this is typically a classic
# ELB by default, not an ALB — confirm in the EC2 console)
-
install cert-manager with Helm, following the instructions here: https://hub.kubeapps.com/charts/jetstack/cert-manager
-
setup ingress with cert in here https://cert-manager.io/docs/tutorials/acme/ingress/
- add prometheus Helm repo
# Register the prometheus-community chart repository
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
- add grafana Helm repo
# Register the Grafana chart repository
helm repo add grafana https://grafana.github.io/helm-charts
- Deploy Prometheus
kubectl create namespace prometheus
# Persist alertmanager and server data on EBS-backed "gp2" volumes
# (the default StorageClass on kops/AWS clusters)
helm install prometheus prometheus-community/prometheus --namespace prometheus --set alertmanager.persistentVolume.storageClass="gp2" --set server.persistentVolume.storageClass="gp2"
- Verify the Prometheus components deployed as expected
# Inspect all Prometheus workloads/services in the namespace
kubectl get all -n prometheus
- kubectl port forwarding
# Expose the Prometheus UI locally: http://localhost:8080 -> pod port 9090
kubectl port-forward -n prometheus deploy/prometheus-server 8080:9090
- Deploy Grafana using the commands below
save the following to grafana.yaml
# Helm values for the grafana chart: pre-provision Prometheus as the default
# data source. The nesting below is required by the chart's values schema —
# the flattened (unindented) form is not a valid values file.
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      - name: Prometheus
        type: prometheus
        # In-cluster DNS name of the Prometheus server Service deployed above
        url: http://prometheus-server.prometheus.svc.cluster.local
        access: proxy
        isDefault: true
kubectl create namespace grafana
# gp2-backed persistence, admin password set inline (change it!), data source
# values from grafana.yaml, exposed via an AWS LoadBalancer Service
helm install grafana grafana/grafana --namespace grafana --set persistence.storageClassName="gp2" --set persistence.enabled=true --set adminPassword='abcd1234' --values ./grafana.yaml --set service.type=LoadBalancer
- Check if Grafana is deployed
# Inspect all Grafana workloads/services in the namespace
kubectl get all -n grafana
- Get Grafana ELB URL using this command
# Print the hostname of the ELB fronting the Grafana Service
kubectl get svc -n grafana grafana -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
- In Grafana, import community dashboards by ID:
3119/6417