# Deployment: the field is "replicas" — "replica" is not a valid field and
# the API server rejects it (or ignores it with client-side validation off).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploytest
  namespace: dev
spec:
  replicas: 3
  selector:
    matchLabels:
      name: test-pod
  template:
    metadata:
      labels:
        name: test-pod
    spec:
      containers:
        - name: test-pod-container
          image: nginx
# Pod whose container runs "sleep 2000" instead of the image entrypoint.
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod-container
      image: nginx
      command:
        - sleep
        - "2000"  # quoted so it stays a string, not an int
or
# Pod using both command (overrides image ENTRYPOINT) and args (overrides CMD).
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod-container
      image: nginx
      command: ["sleep"]
      args: ["--time", "1000"]
# ConfigMap holding plain key/value pairs consumed by the Pods below.
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-config-map
data:
  MY_MAP_KEY_1: VALUE_1
  MY_MAP_KEY_2: VALUE_2
# Pod with a single environment variable set inline (no ConfigMap involved).
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
      env:
        - name: MY_KEY_1
          value: VALUE_1
# Pod importing ALL keys of a ConfigMap as environment variables.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
      envFrom:
        - configMapRef:
            name: my-config-map
# Pod importing a SINGLE key from a ConfigMap as one environment variable.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
      env:
        - name: MY_KEY_1
          valueFrom:
            configMapKeyRef:
              name: my-config-map
              key: MY_MAP_KEY_1
# Pod mounting a ConfigMap as files under /etc/config.
# Fix: the volumeMount referenced "my-volume", which does not exist —
# it must match the volume name "my-configmap-volume" declared in spec.volumes.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  volumes:
    - name: my-configmap-volume
      configMap:
        name: my-config-map
  containers:
    - name: my-pod-container
      image: nginx
      volumeMounts:
        - name: my-configmap-volume  # was "my-volume" (dangling reference)
          mountPath: "/etc/config"
# Secret skeleton. NOTE: values under "data" must be base64-encoded strings;
# "base64(VALUE_1)" here is a placeholder (use "stringData" for plain text).
apiVersion: v1
kind: Secret
metadata:
  name: my-secret
data:
  SECRET_1: base64(VALUE_1)
$ echo -n 'xxxx' | base64
$ echo -n 'eHh4eAo=' | base64 --decode
# Pod importing ALL keys of a Secret as environment variables.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
      envFrom:
        - secretRef:
            name: my-secret
# Pod importing a SINGLE key from a Secret as one environment variable.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
      env:
        - name: MY_ENV_1
          valueFrom:
            secretKeyRef:
              name: my-secret
              key: SECRET_1
# Pod mounting a Secret as files under /etc/foo.
# Fixes: "apVersion" → "apiVersion"; the volumeMount referenced
# "mounted-secret", which does not exist — it must match the volume
# name "secret-volume" declared in spec.volumes.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  volumes:
    - name: secret-volume
      secret:
        secretName: my-secret
  containers:
    - name: my-pod-container
      image: nginx
      volumeMounts:
        - name: secret-volume  # was "mounted-secret" (dangling reference)
          mountPath: "/etc/foo"
          # NOTE(review): secret volumes are mounted read-only by the kubelet;
          # readOnly: true is the accurate/safe setting here.
          readOnly: false
Run the Pod's processes as a specific user (UID)
Pod can set user and container can overwrite it
use capabilities to add permission
only container can use capabilities
# Pod-level runAsUser (1000) is the default; the container-level value (1001)
# overrides it. capabilities can only be set at the container level.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  securityContext:
    runAsUser: 1000
  containers:
    - name: my-pod-container
      image: nginx
      securityContext:
        runAsUser: 1001
        capabilities:
          add: ["SYS_TIME"]
$ kubectl create serviceaccount [NAME]
$ kubectl describe serviceaccount [NAME]
the above commands show a "Tokens" attribute whose value is a Secret name — call it [TOKENS_NAME] — then
$ kubectl describe secrets [TOKENS_NAME]
You can find real token
# Deployment that runs its Pods under a specific ServiceAccount.
# Fixes: "replica" → "replicas"; "name=my-deployment" is not valid YAML
# mapping syntax (→ "name: my-deployment", twice); the service account
# belongs under spec.template.spec, not directly under spec.template,
# and the current field name is "serviceAccountName" ("serviceAccount"
# is deprecated).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      name: my-deployment
  template:
    metadata:
      labels:
        name: my-deployment
    spec:
      serviceAccountName: [NAME]
      containers:
        - name: my-deployment-container
          image: nginx
# Pod with resource requests and limits.
# Fixes: the fields are plural — "requests" (was "request") and
# "limits" (was "limit"); the singular forms are rejected by the API.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
      ports:
        - containerPort: 8080
      resources:
        requests:
          memory: "1Gi"
          cpu: 2
        limits:
          memory: "2Gi"
          cpu: 4
- Taint on Node
- Toleration on Pod
$ kubectl taint node [node name] [taint name]:[effect]
- [taint name]: can be a [taint key]=[taint value] pair or a bare key like node-role.kubernetes.io/master
- [effect]: NoSchedule / NoExecute / PreferNoSchedule
$ kubectl taint node [node name] [taint name]-
# Pod tolerating a node taint so the scheduler may place it on tainted nodes.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  tolerations:
    - key: "[taint key]"
      operator: "Equal"
      value: "[taint value]"
      effect: "[taint effect]"
  containers:
    - name: my-pod-container
      image: nginx
A taint can only prevent Pods from being scheduled on a specific Node; it can NOT force a Pod to run on a specific Node!
$ kubectl label node <NODE name> [key]=[value]
e.g.
$ kubectl label node node01 size=Large
# Pod pinned to nodes carrying a matching label.
# Fix: nodeSelector is a Pod-level field (spec.nodeSelector) — in the
# original flattened text it was nested under the container, where it
# is not a valid field.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
  nodeSelector:
    [key]: [value]
# Pod with required node affinity.
# Fix: each entry of nodeSelectorTerms is an object containing
# "matchExpressions" (and/or "matchFields") — the original placed
# key/operator/values directly in the term, which is invalid.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: [key]
                operator: In
                values:
                  - [value1]
                  - [value2]
- taint node
- set Tolerations on Pod
- set label to node
- set node affinity on Pod
Empty...
# Pod with readiness and liveness HTTP probes.
# Fix: livenessProbe.httpGet.port had no value (invalid manifest) —
# set to 8080 to match the declared containerPort.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: my-pod-container
      image: nginx
      ports:
        - containerPort: 8080
      readinessProbe:
        httpGet:
          path: /api/ready
          port: 8080
        initialDelaySeconds: 10
        periodSeconds: 1
        failureThreshold: 10
      livenessProbe:
        httpGet:
          path: /
          port: 8080  # was empty
        initialDelaySeconds: 10
        periodSeconds: 1
        failureThreshold: 10
$ kubectl logs -f [pod name]
if more than one container in Pod
$ kubectl logs -f [pod name] [container name]
$ kubectl get [resource] \
--selector label_key=label_value \
--selector label_key=label_value
# Deployment showing the update-strategy options.
# Fixes: "replica" → "replicas"; "maxunavailable" → "maxUnavailable"
# (Kubernetes fields are camelCase); "metadat" → "metadata".
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-deployment
spec:
  replicas: 4
  selector:
    matchLabels:
      name: frontend
  # Alternative (kills all old Pods before creating new ones):
  #   strategy:
  #     type: Recreate
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: "20%"
      maxUnavailable: "10%"
  template:
    metadata:
      name: my-pod
      labels:
        name: frontend
    spec:
      containers:
        - name: my-pod-container
          image: nginx
          ports:
            - containerPort: 8080
$ kubectl create -f myapp-deployment.yml
$ kubectl rollout status deployment/myapp-deployment
$ kubectl rollout history deployment/myapp-deployment
$ kubectl apply -f myapp-deployment-new.yml
or
$ kubectl set image deployment/myapp-deployment [container name]=[image name]
$ kubectl rollout undo deployment/myapp-deployment
# Job running 3 completions with parallelism 3.
# Fix: the pod-template field is "containers" (plural) — "container"
# is not a valid field.
apiVersion: batch/v1
kind: Job
metadata:
  name: my-job
spec:
  completions: 3
  parallelism: 3
  template:
    spec:
      containers:
        - name: my-job-container
          image: nginx
      restartPolicy: Never
apiVersion: batch/v1beta1 kind: CronJob metadata: name: my-cron-job spec: schedule: "* * * * *" jobTemplate: ... Job define ....
# CronJob running the embedded Job spec every day at 12:00.
# Fix: "spce" → "spec".
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: my-cron-job
spec:
  schedule: "0 12 * * *"
  jobTemplate:
    spec:
      completions: 3
      parallelism: 3
      template:
        spec:
          containers:
            - name: my-cronjob-container
              image: nginx
          restartPolicy: Never
# NodePort Service: node:30008 -> service:80 -> pod:8080.
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  type: NodePort
  ports:
    - port: 80          # the Service's own port
      targetPort: 8080  # container port on the selected Pods
      nodePort: 30008   # exposed on every node (30000-32767)
  selector:
    app: my-app
    type: frontend
# ClusterIP Service: reachable only inside the cluster on port 80.
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
  selector:
    app: my-app
    type: frontend
$ kubectl get networkpolicy
# NetworkPolicy allowing ingress to "my-pod" only from "other-pod-in"
# on TCP 8080; all other ingress to the selected Pods is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-ingress-policy
spec:
  podSelector:
    matchLabels:
      name: my-pod
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              name: other-pod-in
      ports:
        - protocol: TCP
          port: 8080
# NetworkPolicy allowing egress from "my-pod" only to "to-other-pod"
# on TCP 3306; all other egress from the selected Pods is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-egress-policy
spec:
  podSelector:
    matchLabels:
      name: my-pod
  policyTypes:
    - Egress
  egress:
    - to:
        - podSelector:
            matchLabels:
              name: to-other-pod
      ports:
        - protocol: TCP
          port: 3306
$ kubectl get pv
$ kubectl get pvc
# Pod mounting a directory from the host filesystem (hostPath volume).
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  volumes:
    - name: my-volume
      hostPath:
        path: [Host Path]
        type: Directory
  containers:
    - name: my-pod-container
      image: nginx
      volumeMounts:
        - name: my-volume
          mountPath: [container path]
# PersistentVolume backed by a host path.
# Fix: "peristentVolumeReclaimPolicy" → "persistentVolumeReclaimPolicy".
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 1Gi
  hostPath:
    path: [Host path]
  persistentVolumeReclaimPolicy: Retain
# PersistentVolume backed by a host path (duplicate of the previous example).
# Fix: "peristentVolumeReclaimPolicy" → "persistentVolumeReclaimPolicy".
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 1Gi
  hostPath:
    path: [Host path]
  persistentVolumeReclaimPolicy: Retain
# PersistentVolume backed by an NFS export.
# Fix: "peristentVolumeReclaimPolicy" → "persistentVolumeReclaimPolicy".
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv
spec:
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 1Gi
  nfs:
    path: [NFS path]
    server: [NFS server]
  persistentVolumeReclaimPolicy: Retain
# Pod combining a hostPath volume and a PersistentVolumeClaim volume.
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  volumes:
    - name: my-volume-name
      hostPath:
        path: [Host path]
        type: Directory
    - name: my-pvc-name
      persistentVolumeClaim:
        claimName: my-claim-name
  containers:
    - name: my-pod-container
      image: nginx
      volumeMounts:
        - name: my-volume-name
          mountPath: [containers path]
        - name: my-pvc-name
          mountPath: [containers path]
$ kubectl get nodes
$ kubectl get node [NAME] -o wide (or yaml or json)
$ kubectl get nodes --show-labels
$ kubectl label node [NAME] key=value
$ kubectl label node [NAME] key-
$ kubectl label node [NAME] \
node-role.kubernetes.io/[role]=\
node-role.kubernetes.io/master=
$ kubectl config get-contexts
$ kubectl config set current-context [context name]
$ kubectl config use-context [context name]
$ kubectl config delete-context [NAME]
$ kubectl get pods (--all-namespaces) (--namespace=[space name])
$ kubectl get po
$ kubectl exec -it [pod name] -- sh (or other command)
$ kubectl exec -it [pod name] -c [container name] -- sh (or other command)
$ kubectl delete pod [NAME]
$ kubectl delete po [NAME]
$ kubectl get namespace
$ kubectl get ns
$ kubectl create namespace [NAME]
$ kubectl create ns [NAME]
$ kubectl delete namespace [NAME]
$ kubectl delete ns [NAME]
$ kubectl get deployment
$ kubectl get deploy
$ kubectl delete deployment [NAME]
$ kubectl delete deploy [NAME]
$ kubectl scale --current-replicas=[num] --replicas=[num] deployment/[NAME]
$ kubectl set image deployment/[NAME] [container name]=[image name]:[new version]
$ kubectl rollout status deployment/[NAME]
$ kubectl rollout history deployment/[NAME]
$ kubectl rollout history deployment/[NAME] --revision=[num]
$ kubectl rollout undo deployment/[NAME]
$ kubectl rollout undo deployment/[NAME] --to-revision=[num]
$ kubectl get replicaset
$ kubectl get rs
$ kubectl run --generator=run-pod/v1 nginx --image=nginx
$ kubectl run --generator=run-pod/v1 nginx --image=nginx --dry-run -o yaml
$ kubectl run --generator=deployment/v1beta1 nginx --image=nginx
$ kubectl run --generator=deployment/v1beta1 nginx --image=nginx --dry-run -o yaml
$ kubectl run --generator=deployment/v1beta1 nginx --image=nginx --dry-run --replicas=4 -o yaml
$ kubectl run --generator=deployment/v1beta1 nginx --image=nginx --dry-run --replicas=4 -o yaml > nginx-deployment.yaml
$ kubectl get po <name> -o yaml --export > file.yml