Getting started
# kubectl apply -f hello-world.yaml
apiVersion: v1
kind: Pod
metadata:
  name: hello-world
spec:
  restartPolicy: Never
  containers:
  - name: hello
    image: "ubuntu:14.04"
    command: ["/bin/echo", "hello", "world"]
Basic commands
Create vs Apply
Syntax: kubectl apply -f <source.yaml>
vs kubectl create -f <source.yaml>
Create is more specific: it throws an error if the resource already exists. Hot updates go through the edit command
kubectl create deployment redis --image=redis
kubectl create deployment <name> --image=<repository>/<image>:v0.1
Apply is more general: it creates the resource, or updates it from the yaml configuration if it already exists. Slightly riskier to use; check the yaml contents first if it is not from an official source
kubectl apply -f https://kubernetes.com/some_config.yaml
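For the declarative path, a minimal manifest sketch that could be fed to kubectl apply -f (the file name, labels, and single replica are assumptions mirroring the redis example above):
# redis-deployment.yaml (hypothetical file name)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  labels:
    app: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - name: redis
        image: redis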
Expose
Required to enable external/internal communication; creates a Service in front of a deployment
kubectl expose deployment redis --port 6379
kubectl expose deploy/<deployment> --type=NodePort --port=80
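Roughly what the first expose command creates, shown as a manifest sketch (the app=redis selector is an assumption based on the default label set by kubectl create deployment):
apiVersion: v1
kind: Service
metadata:
  name: redis
spec:
  type: ClusterIP          # default when --type is omitted
  selector:
    app: redis             # assumed label from the redis deployment
  ports:
  - port: 6379
    targetPort: 6379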
Scaling
scale a deployment to 10 replicas total
kubectl scale deployment <resource> --replicas=10
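The declarative equivalent is setting spec.replicas in the manifest and re-applying it, a sketch:
# excerpt from a Deployment manifest
spec:
  replicas: 10   # desired pod count; kubectl scale changes this same field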
Logs
kubectl logs deploy/rng
# and follow the log stream
kubectl logs deploy/worker -f
Get
The get command finds (lists) resources such as services, pods, and deployments
# list all services
kubectl get svc
# list all pods and follow updates
kubectl get pods -w
# extract clusterIP from named service and set to variable
SERV=$(kubectl get svc <service> -o go-template='{{.spec.clusterIP}}')
Attach
Attaches the console to the running process of a container in an existing pod
kubectl attach --namespace=shpod -ti shpod
Dry runs
Use to test yaml locally or against the server
## runs locally, no server validation
kubectl apply -f web.yaml --dry-run --validate=false
## goes through server-side validation and mutation, and returns the object
kubectl apply -f web.yaml --server-dry-run --validate=false -o yaml
## check what has changed between applied and current yaml file
kubectl diff -f web.yaml
Some minimal (local) dry run examples
ClusterRole
kubectl create clusterrole "my-role" --verb=get --resource=pods -o yaml --dry-run
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  name: my-role
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
ClusterRoleBinding
kubectl create clusterrolebinding my-cluster-admin --clusterrole=my-cluster-admin -o yaml --dry-run
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  name: my-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: my-cluster-admin
Configmap
kubectl create configmap my-config -o yaml --dry-run
apiVersion: v1
kind: ConfigMap
metadata:
  creationTimestamp: null
  name: my-config
Cronjob
kubectl create cronjob my-cronjob --image some-img --schedule "* * * * 1" -o yaml --dry-run
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  creationTimestamp: null
  name: my-cronjob
spec:
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: my-cronjob
    spec:
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
          - image: some-img
            name: my-cronjob
            resources: {}
          restartPolicy: OnFailure
  schedule: '* * * * 1'
status: {}
Deployment
kubectl create deployment web --image nginx -o yaml --dry-run
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: nginx
        name: nginx
        resources: {}
status: {}
Job
kubectl create job my-job --image some-img -o yaml --dry-run
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  name: my-job
spec:
  template:
    metadata:
      creationTimestamp: null
    spec:
      containers:
      - image: some-img
        name: my-job
        resources: {}
      restartPolicy: Never
status: {}
Namespace
kubectl create namespace my-namespace -o yaml --dry-run
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: my-namespace
spec: {}
status: {}
PodDisruptionBudget
kubectl create poddisruptionbudget my-budget --selector my-app --min-available 1 -o yaml --dry-run
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  creationTimestamp: null
  name: my-budget
spec:
  minAvailable: 1
  selector:
    matchExpressions:
    - key: my-app
      operator: Exists
status:
  currentHealthy: 0
  desiredHealthy: 0
  disruptionsAllowed: 0
  expectedPods: 0
PriorityClass
kubectl create priorityclass my-class -o yaml --dry-run
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  creationTimestamp: null
  name: my-class
preemptionPolicy: ""
value: 0
Quota
kubectl create quota my-quota -o yaml --dry-run
apiVersion: v1
kind: ResourceQuota
metadata:
  creationTimestamp: null
  name: my-quota
spec: {}
status: {}
Role
kubectl create role my-role --verb=get --resource=pods -o yaml --dry-run
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  creationTimestamp: null
  name: my-role
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
RoleBinding
kubectl create rolebinding my-role-binding --clusterrole=my-role -o yaml --dry-run
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  creationTimestamp: null
  name: my-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: my-role
? secret, service, service account
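Minimal hand-written sketches for those kinds (not captured dry-run output; names and values are assumptions):
apiVersion: v1
kind: Secret
metadata:
  name: my-secret
type: Opaque
stringData:                # plain-text values, base64-encoded by the API server
  password: changeme
---
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  selector:
    app: my-app
  ports:
  - port: 80
    targetPort: 8080
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: my-serviceaccount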
Rolling updates
A way to deploy progressively by controlling replica sets (groups of identical pods) and temporarily keeping both the old and the new set around
Update strategy (see the sketch below)
- maxSurge: how many extra pods may be created above the desired replica count during the update
- maxUnavailable: how many pods may be unavailable during the update
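A sketch of how the strategy is set in a Deployment manifest (the 25% values are the defaults shown in the output below, used here only for illustration):
# excerpt from a Deployment manifest
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 25%         # extra pods allowed above the desired count
      maxUnavailable: 25%   # pods that may be unavailable during the update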
Check current strategy
kubectl get deploy -o json | jq '.items[] | {name: .metadata.name} + .spec.strategy.rollingUpdate'
...
{
  "name": "redis",
  "maxSurge": "25%",
  "maxUnavailable": "25%"
}
...
After applying changes with kubectl set image, kubectl edit, or kubectl apply -f <file>:
# check status
kubectl rollout status deploy worker
# if unhealthy, get more info with describe
kubectl describe deploy worker
and use dashboards or other tools to debug
# find the revision and change cause
kubectl rollout history deploy worker
# check the annotations for clues
kubectl describe replicasets -l app=worker | grep -A3 Annotations
# undo the last rollout (not a history stack: undoing twice re-applies the change)
kubectl rollout undo deploy worker
Healthchecks
Via probes (configured per container in the pod spec)
- Liveness: dead or alive. The most important one
- Readiness: ready to serve traffic. Mostly for services
- Startup: (newer) has a slow-starting container finished starting?
Timings, thresholds with default values
periodSeconds: 10        # probe execution interval
timeoutSeconds: 1        # timeout for a single probe
successThreshold: 1
failureThreshold: 3
initialDelaySeconds: 0   # useful for slow startup processes
Handlers
HTTP, TCP, Arbitrary Execution (exec)
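A container-spec sketch showing one probe per handler type (the image, endpoint, ports, and file path are assumptions for illustration):
# excerpt from a pod spec
containers:
- name: app
  image: my-app:1.0                  # hypothetical image
  livenessProbe:
    httpGet:                         # HTTP handler: a 2xx/3xx response counts as success
      path: /healthz                 # hypothetical endpoint
      port: 8080
  readinessProbe:
    tcpSocket:                       # TCP handler: succeeds if the port accepts a connection
      port: 8080
  startupProbe:
    exec:                            # exec handler: succeeds if the command exits 0
      command: ["cat", "/tmp/started"]
    failureThreshold: 30             # allow up to 30 * 10s for startup
    periodSeconds: 10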
Checklist
before adding healthchecks
☑️ determine liveness, readiness, both?
☑️ check if existing http endpoints are useful
☑️ decide whether to add new endpoints
☑️ find a balance between checks and resource usage
☑️ figure out how often to check
☑️ use readiness probes for services with external dependencies
Simple http liveness probe
Default values:
defaultTimeout: 1
defaultFailureThreshold: 3
defaultSuccessThreshold: 1
edit and kubectl apply
<append to the existing container spec 👇>
livenessProbe:
  httpGet:
    path: /
    port: 80
  initialDelaySeconds: 30   # give the service time to start
  periodSeconds: 5          # probe every 5 seconds
Post setup checklist
✅deployments can run
✅Pods can run
kubectl run nginx --image=nginx
kubectl get deployments
kubectl get pods
✅Pods can be accessed directly
kubectl port-forward nginx-6dasaghug 8081:80
curl 127.0.0.1:8081
✅Logs can be collected
kubectl get pods
kubectl logs nginx-6db489d4b7-fl869
✅Commands run from pod
kubectl exec -it nginx-6db489d4b7-fl869 -- nginx -v
nginx version: nginx/1.19.2
✅Services can provide access
kubectl expose deployment nginx --port 80 --type NodePort
# on a worker node, using the assigned NodePort
curl -I localhost:31297
✅Nodes are healthy
kubectl describe nodes
✅Pods are healthy
kubectl describe pods
Testing
Using Apache Bench, make 1000 requests, 10 concurrent, to the ClusterIP with actual traffic (the /1 endpoint) instead of just pinging it.
# observe changes in event and pods
kubectl get event -w
kubectl get pods -w
# simulate load with Apache benchmark
ab -c 10 -n 1000 http://<ClusterIP>/1
Consider tini as the container init process so readiness checks and shutdown behave properly (signals are forwarded, no 🧟 zombie processes)
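One way to wire tini in at the container level, assuming the image ships /usr/bin/tini and a hypothetical ./my-app entrypoint (a sketch):
# excerpt from a pod spec
containers:
- name: app
  image: my-app:1.0                    # hypothetical image containing tini
  command: ["/usr/bin/tini", "--"]     # tini runs as PID 1, reaps zombies, forwards signals
  args: ["./my-app"]                   # hypothetical application entrypoint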
Installation
CentOS 7
Installing from scratch with flannel
setenforce 0
# disable selinux
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
modprobe br_netfilter
# kubernetes can manipulate iptables
echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
# to report memory properly disable swap
swapoff -a
vim /etc/fstab   # comment out the swap entry so it stays off after reboot
# install goodies
yum install -y yum-utils device-mapper-persistent-data lvm2
# add repo
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce
## clean up /etc/yum.repos.d/ if fails
# set the docker cgroup driver to systemd
sed -i '/^ExecStart/ s/$/ --exec-opt native.cgroupdriver=systemd/' /usr/lib/systemd/system/docker.service
systemctl daemon-reload
systemctl enable docker --now
systemctl status docker
docker info | grep -i driver
#install kubectl
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubectl kubeadm kubelet
systemctl enable kubelet
## ON MASTER node only
kubeadm init --pod-network-cidr=10.244.0.0/16
## if it fails, run sudo kubeadm reset
# copy and set aside join command for other nodes
-----
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.31.35.31:6443 --token 57zptd.ydndjyr7qdirxe78 \
--discovery-token-ca-cert-hash sha256:b38bfcfca52f5b8434d6a658aaaaa3c61e7b000239957e12f85125e40f8a
-----
## ON MASTER node only
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
# status: NotReady
# apply network overlay
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl get nodes
# status: Ready
## ON WORKER nodes
kubeadm join 172.31.35.31:6443 --token 57zptd.ydndjyr7qdirxe78 \
--discovery-token-ca-cert-hash sha256:b38bfcfca52f5b8434d6a658faaa112ca3c61e7b00023995aaa2f85125e40f8a
## ON MASTER node
kubectl get nodes
# status: Ready on all 3 nodes
Ubuntu
⭐️ Updated to Ubuntu Focal Fossa
All nodes:
# Get the Docker gpg key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# Add the Docker repository
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
# Get the Kubernetes gpg key
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
# Add the Kubernetes repository
cat << EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
# Update your packages
sudo apt-get update
# Install Docker, kubelet, kubeadm, and kubectl
sudo apt-get install -y docker-ce=5:19.03.9~3-0~ubuntu-focal kubelet=1.17.8-00 kubeadm=1.17.8-00 kubectl=1.17.8-00
# Hold them at the current version
sudo apt-mark hold docker-ce kubelet kubeadm kubectl
# Add the iptables rule to sysctl.conf
echo "net.bridge.bridge-nf-call-iptables=1" | sudo tee -a /etc/sysctl.conf
# Enable iptables immediately
sudo sysctl -p
Master only (run kubeadm init to initialize the control plane first, then:)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# install the calico CNI network overlay
kubectl apply -f https://docs.projectcalico.org/v3.14/manifests/calico.yaml
Workers only
kubeadm join 172.31.31.60:6443 --token jeq3mr.2psex1ssewavq5 --discovery-token-ca-cert-hash sha256:991326ed19b971b27db06ab6asaaaa42cef6954145129b18c4cf6fd57
Verify all nodes are Ready with kubectl get nodes