CKA

These notes walk through CKA practice tasks: creating a ClusterRole and RoleBinding so a service account can create deployments and other workloads in a given namespace, draining a node (ignoring DaemonSets and deleting local data) before taking it offline for maintenance, backing up and restoring etcd from a snapshot, and related exam exercises.


1.

set context

alias k=kubectl

k create clusterrole deployment-clusterrole --verb=create --resource=deployments,statefulsets,daemonsets

k create serviceaccount cicd-token --namespace=app-team1

k create rolebinding rb1 --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token --namespace=app-team1

verify:

k auth can-i create deployment -n app-team1 --as system:serviceaccount:app-team1:cicd-token

output: yes
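
For reference, the imperative rolebinding command above is equivalent to this manifest (a sketch; all names are taken from the commands above):

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rb1
  namespace: app-team1
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: deployment-clusterrole
subjects:
- kind: ServiceAccount
  name: cicd-token
  namespace: app-team1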

##################################################

2.

set context

kubectl get nodes

kubectl get pod -o wide

# --delete-emptydir-data replaces the deprecated --delete-local-data on newer kubectl
kubectl drain ek8s-node-1 --ignore-daemonsets --delete-emptydir-data --force

kubectl get nodes

kubectl get pod -o wide
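
After the drain, the node should report SchedulingDisabled (illustrative output; roles, age and version will differ):

kubectl get nodes
NAME          STATUS                     ROLES    AGE   VERSION
ek8s-node-1   Ready,SchedulingDisabled   <none>   ...   ...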

#######################################################

3.

#backup
ETCDCTL_API=3 etcdctl --endpoints="https://127.0.0.1:2379" \
  --cacert=/opt/KUIN000601/ca.crt \
  --cert=/opt/KUIN000601/etcd-client.crt \
  --key=/opt/KUIN000601/etcd-client.key \
  snapshot save /etc/data/etcd-snapshot.db

#restore
ETCDCTL_API=3 etcdctl --endpoints="https://127.0.0.1:2379" \
  --cacert=/opt/KUIN000601/ca.crt \
  --cert=/opt/KUIN000601/etcd-client.crt \
  --key=/opt/KUIN000601/etcd-client.key \
  snapshot restore /var/lib/backup/etcd-snapshot-previous.db
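
Note: snapshot restore only unpacks the snapshot into a data directory on disk; it does not touch the running etcd. If the cluster must actually serve the restored data, restore into a fresh directory and point etcd at it (a sketch; /var/lib/etcd-restore is an assumed target path):

ETCDCTL_API=3 etcdctl snapshot restore /var/lib/backup/etcd-snapshot-previous.db \
  --data-dir=/var/lib/etcd-restore

# then update the etcd data dir / hostPath in /etc/kubernetes/manifests/etcd.yaml to match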

4.

vi network.yaml

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: all-port-from-namespace
  namespace: fubar
spec:
  podSelector:
    matchLabels: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: corp-net
    - podSelector: {}
    ports:
    - port: 9000

kubectl create -f network.yaml
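
To verify the policy was created with the intended selectors and port:

kubectl describe networkpolicy all-port-from-namespace -n fubar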

#################################################
5.

set context

kubectl get deployment front-end -o yaml > deploy.yaml

vi deploy.yaml

add the ports section below under the container's image:

ports:
- containerPort: 80
  name: http
  protocol: TCP

kubectl replace -f deploy.yaml --force

kubectl expose deployment front-end --name=front-end-svc --port=80 --target-port=80 --type=NodePort

kubectl get svc


kubectl get deployment
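
To check the service end to end, hit the assigned NodePort (illustrative; the node port in the 30000-32767 range is allocated by the cluster and will differ):

kubectl get svc front-end-svc
# PORT(S) shows something like 80:3XXXX/TCP
curl http://<node-ip>:<node-port>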

######################################################
6. set context

vi ingress.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ping
  namespace: ing-internal
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    kubernetes.io/ingress.class: "nginx"   # include only if the task mentions an ingress class
spec:
  rules:
  - http:                                  # omit any host: line for this task
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web
            port:
              number: 8080

kubectl create -f ingress.yaml

kubectl get ingress
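
Once the ingress controller assigns an address, the route can be tested (a sketch; the IP is a placeholder):

kubectl get ingress ping -n ing-internal
curl -kL http://<ingress-ip>/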


######################################################
7.

kubectl get deployment

kubectl scale deployment presentation --replicas=4

kubectl get deployment


kubectl get pod

#################################################

8.

vi nginx.yaml

apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc007
spec:
  containers:
  - name: nginx
    image: nginx
  nodeSelector:
    disk: spinning

kubectl create -f nginx.yaml

kubectl get pod
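
To confirm the scheduler honored the nodeSelector, check which node the pod landed on:

kubectl get pod nginx-kusc007 -o wide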

############################################

9.

kubectl get nodes | grep -i ready

kubectl describe nodes | grep -i taints | grep -v NoExecute | grep -v NoSchedule

kubectl describe nodes | grep -i taints | grep -v NoExecute | grep -v NoSchedule > /some/file/path

cat /some/file/path
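
If the task asks for the number of such nodes rather than the matching lines, count them (a variant of the same pipeline, not in the original notes):

kubectl describe nodes | grep -i taints | grep -v NoExecute | grep -v NoSchedule | wc -l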

############################################

10.

vi kucc1.yaml

apiVersion: v1
kind: Pod
metadata:
  name: kucc1
spec:
  containers:
  - name: nginx
    image: nginx
  - name: redis
    image: redis

kubectl create -f kucc1.yaml

kubectl get pod
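
Both containers should come up inside the single pod (illustrative output):

NAME    READY   STATUS    RESTARTS   AGE
kucc1   2/2     Running   0          ...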

##################################################

11.

set context

vi pv.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadOnlyMany
  hostPath:
    path: "/srv/app-config"

kubectl create -f pv.yaml

kubectl get pv

###################################################

12.

vi pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 10Mi
  storageClassName: csi-hostpath-sc

kubectl create -f pvc.yaml

vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  containers:
  - name: web-server
    image: nginx
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: pv-volume
  volumes:
  - name: pv-volume
    persistentVolumeClaim:
      claimName: pv-volume

kubectl create -f pod.yaml

kubectl get pvc

kubectl get pod

vi patch.yaml

spec:
  resources:
    requests:
      storage: 70Mi

kubectl edit pvc pv-volume --save-config

kubectl edit pvc pv-volume --record

kubectl patch pvc pv-volume --type=merge --patch "$(cat patch.yaml)" --record

kubectl describe pvc pv-volume
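
Note: expanding a PVC only works when the storage class allows it; if the resize seems stuck, check this first (should print true):

kubectl get storageclass csi-hostpath-sc -o jsonpath='{.allowVolumeExpansion}'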

######################################

13.

kubectl get pod foobar

kubectl logs foobar | grep -i "error - file-not-found"

kubectl logs foobar | grep -i "error - file-not-found" > /some/path/filename

cat /some/path/filename

########################################

14.

kubectl get pod legacy-app -o yaml > sidecar.yaml

add the sidecar container below under spec.containers:

- name: busybox
  image: busybox
  args: ['/bin/sh', '-c', 'tail -n+1 -f /var/log/legacy-app.log']   # -f keeps the sidecar running
  volumeMounts:
  - name: logs
    mountPath: /var/log

kubectl replace -f sidecar.yaml --force
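
The snippet assumes the pod already defines a volume named logs (the one legacy-app writes into). If it does not, declare one under spec as well (a sketch, assuming an emptyDir fits the task):

volumes:
- name: logs
  emptyDir: {}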


###########################################

15.

kubectl top pod -l name=overload-cpu

echo <pod_name> > /some/path/filename
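
If several pods match the label, sorting makes the top consumer obvious (assumes a kubectl recent enough to support --sort-by on top):

kubectl top pod -l name=overload-cpu --sort-by=cpu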

#############################################

16.

set context

kubectl get nodes

ssh wk8s-node-0

sudo systemctl status kubelet

journalctl -u kubelet

sudo systemctl start kubelet

sudo systemctl enable kubelet

sudo systemctl status kubelet

exit

kubectl get nodes

##############################################

17.

kubectl cordon masternode

kubectl drain masternode --ignore-daemonsets --delete-local-data --force

ssh masternode

sudo -i (or prefix each command with sudo)

kubeadm upgrade plan

apt update

apt-get install kubeadm=1.22.2-00 kubelet=1.22.2-00 kubectl=1.22.2-00

sudo kubeadm upgrade apply v1.22.2 --etcd-upgrade=false

sudo systemctl daemon-reload

sudo systemctl restart kubelet

exit from root then run

kubectl uncordon masternode

kubectl get nodes

kubectl version

kubeadm version

kubelet --version

##################################################################
