https://v1-29.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade

https://kodekloud.com/community/t/upgrading-kubeadm-to-version-not-displayed-by-madison-command/422910/2

cat /etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /
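A sketch of the control-plane upgrade flow from the linked v1.29 docs (replace 1.29.x with the exact patch release that apt-cache madison kubeadm prints, and <node-name> with the node being upgraded):

apt-get update
apt-cache madison kubeadm
apt-mark unhold kubeadm && apt-get install -y kubeadm='1.29.x-*' && apt-mark hold kubeadm
kubeadm upgrade plan
kubeadm upgrade apply v1.29.x
kubectl drain <node-name> --ignore-daemonsets
apt-mark unhold kubelet kubectl && apt-get install -y kubelet='1.29.x-*' kubectl='1.29.x-*' && apt-mark hold kubelet kubectl
systemctl daemon-reload && systemctl restart kubelet
kubectl uncordon <node-name>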


kubectl get deployments -n admin2406 --sort-by=.metadata.name \
-o=custom-columns='DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[0].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace' > /opt/admin2406_data


mysql-alpha-pvc

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-alpha-pvc
  namespace: alpha
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: slow

kubectl create deployment nginx-deploy --image=nginx:1.16 --dry-run=client -o yaml > nginx-deploy.yaml


kubectl get pod -n kube-system

kubectl describe pod etcd-bbsvibmk8m -n kube-system

export ETCDCTL_API=3
etcdctl snapshot save --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key --endpoints=127.0.0.1:2379 /root/etcd-backup.db
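To restore from that backup (a sketch; the --data-dir value is an assumption, any empty directory works, and the etcd static pod must then be pointed at it):

etcdctl snapshot restore /root/etcd-backup.db --data-dir=/var/lib/etcd-from-backup
# then edit /etc/kubernetes/manifests/etcd.yaml so the etcd-data hostPath points to /var/lib/etcd-from-backup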


kubectl run secret-1401 -n admin1401 --image busybox --dry-run=client -o yaml --command -- sleep 4800 > admin.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: secret-1401
  name: secret-1401
  namespace: admin1401
spec:
  volumes:
  - name: secret-volume
    secret:
      secretName: dotfile-secret
  containers:
  - command:
    - sleep
    - "4800"
    image: busybox
    name: secret-admin
    volumeMounts:
    - name: secret-volume
      readOnly: true
      mountPath: /etc/secret-volume

apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web-proj-268
  name: web-proj-268
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-proj-268
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web-proj-268
    spec:
      containers:
      - image: nginx:1.16
        name: nginx
        resources: {}
status: {}

kubectl set image deployment web-proj-268 nginx=nginx:1.17 --record=true

kubectl rollout history deployment web-proj-268


kubectl create deployment web-003 --image=nginx --replicas=3


kubectl run web-load-5461 --image=nginx:1.17 --labels="tier=web"
kubectl run web-load-5461 --image=nginx:1.17 --labels tier=web


https://linchpiner.github.io/k8s-multi-container-pods.html

apiVersion: v1
kind: Pod
metadata:
  name: pod-multi
spec:
  containers:
  - name: container1
    image: nginx
  - name: container2
    image: busybox
    command: ["sleep", "4800"]

kubectl run front-end-helper --image=busybox -it --rm --restart=Never -- /bin/sh -c 'echo binaries downloaded successfully' > front-end-helper-log.txt


kubectl create namespace defence

apiVersion: v1
kind: Pod
metadata:
  name: delta-pod
  namespace: defence
spec:
  containers:
  - name: delta-pod
    image: nginx:1.17
    env:
    - name: env
      value: "dev"
    - name: tier
      value: "front"

apiVersion: v1
kind: Pod
metadata:
  name: admin-pod
  namespace: default
spec:
  containers:
  - command:
    - sleep
    - "3200"
    image: busybox
    name: admin-pod
    securityContext:
      capabilities:
        add: ["SYS_TIME"]

kubectl expose rc nginx --port=80 --target-port=8000
kubectl expose pod web-pod --name=web-pod-svc --port=80
kubectl exec -it nslookup -- nslookup web-pod-svc
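The exec above assumes a helper pod named nslookup is already running; a sketch to create one (busybox:1.28, because nslookup is broken in some newer busybox tags):

kubectl run nslookup --image=busybox:1.28 --command -- sleep 3600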


kubectl get node -o jsonpath='{.items[*].status.nodeInfo.osImage}'


apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-rnd
spec:
  storageClassName: manual
  capacity:
    storage: 100Mi
  accessModes:
  - ReadWriteMany
  hostPath:
    path: "/pv/host_data-rnd"


node01 not responding

kubectl describe node node01

ssh node01

systemctl status kubelet

systemctl start kubelet


kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'

static pod:

ps -ef | grep kubelet
more /var/lib/kubelet/config.yaml | grep static
staticPodPath: /etc/kubernetes/manifests
cd /etc/kubernetes/manifests/

remove static.yaml (rm static.yaml); the kubelet stops the pod automatically

ssh node01

ps -ef | grep kubelet
more /var/lib/kubelet/config.yaml | grep static
staticPodPath: /etc/kubernetes/manifests

cd /etc/kubernetes/manifests/

vi static.yaml
create the pod
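A minimal static.yaml sketch (pod and container names are placeholders); once the file is written under staticPodPath, the kubelet starts the pod on its own:

apiVersion: v1
kind: Pod
metadata:
  name: static-pod
spec:
  containers:
  - name: nginx
    image: nginx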


kubectl -n kube-system logs kube-controller-manager-bbsvibmk8m
kubectl -n kube-system describe pod kube-controller-manager-bbsvibmk8m
kubectl -n kube-system get pod

/etc/kubernetes/manifests is the key directory: the kube-controller-manager static pod manifest lives there


taint:

kubectl describe nodes | grep -i taint
kubectl taint node bbsvibmk8m cpu=high:NoSchedule

kubectl get nodes -o json | jq '.items[] | {name:.metadata.name}'
{
  "name": "bbsvibmk8m"
}
{
  "name": "bbsvibmk8s1"
}
{
  "name": "bbsvibmk8s2"
}

kubectl get nodes -o json | jq '.items[] | {name:.metadata.name, taints:.spec.taints}'
{
  "name": "bbsvibmk8m",
  "taints": [
    {
      "effect": "NoSchedule",
      "key": "cpu",
      "value": "high"
    },
    {
      "effect": "NoSchedule",
      "key": "node-role.kubernetes.io/control-plane"
    }
  ]
}
{
  "name": "bbsvibmk8s1",
  "taints": null
}
{
  "name": "bbsvibmk8s2",
  "taints": null
}


kubectl create namespace airfusion

kubectl run nginx --image=nginx --namespace airfusion

vim netpool.yaml

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-net-pool
  namespace: airfusion
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector: {}
    ports:
    - protocol: TCP
      port: 80
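A hedged check that the policy admits TCP/80 (the pod IP is a placeholder; take it from kubectl get pod -n airfusion -o wide):

kubectl apply -f netpool.yaml
kubectl run test-np -n airfusion --rm -it --restart=Never --image=busybox:1.28 -- wget -qO- --timeout=2 http://<nginx-pod-ip>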

kubectl run audit-web-app --image nginx

vim srvc.yaml

apiVersion: v1
kind: Service
metadata:
  name: audit-web-app-service
spec:
  type: NodePort
  selector:
    run: audit-web-app
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 8080
    nodePort: 30002

taint and toleration

untaint:
kubectl taint nodes bbsvibmk8m cpu=high:NoSchedule-
node/bbsvibmk8m untainted

kubectl taint nodes bbsvibmk8s1 env_type=production:NoSchedule

apiVersion: v1
kind: Pod
metadata:
  name: prod-pod-nginx
  labels:
    run: prod-pod-nginx
spec:
  containers:
  - name: prod-pod-nginx
    image: nginx
  tolerations:
  - key: "env_type"
    operator: "Equal"
    value: "production"
    effect: "NoSchedule"


kubectl get deployment | awk '{print $1}' | tail -n +2


apiVersion: v1
kind: Pod
metadata:
  name: pod-jxc56fv
spec:
  securityContext:
    runAsUser: 1000
    runAsGroup: 2000
    fsGroup: 2000
  containers:
  - name: pod-jxc56fv
    image: redis:alpine

kubectl exec -it pod-jxc56fv -- whoami


granting access (RBAC with a client certificate)

openssl genrsa -out k8admin.key 2048

openssl req -new -key k8admin.key -out k8admin.csr -subj "/CN=k8admin/O=dev"

csr.yaml

apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: k8admin
spec:
  request: <output of: cat k8admin.csr | base64 | tr -d "\n">
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
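The request field can also be filled in straight from the shell; a sketch using the same heredoc style as the ServiceAccount example further down:

cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: k8admin
spec:
  request: $(cat k8admin.csr | base64 | tr -d "\n")
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF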

kubectl get csr

kubectl certificate approve k8admin
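After approval, the signed certificate can be extracted and wired into kubeconfig (a sketch, reusing the file names above):

kubectl get csr k8admin -o jsonpath='{.status.certificate}' | base64 -d > k8admin.crt
kubectl config set-credentials k8admin --client-certificate=k8admin.crt --client-key=k8admin.key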

rol.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: space
  name: pod-reader
rules:
- apiGroups: [""] # "" indicates the core API group
  resources: ["pods"]
  verbs: ["get", "watch", "list", "update", "delete"]

rb.yaml

apiVersion: rbac.authorization.k8s.io/v1
# This role binding allows "k8admin" to read pods in the "space" namespace.
# You need to already have a Role named "pod-reader" in that namespace.
kind: RoleBinding
metadata:
  name: read-pods
  namespace: space
subjects:
# You can specify more than one "subject"
- kind: User
  name: k8admin # "name" is case sensitive
  apiGroup: rbac.authorization.k8s.io
roleRef:
  # "roleRef" specifies the binding to a Role / ClusterRole
  kind: Role # this must be Role or ClusterRole
  name: pod-reader # this must match the name of the Role or ClusterRole you wish to bind to
  apiGroup: rbac.authorization.k8s.io

kubectl auth can-i get pod --as=k8admin -n space
kubectl auth can-i list pod --as=k8admin -n space


pv - pvc - pod

vi pv.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypvlog
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 100Mi
  accessModes:
  - ReadWriteMany
  hostPath:
    path: /pv/log/

pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-claim-log
spec:
  storageClassName: manual
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 50Mi

pod.yaml

apiVersion: v1
kind: Pod
metadata:
  name: my-nginx-pod
spec:
  volumes:
  - name: task-pv-storage
    persistentVolumeClaim:
      claimName: pv-claim-log
  containers:
  - name: task-pv-container
    image: nginx
    ports:
    - containerPort: 80
      name: "http-server"
    volumeMounts:
    - mountPath: "/log"
      name: task-pv-storage


fix node01

kubectl get node
kubectl describe node node01

ssh node01
systemctl status kubelet.service

if the error persists: journalctl -u kubelet

/var/lib/kubelet/config.yaml

check the certificate paths and fix the config file

/etc/kubernetes/pki

systemctl daemon-reload
systemctl restart kubelet


multi-container pod

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: development
spec:
  containers:
  - name: nginx
    image: nginx
  - name: redis
    image: redis

fix the pod

kubectl describe pod pod01
kubectl describe node node01 | grep -i taint
kubectl get pod nginx-pod -o yaml > pod.yaml

add under spec, at the same indentation level as containers:

tolerations:
- key: "color"
  operator: "Equal"
  value: "blue"
  effect: "NoSchedule"
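Tolerations cannot be added to a running pod, so recreate it from the edited manifest:

kubectl replace --force -f pod.yaml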

kubectl create deployment nginx-deploy --image nginx --replicas 8 --dry-run=client -o yaml


replicaset:

kubectl get pod --show-labels

run=dev-pod-nginx

apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: rs-nginx
  labels:
    app: rs-nginx
    tier: dev-pod-nginx
spec:
  replicas: 8
  selector:
    matchLabels:
      run: dev-pod-nginx
  template:
    metadata:
      labels:
        run: dev-pod-nginx
    spec:
      containers:
      - name: dev-pod-nginx
        image: nginx


Daemonset

kubectl taint node bbsvibmk8s1 env=qa:NoSchedule

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ssd-monitor
spec:
  selector:
    matchLabels:
      app: ssd-monitor
  template:
    metadata:
      labels:
        app: ssd-monitor
    spec:
      nodeSelector:
        disk: ssd
      containers:
      - name: web
        image: nginx:latest
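The nodeSelector only matches nodes carrying the disk=ssd label; a sketch (the node name is an assumption):

kubectl label node <node-name> disk=ssd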


custom-columns
kubectl get nodes -o custom-columns="NODE:.metadata.name,AVAILABLE_MEMORY:.status.allocatable.memory,AVAILABLE_CPU:.status.capacity.cpu"


init container

apiVersion: v1
kind: Pod
metadata:
  name: nginx-k8s
  labels:
    app.kubernetes.io/name: MyApp
spec:
  containers:
  - name: nginx-container
    image: nginx
    volumeMounts:
    - name: git-volume
      mountPath: "/usr/share/nginx/html"
  volumes:
  - name: git-volume
    gitRepo:
      repository: "https://github.com/jhawithu/k8s-nginx.git"
  initContainers:
  - name: git-k8s
    image: alpine/git

(note: the gitRepo volume type is deprecated; an emptyDir plus a git clone in the init container is the usual replacement)


kubectl run nginx-resolver --image=nginx
kubectl expose pod nginx-resolver --name=nginx-resolver-service --port=80 --target-port=80 --type=ClusterIP

kubectl run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-service
kubectl run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-service > /root/CKA/nginx.svc

kubectl get pod nginx-resolver -o wide
kubectl run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup <pod-ip-with-dashes>.default.pod.cluster.local > /root/CKA/nginx.pod
(take the pod IP from the previous command and replace the dots with dashes, e.g. 10-244-1-5.default.pod.cluster.local)


kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: pvviewer
EOF

kubectl create sa pvviewer

kubectl create clusterrole pvviewer-role --verb=list --resource=persistentvolumes

kubectl create clusterrolebinding pvviewer-role-binding --clusterrole=pvviewer-role --serviceaccount=default:pvviewer
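To verify the binding by impersonating the ServiceAccount:

kubectl auth can-i list persistentvolumes --as=system:serviceaccount:default:pvviewer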

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: pvviewer
  name: pvviewer
spec:
  serviceAccountName: pvviewer
  containers:
  - image: redis
    name: pvviewer

kubectl run hr-pod --image=redis:alpine --namespace=hr --labels=environment=production,tier=frontend


key: env_type, value: production, operator: Equal and effect: NoSchedule

kubectl taint node node01 env_type=production:NoSchedule

kubectl run dev-redis --image=redis:alpine

kubectl get pods -o wide


apiVersion: v1
kind: Pod
metadata:
  name: prod-redis
spec:
  containers:
  - name: prod-redis
    image: redis:alpine
  tolerations:
  - effect: NoSchedule
    key: env_type
    operator: Equal
    value: production

kubectl get pods -o wide | grep prod-redis


kubectl config set-context gce --user=cluster-admin
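To switch to and verify the updated context (a sketch):

kubectl config use-context gce
kubectl config get-contexts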