# kubectl completions
kubectl completion -h
# handy shell variable for generating manifests
export do="-o yaml --dry-run=client"

# Creating Resources/Objects ======================================================================================================
# IMPERATIVE paradigm - no embedded history
kubectl create -f (from FILE or STDIN) # if the resource already exists it will error
# DECLARATIVE paradigm
kubectl apply -f (from FILE or STDIN)
# kubectl run - create and run a particular image in a pod; creates deployments or jobs
# kubectl expose - creates services
# kubectl set,edit,patch - update attributes of existing objects
# kubectl replace -f deployment.yaml --save-config

# Create a deployment
kubectl create deployment nginx --image=nginx
kubectl create deployment webapp --image=nginx:1.17.8 --replicas=2

# Create a pod with a shell
kubectl run -i --tty busybox --image=busybox --restart=Never -- sh
kubectl run -i --tty mykube --image=dejanualex/kubectl:1.0 --restart=Never -- sh
# naked pod with a broken image tag
kubectl run broken --image=dejanualex/kubectl:99 --restart=Never
kubectl run ephemeral-demo --image=registry.k8s.io/pause:3.1 --restart=Never

# Create a pod (the minimum needed flag is --image)
kubectl run nginx --image=nginx
# Create a pod named basic with port 80 open to TCP
kubectl run basic --image=nginx:stable-alpine-perl --restart=OnFailure --port=80
# Create a pod using dry-run (the --image option is required)
kubectl run basic --image=nginx:stable-alpine-perl --dry-run=client -o yaml

# command vs args: command corresponds to the ENTRYPOINT instruction, args corresponds to CMD:
# give command
kubectl run borg1 --image=busybox --restart=Always --command -- /bin/sh -c "echo working... && sleep 3600"
# give args
kubectl run borg2 --image=busybox --restart=Always -- /bin/sh -c "echo working... && sleep 3600"
# kubectl run docs: https://jamesdefabia.github.io/docs/user-guide/kubectl/kubectl_run/

# ping one pod from another
kubectl -n sec1 exec -it pod2 -- ping $pod1IP

# Labels ======================================================================================================
# overwrite pod label
kubectl label po <pod-name> sidecar.istio.io/inject=false --overwrite
# create pod label on the fly
kubectl run am-i-ready --image=nginx:1.16.1-alpine --labels="id=cross-server-ready"
# set labels at creation time (inspect them later with --show-labels)
kubectl run labelled --image=nginx --labels='environment=dev,cluster=1'
# start pod with label, e.g. set sidecar.istio.io/inject to false
kubectl run istiomssql -i --tty --labels=sidecar.istio.io/inject="false" --image=mcr.microsoft.com/mssql-tools:latest
# attach tty
kubectl attach istiomssql -c istiomssql -i -t
# use kubectl debug to attach an ephemeral troubleshooting container to a running Pod
kubectl debug pods/mssql --image nicolaka/netshoot -it --target mssql -- bash

# Creating SERVICEs ======================================================================================================
# Service discovery via DNS: svc_name.namespace.svc.cluster.local
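# a quick DNS check from a throwaway pod - a minimal sketch, assuming the cloudacademy-svc Service
# in the red namespace created further below:
kubectl run dns-test --image=busybox:1.36 --rm -it --restart=Never -- nslookup cloudacademy-svc.red.svc.cluster.local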
# create a proxy server or application-level gateway between localhost and the Kubernetes API server
kubectl proxy -p 5555 # default port: 8001
# tunnel traffic: forward connections from a local port (8080) to a port on a pod (80)
# port-forward is more generic than kubectl proxy: it forwards arbitrary TCP traffic, while kubectl proxy only forwards HTTP
kubectl port-forward pod/<pod-name> 8080:80

# create service
kubectl -n red expose po basic --name=cloudacademy-svc --type="ClusterIP" --port=8080 --target-port=80
# patch service from ClusterIP to LoadBalancer
kubectl -n red patch svc cloudacademy-svc -p '{"spec":{"type":"LoadBalancer"}}'
# Expose a Pod in the red Namespace with the following configuration: Service name cloudacademy-svc, Service port 8080, Target port 80, Service type ClusterIP
kubectl expose pod basic -n red --name=cloudacademy-svc --port=8080 --target-port=80

# create a NodePort service on port 32080: expose a deployment for an app that runs on port 80
# expose assigns a random port >= 30000, so patch the Service afterwards to set a known, unused, desired port >= 30000
kubectl -n ca1 expose deployment cloudforce --name=cloudforce-svc --type="NodePort" --port=80
# get the port exposed by the service
kubectl -n ca1 get svc cloudforce-svc -ojsonpath="{.spec.ports[0].nodePort}"
# patch the service to a known port
kubectl -n ca1 patch service cloudforce-svc --type='json' --patch='[{"op": "replace", "path": "/spec/ports/0/nodePort", "value":32080}]'
# all of the above in one command
kubectl -n ca1 expose --type=NodePort deployment cloudforce --port 80 --name cloudforce-svc --overrides '{ "apiVersion": "v1","spec":{"ports": [{"port":80,"protocol":"TCP","targetPort": 80,"nodePort":32080}]}}'
# create a NodePort service
kubectl -n accounting expose deployment nginx-one --type=NodePort --name=service-lab
# spin up a pod that runs curl against a service
kubectl run client -n skynet --image=appropriate/curl -it --rm --restart=Never -- curl http://t2-svc:8080 > /home/ubuntu/svc-output.txt

## Deployments ======================================================================================================
# create deployment webserver yaml file based on the nginx image
kubectl create deploy webserver --image nginx:1.22.1 --replicas=2 --dry-run=client -o yaml | tee dep.yaml
# get the deployment strategy
kubectl get deploy -o yaml | grep -A 4 strategy
# a Deployment's revision is created when a Deployment's rollout is triggered
# check previous rollout revisions
kubectl rollout history deployment
# restart deployment
kubectl rollout restart deployment
# change the base image of the deployment, i.e. trigger a deployment rollout
kubectl set image deploy webserver nginx=nginx:1.23.1-alpine
kubectl set image deployment.v1.apps/cloudforce nginx=nginx:1.19.0-perl
# to manage the deployment history, use the annotate command to record a change-cause message
kubectl annotate deployment cloudforce kubernetes.io/change-cause="set image to nginx:1.19.0-perl" --overwrite=true

# Deployment/update strategies: RollingUpdate vs. Recreate, defined under spec.strategy in the manifest;
# RollingUpdate (rolling deployment) is the default strategy - see the sketch below
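# the strategy fields can be inspected with kubectl explain; a minimal RollingUpdate fragment
# (illustrative values, not taken from a specific manifest in these notes) looks like:
kubectl explain deployment.spec.strategy
#   spec:
#     strategy:
#       type: RollingUpdate
#       rollingUpdate:
#         maxSurge: 1
#         maxUnavailable: 0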
# check rollout status
kubectl rollout status deploy/<deployment-name>
# view object revisions
kubectl rollout history deploy
# rollback to the previous version or to a specific revision
kubectl rollout undo
kubectl rollout undo deploy --to-revision=1
# create deployment named rtro
kubectl -n cal create deployment rtro --image=busybox:1.31.1

# autoscale deployments: autoscaling creates a HorizontalPodAutoscaler
# horizontalpodautoscaler.autoscaling: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/
# set the deployment to autoscale on CPU utilisation: minimum of 2 replicas, maximum of 4, target CPU usage of 65%
kubectl autoscale deployment --cpu-percent=65 --min=2 --max=4

## Secrets ======================================================================================================
# Secret = key/value pairs of sensitive data that can be accessed by pods (encoded in base64), so describe shows only opaque data
# create secret
kubectl create secret generic mysql --from-literal=password=root
kubectl create secret generic password-secret --from-file=./pass.txt
# describe secret
kubectl -n istio-system describe secret ingress-cert-cacert
# extract ca.crt from a secret
kubectl get secrets -n istio-system ingress-cert-cacert -o json | jq -r '.data."ca.crt"' | base64 -d > ca.crt
# create ingress-cert-cacert
kubectl -n istio-system create secret generic ingress-cert-cacert --from-file=ca.crt --from-file=Publiccert.cer

# SECRETS example scenario
kubectl create secret generic mysql --from-literal=MYSQL_ROOT_PASSWORD=test
kubectl get secrets -o yaml > sqlsecret.yml
# service accounts have a token as a secret
kubectl -n test1 get sa -ojson | jq -r '.items[].secrets[].name'
# getting the service account token used for authentication: kubernetes.io/service-account-token
kubectl -n kube-system get secret default-token-gfnmr -oyaml
kubectl -n kube-system get secret default-token-gfnmr -o jsonpath={.data.token} | base64 -d
TOKEN=$(kubectl get secret default-token-zc7s7 -o json | jq -r '.data."token"' | base64 -d)
# a configmap can be created from literal values or individual files (--from-literal / --from-file)

## Scheduling and Nodes ======================================================================================================
# check node labels
kubectl describe nodes | grep -A5 -i label
# label node (node name and label key/value are placeholders)
kubectl label node node01 disktype=ssd
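# a minimal sketch of scheduling a pod onto the labelled node via nodeSelector
# (node01/disktype=ssd is the placeholder label from above):
kubectl run pinned --image=nginx --restart=Never --overrides='{"apiVersion":"v1","spec":{"nodeSelector":{"disktype":"ssd"}}}'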