watch -n 1 <command>
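For example, to keep an eye on pods while debugging:

```bash
watch -n 1 kubectl get pods
```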
kubectl config view --flatten
- https://cluster-api.sigs.k8s.io/tasks/certs/generate-kubeconfig.html
- kubernetes/kubectl#823 (comment)
- https://kubernetes.io/docs/tasks/administer-cluster/certificates/
| Official method | Hardened method | Hardening info |
| --- | --- | --- |
| `curl -sfL https://get.k3s.io \| sh -s` | `curl -sfL https://get.k3s.io \| sh -s - --write-kubeconfig-mode 644` | k3s-io/k3s#389 (comment) |
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
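A quick sanity check that the kubeconfig is picked up (assumes the local k3s server is running):

```bash
kubectl get nodes
```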
- kubectl get pods
- kubectl logs <pod name>
- kubectl get services
- kubectl get endpoints
- kubectl rollout restart -f deployment/
- kubectl rollout restart -n default deployment <deployment name>
- kubectl exec [POD] -- [COMMAND]
- kubectl port-forward
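`kubectl port-forward` needs a target and a port mapping; a minimal sketch with hypothetical names:

```bash
# Forward local port 8080 to port 5000 of a (hypothetical) service
kubectl port-forward svc/persons-api 8080:5000
```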
netstat -tulpn | grep LISTEN
- Rename volumeMounts.mountPath value (e.g. "data" to "data2")
- kubectl apply -f <postgres file>.yaml
- kubectl exec -ti <postgres pod name> -- /bin/bash
- su postgres
- pg_resetwal /var/lib/postgresql/data
pg_resetwal: error: lock file "postmaster.pid" exists
pg_resetwal: Is a server running? If not, delete the lock file and try again.
- rm /var/lib/postgresql/data/postmaster.pid
- pg_resetwal /var/lib/postgresql/data
- exit
- Rename volumeMounts.mountPath to its former value (e.g. "data2" to "data")
- kubectl apply -f <postgres file>.yaml
- Wait and profit
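To confirm the database came back up healthy after the reset (the pod name is a placeholder):

```bash
kubectl exec -ti <postgres pod name> -- pg_isready -U postgres
```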
$ vagrant up
Bringing machine 'master' up with 'virtualbox' provider...
==> master: Checking if box 'generic/opensuse15' version '3.6.8' is up to date...
==> master: Clearing any previously set forwarded ports...
==> master: Fixed port collision for 6443 => 6443. Now on port 2200.
Vagrant cannot forward the specified ports on this VM, since they
would collide with some other application that is already listening
on these ports. The forwarded port to 30000 is already in use
on the host machine.

To fix this, modify your current project's Vagrantfile to use another
port. Example, where '1234' would be replaced by a unique host port:

  config.vm.network :forwarded_port, guest: 30000, host: 1234

Sometimes, Vagrant will attempt to auto-correct this for you. In this
case, Vagrant was unable to. This is usually because the guest machine
is in a state which doesn't allow modifying port forwarding. You could
try 'vagrant reload' (equivalent of running a halt followed by an up)
so vagrant can attempt to auto-correct this upon booting. Be warned
that any unsaved work might be lost.
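Before editing the Vagrantfile, it can help to see what is already bound to the colliding host port; for example (port taken from the error above, VM name assumed to be master):

```bash
sudo lsof -i :30000   # which host process holds the port
vagrant port master   # forwarded port mappings of a running VM
```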
$ docker ps
CONTAINER ID   IMAGE                    COMMAND                  CREATED        STATUS                  PORTS                                                                                            NAMES
70029226e353   postgis/postgis          "docker-entrypoint.s…"   11 days ago    Up Less than a second   0.0.0.0:5432->5432/tcp, :::5432->5432/tcp                                                        udaconnect_db_1
ff243bad4391   kindest/node:v1.21.1     "/usr/local/bin/entr…"   3 months ago   Up Less than a second   127.0.0.1:41557->6443/tcp                                                                        demo-control-plane
cd6891c18004   portainer/portainer-ce   "/portainer"             9 months ago   Up Less than a second   0.0.0.0:8500->8500/tcp, :::8500->8500/tcp, 8000/tcp, 0.0.0.0:9000->9000/tcp, :::9000->9000/tcp   portainer
$ docker stop ff243bad4391 && docker rm ff243bad4391
ff243bad4391
ff243bad4391
mikel@mikel-home:~/Udaconnect$ sudo netstat -tlpn
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:8500            0.0.0.0:*               LISTEN      2048/docker-proxy
tcp        0      0 0.0.0.0:30005           0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 127.0.0.53:53           0.0.0.0:*               LISTEN      913/systemd-resolve
tcp        0      0 127.0.0.1:631           0.0.0.0:*               LISTEN      1055/cupsd
tcp        0      0 0.0.0.0:5432            0.0.0.0:*               LISTEN      2075/docker-proxy
tcp        0      0 127.0.0.1:10010         0.0.0.0:*               LISTEN      49388/containerd
tcp        0      0 127.0.0.1:9050          0.0.0.0:*               LISTEN      1163/tor
tcp        0      0 0.0.0.0:32674           0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 0.0.0.0:30882           0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 0.0.0.0:902             0.0.0.0:*               LISTEN      1585/vmware-authdla
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 0.0.0.0:9000            0.0.0.0:*               LISTEN      2020/docker-proxy
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 0.0.0.0:30091           0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 127.0.0.1:6444          0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 0.0.0.0:31117           0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 0.0.0.0:30000           0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 127.0.0.1:10256         0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 127.0.0.1:10257         0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 0.0.0.0:30002           0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 127.0.0.1:10258         0.0.0.0:*               LISTEN      49369/k3s server
tcp        0      0 127.0.0.1:10259         0.0.0.0:*               LISTEN      49369/k3s server
tcp6       0      0 :::1716                 :::*                    LISTEN      12403/kdeconnectd
tcp6       0      0 :::8500                 :::*                    LISTEN      2055/docker-proxy
tcp6       0      0 ::1:631                 :::*                    LISTEN      1055/cupsd
tcp6       0      0 :::5432                 :::*                    LISTEN      2081/docker-proxy
tcp6       0      0 127.0.0.1:6942          :::*                    LISTEN      40262/java
tcp6       0      0 :::902                  :::*                    LISTEN      1585/vmware-authdla
tcp6       0      0 :::9000                 :::*                    LISTEN      2028/docker-proxy
tcp6       0      0 :::10250                :::*                    LISTEN      49369/k3s server
tcp6       0      0 :::10251                :::*                    LISTEN      49369/k3s server
tcp6       0      0 :::6443                 :::*                    LISTEN      49369/k3s server
tcp6       0      0 127.0.0.1:63342         :::*                    LISTEN      40262/java
$ /usr/local/bin/k3s-uninstall.sh # or simply k3s-uninstall.sh
$ vagrant up
$ kubectl get pods
W0213 09:51:33.996235   54980 loader.go:221] Config not found: /etc/rancher/k3s/k3s.yaml
The connection to the server localhost:8080 was refused - did you specify the right host or port?
$ vagrant reload
$ vagrant ssh
$ sudo chmod 644 /etc/rancher/k3s/k3s.yaml
$ sudo systemctl restart k3s
Edit ~/.bashrc (e.g. sudo emacs ~/.bashrc) and add:

export KUBECONFIG=~/.kube/config
openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365
| Field | File |
| --- | --- |
| client-certificate-data | cert.pem |
| client-key-data | key.pem |
- certificate-authority-data: base64-encoded string of /etc/kubernetes/ca.crt
- client-certificate-data: base64-encoded kubernetes-admin certificate
- client-key-data: base64-encoded kubernetes-admin key

This admin certificate is automatically created and managed by kubeadm.
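A minimal sketch of how those base64 strings can be produced (GNU coreutils `base64`; the paths follow these notes, adjust to your cluster):

```bash
base64 -w0 /etc/kubernetes/ca.crt   # certificate-authority-data
base64 -w0 cert.pem                 # client-certificate-data
base64 -w0 key.pem                  # client-key-data
```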
kubectl config view --flatten > $KUBECONFIG
k3s.yaml template:

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data:
    server: https://127.0.0.1:6443
  name: default
contexts:
- context:
    cluster: default
    user: default
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
  user:
    client-certificate-data: cert.pem
    client-key-data: key.pem
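To check that a hand-assembled kubeconfig actually works (assuming it was saved as k3s.yaml):

```bash
kubectl --kubeconfig k3s.yaml get nodes
```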
vagrant@master:~> sudo rm /var/lib/rancher/k3s/server/tls/client-admin.crt /var/lib/rancher/k3s/server/tls/client-admin.key
$ sudo systemctl restart k3s
$ sudo ls /var/lib/rancher/k3s/server/tls
- Install vagrant SCP plugin: $ vagrant plugin install vagrant-scp
- Vagrant SCP Guide: https://github.com/invernizzi/vagrant-scp/blob/master/README.md
- $ echo "test" > test.txt
- $ vagrant ssh
Last login: Sun Feb 13 14:01:16 2022 from 10.0.2.2
vagrant@master:~> pwd
/home/vagrant
- $ exit
- $ vagrant scp test.txt /home/vagrant
Warning: Permanently added '[127.0.0.1]:2000' (ECDSA) to the list of known hosts.
test.txt                                      100%    5     6.1KB/s   00:00
- $ vagrant ssh
vagrant@master:~> cat test.txt
test
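Copying in the other direction also works; a sketch, assuming the VM is named master:

```bash
vagrant scp master:/etc/rancher/k3s/k3s.yaml .
```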
kubectl exec -ti <pod name> -- sh
docker build . -t workshoft/persons-api:latest && docker push workshoft/persons-api:latest && kubectl rollout restart -f ../../deployment
- kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
- kubectl proxy
- http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/login
- http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/workloads?namespace=default
- kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
- Reference: https://www.itwonderlab.com/en/installating-kubernetes-dashboard/
- vmware-tanzu/kubeapps#1550
cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF
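The binding above only creates the ClusterRoleBinding; it assumes the admin-user ServiceAccount already exists. If it does not, it can be created first:

```bash
kubectl -n kubernetes-dashboard create serviceaccount admin-user
```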
https://wildwolf.name/how-to-delete-old-replicasets-in-kubernetes/
To delete all old ReplicaSets in Kubernetes, you can use the following one-liner:
- kubectl get rs -A -o wide | tail -n +2 | awk '{if ($3 + $4 + $5 == 0) print "kubectl delete rs -n "$1, $2 }' | sh
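It may be safer to preview first; dropping the final `| sh` just prints the delete commands without executing them:

```bash
kubectl get rs -A -o wide | tail -n +2 | awk '{if ($3 + $4 + $5 == 0) print "kubectl delete rs -n "$1, $2 }'
```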
kubectl port-forward pod/persons-api-857bc67f74-k6bfg 8888:5000
kubectl patch pv postgres-volume -p '{"spec":{"claimRef": null}}'

- kubernetes/kubernetes#20753
- https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/
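The linked docs also cover switching a PV's reclaim policy with the same patch mechanism; a sketch using the PV name from these notes:

```bash
kubectl patch pv postgres-volume -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
```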
kubectl -n kafka run kafka-consumer -ti --image=quay.io/strimzi/kafka:0.28.0-kafka-3.1.0 --rm=true --restart=Never -- bin/kafka-console-consumer.sh --bootstrap-server my-cluster-kafka-bootstrap:9092 --topic my-topic --from-beginning

kubectl -n kafka run kafka-producer -ti --image=quay.io/strimzi/kafka:0.28.0-kafka-3.1.0 --rm=true --restart=Never -- bin/kafka-console-producer.sh --broker-list my-cluster-kafka-bootstrap:9092 --topic my-topic
chmod go-r ~/.kube/config (see helm/helm#9115)
wget https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get-helm-3
sudo ./get-helm-3
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install kafka-bitnami bitnami/kafka
https://kafka-python.readthedocs.io/en/master/#kafkaconsumer

    from kafka import KafkaConsumer

    consumer = KafkaConsumer("sliced-bread", bootstrap_servers="kafka-bitnami:9092")
    for msg in consumer:
        print(msg)
https://kafka-python.readthedocs.io/en/master/#kafkaproducer

    from kafka import KafkaProducer

    producer = KafkaProducer(bootstrap_servers="kafka-bitnami:9092")
    producer.send("sliced-bread", b"The best thing")
    producer.flush()  # block until the message is actually delivered
https://kafka.apache.org/quickstart
$ kubectl apply -f configmap.yaml
https://stackoverflow.com/questions/43492322/vagrant-was-unable-to-mount-virtualbox-shared-folders

vagrant plugin uninstall vagrant-vbguest
vagrant plugin install vagrant-vbguest
vagrant up
kubectl create namespace monitoring
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo add stable https://charts.helm.sh/stable
helm repo update
helm install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring --kubeconfig /etc/rancher/k3s/k3s.yaml

kubectl get pods,svc --namespace=monitoring
kubectl get pods -n monitoring
kubectl port-forward service/prometheus-grafana --address 0.0.0.0 3000:80 -n monitoring
username: admin
password: prom-operator
export jaeger_version=v1.32.0
kubectl create -n observability -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.32.0/jaeger-operator.yaml
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.2/cert-manager.yaml
kubectl apply -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.32.0/jaeger-operator.yaml -n observability
kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/deploy/crds/jaegertracing.io_jaegers_crd.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/deploy/service_account.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/deploy/role.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/deploy/role_binding.yaml
kubectl create -n observability -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/deploy/operator.yaml
kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/deploy/cluster_role.yaml
kubectl create -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/deploy/cluster_role_binding.yaml
kubectl get deployments jaeger-operator -n observability
kubectl get pods,svc -n observability
kubectl apply -f https://raw.githubusercontent.com/jaegertracing/jaeger-operator/${jaeger_version}/examples/business-application-injected-sidecar.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-world   # resource and container names must be DNS-1123 compliant (no underscores)
  annotations:
    "sidecar.jaegertracing.io/inject": "true"
spec:
  selector:
    matchLabels:
      app: hello-world
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - name: hello-world
        image: workshoft/hello_world:latest
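A quick way to check whether the operator injected the jaeger-agent sidecar (deployment name taken from the manifest above):

```bash
kubectl get deployment hello-world -o jsonpath='{.spec.template.spec.containers[*].name}'
```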
[default] A Virtualbox Guest Additions installation was found but no tools to rebuild or start them.
vagrant plugin uninstall vagrant-vbguest
vagrant plugin install vagrant-vbguest --plugin-version 0.xx
In the Vagrantfile, make sure there is a line like this one: config.vm.network "forwarded_port", guest: 6443, host: 6443
for p in $(kubectl get pods | grep Terminating | awk '{print $1}'); do kubectl delete pod $p --grace-period=0 --force; done

- https://stackoverflow.com/a/56064433/1908332
- https://kubernetes.io/docs/concepts/architecture/garbage-collection/
https://stackoverflow.com/questions/52369247/namespace-stuck-as-terminating-how-i-removed-it
https://stackoverflow.com/questions/59473707/kubenetes-pod-delete-with-pattern-match-or-wildcard
kubectl port-forward prometheus-grafana-<id> 8083:3000 -n monitoring
helm install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring --kubeconfig $KUBECONFIG
kubectl --namespace monitoring get pods -l "release=prometheus"
kubectl port-forward prometheus-grafana-748c6b74b4-r28n7 8083:3000 -n monitoring
kubectl port-forward -n monitoring prometheus-prometheus-kube-prometheus-prometheus-0 8084:9090
http://prometheus-kube-prometheus-prometheus.monitoring:9090/
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.2/cert-manager.yaml -n observability
helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
mikel@mikel-home:~/observability_udacity/Project_Starter_Files-Building_a_Metrics_Dashboard$ helm upgrade -i jaeger jaegertracing/jaeger
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /home/mikel/.kube/config
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /home/mikel/.kube/config
Release "jaeger" does not exist. Installing it now.
NAME: jaeger
LAST DEPLOYED: Mon Mar 28 23:28:37 2022
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
###################################################################
### IMPORTANT: Ensure that storage is explicitly configured     ###
### Default storage options are subject to change.              ###
###                                                             ###
### IMPORTANT: The use of <component>.env: {...} is deprecated. ###
### Please use <component>.extraEnv: [] instead.                ###
###################################################################
You can log into the Jaeger Query UI here:
export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/instance=jaeger,app.kubernetes.io/component=query" -o jsonpath="{.items[0].metadata.name}")
echo http://127.0.0.1:8080/
kubectl port-forward --namespace default $POD_NAME 8080:16686
mikel@mikel-home:~/observability_udacity/Project_Starter_Files-Building_a_Metrics_Dashboard$ helm upgrade -i jaeger-operator jaegertracing/jaeger-operator
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /home/mikel/.kube/config
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /home/mikel/.kube/config
Release "jaeger-operator" does not exist. Installing it now.
manifest_sorter.go:192: info: skipping unknown hook: "crd-install"
NAME: jaeger-operator
LAST DEPLOYED: Mon Mar 28 23:32:29 2022
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
jaeger-operator is installed.
Check the jaeger-operator logs:

export POD=$(kubectl get pods -l app.kubernetes.io/instance=jaeger-operator -l app.kubernetes.io/name=jaeger-operator --namespace default --output name)
kubectl logs $POD --namespace=default