set tabstop=2 softtabstop=2 shiftwidth=2
set expandtab
set number ruler
set autoindent smartindent
syntax enable
filetype plugin indent on
" Force saving files that require root permission
cnoremap w!! w !sudo tee > /dev/null %
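" usage: type :w!! when a write fails with E212 (permission denied); the buffer
" is piped to 'sudo tee %', which writes it as root (tee's stdout is discarded)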
kubectl config view # show merged kubeconfig settings
# use multiple kubeconfig files at the same time and view the merged config
export KUBECONFIG=~/.kube/config:~/.kube/kubconfig2
kubectl config view
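To persist that merged view as one self-contained file, a sketch (the target path is arbitrary):
# --flatten inlines certificates so the result is portable
KUBECONFIG=~/.kube/config:~/.kube/kubconfig2 kubectl config view --flatten > ~/.kube/merged_config
export KUBECONFIG=~/.kube/merged_config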
kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}'
kubectl config view -o jsonpath='{.users[].name}' # display the first user
kubectl config view -o jsonpath='{.users[*].name}' # get a list of users
kubectl config get-contexts # display list of contexts
kubectl config current-context # display the current-context
kubectl config use-context my-cluster-name # set the default context to my-cluster-name
kubectl config set-credentials kubeuser/foo.kubernetes.com --username=kubeuser --password=kubepassword
kubectl config set-context --current --namespace=ggckad-s2
# set a context utilizing a specific username and namespace
kubectl config set-context gce --user=cluster-admin --namespace=foo && kubectl config use-context gce
kubectl config unset users.foo # delete user foo
APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
TOKEN=$(kubectl get secret $(kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
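Note: on Kubernetes 1.24+ token Secrets are no longer auto-created for service accounts, so the TOKEN line above may return nothing; a sketch of the newer approach:
TOKEN=$(kubectl create token default) # short-lived token via the TokenRequest API
curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure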
curl -ks -X GET -H "Authorization: Bearer $TOKEN" "https://prometheus-k8s-openshift-monitoring.apps.cluster.ocp.local/api/v1/query?query=(kubelet_volume_stats_used_bytes*100)/kubelet_volume_stats_capacity_bytes" | jq -r '.data.result[] |"\(.metric.persistentvolumeclaim)=\(.value[1])%"' # double quotes around the header so $TOKEN expands
kubectl get pods -o json | jq -r '.items[] | select(.metadata.name | test("test-")).spec.containers[].image'
In development environments, to configure emptyDir storage for the image registry (images will be lost if the registry restarts):
oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}}}'
If you run this command before the Image Registry Operator has initialized its components, oc patch fails with the following error:
Error from server (NotFound): configs.imageregistry.operator.openshift.io "cluster" not found
Wait a few minutes and run the command again.
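An optional check before retrying, to see whether the operator is up:
oc get clusteroperator image-registry # wait until AVAILABLE is True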
To configure persistent storage, for example on vSphere, see the documentation.
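For reference, a sketch of the persistent variant, assuming a default StorageClass exists (an empty claim lets the operator create the image-registry-storage PVC automatically):
oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"pvc":{"claim":""}}}}'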
oc get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' | sort
oc get pods --namespace <namespace> -o jsonpath="{.items[*].spec.containers[*].image}"
oc import-image image-custom --from=docker.io/lorcopotia/image-custom:latest --confirm
Link to the official documentation.
htpasswd -c -B -b </path/to/users.htpasswd> <user_name> <password> # -c creates the file, -B hashes with bcrypt, -b reads the password from the arguments
htpasswd -c -B -b users.htpasswd ocp-admin Sup3rS3cr3t!
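To add or update another user in the same file later, drop -c, which would recreate the file from scratch (the user name here is illustrative):
htpasswd -B -b users.htpasswd ocp-viewer An0therS3cret!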
oc create secret generic htpass-secret --from-file=htpasswd=users.htpasswd --dry-run=client -o yaml -n openshift-config | oc replace -f -
oc get secret htpass-secret -o jsonpath='{.data.htpasswd}' -n openshift-config | base64 --decode
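For the secret to take effect it must be referenced from the cluster OAuth resource; a minimal sketch of the documented HTPasswd identity provider (the provider name my_htpasswd_provider is arbitrary):
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_htpasswd_provider
    mappingMethod: claim
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret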
oc login api.openshift.example.com:6443 --loglevel=6
oc create -f discovery_role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: discovery_role
rules:
- apiGroups: [""]
  resources: ["nodes", "pods", "namespaces", "services", "replicationcontrollers", "persistentvolumes", "persistentvolumeclaims", "resourcequotas", "configmaps", "serviceaccounts"]
  verbs: ["get", "list"]
- apiGroups: ["batch"]
  resources: ["jobs", "cronjobs"]
  verbs: ["get", "list"]
- apiGroups: ["apps", "extensions"]
  resources: ["replicasets", "deployments", "daemonsets", "statefulsets"]
  verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list"]
- apiGroups: ["networking.k8s.io", "extensions"]
  resources: ["ingresses"]
  verbs: ["get", "list"]
- apiGroups: ["route.openshift.io"]
  resources: ["routes"]
  verbs: ["get", "list"]
- apiGroups: ["network.openshift.io"]
  resources: ["clusternetworks"]
  verbs: ["get", "list"]
- apiGroups: ["apps.openshift.io"]
  resources: ["deploymentconfigs"]
  verbs: ["get", "list"]
- apiGroups: ["quota.openshift.io"]
  resources: ["clusterresourcequotas"]
  verbs: ["get", "list"]
oc adm policy add-cluster-role-to-user discovery_role USER # replace USER with the target user
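To verify the binding took effect (USER is the same placeholder as above):
oc auth can-i list nodes --as=USER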
To use any of the scheduler profiles in OpenShift, that is, to tell OpenShift how we want it to distribute our pods across the cluster, we use the following command:
oc edit scheduler cluster
This then lets us set one of the profiles LowNodeUtilization, HighNodeUtilization, or NoScoring, according to our preference, e.g.:
apiVersion: config.openshift.io/v1
kind: Scheduler
metadata:
  ...
  name: cluster
  resourceVersion: "601"
  selfLink: /apis/config.openshift.io/v1/schedulers/cluster
  uid: b351d6d0-d06f-4a99-a26b-87af62e79f59
spec:
  mastersSchedulable: false
  profile: HighNodeUtilization
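A quick check that the profile was persisted, sketched with jsonpath:
oc get scheduler cluster -o jsonpath='{.spec.profile}{"\n"}'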
oc patch clusterversion version --type json -p '[{"op": "remove", "path": "/spec/overrides"}]' # drop all spec.overrides entries from the ClusterVersion
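For context, a sketch of what such an override looks like before removal; unmanaged: true tells the cluster-version operator to stop reconciling that component (the component named here is illustrative):
oc patch clusterversion version --type json -p '[{"op": "add", "path": "/spec/overrides", "value": [{"kind": "Deployment", "group": "apps", "namespace": "openshift-network-operator", "name": "network-operator", "unmanaged": true}]}]'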