jfrog / kubenab

Kubernetes Admission Webhook to enforce pulling of Docker images from the private registry.

Docker image names with three or more path segments cause kubenab to replace the first segment instead of prepending the private registry

lyyao09 opened this issue

Describe the bug
When a Docker image name has three or more path segments, kubenab replaces the first segment with the private registry instead of prepending the registry to the image, even though REPLACE_REGISTRY_URL=false.

Relevant code: the condition (len(imageParts) < 3) || repRegUrl does not look right here (see below).

func handleContainer(container *v1.Container, dockerRegistryUrl string) bool {
	log.Println("Container Image is", container.Image)

	if !containsRegisty(whitelistedRegistries, container.Image) {
		message := fmt.Sprintf("Image is not being pulled from Private Registry: %s", container.Image)
		log.Printf(message)

		imageParts := strings.Split(container.Image, "/")
		newImage := ""

		// pre-pend new Docker Registry Domain
		repRegUrl, _ := strconv.ParseBool(replaceRegistryUrl) // we do not need to check for errors here, since we have done this already in checkArguments()
		if (len(imageParts) < 3) || repRegUrl {
			newImage = dockerRegistryUrl + "/" + container.Image
		} else {
			// This branch assumes imageParts[0] is a registry domain and
			// replaces it, even when REPLACE_REGISTRY_URL=false and the
			// first segment is just a repository path like "repo".
			imageParts[0] = dockerRegistryUrl
			newImage = strings.Join(imageParts, "/")
		}
		log.Printf("Changing image registry to: %s", newImage)

		container.Image = newImage
		return true
	} else {
		log.Printf("Image is being pulled from Private Registry: %s", container.Image)
	}
	return false
}
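
For the image name in question, strings.Split returns exactly three parts, so the length check alone cannot distinguish a registry-less name from one that starts with a registry domain. A minimal demonstration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	imageParts := strings.Split("repo/external_storage/nfs-client-provisioner:v3.1.0", "/")
	fmt.Println(len(imageParts), imageParts)
	// Output: 3 [repo external_storage nfs-client-provisioner:v3.1.0]
	// (len(imageParts) < 3) is false and repRegUrl is false, so the else
	// branch runs and overwrites imageParts[0] ("repo"), even though
	// "repo" is a repository path segment here, not a registry domain.
}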

To Reproduce
Steps to reproduce the behavior:

  1. Install kubenab with these settings, based on https://github.com/jfrog/kubenab/tree/master/deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubenab
  namespace: kube-system
  labels:
    app: kubenab
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kubenab
  template:
    metadata:
      labels:
        app: kubenab
    spec:
      priorityClassName: system-cluster-critical
      nodeSelector:
        role: master
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - kubenab
            topologyKey: kubernetes.io/hostname
      containers:
      - name: kubenab
        image: kubenab:0.3.2
        imagePullPolicy: IfNotPresent
        env:
          - name: DOCKER_REGISTRY_URL
            value: "100.2.0.115:8088"
          - name: REGISTRY_SECRET_NAME
            value: 'regsecret'
          - name: WHITELIST_NAMESPACES
            value: "kube-system,default"
          - name: WHITELIST_REGISTRIES
            value: "100.2.0.115:8088"
          - name: REPLACE_REGISTRY_URL
            value: "false"
        ports:
          - containerPort: 443
            name: https
        volumeMounts:
        - name: tls
          mountPath: /etc/admission-controller/tls
        resources: {}
      volumes:
        - name: tls
          secret:
            secretName: kubenab-certs
  2. Create a new Pod (via the Deployment below) whose image name has three or more path segments:
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  namespace: repo
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      nodeSelector:
        role: master
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: repo/external_storage/nfs-client-provisioner:v3.1.0
          imagePullPolicy: Always
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 100.2.0.5
            - name: NFS_PATH
              value: /opt/share/test
      volumes:
        - name: nfs-client-root
          nfs:
            server: 100.2.0.5
            path: /opt/share/test
  3. Check status with kubectl describe [POD]; the sketch after the event log reproduces the same mutation in isolation:
Events:
  Type     Reason          Age                  From               Message
  ----     ------          ----                 ----               -------
  Normal   Scheduled       119s                 default-scheduler  Successfully assigned repo/nfs-client-provisioner-65cff57884-d7dlq to node3
  Normal   SandboxChanged  104s (x2 over 111s)  kubelet, node3     Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          45s (x5 over 105s)   kubelet, node3     Error: ImagePullBackOff
  Normal   Pulling         33s (x4 over 112s)   kubelet, node3     Pulling image "100.2.0.115:8088/external_storage/nfs-client-provisioner:v3.1.0"
  Warning  Failed          33s (x4 over 112s)   kubelet, node3     Failed to pull image "100.2.0.115:8088/external_storage/nfs-client-provisioner:v3.1.0": rpc error: code = Unknown desc = Error response from daemon: pull access denied for 100.2.0.115:8088/external_storage/nfs-client-provisioner, repository does not exist or may require 'docker login'
  Warning  Failed          33s (x4 over 112s)   kubelet, node3     Error: ErrImagePull
  Normal   BackOff         20s (x6 over 105s)   kubelet, node3     Back-off pulling image "100.2.0.115:8088/external_storage/nfs-client-provisioner:v3.1.0"
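
The wrong image in the events above can be reproduced outside the cluster. Below is a minimal standalone sketch of the mutation branch, with the package-level flag inlined as a parameter and the settings from step 1 hard-coded; it is an illustration, not the kubenab code itself:

package main

import (
	"fmt"
	"strings"
)

// mutate reproduces only the registry-rewriting branch of handleContainer;
// replaceRegistryUrl is passed as a plain bool instead of the package-level
// environment flag.
func mutate(image, dockerRegistryUrl string, replaceRegistryUrl bool) string {
	imageParts := strings.Split(image, "/")
	if (len(imageParts) < 3) || replaceRegistryUrl {
		return dockerRegistryUrl + "/" + image
	}
	imageParts[0] = dockerRegistryUrl
	return strings.Join(imageParts, "/")
}

func main() {
	// DOCKER_REGISTRY_URL=100.2.0.115:8088, REPLACE_REGISTRY_URL=false
	fmt.Println(mutate("repo/external_storage/nfs-client-provisioner:v3.1.0", "100.2.0.115:8088", false))
	// got:  100.2.0.115:8088/external_storage/nfs-client-provisioner:v3.1.0
	// want: 100.2.0.115:8088/repo/external_storage/nfs-client-provisioner:v3.1.0
}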

Expected behavior
kubenab should prepend the private registry to the image instead of replacing its first path segment.

The result should be 100.2.0.115:8088/repo/external_storage/nfs-client-provisioner:v3.1.0
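
A possible direction for a fix (a sketch only, not a tested patch): decide whether imageParts[0] is a registry by whether it looks like a host name, the heuristic the Docker reference parser itself uses, instead of counting path segments. looksLikeRegistry below is a hypothetical helper, not part of kubenab:

import "strings"

// looksLikeRegistry reports whether the first path segment of an image
// name plausibly names a registry host: it contains a "." or ":" or is
// exactly "localhost". (Hypothetical helper, for illustration only.)
func looksLikeRegistry(firstPart string) bool {
	return strings.ContainsAny(firstPart, ".:") || firstPart == "localhost"
}

// In handleContainer, the segment-count check could then become:
//
//	if !looksLikeRegistry(imageParts[0]) || repRegUrl {
//		newImage = dockerRegistryUrl + "/" + container.Image
//	} else {
//		imageParts[0] = dockerRegistryUrl
//		newImage = strings.Join(imageParts, "/")
//	}

With this check, repo/external_storage/nfs-client-provisioner:v3.1.0 would be prepended rather than rewritten, giving the expected image above.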

Logs

kubectl logs -f -n kube-system kubenab-5ccbd84cdb-lsbdx

2019/10/15 01:04:14 AdmissionReview Namespace is: repo
2019/10/15 01:04:14 Container Image is repo/external_storage/nfs-client-provisioner:v3.1.0
2019/10/15 01:04:14 Image is not being pulled from Private Registry: repo/external_storage/nfs-client-provisioner:v3.1.0
2019/10/15 01:04:14 Changing image registry to: 100.2.0.115:8088/external_storage/nfs-client-provisioner:v3.1.0
2019/10/15 01:04:14 Serving request: /validate
2019/10/15 01:04:14 {"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1beta1","request":{"uid":"ae07f519-604e-4a51-879c-58d684eae321","kind":{"group":"","version":"v1","kind":"Pod"},"resource":{"group":"","version":"v1","resource":"pods"},"requestKind":{"group":"","version":"v1","kind":"Pod"},"requestResource":{"group":"","version":"v1","resource":"pods"},"namespace":"repo","operation":"CREATE","userInfo":{"username":"system:serviceaccount:kube-system:replicaset-controller","uid":"b7c19393-6ec2-47a2-8855-d511bfcdede0","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"]},"object":{"kind":"Pod","apiVersion":"v1","metadata":{"name":"nfs-client-provisioner-65cff57884-d7dlq","generateName":"nfs-client-provisioner-65cff57884-","namespace":"repo","uid":"046c4978-cc0c-4214-b714-e30f2bbc011f","creationTimestamp":"2019-10-15T01:04:14Z","labels":{"app":"nfs-client-provisioner","pod-template-hash":"65cff57884"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet","name":"nfs-client-provisioner-65cff57884","uid":"577716ce-c3a1-48d0-8246-006da6b68acf","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"nfs-client-root","nfs":{"server":"100.2.0.5","path":"/opt/share/test"}},{"name":"nfs-client-provisioner-token-2sgsx","secret":{"secretName":"nfs-client-provisioner-token-2sgsx","defaultMode":420}}],"containers":[{"name":"nfs-client-provisioner","image":"100.2.0.115:8088/external_storage/nfs-client-provisioner:v3.1.0","env":[{"name":"PROVISIONER_NAME","value":"fuseim.pri/ifs"},{"name":"NFS_SERVER","value":"100.2.0.5"},{"name":"NFS_PATH","value":"/opt/share/test"}],"resources":{},"volumeMounts":[{"name":"nfs-client-root","mountPath":"/persistentvolumes"},{"name":"nfs-client-provisioner-token-2sgsx","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","nodeSelector":{"role":"master"},"serviceAccountName":"nfs-client-provisioner","serviceAccount":"nfs-client-provisioner","securityContext":{},"imagePullSecrets":[{"name":"regsecret"}],"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}],"priority":0,"enableServiceLinks":true},"status":{"phase":"Pending","qosClass":"BestEffort"}},"oldObject":null,"dryRun":false,"options":{"kind":"CreateOptions","apiVersion":"meta.k8s.io/v1"}}}

Additional context
None

Versions

  • Kubernetes Cluster Version: v1.15.2
  • kubenab Version: 0.3.2

@rimusz can you please assign me to this Issue and add the bug label?