ceph / ceph-container

Docker files and images to run Ceph in containers

Ceph-mon CrashLoopBackOff

Alt-Shivam opened this issue

Is this a bug report or feature request?

  • Bug Report

Bug Report

What happened: the ceph-mon-default pod crashes with a CrashLoopBackOff error.

What you expected to happen: the ceph-mon pod should run in a healthy state.

How to reproduce it (minimal and precise):

Create a 4-node Kubernetes cluster (1 master and 3 workers) and follow this doc to deploy Ceph using Helm; a rough sketch of the steps is shown below.
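
The kind-worker node names in the log suggest a kind cluster; the cluster config file, chart path, and release name below are illustrative assumptions, not taken from the linked doc:

    # create a 4-node kind cluster: 1 control-plane node and 3 workers
    # (kind-4node.yaml is a hypothetical config listing those roles)
    kind create cluster --config kind-4node.yaml
    # deploy the Ceph chart into its own namespace
    kubectl create namespace ceph
    helm install ceph ./ceph --namespace ceph
    # the mon pod then enters CrashLoopBackOff
    kubectl --namespace ceph get pods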

Environment:

  • OS (e.g. from /etc/os-release): Ubuntu 20.04.4 LTS (Focal Fossa)
  • Kernel (e.g. uname -a): Linux ubuntu 5.4.0-91-generic
  • Docker version (e.g. docker version): 20.10.7
  • Ceph version (e.g. ceph -v):

Also, here are the logs from the failing mon pod:

  • export LC_ALL=C
  • LC_ALL=C
  • : 1
  • : /etc/ceph/ceph.mon.keyring
  • : /etc/ceph/ceph.client.admin.keyring
  • : /var/lib/ceph/bootstrap-mds/ceph.keyring
  • : /var/lib/ceph/bootstrap-osd/ceph.keyring
  • : /etc/ceph/ceph.conf
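    # (the ':' no-op lines above are most likely traces of ${VAR:=default}
    # parameter defaulting in the start script; they just pin the keyring
    # and config paths used below)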
  • [[ ! -e /etc/ceph/ceph.conf.template ]]
    ++ mon_host_from_k8s_ep ceph ceph-mon-discovery
    ++ local ns=ceph
    ++ local ep=ceph-mon-discovery
    ++ '[' -z ceph ']'
    ++ '[' -z ceph-mon-discovery ']'
    ++ kubectl get endpoints -n ceph ceph-mon-discovery -o 'go-template=
    {{- $sep := "" }}
    {{- range $_,$s := .subsets }}
    {{- $v2port := 0 }}
    {{- $v1port := 0 }}
    {{- range $_,$port := index $s "ports" }}
    {{- if (eq $port.name "mon-msgr2") }}
    {{- $v2port = $port.port }}
    {{- else if (eq $port.name "mon") }}
    {{- $v1port = $port.port }}
    {{- end }}
    {{- end }}
    {{- range $_,$address := index $s "addresses" }}
    {{- $v2endpoint := printf "v2:%s:%d/0" $address.ip $v2port }}
    {{- $v1endpoint := printf "v1:%s:%d/0" $address.ip $v1port }}
    {{- if (and $v2port $v1port) }}
    {{- printf "%s[%s,%s]" $sep $v2endpoint $v1endpoint }}
    {{- $sep = "," }}
    {{- else if $v2port }}
    {{- printf "%s[%s]" $sep $v2endpoint }}
    {{- $sep = "," }}
    {{- else if $v1port }}
    {{- printf "%s[%s]" $sep $v1endpoint }}
    {{- $sep = "," }}
    {{- end }}
    {{- end }}
    {{- end }}'
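    # the go-template above walks the ceph-mon-discovery Endpoints object and
    # emits a v2/v1 address pair per endpoint; its non-empty result below
    # shows that mon endpoint discovery itself succeeds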
  • ENDPOINT='[v2:172.18.0.5:3300/0,v1:172.18.0.5:6789/0]'
  • [[ -z [v2:172.18.0.5:3300/0,v1:172.18.0.5:6789/0] ]]
  • /bin/sh -c -e 'cat /etc/ceph/ceph.conf.template | sed '''s#mon_host.*#mon_host = [v2:172.18.0.5:3300/0,v1:172.18.0.5:6789/0]#g''' | tee /etc/ceph/ceph.conf'
    [global]
    cephx = true
    cephx_cluster_require_signatures = true
    cephx_require_signatures = false
    cephx_service_require_signatures = false
    debug_ms = 0/0
    fsid = ${CEPH_FS_ID}
    log_file = /dev/stdout
    mon_addr = :6789
    mon_cluster_log_file = /dev/stdout
    mon_data_avail_warn = 15
    mon_host = [v2:172.18.0.5:3300/0,v1:172.18.0.5:6789/0]
    mon_osd_down_out_interval = 1800
    mon_osd_down_out_subtree_limit = root
    mon_osd_min_in_ratio = 0
    mon_osd_min_up_ratio = 0
    objecter_inflight_op_bytes = 1073741824
    objecter_inflight_ops = 10240
    osd_pool_default_size = 1
    [osd]
    cluster_network = 172.17.0.1/16
    filestore_max_sync_interval = 10
    filestore_merge_threshold = -10
    filestore_split_multiple = 12
    ms_bind_port_max = 7100
    ms_bind_port_min = 6800
    osd_crush_chooseleaf_type = 0
    osd_deep_scrub_stride = 1048576
    osd_journal_size = 10240
    osd_max_object_name_len = 256
    osd_mkfs_options_xfs = -f -i size=2048
    osd_mkfs_type = xfs
    osd_mount_options_xfs = rw,noatime,largeio,inode64,swalloc,logbufs=8,logbsize=256k,allocsize=4M
    osd_pg_max_concurrent_snap_trims = 1
    osd_recovery_max_active = 1
    osd_recovery_op_priority = 1
    osd_scrub_begin_hour = 22
    osd_scrub_chunk_max = 4
    osd_scrub_chunk_min = 1
    osd_scrub_during_recovery = false
    osd_scrub_end_hour = 4
    osd_scrub_load_threshold = 10
    osd_scrub_priority = 1
    osd_scrub_sleep = 0.1
    osd_snap_trim_priority = 1
    osd_snap_trim_sleep = 0.1
    public_network = 172.17.0.1/16
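    # note that the sed only rewrote mon_host: fsid above is still the
    # literal, unexpanded placeholder ${CEPH_FS_ID}, which triggers every
    # parse error that follows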
  • [[ -z 172.17.0.1/16 ]]
  • [[ -z 172.18.0.5 ]]
  • [[ 1 -eq 0 ]]
  • MON_NAME=kind-worker3
  • MON_DATA_DIR=/var/lib/ceph/mon/ceph-kind-worker3
  • MONMAP=/etc/ceph/monmap-ceph
  • /bin/sh -c 'mkdir -p "/var/lib/ceph/mon/ceph-kind-worker3"'
  • get_mon_config
    ++ ceph-conf --lookup fsid -c /etc/ceph/ceph.conf
    parse error setting 'fsid' to '${CEPH_FS_ID}'
  • local 'fsid=$CEPH_FS_ID}'
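    # because ceph-conf could not parse the placeholder, the fsid shell
    # variable is left holding raw template text instead of a UUID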
  • timeout=10
  • MONMAP_ADD=
  • [[ -z '' ]]
  • [[ 10 -gt 0 ]]
  • [[ 1 -eq 0 ]]
    ++ kubectl get pods --namespace=ceph -l application=ceph -l component=mon -o template '--template={{range .items}}{{if .status.podIP}}--addv {{.spec.nodeName}} [v1:{{.status.podIP}}:6789,v2:{{.status.podIP}}:3300] {{end}} {{end}}'
  • MONMAP_ADD='--addv kind-worker3 [v1:172.18.0.5:6789,v2:172.18.0.5:3300] '
  • (( timeout-- ))
  • sleep 1
  • [[ -z --addv\ kind-worker3\ [v1:172.18.0.5:6789,v2:172.18.0.5:3300] ]]
  • [[ -z --addv\ kind-worker3\ [v1:172.18.0.5:6789,v2:172.18.0.5:3300] ]]
  • monmaptool --create --addv kind-worker3 '[v1:172.18.0.5:6789,v2:172.18.0.5:3300]' --fsid '$CEPH_FS_ID}' /etc/ceph/monmap-ceph --clobber
    parse error setting 'fsid' to '$CEPH_FS_ID}'

monmaptool: monmap file /etc/ceph/monmap-ceph
monmaptool: generated fsid 496c0b48-c74d-4d7f-a7ef-7ea561f2a1c0
monmaptool: writing epoch 0 to /etc/ceph/monmap-ceph (1 monitors)
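# monmaptool rejected the garbled --fsid value, so it generated a random
# fsid for /etc/ceph/monmap-ceph instead of the cluster's configured one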

  • '[' '!' -e /var/lib/ceph/mon/ceph-kind-worker3/keyring ']'
  • '[' '!' -e /etc/ceph/ceph.mon.keyring.seed ']'
  • cp -vf /etc/ceph/ceph.mon.keyring.seed /etc/ceph/ceph.mon.keyring
    '/etc/ceph/ceph.mon.keyring.seed' -> '/etc/ceph/ceph.mon.keyring'
  • '[' '!' -e /etc/ceph/monmap-ceph ']'
  • for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}
  • ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
    importing contents of /var/lib/ceph/bootstrap-osd/ceph.keyring into /etc/ceph/ceph.mon.keyring
  • for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}
  • ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring
    importing contents of /var/lib/ceph/bootstrap-mds/ceph.keyring into /etc/ceph/ceph.mon.keyring
  • for KEYRING in ${OSD_BOOTSTRAP_KEYRING} ${MDS_BOOTSTRAP_KEYRING} ${ADMIN_KEYRING}
  • ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
    importing contents of /etc/ceph/ceph.client.admin.keyring into /etc/ceph/ceph.mon.keyring
  • ceph-mon --setuser ceph --setgroup ceph --cluster ceph --mkfs -i kind-worker3 --inject-monmap /etc/ceph/monmap-ceph --keyring /etc/ceph/ceph.mon.keyring --mon-data /var/lib/ceph/mon/ceph-kind-worker3
    parse error setting 'fsid' to '${CEPH_FS_ID}'
    ignoring --setuser ceph since I am not root
    ignoring --setgroup ceph since I am not root
    2022-03-11T12:18:28.012+0000 7fe6a7464540 1 initial generated monmap:
    epoch 0
    fsid 00000000-0000-0000-0000-000000000000
    last_changed 2022-03-11T12:18:28.014906+0000
    created 2022-03-11T12:18:28.014906+0000
    min_mon_release 0 (unknown)
    0: [v2:172.18.0.5:3300/0,v1:172.18.0.5:6789/0] mon.noname-a

2022-03-11T12:18:28.012+0000 7fe6a7464540 0 monmap addrs are v2:172.18.0.5:3300/0,v1:172.18.0.5:6789/0, checking if any are local
2022-03-11T12:18:28.012+0000 7fe6a7464540 0 have local addr v2:172.18.0.5:3300/0
2022-03-11T12:18:28.012+0000 7fe6a7464540 0 ceph-mon: mon.noname-a v2:172.18.0.5:3300/0 is local, renaming to mon.kind-worker3
2022-03-11T12:18:28.012+0000 7fe6a7464540 -1 ceph-mon: generated monmap has no fsid; use '--fsid <uuid>'
2022-03-11T12:18:28.012+0000 7fe6a7464540 -1 ceph-mon: generated monmap has no fsid; use '--fsid <uuid>'
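
From the trace, the failure reduces to the ${CEPH_FS_ID} placeholder in /etc/ceph/ceph.conf.template never being substituted: only mon_host is rewritten, so ceph-conf and monmaptool both fail to parse fsid, and ceph-mon --mkfs aborts with a zeroed monmap fsid. A minimal sketch of a workaround, assuming CEPH_FS_ID is exported in the pod environment (the sed call mirrors the script's existing mon_host substitution and is illustrative, not the chart's actual code):

    # substitute the fsid into the generated config before the mon starts
    if [ -n "${CEPH_FS_ID}" ]; then
      sed -i "s#^fsid = .*#fsid = ${CEPH_FS_ID}#" /etc/ceph/ceph.conf
    fi
    # this lookup should now print the UUID instead of a parse error
    ceph-conf --lookup fsid -c /etc/ceph/ceph.conf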