The deployed MongoDB cluster is not working
glyslxq opened this issue
I deployed it according to the documentation, but it doesn't seem to be running properly. This is my first time using MongoDB, and I'm not sure if there's anything else I need to do.
pod status
root@pu20:~# kubectl get pod -n mongodb
NAME READY STATUS RESTARTS AGE
mongodb7-0 2/2 Running 0 15m
mongodb7-1 2/2 Running 0 14m
mongodb7-2 2/2 Running 0 14m
mongodb-kubernetes-operator-6cc9b94f8-jq5q8 1/1 Running 0 30h
pod logs
root@pu20# kubectl logs mongodb7-0 -n mongodb
Defaulted container "mongod" out of: mongod, mongodb-agent, mongod-posthook (init), mongodb-agent-readinessprobe (init)
2023-09-05T09:40:53.443Z INFO versionhook/main.go:32 Running version change post-start hook
2023-09-05T09:40:53.445Z INFO versionhook/main.go:39 Waiting for agent health status...
2023-09-05T09:40:54.445Z INFO versionhook/main.go:45 Agent health status file not found, mongod will start
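For reference, the mongod container's stdout stops at the post-start hook output above; mongod itself usually writes to a log file on the logs volume rather than to stdout, so the more useful detail is typically in the mongodb-agent container. A quick way to pull it, reusing the pod and namespace from above:

# Agent logs for the first replica set member
kubectl logs mongodb7-0 -c mongodb-agent -n mongodb --tail=100

# Pod events and probe status
kubectl describe pod mongodb7-0 -n mongodb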
deploy file
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
  name: ipds-mongodb7
  namespace: mongodb
spec:
  members: 3
  type: ReplicaSet
  version: "7.0.0"
  security:
    authentication:
      modes: ["SCRAM"]
  users:
    - name: my-user
      db: admin
      passwordSecretRef: # a reference to the secret that will be used to generate the user's password
        name: mongodb7
      roles:
        - name: clusterAdmin
          db: admin
        - name: userAdminAnyDatabase
          db: admin
      scramCredentialsSecretName: my-scram
  additionalMongodConfig:
    storage.wiredTiger.engineConfig.journalCompressor: zlib

# the user credentials will be generated from this secret
# once the credentials are generated, this secret is no longer required
---
apiVersion: v1
kind: Secret
metadata:
  name: mongodb7
  namespace: mongodb
type: Opaque
stringData:
  password: sssss
Operator Information
- Operator Version: 0.8.2
- MongoDB Image used: 7.0.0
Kubernetes Cluster Information
- Kubernetes Version: 1.26.6
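For reference, once the resource reports phase: Running, one quick sanity check is to read the connection string the operator generates for the user and try connecting with it. A minimal sketch, assuming the default secret naming convention <resource-name>-<db>-<username> (which would give ipds-mongodb7-admin-my-user here) and the connectionString.standard key; both are worth verifying against your operator version:

# Assumed secret name, derived from the default naming convention
kubectl get secret ipds-mongodb7-admin-my-user -n mongodb \
  -o jsonpath='{.data.connectionString\.standard}' | base64 -d; echo

# Then connect from a pod inside the cluster, e.g.:
# mongosh "<decoded connection string>"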
Same problem here. For me, any version greater than 4.4.0 doesn't work at all, even with a clean install; every version lower than or equal to 4.4.0 works fine.
Could you please tell me more about what exactly doesn't work? What is the output of kubectl get mongodbcommunity -o yaml?
Same problem here.
I deployed the YAML, but it got stuck after "mongod will start", and I can't connect to mongod because it doesn't accept requests.
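For reference, a quick way to check whether mongod is actually accepting connections is to exec into the mongod container and ping it locally. A minimal sketch, assuming the pod is named scalablevc-mongodb-0 (as in the output below) and that the 4.4.x image still ships the legacy mongo shell:

# Ping mongod on localhost from inside its own container
kubectl exec -n scalablevc scalablevc-mongodb-0 -c mongod -- \
  mongo --quiet --eval "db.adminCommand('ping')"

# The automation agent's logs usually show why the deployment is stuck
kubectl logs -n scalablevc scalablevc-mongodb-0 -c mongodb-agent --tail=50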
My output of kubectl get mongodbcommunity -o yaml:
apiVersion: v1
items:
- apiVersion: mongodbcommunity.mongodb.com/v1
  kind: MongoDBCommunity
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"mongodbcommunity.mongodb.com/v1","kind":"MongoDBCommunity","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"scalablevc"},"name":"scalablevc-mongodb","namespace":"scalablevc"},"spec":{"additionalMongodConfig":{"storage.wiredTiger.engineConfig.journalCompressor":"zlib","systemLog.quiet":false},"members":1,"security":{"authentication":{"modes":["SCRAM"]}},"statefulSet":{"spec":{"template":{"spec":{"containers":[{"name":"mongod","resources":{"limits":{"cpu":"0.9","memory":"800Mi"},"requests":{"cpu":"0.25","memory":"500Mi"}}},{"name":"mongodb-agent","resources":{"limits":{"cpu":"0.1","memory":"200M"},"requests":{"cpu":"0.1","memory":"200M"}}}]}},"volumeClaimTemplates":[{"metadata":{"name":"data-volume"},"spec":{"resources":{"requests":{"storage":"1Gi"}}}},{"metadata":{"name":"logs-volume"},"spec":{"resources":{"requests":{"storage":"1Gi"}}}}]}},"type":"ReplicaSet","users":[{"connectionStringSecretName":"scalablevc-scalablevc-mongodb-connection-string","db":"scalablevc","name":"scalablevc","passwordSecretRef":{"name":"scalablevc-scalablevc-mongodb-initial-password"},"roles":[{"db":"scalablevc","name":"dbOwner"}],"scramCredentialsSecretName":"scalablevc-scalablevc-mongodb-scram"}],"version":"4.4.8"}}
      mongodb.com/v1.lastAppliedMongoDBVersion: 4.4.8
      mongodb.com/v1.lastSuccessfulConfiguration: '{"members":1,"type":"ReplicaSet","version":"4.4.8","arbiters":0,"security":{"authentication":{"modes":["SCRAM"],"ignoreUnknownUsers":true},"tls":{"enabled":false,"optional":false,"certificateKeySecretRef":{}}},"users":[{"name":"scalablevc","db":"scalablevc","passwordSecretRef":{"name":"scalablevc-scalablevc-mongodb-initial-password","key":""},"roles":[{"db":"scalablevc","name":"dbOwner"}],"scramCredentialsSecretName":"scalablevc-scalablevc-mongodb-scram","connectionStringSecretName":"scalablevc-scalablevc-mongodb-connection-string"}],"statefulSet":{"spec":{}},"agent":{"logLevel":"","maxLogFileDurationHours":0},"additionalMongodConfig":{}}'
    creationTimestamp: "2023-10-10T00:31:53Z"
    generation: 1
    labels:
      app.kubernetes.io/instance: scalablevc
    name: scalablevc-mongodb
    namespace: scalablevc
    resourceVersion: "643165"
    uid: 364f94ef-46d5-4eca-9c44-84d696e5422f
  spec:
    additionalMongodConfig:
      storage.wiredTiger.engineConfig.journalCompressor: zlib
      systemLog.quiet: false
    members: 1
    security:
      authentication:
        ignoreUnknownUsers: true
        modes:
        - SCRAM
    statefulSet:
      spec:
        template:
          spec:
            containers:
            - name: mongod
              resources:
                limits:
                  cpu: "0.9"
                  memory: 800Mi
                requests:
                  cpu: "0.25"
                  memory: 500Mi
            - name: mongodb-agent
              resources:
                limits:
                  cpu: "0.1"
                  memory: 200M
                requests:
                  cpu: "0.1"
                  memory: 200M
        volumeClaimTemplates:
        - metadata:
            name: data-volume
          spec:
            resources:
              requests:
                storage: 1Gi
        - metadata:
            name: logs-volume
          spec:
            resources:
              requests:
                storage: 1Gi
    type: ReplicaSet
    users:
    - connectionStringSecretName: scalablevc-scalablevc-mongodb-connection-string
      db: scalablevc
      name: scalablevc
      passwordSecretRef:
        name: scalablevc-scalablevc-mongodb-initial-password
      roles:
      - db: scalablevc
        name: dbOwner
      scramCredentialsSecretName: scalablevc-scalablevc-mongodb-scram
    version: 4.4.8
  status:
    currentMongoDBMembers: 1
    currentStatefulSetReplicas: 1
    mongoUri: mongodb://scalablevc-mongodb-0.scalablevc-mongodb-svc.scalablevc.svc.services-cluster.cron.home:27017/?replicaSet=scalablevc-mongodb
    phase: Running
    version: 4.4.8
kind: List
metadata:
  resourceVersion: ""
Could you please tell me more about what exactly doesn't work? What is the output of kubectl get mongodbcommunity -o yaml?
root@pu20:~# kubectl get mongodbcommunity.mongodbcommunity.mongodb.com ipds-mongodb7 -n mongodb -o yaml
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
  annotations:
    mongodb.com/v1.lastAppliedMongoDBVersion: 7.0.0
    mongodb.com/v1.lastSuccessfulConfiguration: '{"members":3,"type":"ReplicaSet","version":"7.0.0","arbiters":0,"security":{"authentication":{"modes":["SCRAM"],"ignoreUnknownUsers":true},"tls":{"enabled":false,"optional":false,"certificateKeySecretRef":{}}},"users":[{"name":"my-user","db":"admin","passwordSecretRef":{"name":"ipds-mongodb7","key":""},"roles":[{"db":"admin","name":"clusterAdmin"},{"db":"admin","name":"userAdminAnyDatabase"}],"scramCredentialsSecretName":"my-scram","connectionStringSecretName":"","additionalConnectionStringConfig":null}],"statefulSet":{"spec":{},"metadata":{}},"agent":{"logLevel":"","maxLogFileDurationHours":0},"additionalMongodConfig":{},"additionalConnectionStringConfig":{}}'
  creationTimestamp: "2023-09-05T03:00:21Z"
  generation: 1
  name: ipds-mongodb7
  namespace: mongodb
  resourceVersion: "95458275"
  uid: b7c0bad9-a6d4-40e7-b5a4-ace067827595
spec:
  additionalMongodConfig:
    storage.wiredTiger.engineConfig.journalCompressor: zlib
  members: 3
  security:
    authentication:
      ignoreUnknownUsers: true
      modes:
      - SCRAM
  type: ReplicaSet
  users:
  - db: admin
    name: my-user
    passwordSecretRef:
      name: ipds-mongodb7
    roles:
    - db: admin
      name: clusterAdmin
    - db: admin
      name: userAdminAnyDatabase
    scramCredentialsSecretName: my-scram
  version: 7.0.0
status:
  currentMongoDBMembers: 3
  currentStatefulSetReplicas: 3
  mongoUri: mongodb://ipds-mongodb7-0.ipds-mongodb7-svc.mongodb.svc.cluster.local:27017,ipds-mongodb7-1.ipds-mongodb7-svc.mongodb.svc.cluster.local:27017,ipds-mongodb7-2.ipds-mongodb7-svc.mongodb.svc.cluster.local:27017/?replicaSet=ipds-mongodb7
  phase: Running
  version: 7.0.0
Perhaps the cluster domain name is affecting it? cluster.local is not my Kubernetes cluster's domain name.
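If so, that would explain it: the operator builds the member hostnames and the mongoUri with cluster.local by default, and on a cluster with a different DNS domain those names may not resolve. If I recall correctly, the community operator reads a CLUSTER_DOMAIN environment variable on the operator deployment to override this; treat the variable name as an assumption and verify it against the 0.8.2 release. A minimal sketch:

# Assumed env var name; verify against your operator version before applying
kubectl set env deployment/mongodb-kubernetes-operator -n mongodb \
  CLUSTER_DOMAIN=<your-cluster-domain>

The MongoDBCommunity resource will likely need to be recreated afterwards so the replica set configuration is regenerated with the correct hostnames.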
This issue is being marked stale because it has been open for 60 days with no activity. Please comment if this issue is still affecting you. If there is no change, this issue will be closed in 30 days.
This issue was closed because it became stale and did not receive further updates. If the issue is still affecting you, please re-open it, or file a fresh Issue with updated information.