Does the configuration not take effect?
wang-xiaowu opened this issue · comments
Area
- Scheduler
- Controller
- Helm Chart
- Documents
Other components
trimaran, load-watcher, kube-scheduler
What happened?
After I deploy an nginx pod using schedulerName: trimaran, it shows no resources, but I think defaultRequests should be 1000m, right?
[root@k3s-node1 ~]# kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: test
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      schedulerName: trimaran
      containers:
      - name: nginx
        image: nginx:alpine
        imagePullPolicy: IfNotPresent
      restartPolicy: Always
EOF
[root@k3s-node1 ~]# kubectl get pod -n test nginx-d476987dd-q9wn7 -o yaml|grep resources
resources: {}
What did you expect to happen?
After deploying the nginx pod with schedulerName: trimaran, I expected defaultRequests to take effect, i.e. the pod to show a CPU request of 1000m, but resources is empty.
How can we reproduce it (as minimally and precisely as possible)?
I am currently using these config files to deploy and test trimaran:
configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: trimaran
  namespace: kube-system
data:
  k3s.yaml: |-
    apiVersion: v1
    clusters:
    - cluster:
        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyT1RFek9EY3lNVGt3SGhjTk1qTXdPREEzTURVME5qVTVXaGNOTXpNd09EQTBNRFUwTmpVNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyT1RFek9EY3lNVGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRMWpCZEtWVXFhTFI0OFdwdGg3RWp6cFBWRHV3ekJMWG9MTTJ6OEpLaFMKUkltbHZjaVZBejJWdjRPR085SlRiQzVtM3l0ZyszUFpKMmRBOGxWNldIQUxvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTVvS0ExZDlVRTllT2UyRE1aTEV0CkpPeHJwcjB3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUxRMitvK25BNkFYeG5rQ0ljVjJ4Rk9lY1pSZGpNczgKUitMelIwZ2htTXUzQWlBOWZuZ29TSkkwM2FsaGpOaEo3QmU5dTdZL1FnT0RjU2hpYjZxL1kwNEdiUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
        server: https://10.0.2.15:6443
      name: default
    contexts:
    - context:
        cluster: default
        user: default
      name: default
    current-context: default
    kind: Config
    preferences: {}
    users:
    - name: default
      user:
        client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJYURlU3llbWw4ZDR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOamt4TXpnM01qRTVNQjRYRFRJek1EZ3dOekExTkRZMU9Wb1hEVEkwTURndwpOakExTkRZMU9Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBU0c1THphQy9TRVNvU0gKK3BOWDlaR1lmVE0xV1NHRVh4V3U1S2FrMzBCY2toRFpCenFOZWZJbzNEbXhaNjFTSkxZOGMrQXVSQksxVjM1LwpqUzAzWjltalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCU29SNDdySE9CaXZPeTFmczlYL3cxdWs5cGo5akFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQXJLV2VsNERLemZLTTdXQmJMOEJ1V01LUlNRSzd6SUZ0cnAwUmFuSWc5b29DSVFDZ1gxWDR5cGR2bnMyZgpGaWRVa1VpdVBEejhsdEdabUZlUnBkS1QrY3RVaFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFMk9URXpPRGN5TVRrd0hoY05Nak13T0RBM01EVTBOalU1V2hjTk16TXdPREEwTURVME5qVTUKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFMk9URXpPRGN5TVRrd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBU0JnalNUWnhOTVJ1VURUc05Qc0VQdnM0K3lNdkVyT24vYnhSTUY4L1V3ClAva3QrMlZpN1Z6WVUzS09FYytSWGVibUJaeTlSdXJxNXdEREJkS3hPM0t2bzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVxRWVPNnh6Z1lyenN0WDdQVi84TgpicFBhWS9Zd0NnWUlLb1pJemowRUF3SURTQUF3UlFJZ0JHKzZUbjhBUWxYYUZxRFJOakpCemFUMjNhR2VqVjVNCkNMaFN6cmhSZmZvQ0lRQ0NMT25KeG9FUmpUQmV1clRFMk4zS1FsQ2M1T2xqU1BqOXdHSWNKcHZSaXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUQyL0JCN1krQnR4V3FEeFZTckIvcytjaG9ZWElyazU4U2N0UTVZcDdNZWZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFQklia3ZOb0w5SVJLaElmNmsxZjFrWmg5TXpWWklZUmZGYTdrcHFUZlFGeVNFTmtIT28xNQo4aWpjT2JGbnJWSWt0anh6NEM1RUVyVlhmbitOTFRkbjJRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
  scheduler-config.yaml: |-
    apiVersion: kubescheduler.config.k8s.io/v1beta2
    kind: KubeSchedulerConfiguration
    leaderElection:
      leaderElect: false
    clientConnection:
      kubeconfig: "/etc/rancher/k3s/k3s.yaml"
    profiles:
    - schedulerName: trimaran
      plugins:
        score:
          disabled:
          - name: NodeResourcesBalancedAllocation
          - name: NodeResourcesLeastAllocated
          enabled:
          - name: TargetLoadPacking
          - name: LoadVariationRiskBalancing
      pluginConfig:
      - name: TargetLoadPacking
        # README: https://github.com/kubernetes-sigs/scheduler-plugins/blob/release-1.24/pkg/trimaran/targetloadpacking/README.md
        args:
          defaultRequests:
            cpu: "1000m"
          defaultRequestsMultiplier: "2"
          targetUtilization: 70
          watcherAddress: http://load-watcher.kube-system.svc.cluster.local:2020
      - name: LoadVariationRiskBalancing
        # README: https://github.com/kubernetes-sigs/scheduler-plugins/blob/release-1.24/pkg/trimaran/loadvariationriskbalancing/README.md
        args:
          safeVarianceMargin: 1
          safeVarianceSensitivity: 2
          watcherAddress: http://load-watcher.kube-system.svc.cluster.local:2020
load-watcher.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: load-watcher-deployment
  namespace: kube-system
  labels:
    app: load-watcher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: load-watcher
  template:
    metadata:
      labels:
        app: load-watcher
    spec:
      serviceAccountName: admin
      containers:
      - name: load-watcher
        image: wangxiaowu950330/load-watcher:0.2.3
        imagePullPolicy: IfNotPresent
        env:
        - name: KUBE_CONFIG
          value: /etc/rancher/k3s/k3s.yaml
        ports:
        - containerPort: 2020
        volumeMounts:
        - mountPath: /etc/rancher/k3s/k3s.yaml
          name: kube-config
          subPath: k3s.yaml
          readOnly: true
      volumes:
      - name: kube-config
        configMap:
          name: trimaran
          defaultMode: 0644
---
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: load-watcher
  labels:
    app: load-watcher
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 2020
    targetPort: 2020
    protocol: TCP
  selector:
    app: load-watcher
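One way to sanity-check that load-watcher is reachable at the watcherAddress configured above is to query it from inside the cluster. A minimal sketch, assuming /watcher is the metrics path the load-watcher server exposes on port 2020:

kubectl -n kube-system run curl-test --rm -it --restart=Never --image=curlimages/curl -- \
  curl -s http://load-watcher.kube-system.svc.cluster.local:2020/watcher

A non-empty JSON response with node metrics suggests the scheduler's watcherAddress will resolve and respond.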
kube-scheduler.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    component: scheduler
    tier: control-plane
  name: trimaran
  namespace: kube-system
spec:
  selector:
    matchLabels:
      component: scheduler
      tier: control-plane
  replicas: 1
  template:
    metadata:
      labels:
        component: scheduler
        tier: control-plane
        version: second
    spec:
      serviceAccountName: admin
      hostNetwork: false
      hostPID: false
      containers:
      - name: trimaran
        command:
        - /bin/kube-scheduler
        - --leader-elect=false
        - --config=/home/scheduler-config.yaml
        - -v=9
        image: wangxiaowu950330/trimaran:1.24
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /shared
          name: shared
        - mountPath: /etc/rancher/k3s
          name: kube-config
          readOnly: true
        - mountPath: /home
          name: kube-config
          readOnly: true
      volumes:
      - name: shared
        hostPath:
          path: /tmp
          type: Directory
      - name: kube-config
        configMap:
          name: trimaran
          items:
          - key: k3s.yaml
            path: k3s.yaml
          - key: scheduler-config.yaml
            path: scheduler-config.yaml
          defaultMode: 0644
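To confirm this deployment actually picked up the profile and plugin arguments, one option is to scan the scheduler's logs after startup (illustrative; the exact log lines depend on the -v level):

kubectl -n kube-system logs deploy/trimaran | grep -iE 'trimaran|targetloadpacking|loadvariationriskbalancing'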
Anything else we need to know?
No response
Kubernetes version
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"24", GitVersion:"v1.24.16+k3s1", GitCommit:"ccae12ed8cad8da77df245da84bf3b64ab21fb89", GitTreeState:"clean", BuildDate:"2023-07-28T00:54:33Z", GoVersion:"go1.20.6", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v4.5.4
Server Version: version.Info{Major:"1", Minor:"24", GitVersion:"v1.24.16+k3s1", GitCommit:"ccae12ed8cad8da77df245da84bf3b64ab21fb89", GitTreeState:"clean", BuildDate:"2023-07-28T00:54:33Z", GoVersion:"go1.20.6", Compiler:"gc", Platform:"linux/amd64"}
Scheduler Plugins version
After I deploy an nginx pod using schedulerName: trimaran, it shows no resources, but I think defaultRequests should be 1000m, right?
If you don't specify requests in your pod spec, then it's expected to show nothing; in other words, it's a best-effort pod. This is independent of which plugin/scheduler you're running.
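For example, to have requests show up on the pod, set them explicitly in the pod template; a minimal sketch:

    spec:
      schedulerName: trimaran
      containers:
      - name: nginx
        image: nginx:alpine
        resources:
          requests:
            cpu: "1000m"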
configmap.yaml
Why do you plumb the scheduler config into a ConfigMap? May I know which document/step you're following?
If you don't specify requests in your pod spec, then it's expected to show nothing; in other words, it's a best-effort pod. This is independent of which plugin/scheduler you're running.
So, following this doc, what does defaultRequests mean?
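For what it's worth, my reading of the TargetLoadPacking README linked in the config above: defaultRequests only feeds the scheduler's internal estimate of a pod's CPU usage when scoring nodes; it never mutates the Pod object, so resources stays empty unless set explicitly. A rough sketch of the per-container estimate, paraphrased from that README (not verbatim plugin code):

# if limits.cpu is set         -> predicted = limits.cpu
# else if requests.cpu is set  -> predicted = requests.cpu * defaultRequestsMultiplier
# else (BestEffort)            -> predicted = defaultRequests.cpu   (1000m in this config)

So the nginx pod with no requests or limits is scored as if it used 1000m, but kubectl still reports resources: {}.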
Why do you plumb the scheduler config into a ConfigMap? May I know which document/step you're following?
I am following this doc. In that doc, the scheduler-config.yaml is baked into the Dockerfile; I plumb it into a ConfigMap instead so that scheduler-config.yaml can be reconfigured dynamically.
I have filled in the full install steps below.
build load-watcher image (wangxiaowu950330/load-watcher)
follow: https://github.com/paypal/load-watcher
- download source
git clone https://github.com/paypal/load-watcher.git \
  && cd load-watcher \
  && git checkout 0.2.3
- execute
docker build -t load-watcher:<version> .
docker tag load-watcher:<version> <your-docker-repo>:<version>
docker push <your-docker-repo>:<version>
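Optionally, the image can be smoke-tested locally before pushing. A sketch, assuming a kubeconfig on the host and that load-watcher reads the KUBE_CONFIG variable the same way the Deployment below sets it:

docker run --rm -p 2020:2020 \
  -e KUBE_CONFIG=/etc/rancher/k3s/k3s.yaml \
  -v /etc/rancher/k3s/k3s.yaml:/etc/rancher/k3s/k3s.yaml:ro \
  load-watcher:<version>
curl -s http://localhost:2020/watcher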
build kube-scheduler image (wangxiaowu950330/trimaran)
- download source
git clone https://github.com/kubernetes-sigs/scheduler-plugins \
&& cd scheduler-plugins \
&& git checkout release-1.24
- Makefile.tm
COMMONENVVAR=GOOS=$(shell uname -s | tr A-Z a-z) GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m)))
BUILDENVVAR=CGO_ENABLED=0

.PHONY: all
all: build
	chmod +x bin/kube-scheduler

.PHONY: build
build:
	$(COMMONENVVAR) $(BUILDENVVAR) go build -o bin/kube-scheduler cmd/scheduler/main.go

.PHONY: clean
clean:
	rm -rf ./bin
- Dockerfile
# stage 0: build the scheduler binary
FROM golang:1.17.3
WORKDIR /go/src/github.com/kubernetes-sigs/scheduler-plugins
COPY . .
RUN make --file=Makefile.tm build

# stage 1: copy only the built binary into the runtime image
FROM golang:1.17.3
COPY --from=0 /go/src/github.com/kubernetes-sigs/scheduler-plugins/bin/kube-scheduler /bin/kube-scheduler
CMD ["/bin/kube-scheduler"]
- execute
docker build -t trimaran .
docker tag trimaran:latest <your-docker-repo>:latest
docker push <your-docker-repo>:latest
deploy
serviceaccount
# For this test, we deploy with the admin ServiceAccount
kubectl apply -f - <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: admin
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
EOF
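To verify the binding took effect, kubectl's impersonation support can be used (a quick check; expected output is "yes"):

kubectl auth can-i list nodes --as=system:serviceaccount:kube-system:admin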
trimaran-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: trimaran
  namespace: kube-system
data:
  k3s.yaml: |-
    apiVersion: v1
    clusters:
    - cluster:
        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyT1RFek9EY3lNVGt3SGhjTk1qTXdPREEzTURVME5qVTVXaGNOTXpNd09EQTBNRFUwTmpVNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyT1RFek9EY3lNVGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRMWpCZEtWVXFhTFI0OFdwdGg3RWp6cFBWRHV3ekJMWG9MTTJ6OEpLaFMKUkltbHZjaVZBejJWdjRPR085SlRiQzVtM3l0ZyszUFpKMmRBOGxWNldIQUxvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVTVvS0ExZDlVRTllT2UyRE1aTEV0CkpPeHJwcjB3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQUxRMitvK25BNkFYeG5rQ0ljVjJ4Rk9lY1pSZGpNczgKUitMelIwZ2htTXUzQWlBOWZuZ29TSkkwM2FsaGpOaEo3QmU5dTdZL1FnT0RjU2hpYjZxL1kwNEdiUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
        server: https://10.0.2.15:6443
      name: default
    contexts:
    - context:
        cluster: default
        user: default
      name: default
    current-context: default
    kind: Config
    preferences: {}
    users:
    - name: default
      user:
        client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJYURlU3llbWw4ZDR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOamt4TXpnM01qRTVNQjRYRFRJek1EZ3dOekExTkRZMU9Wb1hEVEkwTURndwpOakExTkRZMU9Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBU0c1THphQy9TRVNvU0gKK3BOWDlaR1lmVE0xV1NHRVh4V3U1S2FrMzBCY2toRFpCenFOZWZJbzNEbXhaNjFTSkxZOGMrQXVSQksxVjM1LwpqUzAzWjltalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCU29SNDdySE9CaXZPeTFmczlYL3cxdWs5cGo5akFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQXJLV2VsNERLemZLTTdXQmJMOEJ1V01LUlNRSzd6SUZ0cnAwUmFuSWc5b29DSVFDZ1gxWDR5cGR2bnMyZgpGaWRVa1VpdVBEejhsdEdabUZlUnBkS1QrY3RVaFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFMk9URXpPRGN5TVRrd0hoY05Nak13T0RBM01EVTBOalU1V2hjTk16TXdPREEwTURVME5qVTUKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFMk9URXpPRGN5TVRrd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBU0JnalNUWnhOTVJ1VURUc05Qc0VQdnM0K3lNdkVyT24vYnhSTUY4L1V3ClAva3QrMlZpN1Z6WVUzS09FYytSWGVibUJaeTlSdXJxNXdEREJkS3hPM0t2bzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVxRWVPNnh6Z1lyenN0WDdQVi84TgpicFBhWS9Zd0NnWUlLb1pJemowRUF3SURTQUF3UlFJZ0JHKzZUbjhBUWxYYUZxRFJOakpCemFUMjNhR2VqVjVNCkNMaFN6cmhSZmZvQ0lRQ0NMT25KeG9FUmpUQmV1clRFMk4zS1FsQ2M1T2xqU1BqOXdHSWNKcHZSaXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUQyL0JCN1krQnR4V3FEeFZTckIvcytjaG9ZWElyazU4U2N0UTVZcDdNZWZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFQklia3ZOb0w5SVJLaElmNmsxZjFrWmg5TXpWWklZUmZGYTdrcHFUZlFGeVNFTmtIT28xNQo4aWpjT2JGbnJWSWt0anh6NEM1RUVyVlhmbitOTFRkbjJRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
  scheduler-config.yaml: |-
    apiVersion: kubescheduler.config.k8s.io/v1beta2
    kind: KubeSchedulerConfiguration
    leaderElection:
      leaderElect: false
    clientConnection:
      kubeconfig: "/etc/rancher/k3s/k3s.yaml"
    profiles:
    - schedulerName: trimaran
      plugins:
        score:
          disabled:
          - name: NodeResourcesBalancedAllocation
          - name: NodeResourcesLeastAllocated
          enabled:
          - name: TargetLoadPacking
          - name: LoadVariationRiskBalancing
      pluginConfig:
      - name: TargetLoadPacking
        # README: https://github.com/kubernetes-sigs/scheduler-plugins/blob/release-1.24/pkg/trimaran/targetloadpacking/README.md
        args:
          # CPU request assumed for containers with no requests or limits (QoS: BestEffort). Default: 1 core.
          defaultRequests:
            cpu: "1000m"
          # Multiplier applied to containers with no limits (Burstable QoS). Default: 1.5.
          defaultRequestsMultiplier: "1.5"
          # Target CPU utilization % to aim for when bin packing. Recommended to keep this about 10 below the desired value. Defaults to 40 if unspecified.
          targetUtilization: 70
          # load-watcher service
          watcherAddress: http://load-watcher.kube-system.svc.cluster.local:2020
      - name: LoadVariationRiskBalancing
        # README: https://github.com/kubernetes-sigs/scheduler-plugins/blob/release-1.24/pkg/trimaran/loadvariationriskbalancing/README.md
        args:
          # Multiplier of the standard deviation (non-negative float). Default: 1.
          safeVarianceMargin: 1
          # Root power of the standard deviation (non-negative float). Default: 1.
          safeVarianceSensitivity: 1
          # load-watcher service
          watcherAddress: http://load-watcher.kube-system.svc.cluster.local:2020
load-watcher.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: load-watcher-deployment
  namespace: kube-system
  labels:
    app: load-watcher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: load-watcher
  template:
    metadata:
      labels:
        app: load-watcher
    spec:
      serviceAccountName: admin
      containers:
      - name: load-watcher
        image: wangxiaowu950330/load-watcher:0.2.3
        imagePullPolicy: IfNotPresent
        env:
        - name: KUBE_CONFIG
          value: /etc/rancher/k3s/k3s.yaml
        ports:
        - containerPort: 2020
        volumeMounts:
        - mountPath: /etc/rancher/k3s/k3s.yaml
          name: kube-config
          subPath: k3s.yaml
          readOnly: true
      volumes:
      - name: kube-config
        configMap:
          name: trimaran
          defaultMode: 0644
---
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: load-watcher
  labels:
    app: load-watcher
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 2020
    targetPort: 2020
    protocol: TCP
  selector:
    app: load-watcher
trimaran-scheduler.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    component: scheduler
    tier: control-plane
  name: trimaran
  namespace: kube-system
spec:
  selector:
    matchLabels:
      component: scheduler
      tier: control-plane
  replicas: 1
  template:
    metadata:
      labels:
        component: scheduler
        tier: control-plane
        version: second
    spec:
      serviceAccountName: admin
      hostNetwork: false
      hostPID: false
      containers:
      - name: trimaran
        command:
        - /bin/kube-scheduler
        - --leader-elect=false
        - --config=/home/scheduler-config.yaml
        - -v=9
        image: wangxiaowu950330/trimaran:1.24
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /shared
          name: shared
        - mountPath: /etc/rancher/k3s
          name: kube-config
          readOnly: true
        - mountPath: /home
          name: kube-config
          readOnly: true
      volumes:
      - name: shared
        hostPath:
          path: /tmp
          type: Directory
      - name: kube-config
        configMap:
          name: trimaran
          items:
          - key: k3s.yaml
            path: k3s.yaml
          - key: scheduler-config.yaml
            path: scheduler-config.yaml
          defaultMode: 0644
test
deploy a pod whose schedulerName is trimaran
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: test
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      # specify trimaran as the scheduler
      schedulerName: trimaran
      containers:
      - name: nginx
        image: nginx:alpine
        imagePullPolicy: IfNotPresent
      restartPolicy: Always
EOF
verify
kubectl describe pod
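If the custom scheduler bound the pod, the Scheduled event should name trimaran rather than default-scheduler; for example (output shape is illustrative):

kubectl -n test describe pod -l app=nginx | grep -i scheduled
# Normal  Scheduled  ...  trimaran  Successfully assigned test/nginx-... to <node>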
I'm just wondering, are there any answers to this question?
The Kubernetes project currently lacks enough contributors to adequately respond to all issues.
This bot triages un-triaged issues according to the following rules:
- After 90d of inactivity, lifecycle/stale is applied
- After 30d of inactivity since lifecycle/stale was applied, lifecycle/rotten is applied
- After 30d of inactivity since lifecycle/rotten was applied, the issue is closed
You can:
- Mark this issue as fresh with /remove-lifecycle stale
- Close this issue with /close
- Offer to help out with Issue Triage
Please send feedback to sig-contributor-experience at kubernetes/community.
/lifecycle stale
The Kubernetes project currently lacks enough active contributors to adequately respond to all issues.
This bot triages un-triaged issues according to the following rules:
- After 90d of inactivity, lifecycle/stale is applied
- After 30d of inactivity since lifecycle/stale was applied, lifecycle/rotten is applied
- After 30d of inactivity since lifecycle/rotten was applied, the issue is closed
You can:
- Mark this issue as fresh with /remove-lifecycle rotten
- Close this issue with /close
- Offer to help out with Issue Triage
Please send feedback to sig-contributor-experience at kubernetes/community.
/lifecycle rotten
The Kubernetes project currently lacks enough active contributors to adequately respond to all issues and PRs.
This bot triages issues according to the following rules:
- After 90d of inactivity, lifecycle/stale is applied
- After 30d of inactivity since lifecycle/stale was applied, lifecycle/rotten is applied
- After 30d of inactivity since lifecycle/rotten was applied, the issue is closed
You can:
- Reopen this issue with /reopen
- Mark this issue as fresh with /remove-lifecycle rotten
- Offer to help out with Issue Triage
Please send feedback to sig-contributor-experience at kubernetes/community.
/close not-planned
@k8s-triage-robot: Closing this issue, marking it as "Not Planned".
Instructions for interacting with me using PR comments are available here. If you have questions or suggestions related to my behavior, please file an issue against the kubernetes/test-infra repository.