From 45ea7d561263b3b227512ea15995a013235b7f83 Mon Sep 17 00:00:00 2001 From: pixiake Date: Tue, 9 May 2023 14:06:19 +0800 Subject: [PATCH] fix: Add worker label according to host role Signed-off-by: pixiake --- cmd/kk/pkg/k3s/module.go | 15 --------------- cmd/kk/pkg/k3s/tasks.go | 13 +++++++++---- cmd/kk/pkg/k8e/module.go | 15 --------------- cmd/kk/pkg/k8e/tasks.go | 13 +++++++++---- cmd/kk/pkg/kubernetes/module.go | 15 --------------- cmd/kk/pkg/kubernetes/tasks.go | 13 +++++++++---- 6 files changed, 27 insertions(+), 57 deletions(-) diff --git a/cmd/kk/pkg/k3s/module.go b/cmd/kk/pkg/k3s/module.go index da73fd5b5..9520d88e8 100644 --- a/cmd/kk/pkg/k3s/module.go +++ b/cmd/kk/pkg/k3s/module.go @@ -195,20 +195,6 @@ func (i *InitClusterModule) Init() { Retry: 5, } - addWorkerLabel := &task.RemoteTask{ - Name: "AddWorkerLabel", - Desc: "Add worker label", - Hosts: i.Runtime.GetHostsByRole(common.Master), - Prepare: &prepare.PrepareCollection{ - new(common.OnlyFirstMaster), - &ClusterIsExist{Not: true}, - new(common.IsWorker), - }, - Action: new(AddWorkerLabel), - Parallel: true, - Retry: 5, - } - i.Tasks = []task.Interface{ k3sService, k3sEnv, enableK3s, copyKubeConfig, addMasterTaint, - addWorkerLabel, } } diff --git a/cmd/kk/pkg/k3s/tasks.go b/cmd/kk/pkg/k3s/tasks.go index d0ffe8f5d..c3a4742a5 100644 --- a/cmd/kk/pkg/k3s/tasks.go +++ b/cmd/kk/pkg/k3s/tasks.go @@ -374,11 +374,16 @@ type AddWorkerLabel struct { } func (a *AddWorkerLabel) Execute(runtime connector.Runtime) error { - if _, err := runtime.GetRunner().SudoCmd( - "/usr/local/bin/kubectl label nodes --selector='!node-role.kubernetes.io/worker' node-role.kubernetes.io/worker=", - true); err != nil { - return errors.Wrap(errors.WithStack(err), "add worker label failed") + for _, host := range runtime.GetAllHosts() { + if host.IsRole(common.Worker) { + if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf( + "/usr/local/bin/kubectl label --overwrite node %s node-role.kubernetes.io/worker=", + host.GetName()), true); err != nil { + return errors.Wrap(errors.WithStack(err), "add worker label failed") + } + } } + return nil } diff --git a/cmd/kk/pkg/k8e/module.go b/cmd/kk/pkg/k8e/module.go index b3394e0ce..d5184852a 100644 --- a/cmd/kk/pkg/k8e/module.go +++ b/cmd/kk/pkg/k8e/module.go @@ -182,27 +182,12 @@ func (i *InitClusterModule) Init() { Retry: 5, } - addWorkerLabel := &task.RemoteTask{ - Name: "AddWorkerLabel", - Desc: "Add worker label", - Hosts: i.Runtime.GetHostsByRole(common.Master), - Prepare: &prepare.PrepareCollection{ - new(common.OnlyFirstMaster), - &ClusterIsExist{Not: true}, - new(common.IsWorker), - }, - Action: new(AddWorkerLabel), - Parallel: true, - Retry: 5, - } - i.Tasks = []task.Interface{ k8eService, k8eEnv, enableK8e, copyKubeConfig, addMasterTaint, - addWorkerLabel, } } diff --git a/cmd/kk/pkg/k8e/tasks.go b/cmd/kk/pkg/k8e/tasks.go index ff8167b16..c1064dc4c 100644 --- a/cmd/kk/pkg/k8e/tasks.go +++ b/cmd/kk/pkg/k8e/tasks.go @@ -369,11 +369,16 @@ type AddWorkerLabel struct { } func (a *AddWorkerLabel) Execute(runtime connector.Runtime) error { - if _, err := runtime.GetRunner().SudoCmd( - "/usr/local/bin/kubectl label nodes --selector='!node-role.kubernetes.io/worker' node-role.kubernetes.io/worker=", - true); err != nil { - return errors.Wrap(errors.WithStack(err), "add worker label failed") + for _, host := range runtime.GetAllHosts() { + if host.IsRole(common.Worker) { + if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf( + "/usr/local/bin/kubectl label --overwrite node %s node-role.kubernetes.io/worker=", + host.GetName()), true); err != nil { + return errors.Wrap(errors.WithStack(err), "add worker label failed") + } + } } + return nil } diff --git a/cmd/kk/pkg/kubernetes/module.go b/cmd/kk/pkg/kubernetes/module.go index b5316d630..154708b18 100644 --- a/cmd/kk/pkg/kubernetes/module.go +++ b/cmd/kk/pkg/kubernetes/module.go @@ -188,26 +188,11 @@ func (i *InitKubernetesModule) Init() {
Retry: 5, } - addWorkerLabel := &task.RemoteTask{ - Name: "AddWorkerLabel", - Desc: "Add worker label", - Hosts: i.Runtime.GetHostsByRole(common.Master), - Prepare: &prepare.PrepareCollection{ - new(common.OnlyFirstMaster), - &ClusterIsExist{Not: true}, - new(common.IsWorker), - }, - Action: new(AddWorkerLabel), - Parallel: true, - Retry: 5, - } - i.Tasks = []task.Interface{ generateKubeadmConfig, kubeadmInit, copyKubeConfig, removeMasterTaint, - addWorkerLabel, } } diff --git a/cmd/kk/pkg/kubernetes/tasks.go b/cmd/kk/pkg/kubernetes/tasks.go index 87a852ee3..4ae9e2717 100644 --- a/cmd/kk/pkg/kubernetes/tasks.go +++ b/cmd/kk/pkg/kubernetes/tasks.go @@ -400,11 +400,16 @@ type AddWorkerLabel struct { } func (a *AddWorkerLabel) Execute(runtime connector.Runtime) error { - if _, err := runtime.GetRunner().SudoCmd( - "/usr/local/bin/kubectl label nodes --selector='!node-role.kubernetes.io/worker' node-role.kubernetes.io/worker=", - true); err != nil { - return errors.Wrap(errors.WithStack(err), "add worker label failed") + for _, host := range runtime.GetAllHosts() { + if host.IsRole(common.Worker) { + if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf( + "/usr/local/bin/kubectl label --overwrite node %s node-role.kubernetes.io/worker=", + host.GetName()), true); err != nil { + return errors.Wrap(errors.WithStack(err), "add worker label failed") + } + } } + return nil }