From 7b2316c47887008e61f04dcf3b5abd59ea9a216c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Dec 2023 16:11:09 +0000 Subject: [PATCH 01/24] Bump actions/setup-go from 3 to 5 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3 to 5. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v3...v5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/kubernetes-auto-support.yaml | 2 +- .github/workflows/release.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5d480c5fa..0d3b33c73 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,7 @@ jobs: steps: - name: Set up Go 1.19 - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version: 1.19 id: go diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 04b4448ca..f288cd2ee 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -18,7 +18,7 @@ jobs: - test steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v5 with: go-version: 1.19 - name: golangci-lint diff --git a/.github/workflows/kubernetes-auto-support.yaml b/.github/workflows/kubernetes-auto-support.yaml index 766365fe8..8a9728b90 100644 --- a/.github/workflows/kubernetes-auto-support.yaml +++ b/.github/workflows/kubernetes-auto-support.yaml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v3 - name: Set up Go 1.19 - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version: 1.19 id: go diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 19ac80a0d..978ed0b97 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,7 +17,7 @@ jobs: with: fetch-depth: 0 - name: Install go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version: '^1.19' - name: generate release artifacts From eb92b639d76737c6f5cba1dd34416a1bb30065e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 16:13:47 +0000 Subject: [PATCH 02/24] build(deps): bump actions/checkout from 3 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/build-multiarch.yaml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/gen-repository-iso.yaml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/kubernetes-auto-support.yaml | 2 +- .github/workflows/release.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-multiarch.yaml b/.github/workflows/build-multiarch.yaml index 2dea5ee67..05df9fb79 100644 --- a/.github/workflows/build-multiarch.yaml +++ b/.github/workflows/build-multiarch.yaml @@ -16,7 +16,7 @@ jobs: run: echo "RELEASE_TAG=${GITHUB_REF:10}" >> $GITHUB_ENV - name: Check out code into the Go module directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5d480c5fa..312cfc389 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,7 +26,7 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 diff --git a/.github/workflows/gen-repository-iso.yaml b/.github/workflows/gen-repository-iso.yaml index 70e819500..3b5919b36 100644 --- a/.github/workflows/gen-repository-iso.yaml +++ b/.github/workflows/gen-repository-iso.yaml @@ -32,7 +32,7 @@ jobs: dockerfile: dockerfile.ubuntu2204 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 04b4448ca..4672c956c 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -17,7 +17,7 @@ jobs: - "" - test steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v3 with: go-version: 1.19 diff --git a/.github/workflows/kubernetes-auto-support.yaml b/.github/workflows/kubernetes-auto-support.yaml index 6afafacbc..f4fac99c6 100644 --- a/.github/workflows/kubernetes-auto-support.yaml +++ b/.github/workflows/kubernetes-auto-support.yaml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest if: github.repository == 'kubesphere/kubekey' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go 1.19 uses: actions/setup-go@v3 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 19ac80a0d..0d1b8ecf3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -13,7 +13,7 @@ jobs: - name: Set env run: echo "RELEASE_TAG=${GITHUB_REF:10}" >> $GITHUB_ENV - name: checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install go From 8045ca723a4a7b1dbac947ebb9fd4ed86899aee0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 16:13:51 +0000 Subject: [PATCH 03/24] build(deps): bump lewagon/wait-on-check-action from 1.3.1 to 1.3.3 Bumps [lewagon/wait-on-check-action](https://github.com/lewagon/wait-on-check-action) from 1.3.1 to 1.3.3. - [Release notes](https://github.com/lewagon/wait-on-check-action/releases) - [Commits](https://github.com/lewagon/wait-on-check-action/compare/v1.3.1...v1.3.3) --- updated-dependencies: - dependency-name: lewagon/wait-on-check-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/gen-repository-iso.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gen-repository-iso.yaml b/.github/workflows/gen-repository-iso.yaml index 70e819500..d5dd6601b 100644 --- a/.github/workflows/gen-repository-iso.yaml +++ b/.github/workflows/gen-repository-iso.yaml @@ -56,7 +56,7 @@ jobs: sha256sum *.iso > ${{ matrix.name }}.iso.sha256sum.txt - name: Wait for release workflow to finish - uses: lewagon/wait-on-check-action@v1.3.1 + uses: lewagon/wait-on-check-action@v1.3.3 with: ref: ${{ github.ref }} check-name: 'create draft release' From 231ea4cb229e822784bac7260fa7127b0c3d86e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jan 2024 17:00:22 +0000 Subject: [PATCH 04/24] build(deps): bump peter-evans/create-pull-request from 5 to 6 Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 5 to 6. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/v5...v6) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/kubernetes-auto-support.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/kubernetes-auto-support.yaml b/.github/workflows/kubernetes-auto-support.yaml index 6afafacbc..9ad795ff4 100644 --- a/.github/workflows/kubernetes-auto-support.yaml +++ b/.github/workflows/kubernetes-auto-support.yaml @@ -56,7 +56,7 @@ jobs: if: steps.get_new_version.outputs.UPDATE_VERSION == 'true' - name: Create Pull Request - uses: peter-evans/create-pull-request@v5 + uses: peter-evans/create-pull-request@v6 with: commit-message: Add new kubernetes version committer: GitHub From ac524bc7fb6dd9de26771d92edff8e439949b929 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 16:57:34 +0000 Subject: [PATCH 05/24] build(deps): bump golangci/golangci-lint-action from 3.6.0 to 4.0.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.6.0 to 4.0.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3.6.0...v4.0.0) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 04b4448ca..91d4162df 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -22,7 +22,7 @@ jobs: with: go-version: 1.19 - name: golangci-lint - uses: golangci/golangci-lint-action@v3.6.0 + uses: golangci/golangci-lint-action@v4.0.0 with: version: v1.50.1 working-directory: ${{matrix.working-directory}} From 1ef39dde1cdc1e0ad650b07664a807ccb84bf00e Mon Sep 17 00:00:00 2001 From: wenwutang <1218040628@qq.com> Date: Thu, 22 Feb 2024 13:57:59 +0800 Subject: [PATCH 06/24] support pause:3.9 --- cmd/kk/pkg/images/tasks.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/kk/pkg/images/tasks.go b/cmd/kk/pkg/images/tasks.go index bc2fa9b78..fee9a2862 100644 --- a/cmd/kk/pkg/images/tasks.go +++ b/cmd/kk/pkg/images/tasks.go @@ -103,6 +103,10 @@ func GetImage(runtime connector.ModuleRuntime, kubeConf *common.KubeConf, name s pauseTag = "3.8" corednsTag = "1.9.3" } + if versionutil.MustParseSemantic(kubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic("v1.26.0")) { + pauseTag = "3.9" + corednsTag = "1.9.3" + } logger.Log.Debugf("pauseTag: %s, corednsTag: %s", pauseTag, corednsTag) From df3697e42fd5aacc5aeab0e72c33b272183f72ba Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 8 Dec 2022 11:29:27 +0800 Subject: [PATCH 07/24] update calico config and kubeadm_config for ipv6 support Change-Id: I13d12caebced1c6fb5cdbcaa1b70b5154ba701ba --- cmd/kk/apis/kubekey/v1alpha2/cluster_types.go | 6 +++--- cmd/kk/pkg/kubernetes/templates/kubeadm_config.go | 3 ++- cmd/kk/pkg/plugins/network/modules.go | 4 +++- .../pkg/plugins/network/templates/calico_v1.16+.go | 12 +++++++++--- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go index 1bd4f7799..613dd8ffe 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go +++ b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go @@ -145,7 +145,7 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string { } } - extraCertSANs = append(extraCertSANs, util.ParseIp(cfg.Network.KubeServiceCIDR)[0]) + extraCertSANs = append(extraCertSANs, util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0]) defaultCertSANs = append(defaultCertSANs, extraCertSANs...) @@ -210,12 +210,12 @@ func toHosts(cfg HostCfg) *KubeHost { // ClusterIP is used to get the kube-apiserver service address inside the cluster. func (cfg *ClusterSpec) ClusterIP() string { - return util.ParseIp(cfg.Network.KubeServiceCIDR)[0] + return util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0] } // CorednsClusterIP is used to get the coredns service address inside the cluster. func (cfg *ClusterSpec) CorednsClusterIP() string { - return util.ParseIp(cfg.Network.KubeServiceCIDR)[2] + return util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[2] } // ClusterDNS is used to get the dns server address inside the cluster. 
diff --git a/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go b/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go index 673c9aacd..43c10267f 100644 --- a/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go +++ b/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go @@ -96,7 +96,8 @@ apiServer: {{- end }} controllerManager: extraArgs: - node-cidr-mask-size: "{{ .NodeCidrMaskSize }}" + node-cidr-mask-size-ipv4: "{{ .NodeCidrMaskSize }}" + node-cidr-mask-size-ipv6: 64 {{ toYaml .ControllerManagerArgs | indent 4 }} extraVolumes: - name: host-time diff --git a/cmd/kk/pkg/plugins/network/modules.go b/cmd/kk/pkg/plugins/network/modules.go index db28cf6a0..9c234024f 100644 --- a/cmd/kk/pkg/plugins/network/modules.go +++ b/cmd/kk/pkg/plugins/network/modules.go @@ -18,6 +18,7 @@ package network import ( "path/filepath" + "strings" versionutil "k8s.io/apimachinery/pkg/util/version" @@ -131,7 +132,8 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface { Template: templates.CalicoNew, Dst: filepath.Join(common.KubeConfigDir, templates.CalicoNew.Name()), Data: util.Data{ - "KubePodsCIDR": d.KubeConf.Cluster.Network.KubePodsCIDR, + "KubePodsV4CIDR": strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",")[0], + "KubePodsV6CIDR": strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",")[1], "CalicoCniImage": images.GetImage(d.Runtime, d.KubeConf, "calico-cni").ImageName(), "CalicoNodeImage": images.GetImage(d.Runtime, d.KubeConf, "calico-node").ImageName(), "CalicoFlexvolImage": images.GetImage(d.Runtime, d.KubeConf, "calico-flexvol").ImageName(), diff --git a/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go b/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go index d803b611b..89f9afa04 100644 --- a/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go +++ b/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go @@ -114,7 +114,9 @@ data: "nodename": "__KUBERNETES_NODE_NAME__", "mtu": __CNI_MTU__, "ipam": { - "type": "calico-ipam" + "type": "calico-ipam", + "assign_ipv4": "true", + "assign_ipv6": "true" }, "policy": { "type": "k8s" @@ -4850,6 +4852,8 @@ spec: value: "can-reach=$(NODEIP)" - name: IP value: "autodetect" + - name: IP6 + value: "autodetect" # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "{{ .IPIPMode }}" @@ -4889,7 +4893,9 @@ spec: # chosen from this range. Changing this value after installation will have # no effect. - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubePodsCIDR }}" + value: "{{ .KubePodsV4CIDR }}" + - name: CALICO_IPV6POOL_CIDR + value: "{{ .KubePodsV6CIDR }}" - name: CALICO_IPV4POOL_BLOCK_SIZE value: "{{ .NodeCidrMaskSize }}" {{- else }} @@ -4907,7 +4913,7 @@ spec: value: "ACCEPT" # Disable IPv6 on Kubernetes. 
- name: FELIX_IPV6SUPPORT - value: "false" + value: "true" - name: FELIX_HEALTHENABLED value: "true" - name: FELIX_DEVICEROUTESOURCEADDRESS From 4166bbe4b28dc316f00cf181f84fc92bd2eab6aa Mon Sep 17 00:00:00 2001 From: xiongww Date: Fri, 23 Feb 2024 10:15:01 +0800 Subject: [PATCH 08/24] add ipv6 sysctl config --- cmd/kk/pkg/bootstrap/os/templates/init_script.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/kk/pkg/bootstrap/os/templates/init_script.go b/cmd/kk/pkg/bootstrap/os/templates/init_script.go index 5bceb2085..846e14da1 100644 --- a/cmd/kk/pkg/bootstrap/os/templates/init_script.go +++ b/cmd/kk/pkg/bootstrap/os/templates/init_script.go @@ -94,6 +94,12 @@ echo 'kernel.pid_max = 65535' >> /etc/sysctl.conf echo 'kernel.watchdog_thresh = 5' >> /etc/sysctl.conf echo 'kernel.hung_task_timeout_secs = 5' >> /etc/sysctl.conf +#add for ipv6 +echo 'net.ipv6.conf.all.disable_ipv6 = 0' >> /etc/sysctl.conf +echo 'net.ipv6.conf.default.disable_ipv6 = 0' >> /etc/sysctl.conf +echo 'net.ipv6.conf.lo.disable_ipv6 = 0' >> /etc/sysctl.conf +echo 'net.ipv6.conf.all.forwarding=1' >> /etc/sysctl.conf + #See https://help.aliyun.com/document_detail/118806.html#uicontrol-e50-ddj-w0y sed -r -i "s@#{0,}?net.ipv4.tcp_tw_recycle ?= ?(0|1|2)@net.ipv4.tcp_tw_recycle = 0@g" /etc/sysctl.conf sed -r -i "s@#{0,}?net.ipv4.tcp_tw_reuse ?= ?(0|1)@net.ipv4.tcp_tw_reuse = 0@g" /etc/sysctl.conf From 5a24ab6b2c337325361e7b152bd5b0897245de23 Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 8 Dec 2022 15:12:27 +0800 Subject: [PATCH 09/24] update host internalAddress for generating certs Change-Id: If68f60ccc1fae9918e78a9477929f0a9ee64ab92 --- cmd/kk/apis/kubekey/v1alpha2/cluster_types.go | 2 +- cmd/kk/pkg/etcd/certs.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go index 613dd8ffe..78158b917 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go +++ b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go @@ -145,7 +145,7 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string { } } - extraCertSANs = append(extraCertSANs, util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0]) + extraCertSANs = append(extraCertSANs, util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0], util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[1])[0]) defaultCertSANs = append(defaultCertSANs, extraCertSANs...)
diff --git a/cmd/kk/pkg/etcd/certs.go b/cmd/kk/pkg/etcd/certs.go index d481ae1d0..67dcd067c 100644 --- a/cmd/kk/pkg/etcd/certs.go +++ b/cmd/kk/pkg/etcd/certs.go @@ -203,7 +203,7 @@ func GenerateAltName(k *common.KubeConf, runtime *connector.Runtime) *cert.AltNa for _, host := range k.Cluster.Hosts { dnsList = append(dnsList, host.Name) - internalAddress := netutils.ParseIPSloppy(host.InternalAddress) + internalAddress := netutils.ParseIPSloppy(strings.Split(host.InternalAddress,",")[0]) if internalAddress != nil { ipList = append(ipList, internalAddress) } From d0725d724b56c7191c3f689263eda1bada1cfb25 Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 8 Dec 2022 15:54:01 +0800 Subject: [PATCH 10/24] write ipv6 address in /etc/hosts config Change-Id: I472ed48889750c9226c4d28c7b31761c49d3265b --- pkg/service/bootstrap/bootstrap.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/service/bootstrap/bootstrap.go b/pkg/service/bootstrap/bootstrap.go index 176547da7..c635a9315 100644 --- a/pkg/service/bootstrap/bootstrap.go +++ b/pkg/service/bootstrap/bootstrap.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "text/template" "github.com/pkg/errors" @@ -113,7 +114,12 @@ func (s *Service) ExecInitScript() error { for _, host := range s.scope.AllInstancesInfo() { if host.Name != "" { hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", - host.InternalAddress, + strings.Split(host.InternalAddress, ",")[0], + host.Name, + s.scope.KubernetesClusterName(), + host.Name)) + hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", + strings.Split(host.InternalAddress, ",")[1], host.Name, s.scope.KubernetesClusterName(), host.Name)) From 2f5bdd0222594bacb104574213cf73c85798827f Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 8 Dec 2022 16:24:30 +0800 Subject: [PATCH 11/24] write ipv6 address in /etc/hosts config in no kkcluster Change-Id: Ib52730123b41b4ab69dc29ebf9e2ca95af1da44b --- cmd/kk/pkg/bootstrap/os/templates/init_script.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/cmd/kk/pkg/bootstrap/os/templates/init_script.go b/cmd/kk/pkg/bootstrap/os/templates/init_script.go index 846e14da1..a683740eb 100644 --- a/cmd/kk/pkg/bootstrap/os/templates/init_script.go +++ b/cmd/kk/pkg/bootstrap/os/templates/init_script.go @@ -19,6 +19,7 @@ package templates import ( "fmt" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/registry" + "strings" "text/template" "github.com/lithammer/dedent" @@ -232,7 +233,12 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [ for _, host := range runtime.GetAllHosts() { if host.GetName() != "" { hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", - host.GetInternalAddress(), + strings.Split(host.GetInternalAddress(), ",")[0], + host.GetName(), + kubeConf.Cluster.Kubernetes.ClusterName, + host.GetName())) + hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", + strings.Split(host.GetInternalAddress(), ",")[1], host.GetName(), kubeConf.Cluster.Kubernetes.ClusterName, host.GetName())) @@ -241,9 +247,11 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [ if len(runtime.GetHostsByRole(common.Registry)) > 0 { if kubeConf.Cluster.Registry.PrivateRegistry != "" { - hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), kubeConf.Cluster.Registry.PrivateRegistry)) + hostsList = append(hostsList, fmt.Sprintf("%s %s", 
strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[0], kubeConf.Cluster.Registry.PrivateRegistry)) + hostsList = append(hostsList, fmt.Sprintf("%s %s", strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[1], kubeConf.Cluster.Registry.PrivateRegistry)) } else { - hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(), registry.RegistryCertificateBaseName)) + hostsList = append(hostsList, fmt.Sprintf("%s %s", strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[0], registry.RegistryCertificateBaseName)) + hostsList = append(hostsList, fmt.Sprintf("%s %s", strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[1], registry.RegistryCertificateBaseName)) } } From ad70afcff002496e666fc1f10afec1ed0734c0be Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 8 Dec 2022 17:25:38 +0800 Subject: [PATCH 12/24] add GetInternalIPv4Address GetInternalIPv6Address for host Change-Id: I40af792c7437fca41472e8f5012e7f78952fbf96 --- cmd/kk/pkg/bootstrap/os/tasks.go | 6 +++--- cmd/kk/pkg/bootstrap/os/templates/init_script.go | 6 ++++-- cmd/kk/pkg/core/connector/host.go | 9 +++++++++ cmd/kk/pkg/core/connector/interface.go | 2 ++ cmd/kk/pkg/etcd/prepares.go | 2 +- cmd/kk/pkg/etcd/tasks.go | 16 ++++++++-------- cmd/kk/pkg/k3s/tasks.go | 4 ++-- cmd/kk/pkg/k8e/tasks.go | 4 ++-- cmd/kk/pkg/kubernetes/tasks.go | 4 ++-- cmd/kk/pkg/kubesphere/tasks.go | 4 ++-- 10 files changed, 35 insertions(+), 22 deletions(-) diff --git a/cmd/kk/pkg/bootstrap/os/tasks.go b/cmd/kk/pkg/bootstrap/os/tasks.go index 7a0b83a09..836ac4e2b 100644 --- a/cmd/kk/pkg/bootstrap/os/tasks.go +++ b/cmd/kk/pkg/bootstrap/os/tasks.go @@ -517,12 +517,12 @@ func (n *NodeConfigureNtpServer) Execute(runtime connector.Runtime) error { serverAddr := strings.Trim(server, " \"") fmt.Printf("ntpserver: %s, current host: %s\n", serverAddr, currentHost.GetName()) - if serverAddr == currentHost.GetName() || serverAddr == currentHost.GetInternalAddress() { + if serverAddr == currentHost.GetName() || serverAddr == currentHost.GetInternalIPv4Address() { deleteAllowCmd := fmt.Sprintf(`sed -i '/^allow/d' %s`, chronyConfigFile) if _, err := runtime.GetRunner().SudoCmd(deleteAllowCmd, false); err != nil { return errors.Wrapf(err, "delete allow failed, please check file %s", chronyConfigFile) } - allowClientCmd := fmt.Sprintf(`echo 'allow 0.0.0.0/0' >> %s`, chronyConfigFile) + allowClientCmd := fmt.Sprintf(`echo "allow 0.0.0.0/0" >> %s`, chronyConfigFile) if _, err := runtime.GetRunner().SudoCmd(allowClientCmd, false); err != nil { return errors.Wrapf(err, "change host:%s chronyd conf failed, please check file %s", serverAddr, chronyConfigFile) } @@ -539,7 +539,7 @@ func (n *NodeConfigureNtpServer) Execute(runtime connector.Runtime) error { // use internal ip to client chronyd server for _, host := range runtime.GetAllHosts() { if serverAddr == host.GetName() { - serverAddr = host.GetInternalAddress() + serverAddr = host.GetInternalIPv4Address() break } } diff --git a/cmd/kk/pkg/bootstrap/os/templates/init_script.go b/cmd/kk/pkg/bootstrap/os/templates/init_script.go index a683740eb..164be49bf 100644 --- a/cmd/kk/pkg/bootstrap/os/templates/init_script.go +++ b/cmd/kk/pkg/bootstrap/os/templates/init_script.go @@ -228,17 +228,19 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [ if kubeConf.Cluster.ControlPlaneEndpoint.Address != "" { lbHost = fmt.Sprintf("%s %s", 
kubeConf.Cluster.ControlPlaneEndpoint.Address, kubeConf.Cluster.ControlPlaneEndpoint.Domain) + } else { + lbHost = fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Master)[0].GetInternalIPv4Address(), kubeConf.Cluster.ControlPlaneEndpoint.Domain) } for _, host := range runtime.GetAllHosts() { if host.GetName() != "" { hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", - strings.Split(host.GetInternalAddress(), ",")[0], + host.GetInternalIPv4Address(), host.GetName(), kubeConf.Cluster.Kubernetes.ClusterName, host.GetName())) hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", - strings.Split(host.GetInternalAddress(), ",")[1], + host.GetInternalIPv6Address(), host.GetName(), kubeConf.Cluster.Kubernetes.ClusterName, host.GetName())) diff --git a/cmd/kk/pkg/core/connector/host.go b/cmd/kk/pkg/core/connector/host.go index 762c5a89f..3e4d033cf 100644 --- a/cmd/kk/pkg/core/connector/host.go +++ b/cmd/kk/pkg/core/connector/host.go @@ -18,6 +18,7 @@ package connector import ( "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/cache" + "strings" ) type BaseHost struct { @@ -65,6 +66,14 @@ func (b *BaseHost) GetInternalAddress() string { return b.InternalAddress } +func (b *BaseHost) GetInternalIPv4Address() string { + return strings.Split(b.InternalAddress, ",")[0] +} + +func (b *BaseHost) GetInternalIPv6Address() string { + return strings.Split(b.InternalAddress, ",")[1] +} + func (b *BaseHost) SetInternalAddress(str string) { b.InternalAddress = str } diff --git a/cmd/kk/pkg/core/connector/interface.go b/cmd/kk/pkg/core/connector/interface.go index 60d124182..4b964de09 100644 --- a/cmd/kk/pkg/core/connector/interface.go +++ b/cmd/kk/pkg/core/connector/interface.go @@ -70,6 +70,8 @@ type Host interface { GetAddress() string SetAddress(str string) GetInternalAddress() string + GetInternalIPv4Address() string + GetInternalIPv6Address() string SetInternalAddress(str string) GetPort() int SetPort(port int) diff --git a/cmd/kk/pkg/etcd/prepares.go b/cmd/kk/pkg/etcd/prepares.go index aa183dbd2..305ab448a 100644 --- a/cmd/kk/pkg/etcd/prepares.go +++ b/cmd/kk/pkg/etcd/prepares.go @@ -38,7 +38,7 @@ func (f *FirstETCDNode) PreCheck(runtime connector.Runtime) (bool, error) { cluster := v.(*EtcdCluster) if (!cluster.clusterExist && runtime.GetHostsByRole(common.ETCD)[0].GetName() == runtime.RemoteHost().GetName()) || - (cluster.clusterExist && strings.Contains(cluster.peerAddresses[0], runtime.RemoteHost().GetInternalAddress())) { + (cluster.clusterExist && strings.Contains(cluster.peerAddresses[0], runtime.RemoteHost().GetInternalIPv4Address())) { return !f.Not, nil } return f.Not, nil diff --git a/cmd/kk/pkg/etcd/tasks.go b/cmd/kk/pkg/etcd/tasks.go index 0aeaccdd7..a5188ed1a 100644 --- a/cmd/kk/pkg/etcd/tasks.go +++ b/cmd/kk/pkg/etcd/tasks.go @@ -84,12 +84,12 @@ func (g *GetStatus) Execute(runtime connector.Runtime) error { if v, ok := g.PipelineCache.Get(common.ETCDCluster); ok { c := v.(*EtcdCluster) - c.peerAddresses = append(c.peerAddresses, fmt.Sprintf("%s=https://%s:2380", etcdName, host.GetInternalAddress())) + c.peerAddresses = append(c.peerAddresses, fmt.Sprintf("%s=https://%s:2380", etcdName, host.GetInternalIPv4Address())) c.clusterExist = true // type: *EtcdCluster g.PipelineCache.Set(common.ETCDCluster, c) } else { - cluster.peerAddresses = append(cluster.peerAddresses, fmt.Sprintf("%s=https://%s:2380", etcdName, host.GetInternalAddress())) + cluster.peerAddresses = append(cluster.peerAddresses, fmt.Sprintf("%s=https://%s:2380", etcdName, host.GetInternalIPv4Address())) 
cluster.clusterExist = true g.PipelineCache.Set(common.ETCDCluster, cluster) } @@ -169,7 +169,7 @@ type GenerateAccessAddress struct { func (g *GenerateAccessAddress) Execute(runtime connector.Runtime) error { var addrList []string for _, host := range runtime.GetHostsByRole(common.ETCD) { - addrList = append(addrList, fmt.Sprintf("https://%s:2379", host.GetInternalAddress())) + addrList = append(addrList, fmt.Sprintf("https://%s:2379", host.GetInternalIPv4Address())) } accessAddresses := strings.Join(addrList, ",") @@ -227,7 +227,7 @@ func (g *GenerateConfig) Execute(runtime connector.Runtime) error { if v, ok := g.PipelineCache.Get(common.ETCDCluster); ok { cluster := v.(*EtcdCluster) - cluster.peerAddresses = append(cluster.peerAddresses, fmt.Sprintf("%s=https://%s:2380", etcdName, host.GetInternalAddress())) + cluster.peerAddresses = append(cluster.peerAddresses, fmt.Sprintf("%s=https://%s:2380", etcdName, host.GetInternalIPv4Address())) g.PipelineCache.Set(common.ETCDCluster, cluster) if !cluster.clusterExist { @@ -295,7 +295,7 @@ func refreshConfig(KubeConf *common.KubeConf, runtime connector.Runtime, endpoin Data: util.Data{ "Tag": kubekeyapiv1alpha2.DefaultEtcdVersion, "Name": etcdName, - "Ip": host.GetInternalAddress(), + "Ip": host.GetInternalIPv4Address(), "Hostname": host.GetName(), "State": state, "PeerAddresses": strings.Join(endpoints, ","), @@ -341,7 +341,7 @@ func (j *JoinMember) Execute(runtime connector.Runtime) error { "export ETCDCTL_CA_FILE='/etc/ssl/etcd/ssl/ca.pem';"+ "%s/etcdctl --endpoints=%s member add %s %s", host.GetName(), host.GetName(), common.BinDir, cluster.accessAddresses, etcdName, - fmt.Sprintf("https://%s:2380", host.GetInternalAddress())) + fmt.Sprintf("https://%s:2380", host.GetInternalIPv4Address())) if _, err := runtime.GetRunner().SudoCmd(joinMemberCmd, true); err != nil { return errors.Wrap(errors.WithStack(err), "add etcd member failed") @@ -375,7 +375,7 @@ func (c *CheckMember) Execute(runtime connector.Runtime) error { if err != nil { return errors.Wrap(errors.WithStack(err), "list etcd member failed") } - if !strings.Contains(memberList, fmt.Sprintf("https://%s:2379", host.GetInternalAddress())) { + if !strings.Contains(memberList, fmt.Sprintf("https://%s:2379", host.GetInternalIPv4Address())) { return errors.Wrap(errors.WithStack(err), "add etcd member failed") } } else { @@ -405,7 +405,7 @@ func (b *BackupETCD) Execute(runtime connector.Runtime) error { Dst: filepath.Join(b.KubeConf.Cluster.Etcd.BackupScriptDir, "etcd-backup.sh"), Data: util.Data{ "Hostname": runtime.RemoteHost().GetName(), - "Etcdendpoint": fmt.Sprintf("https://%s:2379", runtime.RemoteHost().GetInternalAddress()), + "Etcdendpoint": fmt.Sprintf("https://%s:2379", runtime.RemoteHost().GetInternalIPv4Address()), "DataDir": b.KubeConf.Cluster.Etcd.DataDir, "Backupdir": b.KubeConf.Cluster.Etcd.BackupDir, "KeepbackupNumber": b.KubeConf.Cluster.Etcd.KeepBackupNumber + 1, diff --git a/cmd/kk/pkg/k3s/tasks.go b/cmd/kk/pkg/k3s/tasks.go index 9b68c81d8..25d4bdf6c 100644 --- a/cmd/kk/pkg/k3s/tasks.go +++ b/cmd/kk/pkg/k3s/tasks.go @@ -211,7 +211,7 @@ func (g *GenerateK3sService) Execute(runtime connector.Runtime) error { "IsMaster": host.IsRole(common.Master), "IsDockerRuntime": g.KubeConf.Cluster.Kubernetes.ContainerManager == common.Docker, "ContainerRuntimeEndpoint": g.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint, - "NodeIP": host.GetInternalAddress(), + "NodeIP": host.GetInternalIPv4Address(), "HostName": host.GetName(), "PodSubnet": g.KubeConf.Cluster.Network.KubePodsCIDR, 
"ServiceSubnet": g.KubeConf.Cluster.Network.KubeServiceCIDR, @@ -261,7 +261,7 @@ func (g *GenerateK3sServiceEnv) Execute(runtime connector.Runtime) error { } default: for _, node := range runtime.GetHostsByRole(common.ETCD) { - endpoint := fmt.Sprintf("https://%s:%s", node.GetInternalAddress(), kubekeyapiv1alpha2.DefaultEtcdPort) + endpoint := fmt.Sprintf("https://%s:%s", node.GetInternalIPv4Address(), kubekeyapiv1alpha2.DefaultEtcdPort) endpointsList = append(endpointsList, endpoint) } externalEtcd.Endpoints = endpointsList diff --git a/cmd/kk/pkg/k8e/tasks.go b/cmd/kk/pkg/k8e/tasks.go index c1064dc4c..3c255c365 100644 --- a/cmd/kk/pkg/k8e/tasks.go +++ b/cmd/kk/pkg/k8e/tasks.go @@ -203,7 +203,7 @@ func (g *GenerateK8eService) Execute(runtime connector.Runtime) error { Data: util.Data{ "Server": server, "IsMaster": host.IsRole(common.Master), - "NodeIP": host.GetInternalAddress(), + "NodeIP": host.GetInternalIPv4Address(), "HostName": host.GetName(), "PodSubnet": g.KubeConf.Cluster.Network.KubePodsCIDR, "ServiceSubnet": g.KubeConf.Cluster.Network.KubeServiceCIDR, @@ -253,7 +253,7 @@ func (g *GenerateK8eServiceEnv) Execute(runtime connector.Runtime) error { } default: for _, node := range runtime.GetHostsByRole(common.ETCD) { - endpoint := fmt.Sprintf("https://%s:%s", node.GetInternalAddress(), kubekeyapiv1alpha2.DefaultEtcdPort) + endpoint := fmt.Sprintf("https://%s:%s", node.GetInternalIPv4Address(), kubekeyapiv1alpha2.DefaultEtcdPort) endpointsList = append(endpointsList, endpoint) } externalEtcd.Endpoints = endpointsList diff --git a/cmd/kk/pkg/kubernetes/tasks.go b/cmd/kk/pkg/kubernetes/tasks.go index aaeda50fd..c995d2eb4 100644 --- a/cmd/kk/pkg/kubernetes/tasks.go +++ b/cmd/kk/pkg/kubernetes/tasks.go @@ -225,7 +225,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error { switch g.KubeConf.Cluster.Etcd.Type { case kubekeyv1alpha2.KubeKey: for _, host := range runtime.GetHostsByRole(common.ETCD) { - endpoint := fmt.Sprintf("https://%s:%s", host.GetInternalAddress(), kubekeyv1alpha2.DefaultEtcdPort) + endpoint := fmt.Sprintf("https://%s:%s", host.GetInternalIPv4Address(), kubekeyv1alpha2.DefaultEtcdPort) endpointsList = append(endpointsList, endpoint) } externalEtcd.Endpoints = endpointsList @@ -287,7 +287,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error { "Version": g.KubeConf.Cluster.Kubernetes.Version, "ClusterName": g.KubeConf.Cluster.Kubernetes.ClusterName, "DNSDomain": g.KubeConf.Cluster.Kubernetes.DNSDomain, - "AdvertiseAddress": host.GetInternalAddress(), + "AdvertiseAddress": host.GetInternalIPv4Address(), "BindPort": kubekeyv1alpha2.DefaultApiserverPort, "ControlPlaneEndpoint": fmt.Sprintf("%s:%d", g.KubeConf.Cluster.ControlPlaneEndpoint.Domain, g.KubeConf.Cluster.ControlPlaneEndpoint.Port), "PodSubnet": g.KubeConf.Cluster.Network.KubePodsCIDR, diff --git a/cmd/kk/pkg/kubesphere/tasks.go b/cmd/kk/pkg/kubesphere/tasks.go index e867fb4ec..f1836426a 100644 --- a/cmd/kk/pkg/kubesphere/tasks.go +++ b/cmd/kk/pkg/kubesphere/tasks.go @@ -88,7 +88,7 @@ func (s *Setup) Execute(runtime connector.Runtime) error { switch s.KubeConf.Cluster.Etcd.Type { case kubekeyapiv1alpha2.KubeKey: for _, host := range runtime.GetHostsByRole(common.ETCD) { - addrList = append(addrList, host.GetInternalAddress()) + addrList = append(addrList, host.GetInternalIPv4Address()) } caFile := "/etc/ssl/etcd/ssl/ca.pem" @@ -105,7 +105,7 @@ func (s *Setup) Execute(runtime connector.Runtime) error { } case kubekeyapiv1alpha2.Kubeadm: for _, host := range 
runtime.GetHostsByRole(common.Master) { - addrList = append(addrList, host.GetInternalAddress()) + addrList = append(addrList, host.GetInternalIPv4Address()) } caFile := "/etc/kubernetes/pki/etcd/ca.crt" From abb0c1b5239c89217609c6b92d28eec745213956 Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 8 Dec 2022 20:17:36 +0800 Subject: [PATCH 13/24] parseIP does not support ipv6 Change-Id: I32736b0bb28da4434067ae262dc1b484a90c8c00 --- cmd/kk/apis/kubekey/v1alpha2/cluster_types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go index 78158b917..613dd8ffe 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go +++ b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go @@ -145,7 +145,7 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string { } } - extraCertSANs = append(extraCertSANs, util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0], util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[1])[0]) + extraCertSANs = append(extraCertSANs, util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0]) defaultCertSANs = append(defaultCertSANs, extraCertSANs...) From ee25e90f0c0ae831abb8645beba2fd702826bbd9 Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 8 Dec 2022 21:04:17 +0800 Subject: [PATCH 14/24] fix kubeadm config node-cidr-mask-size-ipv6 and apiserver certSANs error Change-Id: I66b6c33b34b7588a0181f8d031d9965afa95560c --- cmd/kk/apis/kubekey/v1alpha2/cluster_types.go | 7 +++++-- cmd/kk/pkg/kubernetes/templates/kubeadm_config.go | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go index 613dd8ffe..e98acb526 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go +++ b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go @@ -140,9 +140,12 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string { if host.Address != cfg.ControlPlaneEndpoint.Address { extraCertSANs = append(extraCertSANs, host.Address) } - if host.InternalAddress != host.Address && host.InternalAddress != cfg.ControlPlaneEndpoint.Address { - extraCertSANs = append(extraCertSANs, host.InternalAddress) + InternalIPv4Address := strings.Split(host.InternalAddress, ",")[0] + InternalIPv6Address := strings.Split(host.InternalAddress, ",")[1] + if InternalIPv4Address != host.Address && InternalIPv4Address != cfg.ControlPlaneEndpoint.Address { + extraCertSANs = append(extraCertSANs, InternalIPv4Address) } + extraCertSANs = append(extraCertSANs, InternalIPv6Address) } extraCertSANs = append(extraCertSANs, util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0]) diff --git a/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go b/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go index 43c10267f..292aca36f 100644 --- a/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go +++ b/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go @@ -97,7 +97,7 @@ apiServer: controllerManager: extraArgs: node-cidr-mask-size-ipv4: "{{ .NodeCidrMaskSize }}" - node-cidr-mask-size-ipv6: 64 + node-cidr-mask-size-ipv6: "64" {{ toYaml .ControllerManagerArgs | indent 4 }} extraVolumes: - name: host-time From b71b09e629f8511d0a314b355b209fd911686c96 Mon Sep 17 00:00:00 2001 From: xiongww Date: Wed, 14 Dec 2022 09:21:15 +0800 Subject: [PATCH 15/24] update doc/config-example Change-Id: I7301e6a5957fbea48b73f37cee5a617db8c5a693 --- docs/config-example.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git
a/docs/config-example.md b/docs/config-example.md index 3a8796bb7..28ecaf556 100644 --- a/docs/config-example.md +++ b/docs/config-example.md @@ -8,12 +8,12 @@ spec: hosts: # Assume that the default port for SSH is 22. Otherwise, add the port number after the IP address. # If you install Kubernetes on ARM, add "arch: arm64". For example, {...user: ubuntu, password: Qcloud@123, arch: arm64}. - - {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, port: 8022, user: ubuntu, password: "Qcloud@123"} + - {name: node1, address: 172.16.0.2, internalAddress: "172.16.0.2,2022::2", port: 8022, user: ubuntu, password: "Qcloud@123"} # For default root user. # Kubekey will parse `labels` field and automatically label the node. - - {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, password: "Qcloud@123", labels: {disk: SSD, role: backend}} + - {name: node2, address: 172.16.0.3, internalAddress: "172.16.0.3,2022::3", password: "Qcloud@123", labels: {disk: SSD, role: backend}} # For password-less login with SSH keys. - - {name: node3, address: 172.16.0.4, internalAddress: 172.16.0.4, privateKeyPath: "~/.ssh/id_rsa"} + - {name: node3, address: 172.16.0.4, internalAddress: "172.16.0.4,2022::4", privateKeyPath: "~/.ssh/id_rsa"} roleGroups: etcd: - node1 # All the nodes in your cluster that serve as the etcd nodes. @@ -145,15 +145,15 @@ spec: ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always] vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never] vethMTU: 0 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. By default, MTU is auto-detected. 
[Default: 0] - kubePodsCIDR: 10.233.64.0/18 - kubeServiceCIDR: 10.233.0.0/18 + kubePodsCIDR: 10.233.64.0/18,fc00::/48 + kubeServiceCIDR: 10.233.0.0/18,fd00::/108 storage: openebs: basePath: /var/openebs/local # base path of the local PV provisioner registry: registryMirrors: [] insecureRegistries: [] - privateRegistry: "" + privateRegistry: "dockerhub.kubekey.local" namespaceOverride: "" auths: # if docker add by `docker login`, if containerd append to `/etc/containerd/config.toml` "dockerhub.kubekey.local": From 05b34e82565a246ed50ec44c848c363e8dcc4219 Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 15 Dec 2022 15:35:13 +0800 Subject: [PATCH 16/24] set ControlPlaneEndpoint ipv4 address when lb is haproxy Change-Id: Icf7289e0914200880819be4a3464427426585abe --- cmd/kk/apis/kubekey/v1alpha2/default.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kk/apis/kubekey/v1alpha2/default.go b/cmd/kk/apis/kubekey/v1alpha2/default.go index b29348836..43ebf8950 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/default.go +++ b/cmd/kk/apis/kubekey/v1alpha2/default.go @@ -203,7 +203,7 @@ func SetDefaultLBCfg(cfg *ClusterSpec, masterGroup []*KubeHost) ControlPlaneEndp } if (cfg.ControlPlaneEndpoint.Address == "" && !cfg.ControlPlaneEndpoint.EnableExternalDNS()) || cfg.ControlPlaneEndpoint.Address == "127.0.0.1" { - cfg.ControlPlaneEndpoint.Address = masterGroup[0].InternalAddress + cfg.ControlPlaneEndpoint.Address = masterGroup[0].GetInternalIPv4Address() } if cfg.ControlPlaneEndpoint.Domain == "" { cfg.ControlPlaneEndpoint.Domain = DefaultLBDomain From 83cb889ad41c12241d279a6dc716a36eca426345 Mon Sep 17 00:00:00 2001 From: xiongww Date: Tue, 27 Dec 2022 16:14:06 +0800 Subject: [PATCH 17/24] disable chrony pool and add local stratum 10 Change-Id: I4f153af1dfd914eeaaf0d419a915bb913e775ba2 --- cmd/kk/pkg/bootstrap/os/tasks.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/kk/pkg/bootstrap/os/tasks.go b/cmd/kk/pkg/bootstrap/os/tasks.go index 836ac4e2b..684bb2d3e 100644 --- a/cmd/kk/pkg/bootstrap/os/tasks.go +++ b/cmd/kk/pkg/bootstrap/os/tasks.go @@ -512,6 +512,10 @@ func (n *NodeConfigureNtpServer) Execute(runtime connector.Runtime) error { return errors.Wrapf(err, "delete old servers failed, please check file %s", chronyConfigFile) } + poolDisableCmd := fmt.Sprintf(`sed -i 's/^pool /#pool /g' %s`, chronyConfigFile) + if _, err := runtime.GetRunner().SudoCmd(poolDisableCmd, false); err != nil { + return errors.Wrapf(err, "set pool disable failed") + } // if NtpServers was configured for _, server := range n.KubeConf.Cluster.System.NtpServers { @@ -522,7 +526,7 @@ func (n *NodeConfigureNtpServer) Execute(runtime connector.Runtime) error { if _, err := runtime.GetRunner().SudoCmd(deleteAllowCmd, false); err != nil { return errors.Wrapf(err, "delete allow failed, please check file %s", chronyConfigFile) } - allowClientCmd := fmt.Sprintf(`echo "allow 0.0.0.0/0" >> %s`, chronyConfigFile) + allowClientCmd := fmt.Sprintf(`echo 'allow 0.0.0.0/0' >> %s`, chronyConfigFile) if _, err := runtime.GetRunner().SudoCmd(allowClientCmd, false); err != nil { return errors.Wrapf(err, "change host:%s chronyd conf failed, please check file %s", serverAddr, chronyConfigFile) } From 740e766d566719175ad1ed2760ccc9003eaefac9 Mon Sep 17 00:00:00 2001 From: xiongww Date: Thu, 22 Feb 2024 10:06:23 +0800 Subject: [PATCH 18/24] fix ipv6 address is not configured will panic Change-Id: I0929936fe524803cf8b0795b56281b232f96238a --- cmd/kk/apis/kubekey/v1alpha2/cluster_types.go | 10 +++++++--- 
cmd/kk/pkg/core/connector/host.go | 7 ++++++- cmd/kk/pkg/plugins/network/modules.go | 11 ++++++++++- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go index e98acb526..2a3cfeddb 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go +++ b/cmd/kk/apis/kubekey/v1alpha2/cluster_types.go @@ -140,12 +140,16 @@ func (cfg *ClusterSpec) GenerateCertSANs() []string { if host.Address != cfg.ControlPlaneEndpoint.Address { extraCertSANs = append(extraCertSANs, host.Address) } - InternalIPv4Address := strings.Split(host.InternalAddress, ",")[0] - InternalIPv6Address := strings.Split(host.InternalAddress, ",")[1] + + nodeAddresses := strings.Split(host.InternalAddress, ",") + InternalIPv4Address := nodeAddresses[0] if InternalIPv4Address != host.Address && InternalIPv4Address != cfg.ControlPlaneEndpoint.Address { extraCertSANs = append(extraCertSANs, InternalIPv4Address) } - extraCertSANs = append(extraCertSANs, InternalIPv6Address) + if len(nodeAddresses)==2 { + InternalIPv6Address := nodeAddresses[1] + extraCertSANs = append(extraCertSANs, InternalIPv6Address) + } } extraCertSANs = append(extraCertSANs, util.ParseIp(strings.Split(cfg.Network.KubeServiceCIDR, ",")[0])[0]) diff --git a/cmd/kk/pkg/core/connector/host.go b/cmd/kk/pkg/core/connector/host.go index 3e4d033cf..1677662f0 100644 --- a/cmd/kk/pkg/core/connector/host.go +++ b/cmd/kk/pkg/core/connector/host.go @@ -71,7 +71,12 @@ func (b *BaseHost) GetInternalIPv4Address() string { } func (b *BaseHost) GetInternalIPv6Address() string { - return strings.Split(b.InternalAddress, ",")[1] + internalIPv6Address := "" + nodeAddresses := strings.Split(b.InternalAddress, ",") + if len(nodeAddresses) == 2 { + internalIPv6Address = nodeAddresses[1] + } + return internalIPv6Address } func (b *BaseHost) SetInternalAddress(str string) { diff --git a/cmd/kk/pkg/plugins/network/modules.go b/cmd/kk/pkg/plugins/network/modules.go index 9c234024f..9484d5323 100644 --- a/cmd/kk/pkg/plugins/network/modules.go +++ b/cmd/kk/pkg/plugins/network/modules.go @@ -133,7 +133,7 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface { Dst: filepath.Join(common.KubeConfigDir, templates.CalicoNew.Name()), Data: util.Data{ "KubePodsV4CIDR": strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",")[0], - "KubePodsV6CIDR": strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",")[1], + "KubePodsV6CIDR": GetKubePodsV6CIDR(d), "CalicoCniImage": images.GetImage(d.Runtime, d.KubeConf, "calico-cni").ImageName(), "CalicoNodeImage": images.GetImage(d.Runtime, d.KubeConf, "calico-node").ImageName(), "CalicoFlexvolImage": images.GetImage(d.Runtime, d.KubeConf, "calico-flexvol").ImageName(), @@ -175,6 +175,15 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface { } } +func GetKubePodsV6CIDR(d *DeployNetworkPluginModule) string { + kubePodsV6CIDR := "" + kubePodsCIDR := strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",") + if len(kubePodsCIDR)==2 { + kubePodsV6CIDR = kubePodsCIDR[1] + } + return kubePodsV6CIDR +} + func deployFlannel(d *DeployNetworkPluginModule) []task.Interface { generateFlannelPSP := &task.RemoteTask{ Name: "GenerateFlannel", From 128680659745b64b373cfd4df2ee9cce57aa7098 Mon Sep 17 00:00:00 2001 From: xiongww Date: Fri, 23 Feb 2024 09:57:33 +0800 Subject: [PATCH 19/24] fix registry cert, calico, kube-controller-manager config when ipv6 address not provide Change-Id: I9388a58780e467e9c1283bf29c07102ec2ba5b54 --- 
cmd/kk/pkg/bootstrap/registry/certs.go | 2 +- cmd/kk/pkg/kubernetes/tasks.go | 1 + cmd/kk/pkg/kubernetes/templates/kubeadm_config.go | 4 ++++ cmd/kk/pkg/plugins/network/modules.go | 10 ++++++++++ cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go | 2 ++ 5 files changed, 18 insertions(+), 1 deletion(-) diff --git a/cmd/kk/pkg/bootstrap/registry/certs.go b/cmd/kk/pkg/bootstrap/registry/certs.go index 182960788..690cb4bfa 100644 --- a/cmd/kk/pkg/bootstrap/registry/certs.go +++ b/cmd/kk/pkg/bootstrap/registry/certs.go @@ -110,7 +110,7 @@ func (g *GenerateCerts) Execute(runtime connector.Runtime) error { for _, h := range runtime.GetHostsByRole(common.Registry) { dnsList = append(dnsList, h.GetName()) - ipList = append(ipList, netutils.ParseIPSloppy(h.GetInternalAddress())) + ipList = append(ipList, netutils.ParseIPSloppy(h.GetInternalIPv4Address())) } altName.DNSNames = dnsList altName.IPs = ipList diff --git a/cmd/kk/pkg/kubernetes/tasks.go b/cmd/kk/pkg/kubernetes/tasks.go index c995d2eb4..c6e8a1d6f 100644 --- a/cmd/kk/pkg/kubernetes/tasks.go +++ b/cmd/kk/pkg/kubernetes/tasks.go @@ -307,6 +307,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime connector.Runtime) error { "CgroupDriver": checkCgroupDriver, "BootstrapToken": bootstrapToken, "CertificateKey": certificateKey, + "IPv6Support": host.GetInternalIPv6Address()!="", }, } diff --git a/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go b/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go index 292aca36f..6d30206a5 100644 --- a/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go +++ b/cmd/kk/pkg/kubernetes/templates/kubeadm_config.go @@ -96,8 +96,12 @@ apiServer: {{- end }} controllerManager: extraArgs: +{{- if .IPv6Support }} node-cidr-mask-size-ipv4: "{{ .NodeCidrMaskSize }}" node-cidr-mask-size-ipv6: "64" +{{- else }} + node-cidr-mask-size: "{{ .NodeCidrMaskSize }}" +{{- end }} {{ toYaml .ControllerManagerArgs | indent 4 }} extraVolumes: - name: host-time diff --git a/cmd/kk/pkg/plugins/network/modules.go b/cmd/kk/pkg/plugins/network/modules.go index 9484d5323..188e35e2f 100644 --- a/cmd/kk/pkg/plugins/network/modules.go +++ b/cmd/kk/pkg/plugins/network/modules.go @@ -147,6 +147,7 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface { "ConatinerManagerIsIsula": d.KubeConf.Cluster.Kubernetes.ContainerManager == "isula", "IPV4POOLNATOUTGOING": d.KubeConf.Cluster.Network.Calico.EnableIPV4POOL_NAT_OUTGOING(), "DefaultIPPOOL": d.KubeConf.Cluster.Network.Calico.EnableDefaultIPPOOL(), + "IPv6Support": GetKubeIPv6Support(d), }, }, Parallel: true, @@ -175,6 +176,15 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface { } } +func GetKubeIPv6Support(d *DeployNetworkPluginModule) bool { + IPv6Support := false + kubePodsCIDR := strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",") + if len(kubePodsCIDR)==2 { + IPv6Support = true + } + return IPv6Support +} + func GetKubePodsV6CIDR(d *DeployNetworkPluginModule) string { kubePodsV6CIDR := "" kubePodsCIDR := strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",") diff --git a/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go b/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go index 89f9afa04..2f6d5fced 100644 --- a/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go +++ b/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go @@ -4852,8 +4852,10 @@ spec: value: "can-reach=$(NODEIP)" - name: IP value: "autodetect" +{{- if .IPv6Support }} - name: IP6 value: "autodetect" +{{- end }} # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "{{ .IPIPMode }}" From 
9d4f5996a1b347f294f0c5265c30c7db4b2e5133 Mon Sep 17 00:00:00 2001 From: xiongww Date: Fri, 23 Feb 2024 10:50:38 +0800 Subject: [PATCH 20/24] remove kkcluster ipv6 hosts config when ipv6 address not provide --- pkg/service/bootstrap/bootstrap.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pkg/service/bootstrap/bootstrap.go b/pkg/service/bootstrap/bootstrap.go index c635a9315..66d15123f 100644 --- a/pkg/service/bootstrap/bootstrap.go +++ b/pkg/service/bootstrap/bootstrap.go @@ -118,11 +118,6 @@ func (s *Service) ExecInitScript() error { host.Name, s.scope.KubernetesClusterName(), host.Name)) - hostsList = append(hostsList, fmt.Sprintf("%s %s.%s %s", - strings.Split(host.InternalAddress, ",")[1], - host.Name, - s.scope.KubernetesClusterName(), - host.Name)) } } hostsList = append(hostsList, lbHost) From 20934f715138030eaff5e7cb42bcf7b1a6273123 Mon Sep 17 00:00:00 2001 From: "deqing.ldq" Date: Sun, 25 Feb 2024 21:45:53 +0800 Subject: [PATCH 21/24] fix #2143: My bin directory was deleted by installContainerdMoudle --- cmd/kk/pkg/container/containerd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kk/pkg/container/containerd.go b/cmd/kk/pkg/container/containerd.go index 05caae1e9..f8724f26b 100644 --- a/cmd/kk/pkg/container/containerd.go +++ b/cmd/kk/pkg/container/containerd.go @@ -63,7 +63,7 @@ func (s *SyncContainerd) Execute(runtime connector.Runtime) error { } if _, err := runtime.GetRunner().SudoCmd( - fmt.Sprintf("mkdir -p /usr/bin && tar -zxf %s && mv bin/* /usr/bin && rm -rf bin", dst), + fmt.Sprintf("mkdir -p /usr/bin && cd %s && tar -zxf %s && mv bin/* /usr/bin && rm -rf bin", common.TmpDir, dst), false); err != nil { return errors.Wrap(errors.WithStack(err), fmt.Sprintf("install containerd binaries failed")) } From 9cb5949170c19b99ed99567bad070c60b8a9747d Mon Sep 17 00:00:00 2001 From: pixiake Date: Wed, 21 Feb 2024 16:06:09 +0800 Subject: [PATCH 22/24] update cri-dockerd support Signed-off-by: pixiake --- cmd/kk/apis/kubekey/v1alpha2/default.go | 5 +- .../apis/kubekey/v1alpha2/kubernetes_types.go | 19 +- cmd/kk/pkg/binaries/kubernetes.go | 3 +- cmd/kk/pkg/bootstrap/confirm/tasks.go | 31 ++- .../pkg/bootstrap/os/templates/init_script.go | 17 +- cmd/kk/pkg/bootstrap/precheck/tasks.go | 6 + cmd/kk/pkg/common/common.go | 1 + cmd/kk/pkg/common/kube_prepare.go | 8 + cmd/kk/pkg/container/docker.go | 3 +- cmd/kk/pkg/container/module.go | 195 +++++++++++------- .../templates/cri_dockerd_service.go | 2 +- cmd/kk/pkg/files/file.go | 3 + cmd/kk/pkg/kubernetes/module.go | 5 +- cmd/kk/pkg/kubernetes/tasks.go | 16 +- cmd/kk/pkg/pipelines/add_nodes.go | 1 + cmd/kk/pkg/pipelines/create_cluster.go | 1 + cmd/kk/pkg/pipelines/upgrade_cluster.go | 6 + cmd/kk/pkg/version/kubernetes/version_enum.go | 13 -- hack/sync-components.sh | 21 ++ version/components.json | 4 +- 20 files changed, 230 insertions(+), 130 deletions(-) diff --git a/cmd/kk/apis/kubekey/v1alpha2/default.go b/cmd/kk/apis/kubekey/v1alpha2/default.go index 43ebf8950..dad33ac1d 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/default.go +++ b/cmd/kk/apis/kubekey/v1alpha2/default.go @@ -22,7 +22,6 @@ import ( "strings" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/util" - "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/version/kubernetes" ) const ( @@ -43,7 +42,7 @@ const ( DefaultEtcdVersion = "v3.5.6" DefaultEtcdPort = "2379" DefaultDockerVersion = "24.0.9" - DefaultCriDockerdVersion = "0.3.9" + DefaultCriDockerdVersion = "0.3.10" DefaultContainerdVersion = "1.7.12" DefaultRuncVersion = "v1.1.11" 
DefaultCrictlVersion = "v1.29.0" @@ -318,7 +317,7 @@ func SetDefaultClusterCfg(cfg *ClusterSpec) Kubernetes { if cfg.Kubernetes.ContainerRuntimeEndpoint == "" { switch cfg.Kubernetes.ContainerManager { case Docker: - if kubernetes.IsAtLeastV124(cfg.Kubernetes.Version) { + if cfg.Kubernetes.IsAtLeastV124() { cfg.Kubernetes.ContainerRuntimeEndpoint = DefaultCriDockerdEndpoint } else { cfg.Kubernetes.ContainerRuntimeEndpoint = "" diff --git a/cmd/kk/apis/kubekey/v1alpha2/kubernetes_types.go b/cmd/kk/apis/kubekey/v1alpha2/kubernetes_types.go index c9f055812..9531f4817 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/kubernetes_types.go +++ b/cmd/kk/apis/kubekey/v1alpha2/kubernetes_types.go @@ -16,7 +16,10 @@ package v1alpha2 -import "k8s.io/apimachinery/pkg/runtime" +import ( + "k8s.io/apimachinery/pkg/runtime" + versionutil "k8s.io/apimachinery/pkg/util/version" +) // Kubernetes contains the configuration for the cluster type Kubernetes struct { @@ -103,3 +106,17 @@ func (k *Kubernetes) EnableAudit() bool { } return *k.AutoRenewCerts } + +// IsAtLeastV124 is used to determine whether the k8s version is greater than v1.24. +func (k *Kubernetes) IsAtLeastV124() bool { + parsedVersion, err := versionutil.ParseGeneric(k.Version) + if err != nil { + return false + } + + if parsedVersion.AtLeast(versionutil.MustParseSemantic("v1.24.0")) { + return true + } + + return false +} diff --git a/cmd/kk/pkg/binaries/kubernetes.go b/cmd/kk/pkg/binaries/kubernetes.go index df87b25a2..98eeafe99 100644 --- a/cmd/kk/pkg/binaries/kubernetes.go +++ b/cmd/kk/pkg/binaries/kubernetes.go @@ -26,7 +26,6 @@ import ( "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/logger" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/util" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/files" - "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/version/kubernetes" "github.com/pkg/errors" ) @@ -50,7 +49,7 @@ func K8sFilesDownloadHTTP(kubeConf *common.KubeConf, path, version, arch string, if kubeConf.Cluster.Kubernetes.ContainerManager == kubekeyapiv1alpha2.Docker { binaries = append(binaries, docker) - if kubernetes.IsAtLeastV124(kubeConf.Cluster.Kubernetes.Version) && kubeConf.Cluster.Kubernetes.ContainerManager == common.Docker { + if kubeConf.Cluster.Kubernetes.IsAtLeastV124() && kubeConf.Cluster.Kubernetes.ContainerManager == common.Docker { binaries = append(binaries, criDockerd) } } else if kubeConf.Cluster.Kubernetes.ContainerManager == kubekeyapiv1alpha2.Containerd { diff --git a/cmd/kk/pkg/bootstrap/confirm/tasks.go b/cmd/kk/pkg/bootstrap/confirm/tasks.go index 59edc41f2..445ccf805 100644 --- a/cmd/kk/pkg/bootstrap/confirm/tasks.go +++ b/cmd/kk/pkg/bootstrap/confirm/tasks.go @@ -28,7 +28,6 @@ import ( "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/connector" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/logger" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/util" - "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/version/kubernetes" "github.com/mitchellh/mapstructure" "github.com/modood/table" "github.com/pkg/errors" @@ -107,17 +106,18 @@ func (i *InstallationConfirm) Execute(runtime connector.Runtime) error { fmt.Println("https://github.com/kubesphere/kubekey#requirements-and-recommendations") fmt.Println("") - if kubernetes.IsAtLeastV124(i.KubeConf.Cluster.Kubernetes.Version) && i.KubeConf.Cluster.Kubernetes.ContainerManager == common.Docker && - i.KubeConf.Cluster.Kubernetes.Type != common.Kubernetes { + if i.KubeConf.Cluster.Kubernetes.IsAtLeastV124() && i.KubeConf.Cluster.Kubernetes.ContainerManager == common.Docker { 
fmt.Println("[Notice]") - fmt.Println("Incorrect runtime. Please specify a container runtime other than Docker to install Kubernetes v1.24 or later.") + fmt.Println("For Kubernetes v1.24 and later, dockershim has been deprecated.") + fmt.Println("Current runtime is set to Docker and `cri-dockerd` will be installed to support Kubernetes v1.24 and later.") + fmt.Println("You can also specify a container runtime other than Docker to install Kubernetes v1.24 or later.") fmt.Println("You can set \"spec.kubernetes.containerManager\" in the configuration file to \"containerd\" or add \"--container-manager containerd\" to the \"./kk create cluster\" command.") fmt.Println("For more information, see:") fmt.Println("https://github.com/kubesphere/kubekey/blob/master/docs/commands/kk-create-cluster.md") fmt.Println("https://kubernetes.io/docs/setup/production-environment/container-runtimes/#container-runtimes") fmt.Println("https://kubernetes.io/blog/2022/02/17/dockershim-faq/") + fmt.Println("https://github.com/Mirantis/cri-dockerd") fmt.Println("") - stopFlag = true } if stopFlag { @@ -260,17 +260,32 @@ Warning: k8sV124 := versionutil.MustParseSemantic("v1.24.0") if k8sVersion.AtLeast(k8sV124) && versionutil.MustParseSemantic(currentK8sVersion).LessThan(k8sV124) && strings.Contains(cri, "docker") { fmt.Println("[Notice]") - fmt.Println("Pre-upgrade check failed. The container runtime of the current cluster is Docker.") - fmt.Println("Kubernetes v1.24 and later no longer support dockershim and Docker.") - fmt.Println("Make sure you have completed the migration from Docker to other container runtimes that are compatible with the Kubernetes CRI.") + fmt.Println("For Kubernetes v1.24 and later, dockershim has been deprecated.") + fmt.Println("The container runtime of the current cluster is Docker, and `cri-dockerd` will be installed to support Kubernetes v1.24 and later.") + fmt.Println("You can also migrate the container runtime from Docker to other runtimes that are compatible with the Kubernetes CRI.") fmt.Println("For more information, see:") fmt.Println("https://kubernetes.io/docs/setup/production-environment/container-runtimes/#container-runtimes") fmt.Println("https://kubernetes.io/blog/2022/02/17/dockershim-faq/") + fmt.Println("https://github.com/Mirantis/cri-dockerd") + fmt.Println("https://kubernetes.io/docs/tasks/administer-cluster/migrating-from-dockershim/change-runtime-containerd/") fmt.Println("") } } } + if featureGates, ok := u.PipelineCache.GetMustString(common.ClusterFeatureGates); ok { + if featureGates != "" { + fmt.Println("[Notice]") + fmt.Println("The feature-gates in the cluster are as follows:") + fmt.Println("") + fmt.Printf(" %s\n", featureGates) + fmt.Println("") + fmt.Println("Please ensure that there are no deprecated feature-gates in the target version.") + fmt.Println("You can modify the feature-gates in `kubeadm-config` and `kubelet-config` configmaps in the kube-system namespace.") + fmt.Println("") + } + } + reader := bufio.NewReader(os.Stdin) confirmOK := false for !confirmOK { diff --git a/cmd/kk/pkg/bootstrap/os/templates/init_script.go b/cmd/kk/pkg/bootstrap/os/templates/init_script.go index 164be49bf..09e14c878 100644 --- a/cmd/kk/pkg/bootstrap/os/templates/init_script.go +++ b/cmd/kk/pkg/bootstrap/os/templates/init_script.go @@ -18,10 +18,10 @@ package templates import ( "fmt" - "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/registry" - "strings" "text/template" + "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/bootstrap/registry" + "github.com/lithammer/dedent"
"github.com/kubesphere/kubekey/v3/cmd/kk/pkg/common" @@ -249,11 +249,16 @@ func GenerateHosts(runtime connector.ModuleRuntime, kubeConf *common.KubeConf) [ if len(runtime.GetHostsByRole(common.Registry)) > 0 { if kubeConf.Cluster.Registry.PrivateRegistry != "" { - hostsList = append(hostsList, fmt.Sprintf("%s %s", strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[0], kubeConf.Cluster.Registry.PrivateRegistry)) - hostsList = append(hostsList, fmt.Sprintf("%s %s", strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[1], kubeConf.Cluster.Registry.PrivateRegistry)) + hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalIPv4Address(), kubeConf.Cluster.Registry.PrivateRegistry)) + if runtime.GetHostsByRole(common.Registry)[0].GetInternalIPv6Address() != "" { + hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalIPv6Address(), kubeConf.Cluster.Registry.PrivateRegistry)) + } + } else { - hostsList = append(hostsList, fmt.Sprintf("%s %s", strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[0], registry.RegistryCertificateBaseName)) - hostsList = append(hostsList, fmt.Sprintf("%s %s", strings.Split(runtime.GetHostsByRole(common.Registry)[0].GetInternalAddress(),",")[1], registry.RegistryCertificateBaseName)) + hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalIPv4Address(), registry.RegistryCertificateBaseName)) + if runtime.GetHostsByRole(common.Registry)[0].GetInternalIPv6Address() != "" { + hostsList = append(hostsList, fmt.Sprintf("%s %s", runtime.GetHostsByRole(common.Registry)[0].GetInternalIPv6Address(), registry.RegistryCertificateBaseName)) + } } } diff --git a/cmd/kk/pkg/bootstrap/precheck/tasks.go b/cmd/kk/pkg/bootstrap/precheck/tasks.go index c6d60cb71..5517228a9 100644 --- a/cmd/kk/pkg/bootstrap/precheck/tasks.go +++ b/cmd/kk/pkg/bootstrap/precheck/tasks.go @@ -332,5 +332,11 @@ func (g *GetKubernetesNodesStatus) Execute(runtime connector.Runtime) error { return err } g.PipelineCache.Set(common.ClusterNodeCRIRuntimes, cri) + + featureGates, err := runtime.GetRunner().SudoCmd("cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep feature-gates", false) + if err != nil { + return err + } + g.PipelineCache.Set(common.ClusterFeatureGates, featureGates) return nil } diff --git a/cmd/kk/pkg/common/common.go b/cmd/kk/pkg/common/common.go index aeaa69f9c..a57ee1ec2 100644 --- a/cmd/kk/pkg/common/common.go +++ b/cmd/kk/pkg/common/common.go @@ -77,6 +77,7 @@ const ( KubeSphereVersion = "kubeSphereVersion" // current KubeSphere version ClusterNodeStatus = "clusterNodeStatus" ClusterNodeCRIRuntimes = "ClusterNodeCRIRuntimes" + ClusterFeatureGates = "ClusterFeatureGates" DesiredK8sVersion = "desiredK8sVersion" PlanK8sVersion = "planK8sVersion" NodeK8sVersion = "NodeK8sVersion" diff --git a/cmd/kk/pkg/common/kube_prepare.go b/cmd/kk/pkg/common/kube_prepare.go index 0ee6b9835..20be238f9 100644 --- a/cmd/kk/pkg/common/kube_prepare.go +++ b/cmd/kk/pkg/common/kube_prepare.go @@ -136,3 +136,11 @@ type EnableAudit struct { func (e *EnableAudit) PreCheck(_ connector.Runtime) (bool, error) { return e.KubeConf.Cluster.Kubernetes.EnableAudit(), nil } + +type AtLeastV124 struct { + KubePrepare +} + +func (a *AtLeastV124) PreCheck(_ connector.Runtime) (bool, error) { + return a.KubeConf.Cluster.Kubernetes.IsAtLeastV124(), nil +} diff --git 
a/cmd/kk/pkg/container/docker.go b/cmd/kk/pkg/container/docker.go index 8be4612a3..5455232a9 100644 --- a/cmd/kk/pkg/container/docker.go +++ b/cmd/kk/pkg/container/docker.go @@ -27,7 +27,6 @@ import ( "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/files" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/registry" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/utils" - "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/version/kubernetes" "github.com/pkg/errors" ) @@ -188,7 +187,7 @@ func (d *DisableDocker) Execute(runtime connector.Runtime) error { filepath.Join("/etc/docker", templates.DockerConfig.Name()), } - if kubernetes.IsAtLeastV124(d.KubeConf.Cluster.Kubernetes.Version) && d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Docker { + if d.KubeConf.Cluster.Kubernetes.IsAtLeastV124() && d.KubeConf.Cluster.Kubernetes.ContainerManager == common.Docker { if _, err := runtime.GetRunner().SudoCmd("systemctl disable cri-docker && systemctl stop cri-docker", false); err != nil { return errors.Wrap(errors.WithStack(err), fmt.Sprintf("disable and stop cri-docker failed")) diff --git a/cmd/kk/pkg/container/module.go b/cmd/kk/pkg/container/module.go index 61a7bfa5d..5c732b02a 100644 --- a/cmd/kk/pkg/container/module.go +++ b/cmd/kk/pkg/container/module.go @@ -30,7 +30,6 @@ import ( "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/images" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/kubernetes" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/registry" - versionk8s "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/version/kubernetes" ) type InstallContainerModule struct { @@ -74,19 +73,6 @@ func InstallDocker(m *InstallContainerModule) []task.Interface { Retry: 2, } - syncCriDockerdBinaries := &task.RemoteTask{ - Name: "SyncCriDockerdBinaries", - Desc: "Sync cri-dockerd binaries", - Hosts: m.Runtime.GetHostsByRole(common.K8s), - Prepare: &prepare.PrepareCollection{ - &kubernetes.NodeInCluster{Not: true}, - &CriDockerdExist{Not: true}, - }, - Action: new(SyncCriDockerdBinaries), - Parallel: true, - Retry: 2, - } - generateContainerdService := &task.RemoteTask{ Name: "GenerateContainerdService", Desc: "Generate containerd service", @@ -175,48 +161,6 @@ func InstallDocker(m *InstallContainerModule) []task.Interface { Parallel: true, } - generateCriDockerdService := &task.RemoteTask{ - Name: "GenerateCriDockerdService", - Desc: "Generate cri-dockerd service", - Hosts: m.Runtime.GetHostsByRole(common.K8s), - Prepare: &prepare.PrepareCollection{ - &kubernetes.NodeInCluster{Not: true}, - &CriDockerdExist{Not: true}, - }, - Action: &action.Template{ - Template: templates.CriDockerService, - Dst: filepath.Join("/etc/systemd/system", templates.CriDockerService.Name()), - }, - Parallel: true, - } - - enableCriDockerd := &task.RemoteTask{ - Name: "EnableCriDockerd", - Desc: "Enable cri-dockerd", - Hosts: m.Runtime.GetHostsByRole(common.K8s), - Prepare: &prepare.PrepareCollection{ - &kubernetes.NodeInCluster{Not: true}, - &CriDockerdExist{Not: true}, - }, - Action: new(EnableCriDockerd), - Parallel: true, - } - - if versionk8s.IsAtLeastV124(m.KubeConf.Cluster.Kubernetes.Version) && m.KubeConf.Cluster.Kubernetes.ContainerManager == common.Docker { - return []task.Interface{ - syncBinaries, - syncCriDockerdBinaries, - generateContainerdService, - generateDockerService, - generateDockerConfig, - enableContainerdForDocker, - enableDocker, - dockerLoginRegistry, - generateCriDockerdService, - enableCriDockerd, - } - } - return []task.Interface{ syncBinaries, generateContainerdService, @@ -242,19 +186,6 @@ func InstallContainerd(m 
*InstallContainerModule) []task.Interface { Retry: 2, } - syncCrictlBinaries := &task.RemoteTask{ - Name: "SyncCrictlBinaries", - Desc: "Sync crictl binaries", - Hosts: m.Runtime.GetHostsByRole(common.K8s), - Prepare: &prepare.PrepareCollection{ - &kubernetes.NodeInCluster{Not: true}, - &CrictlExist{Not: true}, - }, - Action: new(SyncCrictlBinaries), - Parallel: true, - Retry: 2, - } - generateContainerdService := &task.RemoteTask{ Name: "GenerateContainerdService", Desc: "Generate containerd service", @@ -292,6 +223,31 @@ func InstallContainerd(m *InstallContainerModule) []task.Interface { Parallel: true, } + enableContainerd := &task.RemoteTask{ + Name: "EnableContainerd", + Desc: "Enable containerd", + Hosts: m.Runtime.GetHostsByRole(common.K8s), + Prepare: &prepare.PrepareCollection{ + &kubernetes.NodeInCluster{Not: true}, + &ContainerdExist{Not: true}, + }, + Action: new(EnableContainerd), + Parallel: true, + } + + syncCrictlBinaries := &task.RemoteTask{ + Name: "SyncCrictlBinaries", + Desc: "Sync crictl binaries", + Hosts: m.Runtime.GetHostsByRole(common.K8s), + Prepare: &prepare.PrepareCollection{ + &kubernetes.NodeInCluster{Not: true}, + &CrictlExist{Not: true}, + }, + Action: new(SyncCrictlBinaries), + Parallel: true, + Retry: 2, + } + generateCrictlConfig := &task.RemoteTask{ Name: "GenerateCrictlConfig", Desc: "Generate crictl config", @@ -310,25 +266,104 @@ func InstallContainerd(m *InstallContainerModule) []task.Interface { Parallel: true, } - enableContainerd := &task.RemoteTask{ - Name: "EnableContainerd", - Desc: "Enable containerd", + return []task.Interface{ + syncContainerd, + generateContainerdService, + generateContainerdConfig, + enableContainerd, + syncCrictlBinaries, + generateCrictlConfig, + } +} + +type InstallCriDockerdModule struct { + common.KubeModule + Skip bool +} + +func (m *InstallCriDockerdModule) Init() { + m.Name = "InstallCriDockerdModule" + m.Desc = "Install cri-dockerd" + + syncCriDockerdBinaries := &task.RemoteTask{ + Name: "SyncCriDockerdBinaries", + Desc: "Sync cri-dockerd binaries", Hosts: m.Runtime.GetHostsByRole(common.K8s), Prepare: &prepare.PrepareCollection{ - &kubernetes.NodeInCluster{Not: true}, - &ContainerdExist{Not: true}, + &CriDockerdExist{Not: true}, + &common.AtLeastV124{}, }, - Action: new(EnableContainerd), + Action: new(SyncCriDockerdBinaries), Parallel: true, + Retry: 2, } - return []task.Interface{ - syncContainerd, + generateCriDockerdService := &task.RemoteTask{ + Name: "GenerateCriDockerdService", + Desc: "Generate cri-dockerd service", + Hosts: m.Runtime.GetHostsByRole(common.K8s), + Prepare: &prepare.PrepareCollection{ + &CriDockerdExist{Not: true}, + &common.AtLeastV124{}, + }, + Action: &action.Template{ + Template: templates.CriDockerService, + Dst: filepath.Join("/etc/systemd/system", templates.CriDockerService.Name()), + Data: util.Data{ + "SandBoxImage": images.GetImage(m.Runtime, m.KubeConf, "pause").ImageName(), + }, + }, + Parallel: true, + } + + enableCriDockerd := &task.RemoteTask{ + Name: "EnableCriDockerd", + Desc: "Enable cri-dockerd", + Hosts: m.Runtime.GetHostsByRole(common.K8s), + Prepare: &prepare.PrepareCollection{ + &CriDockerdExist{Not: true}, + &common.AtLeastV124{}, + }, + Action: new(EnableCriDockerd), + Parallel: true, + } + + syncCrictlBinaries := &task.RemoteTask{ + Name: "SyncCrictlBinaries", + Desc: "Sync crictl binaries", + Hosts: m.Runtime.GetHostsByRole(common.K8s), + Prepare: &prepare.PrepareCollection{ + &CrictlExist{Not: true}, + &common.AtLeastV124{}, + }, + Action: 
new(SyncCrictlBinaries), + Parallel: true, + Retry: 2, + } + + generateCrictlConfig := &task.RemoteTask{ + Name: "GenerateCrictlConfig", + Desc: "Generate crictl config", + Hosts: m.Runtime.GetHostsByRole(common.K8s), + Prepare: &prepare.PrepareCollection{ + &common.AtLeastV124{}, + }, + Action: &action.Template{ + Template: templates.CrictlConfig, + Dst: filepath.Join("/etc/", templates.CrictlConfig.Name()), + Data: util.Data{ + "Endpoint": m.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint, + }, + }, + Parallel: true, + } + + m.Tasks = []task.Interface{ + syncCriDockerdBinaries, + generateCriDockerdService, + enableCriDockerd, syncCrictlBinaries, - generateContainerdService, - generateContainerdConfig, generateCrictlConfig, - enableContainerd, } } diff --git a/cmd/kk/pkg/container/templates/cri_dockerd_service.go b/cmd/kk/pkg/container/templates/cri_dockerd_service.go index ed5ca4196..9e873d224 100644 --- a/cmd/kk/pkg/container/templates/cri_dockerd_service.go +++ b/cmd/kk/pkg/container/templates/cri_dockerd_service.go @@ -29,7 +29,7 @@ Documentation=https://docs.mirantis.com [Service] Type=notify -ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image docker.io/kubesphere/pause:3.8 +ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image {{ .SandBoxImage }} ExecReload=/bin/kill -s HUP $MAINPID TimeoutSec=0 RestartSec=2 diff --git a/cmd/kk/pkg/files/file.go b/cmd/kk/pkg/files/file.go index a3a8cc0d4..9079837ad 100644 --- a/cmd/kk/pkg/files/file.go +++ b/cmd/kk/pkg/files/file.go @@ -153,6 +153,9 @@ func NewKubeBinary(name, arch, version, prePath string, getCmd func(path, url st component.Type = CRIDOCKERD component.FileName = fmt.Sprintf("cri-dockerd-%s.tgz", version) component.Url = fmt.Sprintf("https://github.com/Mirantis/cri-dockerd/releases/download/v%s/cri-dockerd-%s.%s.tgz", version, version, arch) + if component.Zone == "cn" { + component.Url = fmt.Sprintf("https://kubernetes-release.pek3b.qingstor.com/cri-dockerd/releases/download/v%s/cri-dockerd-%s.%s.tgz", version, version, arch) + } case crictl: component.Type = CRICTL component.FileName = fmt.Sprintf("crictl-%s-linux-%s.tar.gz", version, arch) diff --git a/cmd/kk/pkg/kubernetes/module.go b/cmd/kk/pkg/kubernetes/module.go index c948c1c03..9123472e2 100644 --- a/cmd/kk/pkg/kubernetes/module.go +++ b/cmd/kk/pkg/kubernetes/module.go @@ -18,9 +18,10 @@ package kubernetes import ( "fmt" + "path/filepath" + "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/util" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/plugins/dns" - "path/filepath" "github.com/pkg/errors" @@ -508,6 +509,7 @@ func (p *ProgressiveUpgradeModule) Init() { Action: new(CalculateNextVersion), } + // prepare download := &task.LocalTask{ Name: "DownloadBinaries", Desc: "Download installation binaries", @@ -524,6 +526,7 @@ func (p *ProgressiveUpgradeModule) Init() { Parallel: true, } + // upgrade kubernetes syncBinary := &task.RemoteTask{ Name: "SyncKubeBinary", Desc: "Synchronize kubernetes binaries", diff --git a/cmd/kk/pkg/kubernetes/tasks.go b/cmd/kk/pkg/kubernetes/tasks.go index c6e8a1d6f..86703000c 100644 --- a/cmd/kk/pkg/kubernetes/tasks.go +++ b/cmd/kk/pkg/kubernetes/tasks.go @@ -46,7 +46,6 @@ import ( "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/images" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/kubernetes/templates" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/utils" - "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/version/kubernetes" ) type GetClusterStatus struct { @@ -307,7 +306,7 @@ func (g *GenerateKubeadmConfig) Execute(runtime 
connector.Runtime) error { "CgroupDriver": checkCgroupDriver, "BootstrapToken": bootstrapToken, "CertificateKey": certificateKey, - "IPv6Support": host.GetInternalIPv6Address()!="", + "IPv6Support": host.GetInternalIPv6Address() != "", }, } @@ -717,15 +716,10 @@ func (u *UpgradeKubeMaster) Execute(runtime connector.Runtime) error { return errors.Wrap(errors.WithStack(err), fmt.Sprintf("stop kubelet failed: %s", host.GetName())) } - if kubernetes.IsAtLeastV124(u.KubeConf.Cluster.Kubernetes.Version){ - if _, err := runtime.GetRunner().SudoCmd("sed -i 's/ --network-plugin=cni / /g' /var/lib/kubelet/kubeadm-flags.env", false); err != nil { + if u.KubeConf.Cluster.Kubernetes.IsAtLeastV124() { + if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("echo 'KUBELET_KUBEADM_ARGS=\\\"--container-runtime-endpoint=%s\\\"' > /var/lib/kubelet/kubeadm-flags.env", u.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint), false); err != nil { return errors.Wrap(errors.WithStack(err), fmt.Sprintf("update kubelet config failed: %s", host.GetName())) } - if u.KubeConf.Cluster.Kubernetes.ContainerManager == common.Docker { - if _, err := runtime.GetRunner().SudoCmd("sed -i 's/ --container-runtime=remote / /g' /var/lib/kubelet/kubeadm-flags.env", false); err != nil { - return errors.Wrap(errors.WithStack(err), fmt.Sprintf("update kubelet config failed: %s", host.GetName())) - } - } } if err := SetKubeletTasks(runtime, u.KubeAction); err != nil { @@ -753,8 +747,8 @@ func (u *UpgradeKubeWorker) Execute(runtime connector.Runtime) error { if _, err := runtime.GetRunner().SudoCmd("systemctl stop kubelet", true); err != nil { return errors.Wrap(errors.WithStack(err), fmt.Sprintf("stop kubelet failed: %s", host.GetName())) } - if versionutil.MustParseSemantic(u.KubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic("v1.24.0")) { - if _, err := runtime.GetRunner().SudoCmd("sed -i 's/ --network-plugin=cni / /g' /var/lib/kubelet/kubeadm-flags.env", false); err != nil { + if u.KubeConf.Cluster.Kubernetes.IsAtLeastV124() { + if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("echo 'KUBELET_KUBEADM_ARGS=\\\"--container-runtime-endpoint=%s\\\"' > /var/lib/kubelet/kubeadm-flags.env", u.KubeConf.Cluster.Kubernetes.ContainerRuntimeEndpoint), false); err != nil { return errors.Wrap(errors.WithStack(err), fmt.Sprintf("update kubelet config failed: %s", host.GetName())) } } diff --git a/cmd/kk/pkg/pipelines/add_nodes.go b/cmd/kk/pkg/pipelines/add_nodes.go index 61dd067e0..066db3ac3 100644 --- a/cmd/kk/pkg/pipelines/add_nodes.go +++ b/cmd/kk/pkg/pipelines/add_nodes.go @@ -59,6 +59,7 @@ func NewAddNodesPipeline(runtime *common.KubeRuntime) error { &kubernetes.RestartKubeletModule{}, &kubernetes.StatusModule{}, &container.InstallContainerModule{}, + &container.InstallCriDockerdModule{Skip: runtime.Cluster.Kubernetes.ContainerManager != "docker"}, &images.PullModule{Skip: runtime.Arg.SkipPullImages}, &etcd.PreCheckModule{Skip: runtime.Cluster.Etcd.Type != kubekeyapiv1alpha2.KubeKey}, &etcd.CertsModule{}, diff --git a/cmd/kk/pkg/pipelines/create_cluster.go b/cmd/kk/pkg/pipelines/create_cluster.go index 99fc3e51b..4f5935f6c 100644 --- a/cmd/kk/pkg/pipelines/create_cluster.go +++ b/cmd/kk/pkg/pipelines/create_cluster.go @@ -67,6 +67,7 @@ func NewCreateClusterPipeline(runtime *common.KubeRuntime) error { &os.ConfigureOSModule{Skip: runtime.Cluster.System.SkipConfigureOS}, &kubernetes.StatusModule{}, &container.InstallContainerModule{}, + &container.InstallCriDockerdModule{Skip: 
runtime.Cluster.Kubernetes.ContainerManager != "docker"}, &images.CopyImagesToRegistryModule{Skip: skipPushImages}, &images.PullModule{Skip: runtime.Arg.SkipPullImages}, &etcd.PreCheckModule{Skip: runtime.Cluster.Etcd.Type != kubekeyapiv1alpha2.KubeKey}, diff --git a/cmd/kk/pkg/pipelines/upgrade_cluster.go b/cmd/kk/pkg/pipelines/upgrade_cluster.go index 918f5303c..ef3713141 100644 --- a/cmd/kk/pkg/pipelines/upgrade_cluster.go +++ b/cmd/kk/pkg/pipelines/upgrade_cluster.go @@ -19,6 +19,10 @@ package pipelines import ( "fmt" + "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/binaries" + + "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/container" + "github.com/pkg/errors" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/artifact" @@ -43,6 +47,8 @@ func NewUpgradeClusterPipeline(runtime *common.KubeRuntime) error { &precheck.ClusterPreCheckModule{SkipDependencyCheck: runtime.Arg.SkipDependencyCheck}, &confirm.UpgradeConfirmModule{Skip: runtime.Arg.SkipConfirmCheck}, &artifact.UnArchiveModule{Skip: noArtifact}, + &binaries.NodeBinariesModule{}, + &container.InstallCriDockerdModule{Skip: runtime.Cluster.Kubernetes.ContainerManager != "docker"}, &kubernetes.SetUpgradePlanModule{Step: kubernetes.ToV121}, &kubernetes.ProgressiveUpgradeModule{Step: kubernetes.ToV121}, &loadbalancer.HaproxyModule{Skip: !runtime.Cluster.ControlPlaneEndpoint.IsInternalLBEnabled()}, diff --git a/cmd/kk/pkg/version/kubernetes/version_enum.go b/cmd/kk/pkg/version/kubernetes/version_enum.go index 730466d35..e0fffdbbf 100644 --- a/cmd/kk/pkg/version/kubernetes/version_enum.go +++ b/cmd/kk/pkg/version/kubernetes/version_enum.go @@ -103,16 +103,3 @@ func SupportedK8sVersionList() []string { return versionsList } - -func IsAtLeastV124(clusterVersion string) bool { - parsedVersion, err := versionutil.ParseGeneric(clusterVersion) - if err != nil { - return false - } - - if parsedVersion.AtLeast(versionutil.MustParseSemantic("v1.24.0")) { - return true - } - - return false -} \ No newline at end of file diff --git a/hack/sync-components.sh b/hack/sync-components.sh index 5bd13d4d6..6986a4b0e 100755 --- a/hack/sync-components.sh +++ b/hack/sync-components.sh @@ -45,6 +45,7 @@ CONTAINERD_VERSION=${CONTAINERD_VERSION} RUNC_VERSION=${RUNC_VERSION} COMPOSE_VERSION=${COMPOSE_VERSION} CALICO_VERSION=${CALICO_VERSION} +CRI_DOCKER_VERSION=${CRI_DOCKER_VERSION} # qsctl QSCTL_ACCESS_KEY_ID=${QSCTL_ACCESS_KEY_ID} @@ -297,6 +298,26 @@ if [ $COMPOSE_VERSION ]; then rm -rf binaries fi +# Sync cri-dockerd Binary +if [ $CRI_DOCKER_VERSION ]; then + for arch in ${ARCHS[@]} + do + mkdir -p binaries/cri-dockerd/$CRI_DOCKER_VERSION/$arch + echo "Synchronizing cri-dockerd-$arch" + + curl -L -o binaries/cri-dockerd/$CRI_DOCKER_VERSION/$arch/cri-dockerd-$CRI_DOCKER_VERSION.$arch.tgz \ + https://github.com/Mirantis/cri-dockerd/releases/download/v$CRI_DOCKER_VERSION/cri-dockerd-$CRI_DOCKER_VERSION.$arch.tgz + + sha256sum binaries/cri-dockerd/$CRI_DOCKER_VERSION/$arch/cri-dockerd-$CRI_DOCKER_VERSION.$arch.tgz + + qsctl cp binaries/cri-dockerd/$CRI_DOCKER_VERSION/$arch/cri-dockerd-$CRI_DOCKER_VERSION.$arch.tgz \ + qs://kubernetes-release/cri-dockerd/releases/download/v$CRI_DOCKER_VERSION/cri-dockerd-$CRI_DOCKER_VERSION.$arch.tgz \ + -c qsctl-config.yaml + done + + rm -rf binaries +fi + rm -rf qsctl-config.yaml # Sync NodeLocalDns Images diff --git a/version/components.json b/version/components.json index ff58f9af3..6c64ffd9e 100644 --- a/version/components.json +++ b/version/components.json @@ -979,10 +979,10 @@ }, "cri-dockerd": { "amd64": { - "0.3.9":
"a6d9b4b796e9eff830311a2349d259507302cb3955dd07b78296b91e40e8b433" + "0.3.10": "46bc9594c634e1bae9b4c7dfaba1b4b34917b407c3275c08425f93d193024dab" }, "arm64": { - "0.3.9": "f5051002b4f95b0e8fe7fbd5f8de4493350e010834d2a8b647f2b26c45c6c203" + "0.3.10": "8684425e696dc52c3a10086af34af1ce6a7412de0ef819c3994f11e0e17d7fb9" } }, "containerd": { From cd60d9e6ecedbab80ba04d74f7a7d6ebc2abd278 Mon Sep 17 00:00:00 2001 From: pixiake Date: Thu, 29 Feb 2024 21:44:53 +0800 Subject: [PATCH 23/24] upgrade containerd, runc and calico version Signed-off-by: pixiake --- cmd/kk/apis/kubekey/v1alpha2/default.go | 6 +- cmd/kk/pkg/plugins/network/modules.go | 84 +- cmd/kk/pkg/plugins/network/tasks.go | 55 +- .../{calico_v1.16+.go => calico.tmpl} | 255 ++-- .../network/templates/calico_v1.16-.go | 1037 ----------------- hack/sync-components.sh | 10 +- version/components.json | 18 +- 7 files changed, 269 insertions(+), 1196 deletions(-) rename cmd/kk/pkg/plugins/network/templates/{calico_v1.16+.go => calico.tmpl} (96%) delete mode 100644 cmd/kk/pkg/plugins/network/templates/calico_v1.16-.go diff --git a/cmd/kk/apis/kubekey/v1alpha2/default.go b/cmd/kk/apis/kubekey/v1alpha2/default.go index dad33ac1d..29a90ccc9 100644 --- a/cmd/kk/apis/kubekey/v1alpha2/default.go +++ b/cmd/kk/apis/kubekey/v1alpha2/default.go @@ -43,11 +43,11 @@ const ( DefaultEtcdPort = "2379" DefaultDockerVersion = "24.0.9" DefaultCriDockerdVersion = "0.3.10" - DefaultContainerdVersion = "1.7.12" - DefaultRuncVersion = "v1.1.11" + DefaultContainerdVersion = "1.7.13" + DefaultRuncVersion = "v1.1.12" DefaultCrictlVersion = "v1.29.0" DefaultKubeVersion = "v1.23.15" - DefaultCalicoVersion = "v3.26.1" + DefaultCalicoVersion = "v3.27.2" DefaultFlannelVersion = "v0.21.3" DefaultFlannelCniPluginVersion = "v1.1.2" DefaultCniVersion = "v1.2.0" diff --git a/cmd/kk/pkg/plugins/network/modules.go b/cmd/kk/pkg/plugins/network/modules.go index 188e35e2f..ab4b9d151 100644 --- a/cmd/kk/pkg/plugins/network/modules.go +++ b/cmd/kk/pkg/plugins/network/modules.go @@ -18,7 +18,6 @@ package network import ( "path/filepath" - "strings" versionutil "k8s.io/apimachinery/pkg/util/version" @@ -93,34 +92,7 @@ func deployMultus(d *DeployNetworkPluginModule) []task.Interface { } func deployCalico(d *DeployNetworkPluginModule) []task.Interface { - generateCalicoOld := &task.RemoteTask{ - Name: "GenerateCalico", - Desc: "Generate calico", - Hosts: d.Runtime.GetHostsByRole(common.Master), - Prepare: &prepare.PrepareCollection{ - new(common.OnlyFirstMaster), - new(OldK8sVersion), - }, - Action: &action.Template{ - Template: templates.CalicoOld, - Dst: filepath.Join(common.KubeConfigDir, templates.CalicoOld.Name()), - Data: util.Data{ - "KubePodsCIDR": d.KubeConf.Cluster.Network.KubePodsCIDR, - "CalicoCniImage": images.GetImage(d.Runtime, d.KubeConf, "calico-cni").ImageName(), - "CalicoNodeImage": images.GetImage(d.Runtime, d.KubeConf, "calico-node").ImageName(), - "CalicoFlexvolImage": images.GetImage(d.Runtime, d.KubeConf, "calico-flexvol").ImageName(), - "CalicoControllersImage": images.GetImage(d.Runtime, d.KubeConf, "calico-kube-controllers").ImageName(), - "TyphaEnabled": len(d.Runtime.GetHostsByRole(common.K8s)) > 50 || d.KubeConf.Cluster.Network.Calico.Typha(), - "VethMTU": d.KubeConf.Cluster.Network.Calico.VethMTU, - "NodeCidrMaskSize": d.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize, - "IPIPMode": d.KubeConf.Cluster.Network.Calico.IPIPMode, - "VXLANMode": d.KubeConf.Cluster.Network.Calico.VXLANMode, - }, - }, - Parallel: true, - } - - generateCalicoNew := &task.RemoteTask{ + 
generateCalicoManifests := &task.RemoteTask{ Name: "GenerateCalico", Desc: "Generate calico", Hosts: d.Runtime.GetHostsByRole(common.Master), @@ -128,28 +100,7 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface { new(common.OnlyFirstMaster), &OldK8sVersion{Not: true}, }, - Action: &action.Template{ - Template: templates.CalicoNew, - Dst: filepath.Join(common.KubeConfigDir, templates.CalicoNew.Name()), - Data: util.Data{ - "KubePodsV4CIDR": strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",")[0], - "KubePodsV6CIDR": GetKubePodsV6CIDR(d), - "CalicoCniImage": images.GetImage(d.Runtime, d.KubeConf, "calico-cni").ImageName(), - "CalicoNodeImage": images.GetImage(d.Runtime, d.KubeConf, "calico-node").ImageName(), - "CalicoFlexvolImage": images.GetImage(d.Runtime, d.KubeConf, "calico-flexvol").ImageName(), - "CalicoControllersImage": images.GetImage(d.Runtime, d.KubeConf, "calico-kube-controllers").ImageName(), - "CalicoTyphaImage": images.GetImage(d.Runtime, d.KubeConf, "calico-typha").ImageName(), - "TyphaEnabled": len(d.Runtime.GetHostsByRole(common.K8s)) > 50 || d.KubeConf.Cluster.Network.Calico.Typha(), - "VethMTU": d.KubeConf.Cluster.Network.Calico.VethMTU, - "NodeCidrMaskSize": d.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize, - "IPIPMode": d.KubeConf.Cluster.Network.Calico.IPIPMode, - "VXLANMode": d.KubeConf.Cluster.Network.Calico.VXLANMode, - "ConatinerManagerIsIsula": d.KubeConf.Cluster.Kubernetes.ContainerManager == "isula", - "IPV4POOLNATOUTGOING": d.KubeConf.Cluster.Network.Calico.EnableIPV4POOL_NAT_OUTGOING(), - "DefaultIPPOOL": d.KubeConf.Cluster.Network.Calico.EnableDefaultIPPOOL(), - "IPv6Support": GetKubeIPv6Support(d), - }, - }, + Action: new(GenerateCalicoManifests), Parallel: true, } @@ -163,35 +114,10 @@ func deployCalico(d *DeployNetworkPluginModule) []task.Interface { Retry: 5, } - if K8sVersionAtLeast(d.KubeConf.Cluster.Kubernetes.Version, "v1.16.0") { - return []task.Interface{ - generateCalicoNew, - deploy, - } - } else { - return []task.Interface{ - generateCalicoOld, - deploy, - } - } -} - -func GetKubeIPv6Support(d *DeployNetworkPluginModule) bool { - IPv6Support := false - kubePodsCIDR := strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",") - if len(kubePodsCIDR)==2 { - IPv6Support = true - } - return IPv6Support -} - -func GetKubePodsV6CIDR(d *DeployNetworkPluginModule) string { - kubePodsV6CIDR := "" - kubePodsCIDR := strings.Split(d.KubeConf.Cluster.Network.KubePodsCIDR, ",") - if len(kubePodsCIDR)==2 { - kubePodsV6CIDR = kubePodsCIDR[1] + return []task.Interface{ + generateCalicoManifests, + deploy, } - return kubePodsV6CIDR } func deployFlannel(d *DeployNetworkPluginModule) []task.Interface { diff --git a/cmd/kk/pkg/plugins/network/tasks.go b/cmd/kk/pkg/plugins/network/tasks.go index cd3cf3419..86efe28e6 100644 --- a/cmd/kk/pkg/plugins/network/tasks.go +++ b/cmd/kk/pkg/plugins/network/tasks.go @@ -19,13 +19,15 @@ package network import ( "embed" "fmt" - "github.com/pkg/errors" "io" "os" "path/filepath" "strings" + "text/template" "time" + "github.com/pkg/errors" + "github.com/kubesphere/kubekey/v3/cmd/kk/apis/kubekey/v1alpha2" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/common" "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/core/action" @@ -35,7 +37,7 @@ import ( "github.com/kubesphere/kubekey/v3/cmd/kk/pkg/plugins/network/templates" ) -//go:embed cilium-1.11.7.tgz hybridnet-0.6.6.tgz +//go:embed cilium-1.11.7.tgz hybridnet-0.6.6.tgz templates/calico.tmpl var f embed.FS @@ -425,3 +427,52 @@ func (d *DeployHybridnet) Execute(runtime 
connector.Runtime) error { } return nil } + +type GenerateCalicoManifests struct { + common.KubeAction +} + +func (g *GenerateCalicoManifests) Execute(runtime connector.Runtime) error { + calicoContent, err := f.ReadFile("templates/calico.tmpl") + if err != nil { + return err + } + calico := template.Must(template.New("network-plugin.yaml").Parse(string(calicoContent))) + + IPv6Support := false + kubePodsV6CIDR := "" + kubePodsCIDR := strings.Split(g.KubeConf.Cluster.Network.KubePodsCIDR, ",") + if len(kubePodsCIDR) == 2 { + IPv6Support = true + kubePodsV6CIDR = kubePodsCIDR[1] + } + + templateAction := action.Template{ + Template: calico, + Dst: filepath.Join(common.KubeConfigDir, calico.Name()), + Data: util.Data{ + "KubePodsV4CIDR": strings.Split(g.KubeConf.Cluster.Network.KubePodsCIDR, ",")[0], + "KubePodsV6CIDR": kubePodsV6CIDR, + "CalicoCniImage": images.GetImage(runtime, g.KubeConf, "calico-cni").ImageName(), + "CalicoNodeImage": images.GetImage(runtime, g.KubeConf, "calico-node").ImageName(), + "CalicoFlexvolImage": images.GetImage(runtime, g.KubeConf, "calico-flexvol").ImageName(), + "CalicoControllersImage": images.GetImage(runtime, g.KubeConf, "calico-kube-controllers").ImageName(), + "CalicoTyphaImage": images.GetImage(runtime, g.KubeConf, "calico-typha").ImageName(), + "TyphaEnabled": len(runtime.GetHostsByRole(common.K8s)) > 50 || g.KubeConf.Cluster.Network.Calico.Typha(), + "VethMTU": g.KubeConf.Cluster.Network.Calico.VethMTU, + "NodeCidrMaskSize": g.KubeConf.Cluster.Kubernetes.NodeCidrMaskSize, + "IPIPMode": g.KubeConf.Cluster.Network.Calico.IPIPMode, + "VXLANMode": g.KubeConf.Cluster.Network.Calico.VXLANMode, + "ConatinerManagerIsIsula": g.KubeConf.Cluster.Kubernetes.ContainerManager == "isula", + "IPV4POOLNATOUTGOING": g.KubeConf.Cluster.Network.Calico.EnableIPV4POOL_NAT_OUTGOING(), + "DefaultIPPOOL": g.KubeConf.Cluster.Network.Calico.EnableDefaultIPPOOL(), + "IPv6Support": IPv6Support, + }, + } + templateAction.Init(nil, nil) + if err := templateAction.Execute(runtime); err != nil { + return err + } + + return nil +} diff --git a/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go b/cmd/kk/pkg/plugins/network/templates/calico.tmpl similarity index 96% rename from cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go rename to cmd/kk/pkg/plugins/network/templates/calico.tmpl index 2f6d5fced..73c391022 100644 --- a/cmd/kk/pkg/plugins/network/templates/calico_v1.16+.go +++ b/cmd/kk/pkg/plugins/network/templates/calico.tmpl @@ -1,28 +1,3 @@ -/* - Copyright 2021 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package templates - -import ( - "github.com/lithammer/dedent" - "text/template" -) - -var CalicoNew = template.Must(template.New("network-plugin.yaml").Parse( - dedent.Dedent(` --- # Source: calico/templates/calico-kube-controllers.yaml # This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict @@ -194,11 +169,11 @@ spec: description: Name given to community value. 
type: string value: - description: Value must be of format aa:nn or aa:nn:mm. - For standard community use aa:nn format, where aa and - nn are 16 bit number. For large community use aa:nn:mm - format, where aa, nn and mm are 32 bit number. Where, - aa is an AS Number, nn and mm are per-AS identifier. + description: Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ type: string type: object @@ -266,12 +241,12 @@ spec: type: string communities: description: Communities can be list of either community names - already defined in Specs.Communities or community value - of format aa:nn or aa:nn:mm. For standard community use - aa:nn format, where aa and nn are 16 bit number. For - large community use aa:nn:mm format, where aa, nn and - mm are 32 bit number. Where,aa is an AS Number, nn and - mm are per-AS identifier. + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. items: type: string type: array @@ -372,12 +347,14 @@ spec: type: string cidr: type: string + interface: + type: string matchOperator: type: string + source: + type: string required: - action - - cidr - - matchOperator type: object type: array exportV6: @@ -391,12 +368,14 @@ spec: type: string cidr: type: string + interface: + type: string matchOperator: type: string + source: + type: string required: - action - - cidr - - matchOperator type: object type: array importV4: @@ -410,12 +389,14 @@ spec: type: string cidr: type: string + interface: + type: string matchOperator: type: string + source: + type: string required: - action - - cidr - - matchOperator type: object type: array importV6: @@ -429,12 +410,14 @@ spec: type: string cidr: type: string + interface: + type: string matchOperator: type: string + source: + type: string required: - action - - cidr - - matchOperator type: object type: array type: object @@ -542,8 +525,8 @@ spec: type: object peerIP: description: The IP address of the peer followed by an optional port - number to peer with. If port number is given, format should be []:port - or : for IPv4. If optional port number is not set, + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, and this peer IP and ASNumber belongs to a calico/node with ListenPort set in BGPConfiguration, then we use that port to peer. type: string @@ -1029,12 +1012,32 @@ spec: - Enable - Disable type: string + bpfCTLBLogFilter: + description: 'BPFCTLBLogFilter specifies, what is logged by connect + time load balancer when BPFLogLevel is debug. Currently has to be + specified as ''all'' when BPFLogFilters is set to see CTLB logs. + [Default: unset - means logs are emitted when BPFLogLevel id debug + and BPFLogFilters not set.]' + type: string + bpfConnectTimeLoadBalancing: + description: 'BPFConnectTimeLoadBalancing when in BPF mode, controls + whether Felix installs the connect-time load balancer. 
The connect-time + load balancer is required for the host to be able to reach Kubernetes + services and it improves the performance of pod-to-service connections.When + set to TCP, connect time load balancing is available only for services + with TCP ports. [Default: TCP]' + enum: + - TCP + - Enabled + - Disabled + type: string bpfConnectTimeLoadBalancingEnabled: description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode, controls whether Felix installs the connection-time load balancer. The connect-time load balancer is required for the host to be able to reach Kubernetes services and it improves the performance of pod-to-service - connections. The only reason to disable it is for debugging purposes. [Default: + connections. The only reason to disable it is for debugging purposes. + This will be deprecated. Use BPFConnectTimeLoadBalancing [Default: true]' type: boolean bpfDSROptoutCIDRs: @@ -1053,6 +1056,12 @@ spec: the cluster. It should not match the workload interfaces (usually named cali...). type: string + bpfDisableGROForIfaces: + description: BPFDisableGROForIfaces is a regular expression that controls + which interfaces Felix should disable the Generic Receive Offload + [GRO] option. It should not match the workload interfaces (usually + named cali...). + type: string bpfDisableUnprivileged: description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable unprivileged use of BPF. This ensures that unprivileged @@ -1068,7 +1077,15 @@ spec: with BPF programs regardless of what is the per-interfaces or global setting. Possible values are Disabled, Strict or Loose. [Default: Loose]' + pattern: ^(?i)(Disabled|Strict|Loose)?$ type: string + bpfExcludeCIDRsFromNAT: + description: BPFExcludeCIDRsFromNAT is a list of CIDRs that are to + be excluded from NAT resolution so that host can handle them. A + typical usecase is node local DNS cache. + items: + type: string + type: array bpfExtToServiceConnmark: description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit mark that is set on connections from an external client to a local @@ -1085,12 +1102,31 @@ spec: is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress node; this requires a permissive L2 network. [Default: Tunnel]' + pattern: ^(?i)(Tunnel|DSR)?$ type: string + bpfForceTrackPacketsFromIfaces: + description: 'BPFForceTrackPacketsFromIfaces in BPF mode, forces traffic + from these interfaces to skip Calico''s iptables NOTRACK rule, allowing + traffic from those interfaces to be tracked by Linux conntrack. Should + only be used for interfaces that are not used for the Calico fabric. For + example, a docker bridge device for non-Calico-networked containers. + [Default: docker+]' + items: + type: string + type: array bpfHostConntrackBypass: description: 'BPFHostConntrackBypass Controls whether to bypass Linux conntrack in BPF mode for workloads and services. [Default: true - bypass Linux conntrack]' type: boolean + bpfHostNetworkedNATWithoutCTLB: + description: 'BPFHostNetworkedNATWithoutCTLB when in BPF mode, controls + whether Felix does a NAT without CTLB. This along with BPFConnectTimeLoadBalancing + determines the CTLB behavior. [Default: Enabled]' + enum: + - Enabled + - Disabled + type: string bpfKubeProxyEndpointSlicesEnabled: description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls whether Felix's embedded kube-proxy accepts EndpointSlices or not. 
@@ -1106,6 +1142,7 @@ spec: minimum time between updates to the dataplane for Felix''s embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by batching up more work. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string bpfL3IfacePattern: description: BPFL3IfacePattern is a regular expression that allows @@ -1115,11 +1152,22 @@ spec: as any interfaces that handle incoming traffic to nodeports and services from outside the cluster. type: string + bpfLogFilters: + additionalProperties: + type: string + description: "BPFLogFilters is a map of key=values where the value + is a pcap filter expression and the key is an interface name with + 'all' denoting all interfaces, 'weps' all workload endpoints and + 'heps' all host endpoints. \n When specified as an env var, it accepts + a comma-separated list of key=values. [Default: unset - means all + debug logs are emitted]" + type: object bpfLogLevel: description: 'BPFLogLevel controls the log level of the BPF programs when in BPF dataplane mode. One of "Off", "Info", or "Debug". The logs are emitted to the BPF trace pipe, accessible with the command - tc exec bpf debug. [Default: Off].' + `tc exec bpf debug`. [Default: Off].' + pattern: ^(?i)(Off|Info|Debug)?$ type: string bpfMapSizeConntrack: description: 'BPFMapSizeConntrack sets the size for the conntrack @@ -1184,6 +1232,7 @@ spec: to append mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. [Default: insert]' + pattern: ^(?i)(insert|append)?$ type: string dataplaneDriver: description: DataplaneDriver filename of the external dataplane driver @@ -1202,8 +1251,10 @@ spec: debugMemoryProfilePath: type: string debugSimulateCalcGraphHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string debugSimulateDataplaneHangAfter: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string defaultEndpointToHostAction: description: 'DefaultEndpointToHostAction controls what happens to @@ -1218,6 +1269,7 @@ spec: endpoint egress policy. Use ACCEPT to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: Drop]' + pattern: ^(?i)(Drop|Accept|Return)?$ type: string deviceRouteProtocol: description: This defines the route protocol added to programmed device @@ -1236,6 +1288,7 @@ spec: disableConntrackInvalidCheck: type: boolean endpointReportingDelay: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string endpointReportingEnabled: type: boolean @@ -1303,12 +1356,14 @@ spec: based on auto-detected platform capabilities. Values are specified in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, empty or omitted values are auto-detected. + pattern: ^([a-zA-Z0-9-_]+=(true|false|),)*([a-zA-Z0-9-_]+=(true|false|))?$ type: string featureGates: description: FeatureGates is used to enable or disable tech-preview Calico features. Values are specified in a comma separated list with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". This is used to enable features that are not fully production ready. 
+ pattern: ^([a-zA-Z0-9-_]+=([^=]+),)*([a-zA-Z0-9-_]+=([^=]+))?$ type: string floatingIPs: description: FloatingIPs configures whether or not Felix will program @@ -1370,6 +1425,7 @@ spec: description: InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. The rescan can be disabled by setting the interval to 0. + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipipEnabled: description: 'IPIPEnabled overrides whether Felix should configure @@ -1385,18 +1441,22 @@ spec: all iptables state to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable iptables refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesBackend: description: IptablesBackend specifies which backend of iptables will be used. The default is Auto. + pattern: ^(?i)(Auto|FelixConfiguration|FelixConfigurationList|Legacy|NFT)?$ type: string iptablesFilterAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesFilterDenyAction: description: IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here. + pattern: ^(?i)(Drop|Reject)?$ type: string iptablesLockFilePath: description: 'IptablesLockFilePath is the location of the iptables @@ -1409,6 +1469,7 @@ spec: wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: 50ms]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesLockTimeout: description: 'IptablesLockTimeout is the time that Felix will wait @@ -1417,8 +1478,10 @@ spec: also take the lock. When running Felix inside a container, this requires the /run directory of the host to be mounted into the calico/node or calico/felix container. [Default: 0s disabled]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesMangleAllowAction: + pattern: ^(?i)(Accept|Return)?$ type: string iptablesMarkMask: description: 'IptablesMarkMask is the mask that Felix selects its @@ -1435,6 +1498,7 @@ spec: back in order to check the write was not clobbered by another process. This should only occur if another application on the system doesn''t respect the iptables lock. [Default: 1s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string iptablesRefreshInterval: description: 'IptablesRefreshInterval is the period at which Felix @@ -1445,6 +1509,7 @@ spec: was fixed in kernel version 4.11. If you are using v4.11 or greater you may want to set this to, a higher value to reduce Felix CPU usage. [Default: 10s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string ipv6Support: description: IPv6Support controls whether Felix enables support for @@ -1479,15 +1544,18 @@ spec: logSeverityFile: description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string logSeveritySys: description: 'LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. 
[Default: Info]' + pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ type: string maxIpsetSize: type: integer @@ -1526,6 +1594,7 @@ spec: pattern: ^.* x-kubernetes-int-or-string: true netlinkTimeout: + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string openstackRegion: description: 'OpenstackRegion is the name of the region that a particular @@ -1580,21 +1649,25 @@ spec: description: 'ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. Must be non-zero in OpenStack deployments. [Default: 30s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string reportingTTL: description: 'ReportingTTL is the time-to-live setting for process-wide status reports. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeRefreshInterval: description: 'RouteRefreshInterval is the period at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken Calico''s rules. Set to 0 to disable route refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string routeSource: description: 'RouteSource configures where Felix gets its routing information. - WorkloadIPs: use workload endpoints to construct routes. - CalicoIPAM: the default - use IPAM data to construct routes.' + pattern: ^(?i)(WorkloadIPs|CalicoIPAM)?$ type: string routeSyncDisabled: description: RouteSyncDisabled will disable all operations performed @@ -1616,7 +1689,7 @@ spec: routeTableRanges: description: Calico programs additional Linux route tables for various purposes. RouteTableRanges specifies a set of table index ranges - that Calico should use. DeprecatesRouteTableRange, overrides RouteTableRange. + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. items: properties: max: @@ -1634,6 +1707,7 @@ spec: packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. [Default: Drop]' + pattern: ^(?i)(Drop|Reject|Disabled)?$ type: string sidecarAccelerationEnabled: description: 'SidecarAccelerationEnabled enables experimental sidecar @@ -1649,10 +1723,12 @@ spec: usageReportingInitialDelay: description: 'UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string usageReportingInterval: description: 'UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string useInternalDataplaneDriver: description: UseInternalDataplaneDriver, if true, Felix will use its @@ -1676,6 +1752,14 @@ spec: type: integer vxlanVNI: type: integer + windowsManageFirewallRules: + description: 'WindowsManageFirewallRules configures whether or not + Felix will program Windows Firewall rules. (to allow inbound access + to its own metrics ports) [Default: Disabled]' + enum: + - Enabled + - Disabled + type: string wireguardEnabled: description: 'WireguardEnabled controls whether Wireguard is enabled for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). @@ -1701,6 +1785,7 @@ spec: wireguardKeepAlive: description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. 
[Default: 0]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string wireguardListeningPort: description: 'WireguardListeningPort controls the listening port used @@ -1727,6 +1812,7 @@ spec: the allowedSourcePrefixes annotation to send traffic with a source IP address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. + pattern: ^(?i)(Disabled|Any)?$ type: string xdpEnabled: description: 'XDPEnabled enables XDP acceleration for suitable untracked @@ -1737,6 +1823,7 @@ spec: all XDP state to ensure that no other process has accidentally broken Calico''s BPF maps or attached programs. Set to 0 to disable XDP refresh. [Default: 90s]' + pattern: ^([0-9]+(\\.[0-9]+)?(ms|s|m|h))*$ type: string type: object type: object @@ -1822,7 +1909,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -1953,7 +2040,7 @@ spec: the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a exact or a prefix match. The + ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. @@ -2048,7 +2135,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -2195,7 +2282,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -2326,7 +2413,7 @@ spec: the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a exact or a prefix match. The + ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. @@ -2421,7 +2508,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. 
\n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -2551,6 +2638,19 @@ spec: with identical order will be applied in alphanumerical order based on the Policy "Name". type: number + performanceHints: + description: "PerformanceHints contains a list of hints to Calico's + policy engine to help process the policy more efficiently. Hints + never change the enforcement behaviour of the policy. \n Currently, + the only available hint is \"AssumeNeededOnEveryNode\". When that + hint is set on a policy, Felix will act as if the policy matches + a local endpoint even if it does not. This is useful for \"preloading\" + any large static policies that are known to be used on every node. + If the policy is _not_ used on a particular node then the work done + to preload the policy (and to maintain it) is wasted." + items: + type: string + type: array preDNAT: description: PreDNAT indicates to apply the rules in this policy before any DNAT. @@ -3485,7 +3585,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -3616,7 +3716,7 @@ spec: the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a exact or a prefix match. The + ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. @@ -3711,7 +3811,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -3858,7 +3958,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. \n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -3989,7 +4089,7 @@ spec: the rule to apply to HTTP requests that use one of the listed HTTP Paths. Multiple paths are OR''d together. e.g: - exact: /foo - prefix: /bar NOTE: Each entry may - ONLY specify either a exact or a prefix match. The + ONLY specify either a `exact` or a `prefix` match. The validator will check for it.' items: description: 'HTTPPath specifies an HTTP path to match. @@ -4084,7 +4184,7 @@ spec: will be selected by the rule. \n For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting only workload endpoints in the same namespace - as the NetworkPolicy. \n For NetworkPolicy, global() + as the NetworkPolicy. 
\n For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting only GlobalNetworkSet or HostEndpoint. \n For GlobalNetworkPolicy, an empty NamespaceSelector implies @@ -4210,6 +4310,19 @@ spec: with identical order will be applied in alphanumerical order based on the Policy "Name". type: number + performanceHints: + description: "PerformanceHints contains a list of hints to Calico's + policy engine to help process the policy more efficiently. Hints + never change the enforcement behaviour of the policy. \n Currently, + the only available hint is \"AssumeNeededOnEveryNode\". When that + hint is set on a policy, Felix will act as if the policy matches + a local endpoint even if it does not. This is useful for \"preloading\" + any large static policies that are known to be used on every node. + If the policy is _not_ used on a particular node then the work done + to preload the policy (and to maintain it) is wasted." + items: + type: string + type: array selector: description: "The selector is an expression used to pick pick out the endpoints that the policy should be applied to. \n Selector @@ -4896,17 +5009,21 @@ spec: # no effect. - name: CALICO_IPV4POOL_CIDR value: "{{ .KubePodsV4CIDR }}" - - name: CALICO_IPV6POOL_CIDR - value: "{{ .KubePodsV6CIDR }}" - name: CALICO_IPV4POOL_BLOCK_SIZE value: "{{ .NodeCidrMaskSize }}" +{{- if .IPv6Support }} + - name: CALICO_IPV6POOL_CIDR + value: "{{ .KubePodsV6CIDR }}" +{{- end }} {{- else }} - name: NO_DEFAULT_POOLS value: "true" - name: CALICO_IPV4POOL_CIDR value: "" +{{- if .IPv6Support }} - name: CALICO_IPV6POOL_CIDR value: "" +{{- end }} {{- end }} - name: CALICO_DISABLE_FILE_LOGGING value: "true" @@ -4914,8 +5031,13 @@ spec: - name: FELIX_DEFAULTENDPOINTTOHOSTACTION value: "ACCEPT" # Disable IPv6 on Kubernetes. +{{- if .IPv6Support }} + - name: FELIX_IPV6SUPPORT + value: "false" +{{- else }} - name: FELIX_IPV6SUPPORT value: "true" +{{- end }} - name: FELIX_HEALTHENABLED value: "true" - name: FELIX_DEVICEROUTESOURCEADDRESS @@ -5144,6 +5266,11 @@ spec: # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists + # Make sure Typha can get scheduled on any nodes. + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists # Since Calico can't network a pod until Typha is up, we need to run Typha itself # as a host-networked pod. serviceAccountName: calico-node @@ -5211,5 +5338,3 @@ spec: timeoutSeconds: 10 {{ end }} - - `))) diff --git a/cmd/kk/pkg/plugins/network/templates/calico_v1.16-.go b/cmd/kk/pkg/plugins/network/templates/calico_v1.16-.go deleted file mode 100644 index dc56d7c67..000000000 --- a/cmd/kk/pkg/plugins/network/templates/calico_v1.16-.go +++ /dev/null @@ -1,1037 +0,0 @@ -/* - Copyright 2021 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package templates - -import ( - "github.com/lithammer/dedent" - "text/template" -) - -var CalicoOld = template.Must(template.New("network-plugin.yaml").Parse( - dedent.Dedent(`--- -# Source: calico/templates/calico-config.yaml -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # You must set a non-zero value for Typha replicas below. - typha_service_name: {{ if .TyphaEnabled }}"calico-typha"{{ else }}"none"{{ end }} - # Configure the backend to use. - calico_backend: "bird" - # Configure the MTU to use for workload interfaces and the - # tunnels. For IPIP, set to your network MTU - 20; for VXLAN - # set to your network MTU - 50. - veth_mtu: "{{ .VethMTU }}" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": __CNI_MTU__, - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - }, - { - "type": "bandwidth", - "capabilities": {"bandwidth": true} - } - ] - } - ---- -# Source: calico/templates/kdd-crds.yaml - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: blockaffinities.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BlockAffinity - plural: blockaffinities - singular: blockaffinity - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - shortNames: - - gnp - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - 
kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamblocks.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMBlock - plural: ipamblocks - singular: ipamblock - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamconfigs.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMConfig - plural: ipamconfigs - singular: ipamconfig - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamhandles.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMHandle - plural: ipamhandles - singular: ipamhandle - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: kubecontrollersconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: KubeControllersConfiguration - plural: kubecontrollersconfigurations - singular: kubecontrollersconfiguration ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networksets.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkSet - plural: networksets - singular: networkset - ---- ---- -# Source: calico/templates/rbac.yaml - -# Include a clusterrole for the kube-controllers component, -# and bind it to the calico-kube-controllers serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers -rules: - # Nodes are watched to monitor for deletions. - - apiGroups: [""] - resources: - - nodes - verbs: - - watch - - list - - get - # Pods are queried to check for existence. - - apiGroups: [""] - resources: - - pods - verbs: - - get - # IPAM resources are manipulated when nodes are deleted. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - verbs: - - list - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - # kube-controllers manages hostendpoints. - - apiGroups: ["crd.projectcalico.org"] - resources: - - hostendpoints - verbs: - - get - - list - - create - - update - - delete - # Needs access to update clusterinformations. 
- - apiGroups: ["crd.projectcalico.org"] - resources: - - clusterinformations - verbs: - - get - - create - - update - # KubeControllersConfiguration is where it gets its config - - apiGroups: ["crd.projectcalico.org"] - resources: - - kubecontrollersconfigurations - verbs: - # read its own config - - get - # create a default if none exists - - create - # update status - - update - # watch for changes - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system ---- -# Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-node -rules: - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - # Pod CIDR auto-detection on kubeadm needs access to config maps. - - apiGroups: [""] - resources: - - configmaps - verbs: - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only requried for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - # These permissions are required for Calico CNI to perform IPAM allocations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipamconfigs - verbs: - - get - # Block affinities must also be watchable by confd for route aggregation. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - verbs: - - watch - # The Calico IPAM migration needs to get daemonsets. 
These permissions can be - # removed if not upgrading from an installation using host-local IPAM. - - apiGroups: ["apps"] - resources: - - daemonsets - verbs: - - get - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system - -{{ if .TyphaEnabled }} ---- -# Source: calico/templates/calico-typha.yaml -# This manifest creates a Service, which will be backed by Calico's Typha daemon. -# Typha sits in between Felix and the API server, reducing Calico's load on the API server. - -apiVersion: v1 -kind: Service -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha - selector: - k8s-app: calico-typha - ---- - -# This manifest creates a Deployment of Typha to back the above service. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha -spec: - # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the calico-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: 1 - revisionHistoryLimit: 2 - selector: - matchLabels: - k8s-app: calico-typha - template: - metadata: - labels: - k8s-app: calico-typha - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - # Since Calico can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. - serviceAccountName: calico-node - priorityClassName: system-cluster-critical - # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 - securityContext: - fsGroup: 65534 - containers: - - image: {{ .CalicoTyphaImage }} - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. - - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, - # this opens a port on the host, which may need to be secured. 
- #- name: TYPHA_PROMETHEUSMETRICSENABLED - # value: "true" - #- name: TYPHA_PROMETHEUSMETRICSPORT - # value: "9093" - livenessProbe: - httpGet: - path: /liveness - port: 9098 - host: localhost - periodSeconds: 30 - initialDelaySeconds: 30 - securityContext: - runAsNonRoot: true - allowPrivilegeEscalation: false - readinessProbe: - httpGet: - path: /readiness - port: 9098 - host: localhost - periodSeconds: 10 - - --- - - # This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict - - apiVersion: policy/v1beta1 - kind: PodDisruptionBudget - metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-typha - {{ end }} - ---- -# Source: calico/templates/calico-node.yaml -# This manifest installs the calico-node container, as well -# as the CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Make sure calico-node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - priorityClassName: system-node-critical - initContainers: - # This container performs upgrade from host-local IPAM to calico-ipam. - # It can be deleted if this is a fresh installation, or if you have already - # upgraded to use calico-ipam. - - name: upgrade-ipam - image: {{ .CalicoCniImage }} - command: ["/opt/cni/bin/calico-ipam", "-upgrade"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - volumeMounts: - - mountPath: /var/lib/cni/networks - name: host-local-net-dir - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - securityContext: - privileged: true - # This container installs the CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{ .CalicoCniImage }} - command: ["/opt/cni/bin/install"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. 
- - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - securityContext: - privileged: true - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - - name: flexvol-driver - image: {{ .CalicoFlexvolImage }} - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - securityContext: - privileged: true - containers: - # Runs calico-node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{ .CalicoNodeImage }} - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" -{{ if .TyphaEnabled }} - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name -{{ end }} - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Auto-detect the BGP IP address. - - name: NODEIP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: IP_AUTODETECTION_METHOD - value: "can-reach=$(NODEIP)" - - name: IP - value: "autodetect" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "{{ .IPIPMode }}" - # Enable or Disable VXLAN on the default IP pool. - - name: CALICO_IPV4POOL_VXLAN - value: "{{ .VXLANMode }}" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set MTU for the VXLAN tunnel device. - - name: FELIX_VXLANMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set MTU for the Wireguard tunnel device. - - name: FELIX_WIREGUARDMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubePodsCIDR }}" - - name: CALICO_IPV4POOL_BLOCK_SIZE - value: "{{ .NodeCidrMaskSize }}" - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPv6 on Kubernetes. 
- - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - - name: FELIX_HEALTHENABLED - value: "true" - - name: FELIX_DEVICEROUTESOURCEADDRESS - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - exec: - command: - - /bin/calico-node - - -felix-live - - -bird-live - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node - - -felix-ready - - -bird-ready - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the - # parent directory. - - name: sysfs - mountPath: /sys/fs/ - # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. - # If the host is known to mount that filesystem already then Bidirectional can be omitted. - mountPropagation: Bidirectional - volumes: - # Used by calico-node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: sysfs - hostPath: - path: /sys/fs/ - type: DirectoryOrCreate - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Mount in the directory for host-local IPAM allocations. This is - # used when upgrading from host-local to calico-ipam, and can be removed - # if not using the upgrade-ipam init container. - - name: host-local-net-dir - hostPath: - path: /var/lib/cni/networks - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - ---- -# Source: calico/templates/calico-kube-controllers.yaml -# See https://github.com/projectcalico/kube-controllers -apiVersion: apps/v1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers -spec: - # The controllers can only have a single active instance. - replicas: 1 - selector: - matchLabels: - k8s-app: calico-kube-controllers - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - spec: - nodeSelector: - kubernetes.io/os: linux - tolerations: - # Mark the pod as a critical add-on for rescheduling. 
- - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - serviceAccountName: calico-kube-controllers - priorityClassName: system-cluster-critical - containers: - - name: calico-kube-controllers - image: {{ .CalicoControllersImage }} - env: - # Choose which controllers to run. - - name: ENABLED_CONTROLLERS - value: node - - name: DATASTORE_TYPE - value: kubernetes - readinessProbe: - exec: - command: - - /usr/bin/check-status - - -r - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system - - `))) diff --git a/hack/sync-components.sh b/hack/sync-components.sh index 6986a4b0e..d57955217 100755 --- a/hack/sync-components.sh +++ b/hack/sync-components.sh @@ -178,6 +178,8 @@ if [ $CALICO_VERSION ]; then curl -L -o binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \ https://github.com/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch + sha256sum binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch + qsctl cp binaries/calicoctl/$CALICO_VERSION/$arch/calicoctl-linux-$arch \ qs://kubernetes-release/projectcalico/calico/releases/download/$CALICO_VERSION/calicoctl-linux-$arch \ -c qsctl-config.yaml @@ -237,11 +239,11 @@ if [ $CONTAINERD_VERSION ]; then mkdir -p binaries/containerd/$CONTAINERD_VERSION/$arch echo "Synchronizing containerd-$arch" - sha256sum binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz - curl -L -o binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \ https://github.com/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz + sha256sum binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz + qsctl cp binaries/containerd/$CONTAINERD_VERSION/$arch/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \ qs://kubernetes-release/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$arch.tar.gz \ -c qsctl-config.yaml @@ -257,11 +259,11 @@ if [ $RUNC_VERSION ]; then mkdir -p binaries/runc/$RUNC_VERSION/$arch echo "Synchronizing runc-$arch" - sha256sum binaries/runc/$RUNC_VERSION/$arch/runc.$arch - curl -L -o binaries/runc/$RUNC_VERSION/$arch/runc.$arch \ https://github.com/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch + sha256sum binaries/runc/$RUNC_VERSION/$arch/runc.$arch + qsctl cp binaries/runc/$RUNC_VERSION/$arch/runc.$arch \ qs://kubernetes-release/opencontainers/runc/releases/download/$RUNC_VERSION/runc.$arch \ -c qsctl-config.yaml diff --git a/version/components.json b/version/components.json index 6c64ffd9e..185ebf316 100644 --- a/version/components.json +++ b/version/components.json @@ -1019,7 +1019,8 @@ "1.7.6": "58408cfa025003e671b0af72183b963363d519543d0d0ba186037e9c57489ffe", "1.7.7": "371de359d6102c51f6ee2361d08297948d134ce7379e01cb965ceeffa4365fba", "1.7.8": "5f1d017a5a7359514d6187d6656e88fb2a592d107e6298db7963dbddb9a111d9", - "1.7.12": "6a24d8b996533fa1b0d7348fe9813a78cd01fa16cff865a961ad0d556f5cd665" + "1.7.12": "6a24d8b996533fa1b0d7348fe9813a78cd01fa16cff865a961ad0d556f5cd665", + "1.7.13": "c2371c009dd8b7738663333d91e5ab50d204f8bcae24201f45d59060d12c3a23" }, "arm64": { "1.6.2": "a4b24b3c38a67852daa80f03ec2bc94e31a0f4393477cd7dc1c1a7c2d3eb2a95", @@ -1054,7 +1055,8 @@ "1.7.6": "d844a1c8b993e7e9647f73b9814567004dce1287c0529ce55c50519490eafcce", "1.7.7": 
"0a104f487193665d2681fcb5ed83f2baa5f97849fe2661188da835c9d4eaf9e3", "1.7.8": "3fc551e8f51150804d80cc1958a271bd2252b6334f0355244d0faa5da7fa55d1", - "1.7.12": "8a1b35a521d071a8828f63fe007a51e5b7ac863a1195f5dee32543b1a9d5f2b6" + "1.7.12": "8a1b35a521d071a8828f63fe007a51e5b7ac863a1195f5dee32543b1a9d5f2b6", + "1.7.13": "118759e398f35337109592b4d237538872dc12a207d38832b9d04515d0acbc4d" } }, "runc": { @@ -1065,7 +1067,8 @@ "v1.1.4": "db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce", "v1.1.9": "b9bfdd4cb27cddbb6172a442df165a80bfc0538a676fbca1a6a6c8f4c6933b43", "v1.1.10": "81f73a59be3d122ab484d7dfe9ddc81030f595cc59968f61c113a9a38a2c113a", - "v1.1.11": "77ae134de014613c44d25e6310a57a219a7a91155cd47d069a0f22a2cad5caea" + "v1.1.11": "77ae134de014613c44d25e6310a57a219a7a91155cd47d069a0f22a2cad5caea", + "v1.1.12": "aadeef400b8f05645768c1476d1023f7875b78f52c7ff1967a6dbce236b8cbd8" }, "arm64": { "v1.1.1": "20c436a736547309371c7ac2a335f5fe5a42b450120e497d09c8dc3902c28444", @@ -1074,7 +1077,8 @@ "v1.1.4": "dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223", "v1.1.9": "b43e9f561e85906f469eef5a7b7992fc586f750f44a0e011da4467e7008c33a0", "v1.1.10": "4830afd426bdeacbdf9cb8729524aa2ed51790b8c4b28786995925593708f1c8", - "v1.1.11": "9f1ee53f06b78cc4a115ca6ae4eec10567999539ce828a22c5351edba043ed12" + "v1.1.11": "9f1ee53f06b78cc4a115ca6ae4eec10567999539ce828a22c5351edba043ed12", + "v1.1.12": "879f910a05c95c10c64ad8eb7d5e3aa8e4b30e65587b3d68e009a3565aed5bb8" } }, "crictl": { @@ -1203,11 +1207,13 @@ "calicoctl": { "amd64": { "v3.23.2": "3784200cdfc0106c9987df2048d219bb91147f0cc3fa365b36279ac82ea37c7a", - "v3.26.1": "c8f61c1c8e2504410adaff4a7255c65785fe7805eebfd63340ccd3c472aa42cf" + "v3.26.1": "c8f61c1c8e2504410adaff4a7255c65785fe7805eebfd63340ccd3c472aa42cf", + "v3.27.2": "692f69dc656e41cd35e23e24f56c98c4aeeb723fed129985b46f71e6eb5e1594" }, "arm64": { "v3.23.2": "232b992e6767c68c8c832cc7027a0d9aacb29901a9b5e8871e25baedbbb9c64c", - "v3.26.1": "bba2fbdd6d2998bca144ae12c2675d65c4fbf51c0944d69b1b2f20e08cd14c22" + "v3.26.1": "bba2fbdd6d2998bca144ae12c2675d65c4fbf51c0944d69b1b2f20e08cd14c22", + "v3.27.2": "0fd1f65a511338cf9940835987d420c94ab95b5386288ba9673b736a4d347463" } } } From 77121ac2343eed6391afa3df2dfb05ddea910fb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 16:23:27 +0000 Subject: [PATCH 24/24] build(deps): bump release-drafter/release-drafter from 5 to 6 Bumps [release-drafter/release-drafter](https://github.com/release-drafter/release-drafter) from 5 to 6. - [Release notes](https://github.com/release-drafter/release-drafter/releases) - [Commits](https://github.com/release-drafter/release-drafter/compare/v5...v6) --- updated-dependencies: - dependency-name: release-drafter/release-drafter dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/release-drafter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 17fdb961d..99fe14dba 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -9,6 +9,6 @@ jobs: update_release_draft: runs-on: ubuntu-latest steps: - - uses: release-drafter/release-drafter@v5 + - uses: release-drafter/release-drafter@v6 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}