Architecture
Environment
| Cluster Role | IP Address | Component | Hostname |
|---|---|---|---|
| Master-01 | 192.168.3.11 | kube-apiserver kube-controller-manager kube-scheduler ansible | K8snode1 |
| Master-02 | 192.168.3.12 | kube-apiserver kube-controller-manager kube-scheduler | K8snode2 |
| Worker-01 | 192.168.3.13 | kubelet kube-proxy etcd | K8snode3 |
| Worker-02 | 192.168.3.14 | kubelet kube-proxy etcd keepalived haproxy | K8snode4 |
| Worker-03 | 192.168.3.15 | kubelet kube-proxy etcd keepalived haproxy | K8snode5 |
Setup
env prepare on deploy server (k8snode1)
# env prepare on deploy server (k8snode1)
[root@node1 ~]# ssh-keygen
[root@node1 ~]# for i in 192.168.3.{11..15};do ssh-copy-id root@$i;done
[root@node1 ~]# yum install ansible -y
[root@node1 ~]# egrep -v '^#|^$|#' /etc/ansible/hosts
192.168.3.[11:15]
[root@node1 ~]# ansible all -m yum -a 'name=vim,net-tools,epel-release,yum-utils.noarch,bash-completion state=present'
[root@node1 ~]# ansible all -m shell -a 'yum-config-manager --add-repo http://repo.srv/centos7/x86/'
Kubeasz setup k8s
https://github.com/easzlab/kubeasz
# download setup tool
[root@node1 ~]# wget https://github.com/easzlab/kubeasz/releases/download/3.6.2/ezdown
[root@node1 ~]# chmod +x ezdown
[root@node1 ~]# ./ezdown -D # init kubeasz env, all files will be saved in /etc/kubeasz/
[root@node1 ~]# /etc/kubeasz/ezctl -h
[root@node1 ~]# /etc/kubeasz/ezctl new k8s01
[root@node1 ~]# vim /etc/kubeasz/clusters/k8s01/hosts
[root@node1 ~]# vim /etc/kubeasz/clusters/k8s01/config.yml
[root@node1 ~]# egrep -v "^$|^#|#" /etc/kubeasz/clusters/k8s01/hosts
[etcd]
192.168.3.[13:15]
[kube_master]
192.168.3.11 k8s_nodename='master-01'
192.168.3.12 k8s_nodename='master-02'
[kube_node]
192.168.3.13 k8s_nodename='worker-01'
192.168.3.14 k8s_nodename='worker-02'
192.168.3.15 k8s_nodename='worker-03'
[harbor]
[ex_lb]
192.168.3.15 LB_ROLE=backup EX_APISERVER_VIP=192.168.3.250 EX_APISERVER_PORT=8443
192.168.3.14 LB_ROLE=master EX_APISERVER_VIP=192.168.3.250 EX_APISERVER_PORT=8443
[chrony]
192.168.3.11
[all:vars]
SECURE_PORT="6443"
CONTAINER_RUNTIME="containerd"
CLUSTER_NETWORK="calico"
PROXY_MODE="ipvs"
SERVICE_CIDR="10.10.0.0/16"
CLUSTER_CIDR="192.10.0.0/16"
NODE_PORT_RANGE="30000-32767"
CLUSTER_DNS_DOMAIN="cluster.k8slab"
bin_dir="/opt/kube/bin"
base_dir="/etc/kubeasz"
cluster_dir="{{ base_dir }}/clusters/k8s01"
ca_dir="/etc/kubernetes/ssl"
k8s_nodename=''
ansible_python_interpreter=/usr/bin/python
[root@node1 ~]# egrep -v "^$|^#|#" /etc/kubeasz/clusters/k8s01/config.yml
INSTALL_SOURCE: "online"
OS_HARDEN: true
CA_EXPIRY: "876000h"
CERT_EXPIRY: "438000h"
CHANGE_CA: false
CLUSTER_NAME: "cluster1"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"
K8S_VER: "1.28.1"
K8S_NODENAME: "{%- if k8s_nodename != '' -%} \
{{ k8s_nodename|replace('_', '-')|lower }} \
{%- else -%} \
{{ inventory_hostname }} \
{%- endif -%}"
ETCD_DATA_DIR: "/var/lib/etcd"
ETCD_WAL_DIR: ""
ENABLE_MIRROR_REGISTRY: true
INSECURE_REG:
- "http://easzlab.io.local:5000"
- "https://{{ HARBOR_REGISTRY }}"
SANDBOX_IMAGE: "easzlab.io.local:5000/easzlab/pause:3.9"
CONTAINERD_STORAGE_DIR: "/var/lib/containerd"
DOCKER_STORAGE_DIR: "/var/lib/docker"
DOCKER_ENABLE_REMOTE_API: false
MASTER_CERT_HOSTS:
- "192.168.3.11"
- "192.168.3.12"
- "192.168.3.250"
- "k8s.easzlab.io"
NODE_CIDR_LEN: 24
KUBELET_ROOT_DIR: "/var/lib/kubelet"
MAX_PODS: 110
KUBE_RESERVED_ENABLED: "no"
SYS_RESERVED_ENABLED: "no"
FLANNEL_BACKEND: "vxlan"
DIRECT_ROUTING: false
flannel_ver: "v0.22.2"
CALICO_IPV4POOL_IPIP: "Always"
IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"
CALICO_NETWORKING_BACKEND: "bird"
CALICO_RR_ENABLED: false
CALICO_RR_NODES: []
calico_ver: "v3.24.6"
calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
cilium_ver: "1.13.6"
cilium_connectivity_check: true
cilium_hubble_enabled: false
cilium_hubble_ui_enabled: false
kube_ovn_ver: "v1.11.5"
OVERLAY_TYPE: "full"
FIREWALL_ENABLE: true
kube_router_ver: "v1.5.4"
dns_install: "yes"
corednsVer: "1.11.1"
ENABLE_LOCAL_DNS_CACHE: true
dnsNodeCacheVer: "1.22.23"
LOCAL_DNS_CACHE: "169.254.20.10"
metricsserver_install: "yes"
metricsVer: "v0.6.4"
dashboard_install: "yes"
dashboardVer: "v2.7.0"
dashboardMetricsScraperVer: "v1.0.8"
prom_install: "yes"
prom_namespace: "monitor"
prom_chart_ver: "45.23.0"
kubeapps_install: "no"
kubeapps_install_namespace: "kubeapps"
kubeapps_working_namespace: "default"
kubeapps_storage_class: "local-path"
kubeapps_chart_ver: "12.4.3"
local_path_provisioner_install: "no"
local_path_provisioner_ver: "v0.0.24"
local_path_provisioner_dir: "/opt/local-path-provisioner"
nfs_provisioner_install: "no"
nfs_provisioner_namespace: "kube-system"
nfs_provisioner_ver: "v4.0.2"
nfs_storage_class: "managed-nfs-storage"
nfs_server: "192.168.1.10"
nfs_path: "/data/nfs"
network_check_enabled: true
network_check_schedule: "*/5 * * * *"
HARBOR_VER: "v2.6.4"
HARBOR_DOMAIN: "harbor.easzlab.io.local"
HARBOR_PATH: /var/data
HARBOR_TLS_PORT: 8443
HARBOR_REGISTRY: "{{ HARBOR_DOMAIN }}:{{ HARBOR_TLS_PORT }}"
HARBOR_SELF_SIGNED_CERT: true
HARBOR_WITH_NOTARY: false
HARBOR_WITH_TRIVY: false
HARBOR_WITH_CHARTMUSEUM: true
####
[root@node1 ~]# /etc/kubeasz/ezctl setup -h
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 all
###
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 01
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 02
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 03
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 04
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 05
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 06
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 07
[root@node1 ~]# /etc/kubeasz/ezctl setup k8s01 ex-lb
##
[root@node1 ~]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
master-01 Ready,SchedulingDisabled master 19m v1.28.1 192.168.3.11 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 containerd://1.6.23
master-02 Ready,SchedulingDisabled master 19m v1.28.1 192.168.3.12 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 containerd://1.6.23
worker-01 Ready node 17m v1.28.1 192.168.3.13 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 containerd://1.6.23
worker-02 Ready node 17m v1.28.1 192.168.3.14 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 containerd://1.6.23
worker-03 Ready node 17m v1.28.1 192.168.3.15 <none> CentOS Linux 7 (Core) 3.10.0-1160.71.1.el7.x86_64 containerd://1.6.23
[root@node1 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-86b55cf789-6l69w 1/1 Running 0 17m
calico-node-2tlph 1/1 Running 0 17m
calico-node-br4qf 1/1 Running 0 17m
calico-node-bxnpg 1/1 Running 0 17m
calico-node-r74ns 1/1 Running 0 17m
calico-node-tzcm9 1/1 Running 0 17m
coredns-7bc88ddb8b-dtk4t 1/1 Running 0 16m
dashboard-metrics-scraper-77b667b99d-gs6mt 1/1 Running 0 16m
kubernetes-dashboard-74fb9f77fb-cd6pc 1/1 Running 0 16m
metrics-server-dfb478476-q62nr 1/1 Running 0 16m
node-local-dns-chmpp 1/1 Running 0 16m
node-local-dns-cphrq 1/1 Running 0 16m
node-local-dns-wmp4f 1/1 Running 0 16m
node-local-dns-z8hx7 1/1 Running 0 16m
node-local-dns-zbnv8 1/1 Running 0 16m
Live demo
{% asciinema Uunw1gW6PSDLXmSMPqG3y8ouF %}