Building a Production-Grade, Multi-Master, Highly Available Kubernetes Cluster with kubeadm

Posted by 艺帆风顺 on 2025-04-02


I. Lab environment planning
1. Specs: 4 vCPU / 8 GB RAM / 100 GB disk per node
2. Network: all machines can reach each other

Cluster role      IP address      Hostname   Installed components
Control node      192.168.1.10    master1    apiserver, controller-manager, scheduler, kubelet, etcd, kube-proxy, container runtime, calico, keepalived, nginx, kubeadm, kubectl
Control node      192.168.1.11    master2    apiserver, controller-manager, scheduler, kubelet, etcd, kube-proxy, container runtime, calico, keepalived, nginx, kubeadm, kubectl
Worker node       192.168.1.12    node1      kube-proxy, calico, coredns, container runtime, kubelet, kubeadm, kubectl
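
Before going further it can be worth confirming that the three hosts really do reach one another. A quick loop such as the one below (run from any of the hosts, using the IPs from the table above) is enough; it is just a convenience check, not part of the original walkthrough.

    # quick reachability check
    for ip in 192.168.1.10 192.168.1.11 192.168.1.12; do
        ping -c 1 -W 1 "$ip" >/dev/null && echo "$ip reachable" || echo "$ip UNREACHABLE"
    done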

II. Initial configuration of the nodes that will run Kubernetes
1. Edit the /etc/hosts file

    vim /etc/hosts
    127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.1.10 master1
    192.168.1.11 master2
    192.168.1.12 node1

2. Permanently disable SELinux

    [root@localhost ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
    [root@localhost ~]# getenforce    # takes full effect after a reboot
    Disabled

3. Set the hostnames

    [root@localhost ~]# hostnamectl set-hostname master1
    [root@localhost ~]# bash
    [root@master1 ~]#
    [root@localhost ~]# hostnamectl set-hostname master2
    [root@localhost ~]# bash
    [root@master2 ~]#
    [root@localhost ~]# hostnamectl set-hostname node1
    [root@localhost ~]# bash
    [root@node1 ~]#

4. Install base packages

    yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo libaio-devel vim ncurses-devel autoconf automake zlib-devel epel-release openssh-server socat conntrack telnet ipvsadm

5. Configure passwordless SSH between the hosts

    # On master1 (press Enter at every ssh-keygen prompt)
    [root@master1 ~]# ssh-keygen -t rsa
    [root@master1 ~]# ssh-copy-id master2
    [root@master1 ~]# ssh-copy-id node1
    # On master2
    [root@master2 ~]# ssh-keygen -t rsa
    [root@master2 ~]# ssh-copy-id master1
    [root@master2 ~]# ssh-copy-id node1
    # On node1
    [root@node1 ~]# ssh-keygen -t rsa
    [root@node1 ~]# ssh-copy-id master1
    [root@node1 ~]# ssh-copy-id master2
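
With passwordless SSH in place, the /etc/hosts file from step 1 can simply be pushed to the other nodes instead of being edited three times. A small loop like the following (run on master1, using the hostnames defined above) would do it; this is a convenience step, not part of the original walkthrough.

    # distribute /etc/hosts from master1 to the other nodes
    for host in master2 node1; do
        scp /etc/hosts "$host":/etc/hosts
    done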

6. Disable the firewall

    [root@master1 ~]# systemctl stop firewalld
    [root@master1 ~]# systemctl disable firewalld
    [root@master2 ~]# systemctl stop firewalld
    [root@master2 ~]# systemctl disable firewalld
    [root@node1 ~]# systemctl stop firewalld
    [root@node1 ~]# systemctl disable firewalld

7. Disable swap

    # Turn swap off immediately on every node
    [root@master1 ~]# swapoff -a
    [root@master2 ~]# swapoff -a
    [root@node1 ~]# swapoff -a

    # Disable it permanently by commenting out the swap entry in /etc/fstab (on every node)
    [root@master1 ~]# sed -i '/swap/s/^/# /' /etc/fstab
    [root@master1 ~]# cat /etc/fstab
    ...
    # /dev/mapper/centos-swap swap                    swap    defaults        0 0
    [root@master2 ~]# sed -i '/swap/s/^/# /' /etc/fstab
    [root@node1 ~]# sed -i '/swap/s/^/# /' /etc/fstab
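
A quick way to confirm that swap really is off (kubeadm will otherwise complain during preflight checks) is:

    free -m | grep -i swap    # the Swap line should show 0 total
    swapon -s                 # prints nothing when no swap is active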

8. Adjust kernel parameters

    # Run on every node (shown here on master1; repeat on master2 and node1)
    [root@master1 ~]# modprobe br_netfilter
    [root@master1 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    EOF
    [root@master1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
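
Note that modprobe only loads the module for the current boot. If you want br_netfilter to come back automatically after a reboot, a modules-load file like the one below can be added on every node; this is a common companion step, not shown in the original capture.

    cat > /etc/modules-load.d/br_netfilter.conf <<EOF
    br_netfilter
    EOF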

9. Configure the Alibaba Cloud yum repository needed to install Docker and containerd

    # Run on every node (shown here on master1; the output is identical on master2 and node1)
    [root@master1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    Loaded plugins: fastestmirror
    adding repo from: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    grabbing file http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
    repo saved to /etc/yum.repos.d/docker-ce.repo

10. Configure the Alibaba Cloud yum repository for the Kubernetes command-line tools

    # Create the repo file on every node (shown here on master1; repeat on master2 and node1)
    [root@master1 ~]# cat > /etc/yum.repos.d/kubernetes.repo <<EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    EOF
    [root@master1 ~]# cat /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0

11. Configure time synchronization

    # Run on every node (shown here on master1; repeat on master2 and node1)
    [root@master1 ~]# yum -y install chrony
    # Sync against Aliyun/Tencent NTP servers
    [root@master1 ~]# vim /etc/chrony.conf
    [root@master1 ~]# cat /etc/chrony.conf | grep -v '^#' | grep -v '^$'
    server ntp1.aliyun.com iburst
    server ntp2.aliyun.com iburst
    server ntp1.tencent.com iburst
    server ntp2.tencent.com iburst
    driftfile /var/lib/chrony/drift
    makestep 1.0 3
    rtcsync
    logdir /var/log/chrony
    [root@master1 ~]# systemctl restart chronyd
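
Once chronyd has restarted, you can confirm that the servers are reachable and that the clock is actually being steered:

    chronyc sources -v    # lists the configured NTP servers and their reachability
    chronyc tracking      # shows the current offset from the selected source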

12. Install containerd

    [root@master1 ~]# yum install containerd.io-1.6.22 -y
    [root@master2 ~]# yum install containerd.io-1.6.22 -y
    [root@node1 ~]# yum install containerd.io-1.6.22 -y

    Edit /etc/containerd/config.toml on every host. The file follows the containerd v2 defaults; only the settings below need to differ from the defaults for this cluster (everything else can stay as generated):

    [root@master1 ~]# vim /etc/containerd/config.toml
    version = 2
    [plugins."io.containerd.grpc.v1.cri"]
      # pull the pause image from the Aliyun registry
      sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
      runtime_type = "io.containerd.runc.v2"
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
      # use the systemd cgroup driver, matching the kubelet configuration later on
      SystemdCgroup = true
    [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.1.62".tls]
      insecure_skip_verify = true
    [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.1.62".auth]
      username = "admin"
      password = "Harbor12345"
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.1.62"]
      endpoint = ["https://192.168.1.62:443"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
      endpoint = ["https://vh3bm52y.mirror.aliyuncs.com","https://registry.docker-cn.com"]

    Start containerd and enable it at boot:
    [root@master1 containerd]# systemctl start containerd
    [root@master1 containerd]# systemctl enable containerd
    Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /usr/lib/systemd/system/containerd.service.

13. Install docker-ce

    [root@master1 ~]# yum -y install docker-ce
    [root@master2 ~]# yum -y install docker-ce
    [root@node1 ~]# yum -y install docker-ce

14. Configure a Docker registry mirror

    [root@master1 containerd]# vim /etc/docker/daemon.json
    [root@master1 containerd]# cat /etc/docker/daemon.json
    {
      "registry-mirrors": [
        "https://docker.xuanyuan.me",
        "https://docker.1ms.run"
      ]
    }
    [root@master1 containerd]# systemctl daemon-reload
    [root@master1 containerd]# systemctl restart docker
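
You can verify that Docker picked up the mirror configuration, and while you are at it make sure Docker also starts at boot:

    systemctl enable docker
    docker info | grep -A3 'Registry Mirrors'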

15. Install the components needed to initialize Kubernetes

    [root@master1 ~]# yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0
    [root@master2 ~]# yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0
    [root@node1 ~]# yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0
    [root@master1 ~]# systemctl enable kubelet
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
    [root@master2 ~]# systemctl enable kubelet
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
    [root@node1 ~]# systemctl enable kubelet
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

III. Making the kube-apiserver highly available with keepalived + nginx
1. Install nginx and keepalived

    [root@master1 ~]# yum install epel-release keepalived -y
    [root@master1 ~]# yum -y install nginx nginx-mod-stream
    Install the same packages on master2 as well, since it will run the backup nginx/keepalived pair.

2. Edit the nginx configuration

    Edit the nginx configuration (identical on the primary and the backup):
    vim /etc/nginx/nginx.conf    # add the following above the existing http {} block

    events {
        worker_connections 1024;
    }

    # Layer-4 load balancing for the two master apiservers
    stream {
        log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
        access_log  /var/log/nginx/k8s-access.log  main;

        upstream k8s-apiserver {
            server 192.168.1.10:6443 weight=5 max_fails=3 fail_timeout=30s;
            server 192.168.1.11:6443 weight=5 max_fails=3 fail_timeout=30s;
        }

        server {
            listen 16443;    # nginx shares the host with the apiserver, so it cannot listen on 6443
            proxy_pass k8s-apiserver;
        }
    }

    Check that the configuration is valid:
    [root@master1 nginx]# nginx -t
    nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
    nginx: configuration file /etc/nginx/nginx.conf test is successful

3. Edit the keepalived configuration

    Primary keepalived (master1: 192.168.1.10)
    [root@master1 nginx]# vim /etc/keepalived/keepalived.conf
    global_defs {
       notification_email {
         acassen@firewall.loc
         failover@firewall.loc
         sysadmin@firewall.loc
       }
       notification_email_from Alexandre.Cassen@firewall.loc
       smtp_server 127.0.0.1
       smtp_connect_timeout 30
       router_id NGINX_MASTER
    }
    vrrp_script check_nginx {
        script "/etc/keepalived/check_nginx.sh"
    }
    vrrp_instance VI_1 {
        state MASTER
        interface ens33          # change to the actual NIC name
        virtual_router_id 51     # VRRP router ID; must be unique per instance
        priority 100             # priority; the backup server is set to 90
        advert_int 1             # VRRP advertisement interval, default 1 second
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        # virtual IP
        virtual_ipaddress {
            192.168.1.199/24
        }
        track_script {
            check_nginx
        }
    }

    Backup keepalived (master2: 192.168.1.11)
    [root@master2 nginx]# vim /etc/keepalived/keepalived.conf
    global_defs {
       notification_email {
         acassen@firewall.loc
         failover@firewall.loc
         sysadmin@firewall.loc
       }
       notification_email_from Alexandre.Cassen@firewall.loc
       smtp_server 127.0.0.1
       smtp_connect_timeout 30
       router_id NGINX_MASTER
    }
    vrrp_script check_nginx {
        script "/etc/keepalived/check_nginx.sh"
    }
    vrrp_instance VI_1 {
        state MASTER
        interface ens33          # change to the actual NIC name
        virtual_router_id 51     # VRRP router ID; must be unique per instance
        priority 90              # priority; the backup server is set to 90
        advert_int 1             # VRRP advertisement interval, default 1 second
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        # virtual IP
        virtual_ipaddress {
            192.168.1.199/24
        }
        track_script {
            check_nginx
        }
    }

4. Write the nginx health-check script (on both the primary and the backup)

    [root@master1 nginx]# vim /etc/keepalived/check_nginx.sh
    #!/bin/bash
    # 1. Check whether nginx is alive
    counter=$(ps -ef | grep nginx | grep sbin | egrep -cv "grep|$$")
    if [ $counter -eq 0 ]; then
        # 2. If it is not, try to start it
        service nginx start
        sleep 2
        # 3. Check the nginx status again after two seconds
        counter=$(ps -ef | grep nginx | grep sbin | egrep -cv "grep|$$")
        # 4. If nginx still is not running, stop keepalived so the VIP fails over
        if [ $counter -eq 0 ]; then
            service keepalived stop
        fi
    fi
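
One small step the capture does not show explicitly: make the script executable on both masters (and, if you like, run it once by hand) so keepalived's vrrp_script can invoke it.

    chmod +x /etc/keepalived/check_nginx.sh
    bash /etc/keepalived/check_nginx.sh && echo "check script ran OK"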

5. Start the services

    [root@master1 nginx]# systemctl daemon-reload && systemctl start nginx && systemctl enable nginx
    Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.
    [root@master1 nginx]# systemctl start keepalived && systemctl enable keepalived
    Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
    [root@master2 nginx]# systemctl daemon-reload && systemctl start nginx && systemctl enable nginx
    Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.
    [root@master2 nginx]# systemctl start keepalived && systemctl enable keepalived
    Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
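
A quick sanity check that the stream proxy is actually listening on both masters:

    ss -lntp | grep 16443    # should show nginx listening on port 16443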

6. Verify that the VIP is bound

    [root@master1 nginx]# ip a
    ...
    2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:c4:37:4c brd ff:ff:ff:ff:ff:ff
        inet 192.168.1.10/24 brd 192.168.1.255 scope global ens33
           valid_lft forever preferred_lft forever
        inet 192.168.1.199/24 scope global secondary ens33
           valid_lft forever preferred_lft forever
    ...
    [root@master2 keepalived]# ip a
    ...
    2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:c4:a6:fb brd ff:ff:ff:ff:ff:ff
        inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33
           valid_lft forever preferred_lft forever
    ...
    The VIP 192.168.1.199 is bound on master1 (the primary) and absent from master2, as expected.

7. Verify that the VIP can fail over

    [root@master1 ~]# systemctl stop keepalived
    [root@master1 ~]# ip a
    ...
    2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:c4:37:4c brd ff:ff:ff:ff:ff:ff
        inet 192.168.1.10/24 brd 192.168.1.255 scope global ens33
           valid_lft forever preferred_lft forever
    ...
    [root@master2 ~]# ip a
    ...
    2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:c4:a6:fb brd ff:ff:ff:ff:ff:ff
        inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33
           valid_lft forever preferred_lft forever
        inet 192.168.1.199/24 scope global secondary ens33
           valid_lft forever preferred_lft forever
    ...
    After keepalived is stopped on master1, the VIP disappears there and reappears on master2, so failover works. Remember to start keepalived on master1 again afterwards.

IV. Initialize the Kubernetes cluster with kubeadm

    [root@master1 ~]# kubeadm config print init-defaults > kubeadm.yaml
    Edit the configuration file:
    [root@master1 ~]# vim kubeadm.yaml
    [root@master1 ~]# cat kubeadm.yaml
    apiVersion: kubeadm.k8s.io/v1beta3
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    #localAPIEndpoint:
    #  advertiseAddress: 1.2.3.4
    #  bindPort: 6443
    nodeRegistration:
      criSocket: unix:///var/run/containerd/containerd.sock
      imagePullPolicy: IfNotPresent
    #  name: node
      taints: null
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta3
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns: {}
    etcd:
      local:
        dataDir: /var/lib/etcd
    # pull control-plane images from the Aliyun registry
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: 1.26.0
    controlPlaneEndpoint: 192.168.1.199:16443
    networking:
      dnsDomain: cluster.local
      # service subnet
      serviceSubnet: 10.244.0.0/12
    scheduler: {}
    # everything below is newly added
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    ---
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    cgroupDriver: systemd

    # Initialization
    [root@master1 ~]# kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification
    [init] Using Kubernetes version: v1.26.0
    [preflight] Running pre-flight checks
    [preflight] Pulling images required for setting up a Kubernetes cluster
    ...(certificate, kubeconfig and control-plane bootstrap output omitted)...
    Your Kubernetes control-plane has initialized successfully!

    To start using your cluster, you need to run the following as a regular user:

      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config

    Alternatively, if you are the root user, you can run:

      export KUBECONFIG=/etc/kubernetes/admin.conf

    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/

    You can now join any number of control-plane nodes by copying certificate authorities
    and service account keys on each node and then running the following as root:

      kubeadm join 192.168.1.199:16443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d --control-plane

    Then you can join any number of worker nodes by running the following on each as root:

      kubeadm join 192.168.1.199:16443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d

    # Set up the kubectl config file; this is effectively what authorizes kubectl to manage the cluster with this certificate
    [root@master1 ~]# mkdir -p $HOME/.kube
    [root@master1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@master1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
    [root@master1 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
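
At this point it is worth checking that the control-plane components came up. The node will still show NotReady until the CNI plugin (Calico, installed later) is in place.

    kubectl get nodes
    kubectl get pods -n kube-system -o wide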

V. Scale out the control plane: join master2 to the cluster

    # Copy the certificates from master1 to master2
    [root@master2 ~]# cd /root/ && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube
    [root@master1 ~]# scp /etc/kubernetes/pki/ca.crt master2:/etc/kubernetes/pki/ca.crt
    [root@master1 ~]# scp /etc/kubernetes/pki/ca.key master2:/etc/kubernetes/pki/ca.key
    [root@master1 ~]# scp /etc/kubernetes/pki/sa.key master2:/etc/kubernetes/pki/sa.key
    [root@master1 ~]# scp /etc/kubernetes/pki/sa.pub master2:/etc/kubernetes/pki/sa.pub
    [root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.crt master2:/etc/kubernetes/pki/front-proxy-ca.crt
    [root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.key master2:/etc/kubernetes/pki/front-proxy-ca.key
    [root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.crt master2:/etc/kubernetes/pki/etcd/ca.crt
    [root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.key master2:/etc/kubernetes/pki/etcd/ca.key

    # Print the join command on master1
    [root@master1 ~]# kubeadm token create --print-join-command
    kubeadm join 192.168.1.199:16443 --token ro42bd.4143kdpzvvez4ljx --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d

    # Run the join on master2 (with --control-plane so it joins as a control-plane node)
    [root@master2 ~]# kubeadm join 192.168.1.199:16443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d --control-plane --ignore-preflight-errors=SystemVerification
    [preflight] Running pre-flight checks
    [preflight] Reading configuration from the cluster...
    ...(certificate, etcd and control-plane bootstrap output omitted)...
    This node has joined the cluster and a new control plane instance was created:

    * Certificate signing request was sent to apiserver and approval was received.
    * The Kubelet was informed of the new secure connection details.
    * Control plane label and taint were applied to the new node.
    * The Kubernetes control plane instances scaled up.
    * A new etcd member was added to the local/stacked etcd cluster.

    To start administering your cluster from this node, you need to run the following as a regular user:

      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config

    Run 'kubectl get nodes' to see this node join the cluster.

    # Check the nodes on master1
    [root@master1 ~]# kubectl get nodes
    NAME      STATUS     ROLES           AGE    VERSION
    master1   NotReady   control-plane   145m   v1.26.0
    master2   NotReady   control-plane   79m    v1.26.0
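
As the join output suggests, kubectl can also be set up on master2 so that either control-plane node can administer the cluster:

    # on master2
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    kubectl get nodes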

VI. Scale out the cluster: add the first worker node

    # Print the join command on master1
    [root@master1 ~]# kubeadm token create --print-join-command
    kubeadm join 192.168.1.199:16443 --token ro42bd.4143kdpzvvez4ljx --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d

    Run the printed join command on node1 (without --control-plane, see the sketch below), then check the nodes again:

    [root@master1 ~]# kubectl get nodes -owide
    NAME      STATUS     ROLES           AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
    master1   NotReady   control-plane   161m    v1.26.0   192.168.1.10   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   containerd://1.6.33
    master2   NotReady   control-plane   96m     v1.26.0   192.168.1.11   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   containerd://1.6.33
    node1     NotReady   <none>          2m45s   v1.26.0   192.168.1.12   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   containerd://1.6.33
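
For completeness, the worker join step (which the original capture does not show) would look something like this on node1, using the token just generated:

    [root@node1 ~]# kubeadm join 192.168.1.199:16443 --token ro42bd.4143kdpzvvez4ljx \
        --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d \
        --ignore-preflight-errors=SystemVerification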

VII. Install the Kubernetes network component, Calico
Import the Calico images (for convenience I exported the image bundle myself; contact me via my homepage if you need it).

    # Import the images on every node
    [root@master1 ~]# ctr -n=k8s.io images import calico.tar.gz
    unpacking docker.io/calico/cni:v3.18.0 (sha256:3f4da42b983e5cdcd6ca8f5f18ab9228988908f0d0fc7b4ccdfdea133badac4b)...done
    unpacking docker.io/calico/node:v3.18.0 (sha256:ea61434ae750a9bc6b7e998f6fc4d8eeab43f53ba2de89fc5bbf1459a7eee667)...done
    unpacking docker.io/calico/pod2daemon-flexvol:v3.18.0 (sha256:d18a19134ccf88a2f97f220400953934655b5734eb846d3ac1a72e8e32f0df32)...done
    unpacking docker.io/calico/kube-controllers:v3.18.0 (sha256:c9c9ea8416dc0d09c5df883a3a79bad028516beb5a04d380e2217f41e9aff1f0)...done
    [root@master2 ~]# ctr -n=k8s.io images import calico.tar.gz
    (same output as on master1)
    [root@node1 ~]# ctr -n=k8s.io images import calico.tar.gz
    (same output as on master1)

    Edit calico.yaml: below
        - name: CLUSTER_TYPE
          value: "k8s,bgp"
    add
        - name: IP_AUTODETECTION_METHOD
          value: "interface=ens33"

    [root@master1 ~]# kubectl apply -f calico.yaml
    configmap/calico-config created
    customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
    ...(remaining CRDs and RBAC objects created)...
    clusterrole.rbac.authorization.k8s.io/calico-node created
    clusterrolebinding.rbac.authorization.k8s.io/calico-node created
    daemonset.apps/calico-node created
    serviceaccount/calico-node created
    deployment.apps/calico-kube-controllers created
    serviceaccount/calico-kube-controllers created
    poddisruptionbudget.policy/calico-kube-controllers created

    [root@master1 ~]# kubectl get nodes
    NAME      STATUS   ROLES           AGE     VERSION
    master1   Ready    control-plane   3h26m   v1.26.0
    master2   Ready    control-plane   141m    v1.26.0
    node1     Ready    <none>          47m     v1.26.0
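
Finally, a quick check that the Calico and CoreDNS pods are running, which is what turns the nodes Ready:

    kubectl get pods -n kube-system -o wide | egrep 'calico|coredns'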