I. Lab environment planning
1. Specs: 4 vCPU / 8 GB RAM / 100 GB disk per machine
2. Network: all machines can reach each other
K8s cluster role | IP address | Hostname | Installed components
Control node | 192.168.1.10 | master1 | apiserver, controller-manager, scheduler, kubelet, etcd, kube-proxy, container runtime, calico, keepalived, nginx, kubeadm, kubectl
Control node | 192.168.1.11 | master2 | apiserver, controller-manager, scheduler, kubelet, etcd, kube-proxy, container runtime, calico, keepalived, nginx, kubeadm, kubectl
Worker node | 192.168.1.12 | node1 | kube-proxy, calico, coredns, container runtime, kubelet, kubeadm, kubectl
II. Initial configuration of the nodes that will run Kubernetes
1. Write the /etc/hosts file
2. Permanently disable SELinux
3. Change the hostnames
4. Install the base software packages
5. Configure passwordless SSH between the hosts
6. Disable the firewall
7. Disable the swap partition
8. Adjust kernel parameters
9. Configure the Aliyun yum repository needed to install docker and containerd
10. Configure the Aliyun yum repository needed to install the Kubernetes command-line tools
11. Configure time synchronization
12. Install containerd
13. Install docker-ce
14. Configure a Docker registry mirror accelerator
15. Install the components needed to initialize Kubernetes
III. Make the kube-apiserver highly available with keepalived + nginx
1. Install nginx and keepalived
2. Modify the nginx configuration file
3. Modify the keepalived configuration file
4. Write a script that checks the nginx status (on both the primary and the backup)
5. Start the services
6. Test that the VIP is bound
7. Test that the VIP can fail over
IV. Initialize the Kubernetes cluster with kubeadm
V. Scale out the control plane: join master2 to the cluster
VI. Scale out the cluster: add the first worker node
VII. Install the Kubernetes network component Calico and import the calico images (I exported the image bundle myself for convenience; contact me via my homepage if you need it)

II. Initial node configuration
1. Write the /etc/hosts file on every node:
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.10 master1
192.168.1.11 master2
192.168.1.12 node1
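A quick way to confirm that these entries work is to ping each hostname once from any node; this check is not part of the original steps, just a small sanity test:
for h in master1 master2 node1; do ping -c 1 $h; done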
2. Permanently disable SELinux on all three nodes (master1 shown here):
[root@master1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@master1 ~]# getenforce
Disabled
3. Change the hostname on each machine:
[root@localhost ~]# hostnamectl set-hostname master1 && bash
[root@localhost ~]# hostnamectl set-hostname master2 && bash
[root@localhost ~]# hostnamectl set-hostname node1 && bash
4. Install the base software packages (run the same command on all three nodes):
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo libaio-devel vim ncurses-devel autoconf automake zlib-devel epel-release openssh-server socat conntrack telnet ipvsadm
5. Configure passwordless SSH between the hosts:
[root@master1 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:ciMfMFLharbqqjp3N7sS6PWhZSry/WQRTXlY7eHIvFs root@master1
The key's randomart image is:
+---[RSA 2048]----+
| o..+.. |
| o oo . o |
| . = .+ + . |
| o + + o |
| .+ + S . |
| .oo.+* o. E |
| . ..Bo.. o |
|o +o=o+ . |
|*B+o.+++ |
+----[SHA256]-----+
[root@master1 ~]# ssh-copy-id master2
[root@master1 ~]# ssh-copy-id node1
[root@master2 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:IyXBiLzKRvEW25pTGDnlwJ4eizcVTHkNtQlxgIXLskY root@master2
The key's randomart image is:
+---[RSA 2048]----+
| ..o==***o |
| .o*+*.oo.o |
| +.X.= .o |
| ..E * o |
|o.= X . S |
|.+ @ . . |
|. o o |
| |
| |
+----[SHA256]-----+
[root@master2 ~]# ssh-copy-id master1
[root@master2 ~]# ssh-copy-id node1
[root@node1 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:N+PZ7sT33FOfNKUzmPGbAz2ZaiqCme6Lx5AS7TGFrlU root@node1
The key's randomart image is:
+---[RSA 2048]----+
| . |
| . E |
| o o |
|. * . .|
| = + S + * +.|
|o + o *+ Xo.|
| . o + o +oo*=|
| .* . . oo.+=o|
| .++. . ..+o .=|
+----[SHA256]-----+
[root@node1 ~]# ssh-copy-id master1
[root@node1 ~]# ssh-copy-id master2
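With the keys exchanged in both directions, a short loop verifies that every host is reachable without a password prompt (an optional check, not part of the original transcript); each command should print the remote hostname without asking for anything:
[root@master1 ~]# for h in master1 master2 node1; do ssh -o BatchMode=yes $h hostname; done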
6. Disable the firewall on all three nodes:
[root@master1 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@master2 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@node1 ~]# systemctl stop firewalld && systemctl disable firewalld
7. Disable the swap partition
Temporarily disable it:
[root@master1 ~]# swapoff -a
[root@master2 ~]# swapoff -a
[root@node1 ~]# swapoff -a
Permanently disable it:
[root@master1 ~]# sudo sed -i '/swap/s/^/# /' /etc/fstab
[root@master1 ~]# cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Thu Oct 12 04:47:11 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root / xfs defaults 0 0
UUID=88051320-31de-4d1b-ab75-016b19dee78f /boot xfs defaults 0 0
/dev/mapper/centos-home /home xfs defaults 0 0
# /dev/mapper/centos-swap swap swap defaults 0 0
[root@master2 ~]# sudo sed -i '/swap/s/^/# /' /etc/fstab
[root@master2 ~]# cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Tue Sep 3 00:05:49 2024
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root / xfs defaults 0 0
UUID=c626f707-4f74-4924-973a-f9072a9a5db4 /boot xfs defaults 0 0
/dev/mapper/centos-home /home xfs defaults 0 0
# /dev/mapper/centos-swap swap swap defaults 0 0
[root@node1 ~]# sudo sed -i '/swap/s/^/# /' /etc/fstab
[root@node1 ~]# cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Tue Sep 3 00:06:31 2024
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root / xfs defaults 0 0
UUID=34ef344e-32ad-487d-bc39-18a1c2a0c5e6 /boot xfs defaults 0 0
/dev/mapper/centos-home /home xfs defaults 0 0
# /dev/mapper/centos-swap swap swap defaults 0 0
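To confirm that swap really is off on each node, swapon should print nothing and free should report an all-zero Swap row (a quick optional check, not in the original steps):
[root@master1 ~]# swapon --show
[root@master1 ~]# free -m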
8. Adjust kernel parameters on every node (load the br_netfilter module, then apply the sysctl settings):
[root@master1 ~]# modprobe br_netfilter
[root@master1 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@master1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@master2 ~]# modprobe br_netfilter
[root@master2 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@master2 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@node1 ~]# modprobe br_netfilter
[root@node1 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@node1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
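br_netfilter is loaded by hand above, so it disappears after a reboot unless it is also listed for automatic loading. A minimal sketch using the systemd modules-load mechanism (the file name is my own choice):
cat > /etc/modules-load.d/br_netfilter.conf <<EOF
br_netfilter
EOF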
[root@master1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@master2 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@node1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@master1 ~]# cat > /etc/yum.repos.d/kubernetes.repo <<EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
> enabled=1
> gpgcheck=0
> EOF
[root@master1 ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
[root@master2 ~]# cat > /etc/yum.repos.d/kubernetes.repo <<EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
> enabled=1
> gpgcheck=0
> EOF
Create the same kubernetes.repo on node1 as well.
11. Configure time synchronization with the Aliyun/Tencent NTP servers:
[root@master1 ~]# yum install chrony -y
[root@master1 ~]# vim /etc/chrony.conf
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp1.tencent.com iburst
server ntp2.tencent.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
[root@master1 ~]# systemctl restart chronyd
[root@master1 ~]# systemctl enable chronyd
[root@master2 ~]# yum install chrony -y
[root@master2 ~]# vim /etc/chrony.conf
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp1.tencent.com iburst
server ntp2.tencent.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
[root@master2 ~]# systemctl restart chronyd
[root@master2 ~]# systemctl enable chronyd
[root@node1 ~]# yum install chrony -y
[root@node1 ~]# vim /etc/chrony.conf
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp1.tencent.com iburst
server ntp2.tencent.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
[root@node1 ~]# systemctl restart chronyd
[root@node1 ~]# systemctl enable chronyd
12. Install containerd (containerd.io) on all three nodes, then generate the default configuration file:
[root@master1 ~]# yum install containerd.io -y
[root@master1 ~]# mkdir -p /etc/containerd && containerd config default > /etc/containerd/config.toml
Modify the configuration file on every host:
[root@master1 ~]# vim /etc/containerd/config.toml
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    device_ownership_from_security_context = false
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_selinux = false
    enable_tls_streaming = false
    enable_unprivileged_icmp = false
    enable_unprivileged_ports = false
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
      ip_pref = ""
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      ignore_rdt_not_enabled_errors = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          cni_conf_dir = ""
          cni_max_conf_num = 0
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_path = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

        [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.1.62".tls]
          insecure_skip_verify = true

        [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.1.62".auth]
          username = "admin"
          password = "Harbor12345"

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.1.62"]
          endpoint = ["https://192.168.1.62:443"]

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://vh3bm52y.mirror.aliyuncs.com","https://registry.docker-cn.com"]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.internal.v1.tracing"]
    sampling_ratio = 1.0
    service_name = "containerd"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
    sched_core = false

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.service.v1.tasks-service"]
    rdt_config_file = ""

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    discard_blocks = false
    fs_options = ""
    fs_type = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""
    upperdir_label = false

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

  [plugins."io.containerd.tracing.processor.v1.otlp"]
    endpoint = ""
    insecure = false
    protocol = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.bolt.open" = "0s"
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0
Start containerd and enable it at boot:
[root@master1 ~]# systemctl daemon-reload
[root@master1 ~]# systemctl enable containerd --now
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /usr/lib/systemd/system/containerd.service.
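Optionally, point crictl at the containerd socket so that commands like crictl images and crictl ps can be used for troubleshooting later; this step is not in the original walkthrough, and /etc/crictl.yaml is the standard file crictl reads:
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF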
13. Install docker-ce on all three nodes and start it:
[root@master1 ~]# yum install docker-ce -y
[root@master1 ~]# systemctl enable docker --now
14. Configure the Docker registry mirror accelerator:
[root@master1 ~]# vim /etc/docker/daemon.json
{
"registry-mirrors": [
"https://docker.xuanyuan.me",
"https://docker.1ms.run"
]
}
[root@master1 ~]# systemctl daemon-reload
[root@master1 ~]# systemctl restart docker
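To confirm that Docker picked up the mirror configuration, docker info should list the two mirrors under Registry Mirrors (an optional check, not in the original steps):
[root@master1 ~]# docker info | grep -A 3 'Registry Mirrors'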
[root@master1 ~]# yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0
[root@master2 ~]# yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0
[root@node1 ~]# yum install -y kubelet-1.26.0 kubeadm-1.26.0 kubectl-1.26.0
[root@master1 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@master2 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node1 ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
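A quick version check confirms that all three tools landed at the pinned 1.26.0 release (optional; the kubelet will keep restarting until kubeadm init runs, which is expected at this point):
[root@master1 ~]# kubeadm version -o short
[root@master1 ~]# kubectl version --client
[root@master1 ~]# kubelet --version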
[root@master1 ~]# yum install epel-release keepalived -y
[root@master1 ~]# yum -y install nginx nginx-mod-stream
Modify the nginx configuration file (the same configuration on the primary and the backup)
vim /etc/nginx/nginx.conf (add the following above the http block)
events {
worker_connections 1024;
}
# Layer-4 load balancing for the apiserver on the two master nodes
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 192.168.1.10:6443 weight=5 max_fails=3 fail_timeout=30s;
        server 192.168.1.11:6443 weight=5 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 16443; # nginx runs on the same hosts as the apiserver, so this port must not be 6443 or they would conflict
        proxy_pass k8s-apiserver;
    }
}
Check that the configuration file is valid:
[root@master1 ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
Primary keepalived (master1: 192.168.1.10)
[root@master1 nginx]# vim /etc/keepalived/keepalived.conf
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_MASTER
}
vrrp_script check_nginx {
script "/etc/keepalived/check_nginx.sh"
}
vrrp_instance VI_1 {
state MASTER
interface ens33 # change to the actual NIC name
virtual_router_id 51 # VRRP router ID; each VRRP instance has a unique one
priority 100 # priority; set it to 90 on the backup server
advert_int 1 # interval between VRRP heartbeat advertisements, default 1 second
authentication {
auth_type PASS
auth_pass 1111
}
# virtual IP
virtual_ipaddress {
192.168.1.199/24
}
track_script {
check_nginx
}
}
Backup keepalived (master2: 192.168.1.11)
[root@master2 nginx]# vim /etc/keepalived/keepalived.conf
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_BACKUP
}
vrrp_script check_nginx {
script "/etc/keepalived/check_nginx.sh"
}
vrrp_instance VI_1 {
state BACKUP
interface ens33 # change to the actual NIC name
virtual_router_id 51 # VRRP router ID; each VRRP instance has a unique one
priority 90 # priority; 90 on the backup server
advert_int 1 # interval between VRRP heartbeat advertisements, default 1 second
authentication {
auth_type PASS
auth_pass 1111
}
# virtual IP
virtual_ipaddress {
192.168.1.199/24
}
track_script {
check_nginx
}
}
[root@master1 nginx]# vim /etc/keepalived/check_nginx.sh
#!/bin/bash
# 1. Check whether nginx is running
counter=$(ps -ef | grep nginx | grep sbin | egrep -cv "grep|$$")
if [ $counter -eq 0 ]; then
    # 2. If it is not running, try to start nginx
    service nginx start
    sleep 2
    # 3. After waiting 2 seconds, check the nginx status again
    counter=$(ps -ef | grep nginx | grep sbin | egrep -cv "grep|$$")
    # 4. If nginx still is not running, stop keepalived so the VIP can fail over
    if [ $counter -eq 0 ]; then
        service keepalived stop
    fi
fi
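keepalived can only run the check script if it is executable, so mark it as such on both masters (implied by the setup but not shown in the original transcript):
[root@master1 ~]# chmod +x /etc/keepalived/check_nginx.sh
[root@master2 ~]# chmod +x /etc/keepalived/check_nginx.sh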
[root@master1 ~]# systemctl enable nginx --now
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.
[root@master1 ~]# systemctl enable keepalived --now
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master2 ~]# systemctl enable nginx --now
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.
[root@master2 ~]# systemctl enable keepalived --now
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master1 nginx]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:c4:37:4c brd ff:ff:ff:ff:ff:ff
inet 192.168.1.10/24 brd 192.168.1.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.1.199/24 scope global secondary ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fec4:374c/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:34:f3:28:f9 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
[root@master2 keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:c4:a6:fb brd ff:ff:ff:ff:ff:ff
inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::398e:b420:7067:cb38/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@master1 ~]# systemctl stop keepalived
[root@master1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:c4:37:4c brd ff:ff:ff:ff:ff:ff
inet 192.168.1.10/24 brd 192.168.1.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fec4:374c/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:34:f3:28:f9 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
[root@master2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:c4:a6:fb brd ff:ff:ff:ff:ff:ff
inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.1.199/24 scope global secondary ens33
valid_lft forever preferred_lft forever
inet6 fe80::398e:b420:7067:cb38/64 scope link noprefixroute
valid_lft forever preferred_lft forever
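Once the failover test passes, start keepalived on master1 again; because its priority (100) is higher than master2's (90), it preempts and the VIP drifts back. This follow-up step is not in the original transcript:
[root@master1 ~]# systemctl start keepalived
[root@master1 ~]# ip a | grep 192.168.1.199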
[root@master1 ~]# kubeadm config print init-defaults > kubeadm.yaml
Edit the configuration file:
[root@master1 ~]# vim kubeadm.yaml
cat kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
#localAPIEndpoint:
#  advertiseAddress: 1.2.3.4
#  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
#  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
#use the Aliyun mirror of the image registry
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.26.0
controlPlaneEndpoint: 192.168.1.199:16443
networking:
  dnsDomain: cluster.local
  #specify the service subnet
  serviceSubnet: 10.244.0.0/12
scheduler: {}
#everything below is newly added
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
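Before running the init, the control-plane images can be pre-pulled with the same config file; this is optional and not in the original steps, but it makes the init faster and surfaces registry problems early:
[root@master1 ~]# kubeadm config images pull --config=kubeadm.yaml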
#The initialization run
[root@master1 ~]# kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification
[init] Using Kubernetes version: v1.26.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master1] and IPs [10.240.0.1 192.168.1.199]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master1] and IPs [192.168.1.199 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master1] and IPs [192.168.1.199 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
W0401 11:10:13.301113 54632 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
W0401 11:10:13.394016 54632 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
W0401 11:10:13.621219 54632 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
W0401 11:10:13.796795 54632 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 7.534614 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
W0401 11:10:23.311365 54632 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.1.199:16443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d \
	--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.199:16443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d
#Set up kubectl's config file; this effectively authorizes kubectl, so kubectl commands can use this certificate to manage the cluster
[root@master1 ~]# mkdir -p $HOME/.kube
[root@master1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master1 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
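At this point kubectl can already talk to the cluster. An optional look at the kube-system pods shows the control-plane components coming up (coredns stays Pending until the Calico network plugin is installed in part VII):
[root@master1 ~]# kubectl get pods -n kube-system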
#Copy the certificates from the master1 node to master2
[root@master2 ~]# mkdir -p /etc/kubernetes/pki/etcd
[root@master1 ~]# scp /etc/kubernetes/pki/ca.crt master2:/etc/kubernetes/pki/
ca.crt 100% 1099 282.5KB/s 00:00
[root@master1 ~]# scp /etc/kubernetes/pki/ca.key master2:/etc/kubernetes/pki/
ca.key 100% 1675 756.0KB/s 00:00
[root@master1 ~]# scp /etc/kubernetes/pki/sa.key master2:/etc/kubernetes/pki/
sa.key 100% 1679 702.4KB/s 00:00
[root@master1 ~]# scp /etc/kubernetes/pki/sa.pub master2:/etc/kubernetes/pki/
sa.pub 100% 451 195.2KB/s 00:00
[root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.crt master2:/etc/kubernetes/pki/
front-proxy-ca.crt 100% 1115 519.5KB/s 00:00
[root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.key master2:/etc/kubernetes/pki/
front-proxy-ca.key 100% 1679 522.0KB/s 00:00
[root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.crt master2:/etc/kubernetes/pki/etcd/
ca.crt 100% 1086 425.1KB/s 00:00
[root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.key master2:/etc/kubernetes/pki/etcd/
ca.key 100% 1675 607.6KB/s 00:00
#On master1, print the command for joining new nodes
[root@master1 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.1.199:16443 --token ro42bd.4143kdpzvvez4ljx --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d
#Run the join on master2
[root@master2 ~]# kubeadm join 192.168.1.199:16443 --token ro42bd.4143kdpzvvez4ljx \
> --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d \
> --control-plane --ignore-preflight-errors=SystemVerification
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master2] and IPs [10.240.0.1 192.168.1.11 192.168.1.199]
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master2] and IPs [192.168.1.11 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master2] and IPs [192.168.1.11 127.0.0.1 ::1]
[certs] Generating "front-proxy-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
W0401 12:15:23.736289 115592 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
W0401 12:15:23.864860 115592 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
W0401 12:15:24.054538 115592 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Creating static Pod manifest for "etcd"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[kubelet-check] Initial timeout of 40s passed.
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node master2 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master2 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
#Check the nodes on master1
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 NotReady control-plane 145m v1.26.0
master2 NotReady control-plane 79m v1.26.0
[root@master1 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.1.199:16443 --token ro42bd.4143kdpzvvez4ljx --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d
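The join run on node1 itself is not shown in the original transcript; roughly, it is the printed command executed on node1, with the same preflight override used on the masters:
[root@node1 ~]# kubeadm join 192.168.1.199:16443 --token ro42bd.4143kdpzvvez4ljx --discovery-token-ca-cert-hash sha256:3f265724c4e9ed59d93dc2154346955087adb4426e84a9e68a3736c864ee304d --ignore-preflight-errors=SystemVerification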
[root@master1 ~]# kubectl get nodes -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
master1 NotReady control-plane 161m v1.26.0 192.168.1.10 <none> CentOS Linux 7 (Core) 3.10.0-1160.el7.x86_64 containerd://1.6.33
master2 NotReady control-plane 96m v1.26.0 192.168.1.11 <none> CentOS Linux 7 (Core) 3.10.0-1160.el7.x86_64 containerd://1.6.33
node1 NotReady <none> 2m45s v1.26.0 192.168.1.12 <none> CentOS Linux 7 (Core) 3.10.0-1160.el7.x86_64 containerd://1.6.33
[root@master1 ~]# ctr -n=k8s.io images import calico.tar.gz
unpacking docker.io/calico/cni:v3.18.0 (sha256:3f4da42b983e5cdcd6ca8f5f18ab9228988908f0d0fc7b4ccdfdea133badac4b)...done
unpacking docker.io/calico/node:v3.18.0 (sha256:ea61434ae750a9bc6b7e998f6fc4d8eeab43f53ba2de89fc5bbf1459a7eee667)...done
unpacking docker.io/calico/pod2daemon-flexvol:v3.18.0 (sha256:d18a19134ccf88a2f97f220400953934655b5734eb846d3ac1a72e8e32f0df32)...done
unpacking docker.io/calico/kube-controllers:v3.18.0 (sha256:c9c9ea8416dc0d09c5df883a3a79bad028516beb5a04d380e2217f41e9aff1f0)...done
[root@master2 ~]# ctr -n=k8s.io images import calico.tar.gz
unpacking docker.io/calico/cni:v3.18.0 (sha256:3f4da42b983e5cdcd6ca8f5f18ab9228988908f0d0fc7b4ccdfdea133badac4b)...done
unpacking docker.io/calico/node:v3.18.0 (sha256:ea61434ae750a9bc6b7e998f6fc4d8eeab43f53ba2de89fc5bbf1459a7eee667)...done
unpacking docker.io/calico/pod2daemon-flexvol:v3.18.0 (sha256:d18a19134ccf88a2f97f220400953934655b5734eb846d3ac1a72e8e32f0df32)...done
unpacking docker.io/calico/kube-controllers:v3.18.0 (sha256:c9c9ea8416dc0d09c5df883a3a79bad028516beb5a04d380e2217f41e9aff1f0)...done
[root@node1 ~]# ctr -n=k8s.io images import calico.tar.gz
unpacking docker.io/calico/cni:v3.18.0 (sha256:3f4da42b983e5cdcd6ca8f5f18ab9228988908f0d0fc7b4ccdfdea133badac4b)...done
unpacking docker.io/calico/node:v3.18.0 (sha256:ea61434ae750a9bc6b7e998f6fc4d8eeab43f53ba2de89fc5bbf1459a7eee667)...done
unpacking docker.io/calico/pod2daemon-flexvol:v3.18.0 (sha256:d18a19134ccf88a2f97f220400953934655b5734eb846d3ac1a72e8e32f0df32)...done
unpacking docker.io/calico/kube-controllers:v3.18.0 (sha256:c9c9ea8416dc0d09c5df883a3a79bad028516beb5a04d380e2217f41e9aff1f0)...done
Modify calico.yaml: below
- name: CLUSTER_TYPE
  value: "k8s,bgp"
add
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens33"
[root@master1 ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 Ready control-plane 3h26m v1.26.0
master2 Ready control-plane 141m v1.26.0
node1 Ready <none> 47m v1.26.0
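As a final optional check, all calico and coredns pods in kube-system should reach Running once the nodes report Ready:
[root@master1 ~]# kubectl get pods -n kube-system -o wide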