Deploying a Highly Available Kubernetes Cluster from Binaries

1. Cluster node planning

The cluster has 3 masters, 3 workers, and 3 etcd members. Constrained by machine resources, all three roles are deployed on the three machines below.

hostname ip role components to install
node164 192.168.0.164 master, node kube-apiserver kube-controller-manager kube-scheduler etcd calico kube-proxy docker kubelet coredns
node165 192.168.0.165 master, node kube-apiserver kube-controller-manager kube-scheduler etcd calico kube-proxy docker kubelet coredns
node166 192.168.0.166 master, node kube-apiserver kube-controller-manager kube-scheduler etcd calico kube-proxy docker kubelet coredns

2. Basic environment configuration

2.1 Configure hosts

Run on node164~166

#!/bin/bash

cat <<EOF>> /etc/hosts
192.168.0.164 node164
192.168.0.165 node165
192.168.0.166 node166
EOF

2.2 Disable the firewall

Run on node164~166
systemctl stop firewalld && systemctl disable firewalld && systemctl daemon-reload

2.3 Disable swap

Run on node164~166
swapoff -a, or edit /etc/fstab and comment out the swap mount (a non-interactive sketch follows below)
echo "vm.swappiness = 0" >> /etc/sysctl.conf
sysctl -p
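
To comment out the swap entry in /etc/fstab non-interactively, a minimal sketch (assumes the swap line contains a whitespace-delimited "swap" field):

sed -ri '/\sswap\s/s/^/#/' /etc/fstab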

2.4 Disable SELinux

Run on node164~166
sed -i 's/enforcing/disabled/g' /etc/selinux/config
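
The change in /etc/selinux/config only takes effect after a reboot; to stop enforcement immediately in the current session, switch SELinux to permissive mode:

setenforce 0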

2.5 System limits

Run on node164~166

#!/bin/bash

ulimit -SHn 65535
cat <<EOF >> /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

2.6 Change the yum repository

Run on node164~166
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo && yum makecache

2.7 Configure IPVS

Run on node164~166
yum install ipvsadm ipset sysstat conntrack libseccomp -y

#!/bin/bash

# Load the IPVS modules. On kernel 4.19+ nf_conntrack_ipv4 has been renamed to nf_conntrack; earlier kernels use nf_conntrack_ipv4.
modprobe -- ip_vs 
modprobe -- ip_vs_rr 
modprobe -- ip_vs_wrr 
modprobe -- ip_vs_sh 
modprobe -- nf_conntrack_ipv4     

cat >/etc/modules-load.d/ipvs.conf <<EOF 
ip_vs 
ip_vs_lc 
ip_vs_wlc 
ip_vs_rr 
ip_vs_wrr 
ip_vs_lblc 
ip_vs_lblcr 
ip_vs_dh 
ip_vs_sh  
ip_vs_nq 
ip_vs_sed 
ip_vs_ftp 
nf_conntrack_ipv4 
ip_tables 
ip_set 
xt_set 
ipt_set 
ipt_rpfilter 
ipt_REJECT 
ipip 
EOF

systemctl enable systemd-modules-load.service && systemctl daemon-reload

2.8 Configure passwordless SSH

Run on node164
ssh-keygen

ssh-copy-id -i /root/.ssh/id_rsa.pub node165
ssh-copy-id -i /root/.ssh/id_rsa.pub node166
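
Verify that passwordless login works, for example:

ssh node165 hostname
ssh node166 hostname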

2.9 Kernel tuning

Run on node164~166

#!/bin/bash

cat <<EOF > /etc/sysctl.d/k8s.conf   
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 131072
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

sysctl --system
lsmod | grep --color=auto -e ip_vs -e nf_conntrack_ipv4

2.10 Time zone configuration

Run on node164~166
echo "Asia/Shanghai" > /etc/timezone
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
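
On systemd-based systems the same can also be done with:

timedatectl set-timezone Asia/Shanghai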

2.11 Deploy the NTP service

Run on node164
cat ntp.sh

#!/bin/bash

# Install the NTP service and sync the time once
yum -y install ntp ntpdate
ntpdate ntp1.aliyun.com
ntpdate ntp2.aliyun.com

# Back up the existing NTP configuration file
[ -f "/etc/ntp.conf" ] && mv /etc/ntp.conf /etc/ntp.confbak

# Write ntp.conf
cat <<EOF>> /etc/ntp.conf
restrict default nomodify notrap noquery
 
restrict 127.0.0.1
restrict 192.168.0.0 mask 255.255.255.0 nomodify    
# Only allow clients in this subnet (192.168.0.0/24) to synchronize time. To allow clients from any IP, change this to "restrict default nomodify".
 
server ntp1.aliyun.com
server ntp2.aliyun.com
server time1.aliyun.com
server time2.aliyun.com
server time-a.nist.gov
server time-b.nist.gov
 
server  127.127.1.0     
# local clock
fudge   127.127.1.0 stratum 10
 
driftfile /var/lib/ntp/drift
broadcastdelay  0.008
keys            /etc/ntp/keys
EOF
# Start the service
systemctl restart ntpd
systemctl enable ntpd
systemctl daemon-reload
# Add a cron job
cat <<EOF>> /etc/crontab
0 0,6,12,18 * * * /usr/sbin/ntpdate ntp1.aliyun.com; /sbin/hwclock -w
EOF
systemctl restart crond

2.12 Time synchronization

Run on node165~166
yum -y install ntpdate && ntpdate 192.168.0.164
Add a cron job:
echo "0 0,6,12,18 * * * /usr/sbin/ntpdate 192.168.0.164;/sbin/hwclock -w" >> /etc/crontab
systemctl restart crond

3. Base software deployment

3.1 Deploy Docker

Run on node164~166
Refer to the "Deploy Docker" section of the article 容器云项目-kubernetes集群部署.
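
If that article is not at hand, a minimal sketch of installing Docker CE on CentOS 7 from the official repository (package versions will differ; adjust to your environment):

yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
systemctl enable docker && systemctl start docker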

3.2 Deploy HAProxy and Keepalived

Run on node164~166
yum install keepalived haproxy -y

3.3 Configure HAProxy

Run on node164~166
cat haproxy.sh

#!/bin/bash

cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
 maxconn 2000
 ulimit-n 16384
 log 127.0.0.1 local0 err
 stats timeout 30s

defaults
 log global
 mode http
 option httplog
 timeout connect 5000
 timeout client 50000
 timeout server 50000
 timeout http-request 15s
 timeout http-keep-alive 15s

frontend monitor-in
 bind *:33305
 mode http
 option httplog
 monitor-uri /monitor

frontend k8s-master
 bind 0.0.0.0:16443
 bind 127.0.0.1:16443
 mode tcp
 option tcplog
 tcp-request inspect-delay 5s
 default_backend k8s-master

backend k8s-master
 mode tcp
 option tcplog
 option tcp-check
 balance roundrobin
 default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 server  node164  192.168.0.164:6443 check
 server  node165  192.168.0.165:6443 check
 server  node166  192.168.0.166:6443 check
EOF

systemctl start haproxy && systemctl status haproxy && systemctl enable haproxy
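
A quick way to verify HAProxy is serving requests is the monitor frontend defined above; it should return HTTP 200:

curl -v http://127.0.0.1:33305/monitor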

3.4 Configure Keepalived

Run on node164
cat keepalived.sh

#!/bin/bash

cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2 
   rise 1
}
vrrp_instance VI_1 {
   state MASTER    # primary node
   interface ens33 # network interface
   mcast_src_ip 192.168.0.164  # local IP
   virtual_router_id 51
   priority 100 # priority; the higher the value, the higher the priority
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.0.167 # virtual IP (VIP)
   }
   track_script {
      chk_apiserver
   }
}
EOF

Run on node165
cat keepalived.sh

#!/bin/bash
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2 
   rise 1
}
vrrp_instance VI_1 {
   state BACKUP
   interface ens33
   mcast_src_ip 192.168.0.165
   virtual_router_id 51
   priority 99
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.0.167
   }
   track_script {
      chk_apiserver
   }
}
EOF

Run on node166
cat keepalived.sh

#!/bin/bash   

cat  >/etc/keepalived/keepalived.conf<<"EOF" 
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2 
   rise 1
}
vrrp_instance VI_1 {
   state BACKUP
   interface ens33
   mcast_src_ip 192.168.0.166
   virtual_router_id 51
   priority 98
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.0.167
   }
   track_script {
      chk_apiserver
   }
}
EOF

3.5 Configure the health-check script

Run on node164~166
cat /etc/keepalived/check_apiserver.sh

#!/bin/bash
err=0
for k in $(seq 1 3)
do
   check_code=$(pgrep haproxy)
   if [[ $check_code == "" ]]; then
       err=$(expr $err + 1)
       sleep 1
       continue
   else
       err=0
       break
   fi
done

if [[ $err != "0" ]]; then
   echo "systemctl stop keepalived"
   /usr/bin/systemctl stop keepalived
   exit 1
else
   exit 0
fi
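
Make sure the script is executable before starting Keepalived:

chmod +x /etc/keepalived/check_apiserver.sh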

systemctl start keepalived && systemctl status keepalived && systemctl enable keepalived

3.6 Status check

Run on node164
ip addr
Output:

2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:5e:24:49 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.164/24 brd 192.168.0.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.0.167/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::6ad0:f3d:634b:77bd/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

The VIP (192.168.0.167) can be seen above.

Run on node164~166
ping 192.168.0.167 -c 4
If the VIP responds at first but stops responding after a while, flush the ARP cache on the Keepalived master node; once the VIP responds again, flush the ARP cache on the backup nodes:
ip neigh flush dev ens33

Test VIP failover
Stop keepalived on individual nodes and verify that the VIP fails over according to node priority.

4. Cluster deployment

4.1 Certificate generation

Run on node164
Create the certificate working directory:
mkdir /root/cfssl

Install the cfssl tools
cd /root/cfssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
cp cfssl_linux-amd64 /usr/local/bin/cfssl
cp cfssljson_linux-amd64 /usr/local/bin/cfssljson
cp cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
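
Verify the tools are installed:

cfssl version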

Create the CA CSR file
cat ca-csr.json

{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "shiyan",
      "O": "k8s",
      "OU": "system"
    }
  ],
  "ca": {
          "expiry": "87600h"
  }
}

Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

Create the CA signing configuration
cat ca-config.json

{
  "signing": {
      "default": {
          "expiry": "87600h"
        },
      "profiles": {
          "kubernetes": {
              "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ],
              "expiry": "87600h"
          }
      }
  }
}

Create the etcd CSR file
cat etcd-csr.json

{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.0.164",
    "192.168.0.165",
    "192.168.0.166"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Hubei",
    "L": "shiyan",
    "O": "k8s",
    "OU": "system"
  }]
}

Generate the etcd certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
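
Optionally inspect the SANs in the generated certificate, e.g.:

openssl x509 -in etcd.pem -noout -text | grep -A1 'Subject Alternative Name'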

4.2 etcd cluster deployment

Run on node164
Download etcd
mkdir /root/etcd-work
cd /root/etcd-work
tar -xvf etcd-v3.5.0-linux-amd64.tar.gz
cp -p etcd-v3.5.0-linux-amd64/etcd* /usr/local/bin/
scp etcd-v3.5.0-linux-amd64/etcd* node165:/usr/local/bin/
scp etcd-v3.5.0-linux-amd64/etcd* node166:/usr/local/bin/

Create the etcd configuration
cat etcd.conf

#[Member]
ETCD_NAME="etcd1"  # 节点名称
ETCD_DATA_DIR="/var/lib/etcd/default.etcd" # 数据目录
ETCD_LISTEN_PEER_URLS="https://192.168.0.164:2380" # 集群通信地址
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.164:2379,http://127.0.0.1:2379" # 客户端访问地址

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.164:2380" # 集群通告地址
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.164:2379" # 客户端通告地址
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.0.164:2380,etcd2=https://192.168.0.165:2380,etcd3=https://192.168.0.166:2380" # 集群节点地址
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" # 集群token
ETCD_INITIAL_CLUSTER_STATE="new" 

Create the etcd systemd service
cat etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Run on node164~166
Create the required directories
mkdir -p /etc/etcd
mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd

Run on node164
cd /root/etcd-work
cp etcd.conf /etc/etcd/
cp etcd.service /usr/lib/systemd/system/
scp etcd.service node165:/usr/lib/systemd/system/
scp etcd.service node166:/usr/lib/systemd/system/
cd /root/cfssl
cp ca*.pem /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
scp ca*.pem etcd*.pem node165:/etc/etcd/ssl
scp ca*.pem etcd*.pem node166:/etc/etcd/ssl

Run on node165
Create the etcd configuration
cat /etc/etcd/etcd.conf

#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.0.165:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.165:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.165:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.165:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.0.164:2380,etcd2=https://192.168.0.165:2380,etcd3=https://192.168.0.166:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

Run on node166
cat /etc/etcd/etcd.conf

#[Member]
ETCD_NAME="etcd3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.0.166:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.166:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.166:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.166:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.0.164:2380,etcd2=https://192.168.0.165:2380,etcd3=https://192.168.0.166:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

Run on node164~166
Start the cluster
systemctl start etcd && systemctl enable etcd && systemctl daemon-reload && systemctl status etcd
Note: a node started on its own will sit in the activating state; all nodes reach running only after the remaining nodes are started.
Verify cluster health; a healthy cluster returns the table below:
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.0.164:2379,https://192.168.0.165:2379,https://192.168.0.166:2379 endpoint health

+----------------------------+--------+-------------+-------+
|          ENDPOINT          | HEALTH |    TOOK     | ERROR |
+----------------------------+--------+-------------+-------+
| https://192.168.0.165:2379 |   true | 40.402278ms |       |
| https://192.168.0.166:2379 |   true |  45.20044ms |       |
| https://192.168.0.164:2379 |   true | 53.227118ms |       |
+----------------------------+--------+-------------+-------+
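
The member list can be checked in the same way:

ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.0.164:2379 member list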

4.3 Kubernetes cluster deployment

Download the Kubernetes server binaries (kubernetes-server-linux-amd64.tar.gz)

Deploy kube-apiserver

Run on node164
mkdir -p /root/k8s-work
cd /root/k8s-work
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /usr/local/bin/
scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy node165:/usr/local/bin/
scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy node166:/usr/local/bin/

Run on node164~166
Create the required working directories
mkdir -p /etc/kubernetes/
mkdir -p /etc/kubernetes/ssl
mkdir -p /var/log/kubernetes

Run on node164
cd /root/cfssl

Create the apiserver CSR file
cat kube-apiserver-csr.json

{
"CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.0.164",
    "192.168.0.165",
    "192.168.0.166",
    "192.168.0.167",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "shiyan",
      "O": "k8s",
      "OU": "system"
    }
  ]
}

Note: the hosts field lists the domain names and IPs authorized to use this certificate. Since the certificate is used by the whole Kubernetes cluster, include every node IP (plus the VIP), as well as the first address of the Service IP range; the Service CIDR planned here is 10.96.0.0/16, whose first address is 10.96.0.1.
Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver

Create the bootstrap token file (the token value is generated randomly)
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

cd /root/k8s-work
Create the apiserver configuration file
cat kube-apiserver.conf

KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.0.164 \
  --secure-port=6443 \
  --advertise-address=192.168.0.164 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.96.0.0/16 \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
  --service-account-issuer=api \
  --etcd-cafile=/etc/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.0.164:2379,https://192.168.0.165:2379,https://192.168.0.166:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=4"

Create the apiserver systemd service file
cat kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

cd /root/cfssl
cp ca*.pem /etc/kubernetes/ssl
cp kube-apiserver*.pem /etc/kubernetes/ssl
cp token.csv /etc/kubernetes
cd /root/k8s-work
cp kube-apiserver.conf /etc/kubernetes
cp kube-apiserver.service /usr/lib/systemd/system/
cd /root/cfssl
scp token.csv node165:/etc/kubernetes
scp token.csv node166:/etc/kubernetes
scp kube-apiserver*.pem node165:/etc/kubernetes/ssl
scp kube-apiserver*.pem node166:/etc/kubernetes/ssl
scp ca*.pem node165:/etc/kubernetes/ssl
scp ca*.pem node166:/etc/kubernetes/ssl
cd /root/k8s-work
scp kube-apiserver.service node165:/usr/lib/systemd/system/
scp kube-apiserver.service node166:/usr/lib/systemd/system/
scp kube-apiserver.conf node165:/etc/kubernetes
scp kube-apiserver.conf node166:/etc/kubernetes

Run on node165~166
Change the --bind-address and --advertise-address values in kube-apiserver.conf to the local node's IP.

Run on node164~166
systemctl start kube-apiserver && systemctl status kube-apiserver && systemctl enable kube-apiserver
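
A quick check that the apiserver is listening (it returns 401 Unauthorized here because anonymous auth is disabled):

curl -k https://192.168.0.164:6443/healthz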

Deploy kubectl
Run on node164
Create the kubectl (admin) CSR file
cd /root/cfssl
cat admin-csr.json

{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "shiyan",
      "O": "system:masters",             
      "OU": "system"
    }
  ]
}

Generate the kubectl certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

Configure the kubeconfig
kube.config is the kubectl configuration file; it contains everything needed to access the apiserver, such as the apiserver address, the CA certificate, and the client certificate.
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.0.167:16443 --kubeconfig=kube.config
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
kubectl config use-context kubernetes --kubeconfig=kube.config
mkdir -p /root/.kube
cp kube.config ~/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config

Check the cluster status
kubectl cluster-info returns:

Kubernetes control plane is running at https://192.168.0.167:16443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

kubectl get cs returns (controller-manager and scheduler report Unhealthy here because they have not been deployed yet):

Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                        ERROR
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused   
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused    
etcd-1               Healthy     {"health":"true","reason":""}                                                                  
etcd-2               Healthy     {"health":"true","reason":""}                                                                  
etcd-0               Healthy     {"health":"true","reason":""}

kubectl get all --all-namespaces returns:

default service/kubernetes ClusterIP 10.96.0.1 443/TCP 7h13m