Installing a Highly Available Kubernetes Cluster with kubeadm

1. Set Up the Environment

1. First, plan the node layout

Role       IP Address
master01   192.168.1.100
master02   192.168.1.101
master03   192.168.1.102
node01     192.168.1.103
node02     192.168.1.104
node03     192.168.1.105
harbor     192.168.1.106
nginx      192.168.1.107
NFS        192.168.1.108
#harbor, nginx, and NFS were skipped (the VM environment could not handle them, so they were not configured)

If the firewall is enabled, the following ports must be opened:

master

Protocol  Direction  Port Range  Purpose                   Used By
TCP       Inbound    6443        Kubernetes API server     All components
TCP       Inbound    2379-2380   etcd server client API    kube-apiserver, etcd
TCP       Inbound    10250       Kubelet API               kubelet itself, control plane components
TCP       Inbound    10251       kube-scheduler            kube-scheduler itself
TCP       Inbound    10252       kube-controller-manager   kube-controller-manager itself
TCP       Inbound    6666        haproxy                   Talks to the API server on this port (custom port)

node

Protocol  Direction  Port Range   Purpose            Used By
TCP       Inbound    10250        Kubelet API        kubelet itself, control plane components
TCP       Inbound    30000-32767  NodePort services  All components
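
If you prefer to keep firewalld running instead of disabling it in the next step, the master ports can be opened like this (a minimal sketch; the node ports follow the same pattern):

# Open the control-plane ports listed above (run on each master)
firewall-cmd --permanent --add-port=6443/tcp
firewall-cmd --permanent --add-port=2379-2380/tcp
firewall-cmd --permanent --add-port=10250-10252/tcp
firewall-cmd --permanent --add-port=6666/tcp
firewall-cmd --reload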

On all nodes: disable SELinux and the firewall, raise the maximum file descriptor limit, synchronize time, and configure hostname resolution.

# Set SELinux to permissive mode (effectively disabling it)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
# Stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Raise the maximum file descriptor limit
ulimit -HSn 65535 # temporary setting for the current session
vim /etc/security/limits.conf  # permanent setting
# Append the following lines at the end
*   soft    nofile  65535
*   hard    nofile  65535
*   soft    nproc   65535
*   hard    nproc   65535
# Time synchronization; this is a test environment so a one-shot sync is fine.
# In production, set up a dedicated NTP server instead.
yum -y install ntpdate
ntpdate ntp1.aliyun.com
# Configure name resolution between all nodes
vim /etc/hosts
192.168.1.100 master01
192.168.1.101 master02
192.168.1.102 master03
192.168.1.103 node01
192.168.1.104 node02
192.168.1.105 node03
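
Once the passwordless SSH from section 2.1 is in place, the hosts file can be pushed to the remaining nodes in one loop instead of being edited by hand on each; a minimal sketch run from master01:

# Distribute /etc/hosts from master01 to the other five nodes
for i in {1..5}; do
  scp /etc/hosts root@192.168.1.10$i:/etc/hosts
done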

2. Configure the Environment

Disable the swap partition on all nodes so that it does not interfere with kubelet; swap must be disabled.

[root@master01 ~]# swapoff -a  # temporary
[root@master01 ~]# sed -i 's#^\/dev\/mapper\/centos-swap#\#\/dev\/mapper\/centos-swap#' /etc/fstab # permanent: comment out the swap entry

Allow iptables to see bridged traffic on all nodes:

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
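
A quick check confirms the module loaded and the sysctls took effect:

# Verify the br_netfilter module is loaded
lsmod | grep br_netfilter
# Both values should print 1
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables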

2.1 Configure passwordless SSH between all nodes

[root@master01 ~]# ssh-keygen -t rsa -N ''  -f /root/.ssh/id_rsa
[root@master01 ~]# for i in {1..5} ;do ssh-copy-id root@192.168.1.10$i ;done # shown for master01 only; do the same on the other nodes

2.2 Install Docker CE on all nodes

#(Install Docker CE)
#Set up the repository
#Install the prerequisites
yum install -y yum-utils device-mapper-persistent-data lvm2
#Add the Docker repository
yum-config-manager --add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
#Install Docker CE
yum update -y && sudo yum install -y \
containerd.io-1.2.13 \
docker-ce-19.03.11 \
docker-ce-cli-19.03.11
#Create the /etc/docker directory
mkdir /etc/docker
#Configure the Docker daemon
cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://ybzd84iy.mirror.aliyuncs.com"],
  "insecure-registries": ["http://xxxx.harbor.com"]
}
EOF
# Create /etc/systemd/system/docker.service.d
mkdir -p /etc/systemd/system/docker.service.d
# Have Docker set the iptables FORWARD policy to ACCEPT on startup (Docker sets it to DROP by default)
vim /usr/lib/systemd/system/docker.service
...
ExecStartPost=/sbin/iptables -P FORWARD ACCEPT
...
# Restart Docker
systemctl daemon-reload
systemctl restart docker

To have the docker service start on boot, run:

systemctl enable docker
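
After the restart it is worth confirming that the daemon picked up the systemd cgroup driver, since it must match the one kubelet uses:

# Should print "Cgroup Driver: systemd"
docker info | grep -i 'cgroup driver'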

2.3 Install kubeadm, kubelet, and kubectl on all nodes

#Use the Tsinghua Kubernetes mirror repository
#($basearch is escaped so the unquoted heredoc does not expand it)
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=0
EOF

yum install -y kubelet kubeadm kubectl

systemctl enable kubelet
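
The unpinned install above pulls the newest packages; because the images prepared in section 2.5 are v1.20.4, pinning the tools to the same version avoids a mismatch. A sketch, assuming the mirror carries these package versions:

# Pin kubelet/kubeadm/kubectl to the image version used later
yum install -y kubelet-1.20.4 kubeadm-1.20.4 kubectl-1.20.4
systemctl enable kubelet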

2.4 Install haproxy + keepalived on all master nodes

yum -y install haproxy keepalived

Edit the haproxy configuration file:

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
   mode                    http
   log                     global
   option                  httplog
   option                  dontlognull
   option http-server-close
   option forwardfor       except 127.0.0.0/8
   option                  redispatch
   retries                 1
   timeout http-request    10s
   timeout queue           20s
   timeout connect         5s
   timeout client          20s
   timeout server          20s
   timeout http-keep-alive 10s
   timeout check           10s

#---------------------------------------------------------------------
# apiserver frontend which proxies to the masters
#---------------------------------------------------------------------
frontend apiserver
   bind *:6666    # a custom port for reaching the API server
   mode tcp
   option tcplog
   default_backend apiserver

#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend apiserver
   option httpchk GET /healthz
   http-check expect status 200
   mode tcp
   option ssl-hello-chk
   balance     roundrobin
       server master01 192.168.1.100:6443 check # load balance across each master's API server
       server master02 192.168.1.101:6443 check
       server master03 192.168.1.102:6443 check
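
The edited file can be syntax-checked before the service is started (both services are started together once keepalived is configured below):

# Validate the haproxy configuration; prints "Configuration file is valid"
haproxy -c -f /etc/haproxy/haproxy.cfg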

Edit the keepalived configuration file:

! /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script check_apiserver {
  script "/etc/keepalived/check_apiserver.sh"  # script that probes the API server
  interval 3
  weight -2
  fall 10
  rise 2
}

vrrp_instance VI_1 {
    state MASTER     # set to BACKUP on the other masters
    interface ens33
    virtual_router_id 51
    priority 101
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.1.200   # the VIP
    }
    track_script {
        check_apiserver
    }
}

Health-check script that probes whether the API server port is reachable:

#!/bin/sh

errorExit() {
   echo "*** $*" 1>&2
   exit 1
}

curl --silent --max-time 2 --insecure https://localhost:6666/ -o /dev/null || errorExit "Error GET https://localhost:6666/"
if ip addr | grep -q 192.168.1.200; then
   curl --silent --max-time 2 --insecure https://192.168.1.200:6666/ -o /dev/null || errorExit "Error GET https://192.168.1.200:6666/"
fi
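
keepalived runs the script as root, and it must be executable; a minimal sketch of wiring everything up and confirming that the VIP landed on the MASTER node:

chmod +x /etc/keepalived/check_apiserver.sh
# Start and enable both services on every master
systemctl enable --now haproxy keepalived
# On the MASTER node the VIP should appear on ens33
ip addr show ens33 | grep 192.168.1.200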

2.5 Download the Kubernetes component images

#Pulling from Docker Hub is slow; my pre-pulled images are available if needed
#Baidu Netdisk: https://pan.baidu.com/s/1AnIFjjpMaRh7WFtIrC3Y4Q
#Extraction code: xfxf
#List the required images and write them to a file; run these steps on all master nodes
#If a private registry such as harbor is installed, the images can be pushed there for easier pulling
kubeadm config images list >> k8simagelist.txt
k8s.gcr.io/kube-apiserver:v1.20.4
k8s.gcr.io/kube-controller-manager:v1.20.4
k8s.gcr.io/kube-scheduler:v1.20.4
k8s.gcr.io/kube-proxy:v1.20.4
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
#Download the images in bulk with a script
#!/bin/bash
for i in $(cat k8simagelist.txt)
do
docker pull $i
done

#Export the images the worker nodes need from those already downloaded
docker image save k8s.gcr.io/kube-proxy:v1.20.4 k8s.gcr.io/pause:3.2 -o nodeimage.tar
#Load them on every worker node
docker image load -i nodeimage.tar
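
As noted above, the images can also be retagged and pushed to a private harbor registry; a sketch, assuming the placeholder registry address xxxx.harbor.com from daemon.json and a hypothetical project named k8s:

#!/bin/bash
# Retag every downloaded image and push it to the private registry
REGISTRY=xxxx.harbor.com/k8s   # hypothetical project path
for i in $(cat k8simagelist.txt)
do
  target=$REGISTRY/${i##*/}    # keep only name:tag, e.g. kube-proxy:v1.20.4
  docker tag $i $target
  docker push $target
done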

3. Initialize the Cluster

[root@master01 ~]# kubeadm init --kubernetes-version=v1.20.4 --control-plane-endpoint=192.168.1.200:6666  --pod-network-cidr=172.16.0.0/16 --upload-certs
#After init succeeds, the following output is produced; save it, as it is needed shortly
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.1.200:6666 --token m8h2jc.8bgz25lla8oqij8l \
    --discovery-token-ca-cert-hash sha256:4ec3cffd7b17344c5c733f092636db97f7e76ebe9bbb3fc2d68766bfbb539b1e \
    --control-plane --certificate-key a87ba093dba102a3b6dc16627ae48e79654cff053d0fcfc9d72b9671fb33b1ca

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.200:6666 --token m8h2jc.8bgz25lla8oqij8l \
    --discovery-token-ca-cert-hash sha256:4ec3cffd7b17344c5c733f092636db97f7e76ebe9bbb3fc2d68766bfbb539b1e

Join the other master nodes through the shared endpoint:

kubeadm join 192.168.1.200:6666 --token m8h2jc.8bgz25lla8oqij8l \
    --discovery-token-ca-cert-hash sha256:4ec3cffd7b17344c5c733f092636db97f7e76ebe9bbb3fc2d68766bfbb539b1e \
    --control-plane --certificate-key a87ba093dba102a3b6dc16627ae48e79654cff053d0fcfc9d72b9671fb33b1ca

Configure kubectl on all master nodes:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

3.1 Install the Calico network add-on

Download the calico.yaml file:

curl https://docs.projectcalico.org/manifests/calico-typha.yaml -o calico.yaml

Modify CALICO_IPV4POOL_CIDR in calico.yaml; the default is 192.168.0.0/16, change it to match the --pod-network-cidr value passed to kubeadm init.

vim calico.yaml
...
 replicas: 3   # the official recommendation for production is 3 replicas
 revisionHistoryLimit: 3
...
- name: CALICO_IPV4POOL_CIDR  # uncomment this
  value: "172.16.0.0/16"     # match the value used at kubeadm init
...

3.2 Download the images required by Calico

#Pulling from Docker Hub is slow; my pre-pulled images are available if needed
#Baidu Netdisk: https://pan.baidu.com/s/1AnIFjjpMaRh7WFtIrC3Y4Q
#Extraction code: xfxf
#docker pull accepts only one image per invocation, so pull them in a loop
[root@master01 ~]# for i in node typha pod2daemon-flexvol cni kube-controllers; do docker pull calico/$i:v3.18.1; done
#Bundle the images into one archive
[root@master01 ~]# docker image save calico/node:v3.18.1 calico/typha:v3.18.1 calico/pod2daemon-flexvol:v3.18.1 calico/cni:v3.18.1 calico/kube-controllers:v3.18.1 -o calicoimage.tar
#Copy the archive to the other nodes and load it
[root@master0X ~]# docker image load -i calicoimage.tar

3.3 Apply the calico.yaml file

X stands for a master node; the manifest only needs to be applied once on one of them.

[root@master0X ~]# kubectl apply -f calico.yaml
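
The rollout can then be watched until the Calico and coredns pods reach Running:

# calico-node, calico-typha, calico-kube-controllers, and coredns should all become Running
kubectl get pods -n kube-system -w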

3.4 Join the worker nodes to the cluster

X stands for each node.

[root@node0X ~]# kubeadm join 192.168.1.200:6666 --token m8h2jc.8bgz25lla8oqij8l \
    --discovery-token-ca-cert-hash sha256:4ec3cffd7b17344c5c733f092636db97f7e76ebe9bbb3fc2d68766bfbb539b1e

View the current tokens:

kubeadm token list

Tokens expire after 24 hours. To join a node after the current token has expired, create a new token by running the following on a master node:

kubeadm token create

The output is similar to:

5didvk.d09sbcov8ph2amjw
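
kubeadm can also print the complete join command directly, which avoids assembling the token and hash by hand:

kubeadm token create --print-join-command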

If you do not have the value of --discovery-token-ca-cert-hash, you can get it by running the following command chain on a master node:

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
   openssl dgst -sha256 -hex | sed 's/^.* //'

The output is similar to:

8cb2de97839780a412b93877f8507ad6c94f73add17d5d7058e91741c9d5ec78

To re-upload the certificates and generate a new decryption key, run the following on a master that is already joined to the cluster:

kubeadm init phase upload-certs --upload-certs

You can also specify a custom --certificate-key during init, which join can use later. To generate such a key, run:

kubeadm alpha certs certificate-key

3.5 Verify

[root@master01 ~]# kubectl get nodes
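
Beyond node status, the system pods give a fuller picture of cluster health:

# All nodes should report Ready once Calico is up
kubectl get nodes -o wide
# All kube-system pods should be Running
kubectl get pods -n kube-system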