Minimum configuration: 2 CPU cores and 2 GB RAM per node.

192.168.1.11 master1
192.168.1.12 master2
192.168.1.13 master3
192.168.1.20 node1

I. Initialize the lab environment (run on every node)

1. Set the hostname, configure /etc/hosts, switch the yum repos, disable the firewall and SELinux, set up time sync, and turn off swap

[root@master1 ~]# hostnamectl set-hostname master1
[root@master1 ~]# vim /etc/hosts
192.168.1.11 master1
192.168.1.12 master2
192.168.1.13 master3
192.168.1.20 node1
[root@master1 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@master1 ~]# curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
[root@master1 ~]# yum clean all
[root@master1 ~]# yum makecache fast
[root@master1 ~]# systemctl stop firewalld  && systemctl  disable  firewalld
[root@master1 ~]# ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
[root@master1 ~]# ntpdate cn.pool.ntp.org
[root@master1 ~]# crontab -e
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@master1 ~]# systemctl restart crond
[root@master1 ~]# sed -i  's/SELINUX=enforcing/SELINUX=disabled/g'  /etc/selinux/config
[root@master1 ~]# swapoff  -a
[root@master1 ~]# sed -i '/swap/s/^/#/g' /etc/fstab
[root@master1 ~]# reboot -f
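After the reboot, a quick sanity check (a sketch; the comments show the expected output) confirms that SELinux, swap, and the firewall are really off:

[root@master1 ~]# getenforce                          # Disabled
[root@master1 ~]# free -m | grep -i swap              # the Swap line should show 0 total
[root@master1 ~]# systemctl is-enabled firewalld      # disabled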

2. Kernel parameters and Docker configuration

[root@master1 ~]# cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master1 ~]# modprobe br_netfilter
[root@master1 ~]# sysctl --system
[root@master1 ~]# mkdir -p /etc/docker
[root@master1 ~]# cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": ["overlay2.override_kernel_check=true"]
}
EOF
[root@master1 ~]# yum install -y docker-ce-19.03.7-3.el7
[root@master1 ~]# systemctl enable docker && systemctl start docker
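As a quick check (a sketch), confirm that Docker picked up the systemd cgroup driver configured in daemon.json, which is what kubelet will expect:

[root@master1 ~]# docker info --format '{{.CgroupDriver}}'    # expect: systemd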

3. Make the bridge settings persistent and enable IPVS

[root@master1 ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
[root@master1 ~]# echo 1 >/proc/sys/net/bridge/bridge-nf-call-ip6tables
[root@master1 ~]# echo """
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
""" > /etc/sysctl.conf
[root@master1 ~]# sysctl -p
[root@master1 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
[root@master1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
[root@master1 ~]# lsmod | grep ip_vs
nf_nat                 26583  4 ip_vs_ftp,nf_nat_ipv4,nf_nat_ipv6,nf_nat_masquerade_ipv4
ip_vs_sed              12519  0
ip_vs_nq               12516  0
ip_vs_sh               12688  0
ip_vs_dh               12688  0
ip_vs_lblcr            12922  0
ip_vs_lblc             12819  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  35
ip_vs_wlc              12519  0
ip_vs_lc               12516  0
ip_vs                 145458  59 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          139264  9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

4. Install Kubernetes 1.18.2

[root@master1 ~]# yum -y install kubelet-1.18.2 kubeadm-1.18.2
Installed: kubeadm.x86_64 0:1.18.2-0         kubelet.x86_64 0:1.18.2-0
Dependency Installed: cri-tools.x86_64 0:1.19.0-0          kubectl.x86_64 0:1.23.0-0          kubernetes-cni.x86_64 0:0.8.7-0

Note that yum pulled in the latest kubectl (1.23.0 here) as a dependency; to stay within the supported version skew, pin it explicitly with yum -y install kubectl-1.18.2.
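Optionally lock the versions so a later yum update cannot upgrade the components behind the cluster's back (a sketch; assumes the yum versionlock plugin is acceptable in your environment):

[root@master1 ~]# yum install -y yum-plugin-versionlock
[root@master1 ~]# yum versionlock add kubelet kubeadm kubectl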

Upload the images to master1, master2, master3, and node1.

[root@master1 ~]# for i in `ls *.gz`;do docker load -i $i;done
[root@master1 ~]# docker images
REPOSITORY                           TAG       IMAGE ID       CREATED         SIZE
k8s.gcr.io/etcd                      3.4.3-0   303ce5db0e90   2 years ago     288MB
k8s.gcr.io/kube-apiserver            v1.18.2   6ed75ad404bd   20 months ago   173MB
k8s.gcr.io/kube-scheduler            v1.18.2   a3099161e137   20 months ago   95.3MB
k8s.gcr.io/kube-controller-manager   v1.18.2   ace0a8c17ba9   20 months ago   162MB
The worker node only needs the three images below:
k8s.gcr.io/pause                     3.2       80d28bedfe5d   22 months ago   683kB
k8s.gcr.io/coredns                   1.6.7     67da37a9a360   22 months ago   43.8MB
k8s.gcr.io/kube-proxy                v1.18.2   0d40868643c6   20 months ago   117MB
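A minimal sketch for pushing the image tarballs to the other nodes and loading them there (assumes the .gz files are in the current directory and passwordless SSH to the other hosts):

[root@master1 ~]# for h in master2 master3 node1; do scp *.gz $h:/root/; ssh $h 'for i in /root/*.gz; do docker load -i $i; done'; done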

Deploy keepalived + LVS for master-node high availability (making the apiserver highly available)

Delete everything in keepalived.conf and rewrite it, using 192.168.1.188 as the virtual IP.

[root@master1 ~]# yum install -y socat keepalived ipvsadm conntrack
[root@master1 ~]# systemctl enable kubelet
[root@master1 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.1.188
    }
}
virtual_server 192.168.1.188 6443 {
    delay_loop 6
    lb_algo loadbalance
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 192.168.1.11 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.1.12 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.1.13 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

The configuration on the other two masters differs only at these two lines:

line 7:  interface ens33   # the actual NIC name on that host
line 9:  priority 100      # the weight; give each master a different value (100 / 110 / 90 here)

[root@master2 ~]# vim /etc/keepalived/keepalived.conf
[root@master3 ~]# vim /etc/keepalived/keepalived.conf

Run the following on master1, master2, and master3 in turn.

[root@master1 ~]# systemctl enable keepalived.service && systemctl start keepalived.service
[root@master1 ~]# systemctl status keepalived.service
[root@master1 ~]# ip  add
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:9d:7b:09 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.188/32 scope global ens33
[root@master1 ~]# ping 192.168.1.188

Initialize the k8s cluster on master1.

[root@master1 ~]# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
controlPlaneEndpoint: 192.168.1.188:6443
apiServer:
  certSANs:
  # the nodes the certificate is generated for
  - 192.168.1.11
  - 192.168.1.12
  - 192.168.1.13
  - 192.168.1.20
  - 192.168.1.188
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind:  KubeProxyConfiguration
mode: ipvs
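Before running the real init, an optional dry check (a sketch) lists the image tags this config will use, so you can confirm they match the tarballs loaded earlier:

[root@master1 ~]# kubeadm config images list --config kubeadm-config.yaml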

Initialize the cluster.

[root@master1 ~]# kubeadm init --config kubeadm-config.yaml
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
    --discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750 \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
    --discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750

Run the following on master1 so that kubectl has permission to manage cluster resources.

[root@master1 ~]# mkdir -p $HOME/.kube
[root@master1 ~]# sudo cp -i  /etc/kubernetes/admin.conf  $HOME/.kube/config
[root@master1 ~]# sudo chown $(id -u):$(id -g)  $HOME/.kube/config
[root@master1 ~]# kubectl get nodes
NAME      STATUS     ROLES    AGE     VERSION
master1   NotReady   master   8m11s   v1.18.2
[root@master1 ~]# kubectl get pods -n kube-system
...
coredns-7ff77c879f-j48h6         0/1     Pending   0          3m16s
coredns-7ff77c879f-lrb77         0/1     Pending   0          3m16s

Because no network plugin has been installed yet, the node is still NotReady and coredns stays Pending; install Calico or Flannel.

[root@master1 ~]# docker load -i cni.tar.gz;docker load -i calico-node.tar.gz
[root@master1 ~]# vim calico.yaml
line 167:    value: "can-reach=192.168.1.11"    # IP_AUTODETECTION_METHOD: detect the node IP by reachability to master1
line 181:    value: "10.244.0.0/16"             # CALICO_IPV4POOL_CIDR: must match podSubnet in kubeadm-config.yaml
[root@master1 ~]# kubectl apply -f calico.yaml
[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
master1   Ready    master   37m   v1.18.2

master2 and master3 follow the same procedure.

Copy the certificates from master1 to master2 and master3

(1) Create the certificate directories on master2 and master3

[root@master2 ~]# cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/

(2) On master1, copy the certificates to master2 and master3

[root@master1 ~]# cd /etc/kubernetes/pki/
[root@master1 pki]# for i in ca* sa* front-proxy-ca*;do scp /etc/kubernetes/pki/$i master2:/etc/kubernetes/pki/;done
ca.crt                                            100% 1025   452.5KB/s   00:00
ca.key                                           100% 1679     1.5MB/s   00:00
sa.key                                           100% 1679     1.0MB/s   00:00
sa.pub                                           100%  451   176.1KB/s   00:00
front-proxy-ca.crt                                 100% 1038   369.9KB/s   00:00
front-proxy-ca.key                                100% 1679     1.4MB/s   00:00
[root@master1 pki]# scp /etc/kubernetes/pki/etcd/ca* master2:/etc/kubernetes/pki/etcd/
ca.crt                                     100% 1017   774.0KB/s   00:00
ca.key                                      100% 1017   774.0KB/s   00:00
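The listing above only shows the copy to master2; a short sketch that repeats it for master3 (or both masters, assuming passwordless SSH) is:

[root@master1 pki]# for h in master2 master3; do scp ca.crt ca.key sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key $h:/etc/kubernetes/pki/; scp etcd/ca.crt etcd/ca.key $h:/etc/kubernetes/pki/etcd/; done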

(3) After the certificates are copied, run the following on master2 and master3

These values were printed when master1 was initialized.

[root@master2 ~]# kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
    --discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750 \
    --control-plane
[root@master2 ~]# mkdir -p $HOME/.kube
[root@master2 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master2 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
Join node1 to the cluster (run on node1)
[root@node1 ~]# kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
    --discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750

Check the cluster node status from master1.

[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
master1   Ready    master   65m     v1.18.2
master2   Ready    master   5m15s   v1.18.2
master3   Ready    master   2m4s    v1.18.2
node1     Ready    <none>   29s     v1.18.2
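With all four nodes Ready, two optional checks (a sketch) confirm that the control plane is replicated and that LVS forwards to all three apiservers (run ipvsadm on whichever master currently holds the VIP):

[root@master1 ~]# kubectl get pods -n kube-system -o wide | grep kube-apiserver    # one apiserver pod per master
[root@master1 ~]# ipvsadm -Ln                                                      # 192.168.1.188:6443 with three real servers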

Install Traefik on every node; generate the certificate on master1.

[root@master1 ~]# docker load -i traefik_1_7_9.tar.gz
[root@master1 ~]# mkdir ikube/tls -p
[root@master1 ~]# echo """
[req]
distinguished_name = req_distinguished_name
prompt = yes

[ req_distinguished_name ]
countryName                   = Country Name (2 letter code)
countryName_value             = CN
stateOrProvinceName           = State or Province Name (full name)
stateOrProvinceName_value     = Beijing
localityName                  = Locality Name (eg, city)
localityName_value            = Haidian
organizationName              = Organization Name (eg, company)
organizationName_value        = Channelsoft
organizationalUnitName        = Organizational Unit Name (eg, section)
organizationalUnitName_value  = R & D Department
commonName                    = Common Name (eg, your name or your server's hostname)
commonName_value              = *.multi.io
emailAddress                  = Email Address
emailAddress_value            = lentil1016@gmail.com
""" > ikube/tls/openssl.cnf
[root@master1 ~]# openssl req -newkey rsa:4096 -nodes -config ~/ikube/tls/openssl.cnf -days 3650 -x509 -out ~/ikube/tls/tls.crt -keyout ~/ikube/tls/tls.key
[root@master1 ~]# kubectl create -n kube-system secret tls ssl --cert ~/ikube/tls/tls.crt --key ~/ikube/tls/tls.key
secret/ssl created
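Optionally inspect the certificate that was just loaded into the secret (a sketch) to confirm the wildcard CN and the validity window:

[root@master1 ~]# openssl x509 -in ~/ikube/tls/tls.crt -noout -subject -dates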
[root@master1 ~]# kubectl apply -f traefik.yaml
[root@master1 ~]# kubectl get pod -n kube-system -owide |grep traefik
traefik-ingress-controller-dqb6f   1/1     Running   0    62s   192.168.1.11   master1   <none>
traefik-ingress-controller-nwsk5   1/1     Running   0    62s   192.168.1.13   master3   <none>
traefik-ingress-controller-pwxpx   1/1     Running   0    62s   192.168.1.12   master2   <none>
traefik-ingress-controller-qxqkh   1/1     Running   0    62s   192.168.1.20   node1     <none>

Install kubernetes-dashboard v2.0

[root@master1 ~]# docker load -i dashboard_2_0_0.tar.gz ;docker load -i metrics-scrapter-1-0-1.tar.gz
[root@master1 ~]# kubectl apply -f kubernetes-dashboard.yaml
[root@master1 ~]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-694557449d-pj9cp   1/1     Running   0          16s
kubernetes-dashboard-5f98bdb684-h6c9t        1/1     Running   0          17s
[root@master1 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
dashboard-metrics-scraper   ClusterIP   10.107.91.74   <none>       8000/TCP   49s
kubernetes-dashboard        ClusterIP   10.99.59.88    <none>     443/TCP    50s

Change the Service type to NodePort (a ClusterIP service is only reachable from inside the cluster).

[root@master1 ~]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
# at line 30, change the value to:   type: NodePort
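The same change can also be made non-interactively (a sketch, equivalent to the edit above):

[root@master1 ~]# kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'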
[root@master1 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.107.91.74   <none>        8000/TCP        11m
kubernetes-dashboard        NodePort    10.99.59.88    <none>        443:30260/TCP   11m

Visit https://192.168.1.188/  # the login page loads; shut down any one master and refresh, and the login page is still reachable.

Log in to the dashboard with the default token created by the YAML file.

[root@master1 ~]# kubectl get secret -n kubernetes-dashboard
NAME                               TYPE                                  DATA   AGE
default-token-wzcwc                kubernetes.io/service-account-token   3      16m
kubernetes-dashboard-certs         Opaque                                0      16m
kubernetes-dashboard-csrf          Opaque                                1      16m
kubernetes-dashboard-key-holder    Opaque                                2      16m
kubernetes-dashboard-token-lqws6   kubernetes.io/service-account-token   3      16m
[root@master1 ~]# kubectl describe secret kubernetes-dashboard-token-lqws6 -n kubernetes-dashboard
...
token:      eyJhbGciOiJSUzI1NiIsI...   (a very long string — copy it)
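A convenience one-liner (a sketch; the secret name is the one from the listing above) prints just the decoded token:

[root@master1 ~]# kubectl -n kubernetes-dashboard get secret kubernetes-dashboard-token-lqws6 -o jsonpath='{.data.token}' | base64 -d; echo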

Log in again and paste the token.

At this point only resources in the default namespace are visible.

Create an administrator binding so the token can view every namespace.

[root@master1 ~]# kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
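A quick check (a sketch) that the binding took effect:

[root@master1 ~]# kubectl auth can-i list pods --all-namespaces --as=system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard    # expect: yes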

Refresh the page and you can now view and manage resources in every namespace.


Install the metrics plugin (at this point kubectl top does not work yet)

[root@master1 ~]# kubectl top nodes
error: Metrics API not available        # top is not usable yet
[root@master1 ~]# docker load -i metrics-server-amd64_0_3_1.tar.gz ;docker load -i addon.tar.gz
[root@master1 ~]# kubectl apply -f metrics.yaml
[root@master1 ~]# kubectl get pod -n kube-system -owide
metrics-server-8459f8db8c-r6mrz    2/2     Running   0          43s    10.244.3.4     node1
[root@master1 ~]#  kubectl top nodes
NAME      CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master1   225m         11%    1220Mi          70%
master2   171m         8%     1165Mi          67%
master3   150m         7%     1114Mi          64%
node1     97m          4%     671Mi           39%
[root@master1 ~]# kubectl top pods -n kube-system
NAME                               CPU(cores)   MEMORY(bytes)
calico-node-5rk6g                  21m          29Mi
coredns-66bff467f8-hnwkm         3m           10Mi
etcd-master1                       57m          89Mi

If you want to run kubectl from a machine outside the cluster:

mkdir ~/.kube
# copy /root/.kube/config and the kubectl binary from a master to the current host, e.g.
scp root@master1:/root/.kube/config ~/.kube/config
scp root@master1:/usr/bin/kubectl /usr/local/bin/
kubectl -s https://<master-IP>:6443 get nodes
