原因:在iptables代理模式下,Service的ClusterIP只存在于iptables的NAT规则中,并没有绑定到任何网络设备上,因此ICMP(ping)请求得不到响应;将kube-proxy切换为--proxy-mode=ipvs后,IPVS模式会把所有ClusterIP绑定到kube-ipvs0虚拟网卡上,ClusterIP即可ping通

[root@kubernetes bak4]# kubectl get pods -n kube-system
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-6694fb884c-mgn79             1/1     Running   0          146m
coredns-6694fb884c-ncqh6             1/1     Running   0          146m
etcd-kubernetes                      1/1     Running   11         49d
kube-apiserver-kubernetes            1/1     Running   10         49d
kube-controller-manager-kubernetes   1/1     Running   6          49d
kube-flannel-ds-amd64-5cv9n          1/1     Running   6          49d
kube-flannel-ds-amd64-6tzvm          1/1     Running   5          49d
kube-flannel-ds-amd64-827f9          1/1     Running   6          49d
kube-proxy-7ndzn                     1/1     Running   6          49d
kube-proxy-ft6wc                     1/1     Running   5          49d
kube-proxy-nvc4l                     1/1     Running   6          49d
kube-scheduler-kubernetes            1/1     Running   6          49d

kube-proxy日志中有告警:加载ip_vs相关内核模块失败,且proxy-mode为空,因此回退到了iptables模式
[root@kubernetes bak4]# kubectl logs -n kube-system kube-proxy-7ndzn
W1110 09:13:34.247156       1 proxier.go:493] Failed to load kernel module ip_vs with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.248189       1 proxier.go:493] Failed to load kernel module ip_vs_rr with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.250441       1 proxier.go:493] Failed to load kernel module ip_vs_wrr with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.251811       1 proxier.go:493] Failed to load kernel module ip_vs_sh with modprobe. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules
W1110 09:13:34.256361       1 server_others.go:295] Flag proxy-mode="" unknown, assuming iptables proxy
I1110 09:13:34.264897       1 server_others.go:148] Using iptables Proxier.
I1110 09:13:34.265123       1 server_others.go:178] Tearing down inactive rules.
I1110 09:13:34.282035       1 server.go:464] Version: v1.13.3
I1110 09:13:34.288611       1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_max' to 131072
I1110 09:13:34.288637       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 09:13:34.290663       1 conntrack.go:83] Setting conntrack hashsize to 32768
I1110 09:13:34.290831       1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
I1110 09:13:34.290892       1 conntrack.go:100] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600
I1110 09:13:34.291011       1 config.go:102] Starting endpoints config controller
I1110 09:13:34.291023       1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 09:13:34.291040       1 config.go:202] Starting service config controller
I1110 09:13:34.291044       1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 09:13:34.391530       1 controller_utils.go:1034] Caches are synced for service config controller
I1110 09:13:34.391624       1 controller_utils.go:1034] Caches are synced for endpoints config controller

[root@kubernetes bak4]# kubectl edit cm kube-proxy -n kube-system
需要修改的部分如下(关键是把 mode 设置为 "ipvs"):
    ipvs:
      excludeCIDRs: null
      minSyncPeriod: 0s
      scheduler: ""
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"

所有节点都需要加载IPVS相关内核模块(注意:内核4.19及以上版本已移除nf_conntrack_ipv4,应改为加载nf_conntrack)
[root@kubernetes bak4]# cat  /etc/sysconfig/modules/ipvs.modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4

[root@kubernetes bak4]# chmod 755 /etc/sysconfig/modules/ipvs.modules 
[root@kubernetes bak4]# bash /etc/sysconfig/modules/ipvs.modules

[root@kubernetes bak4]# lsmod |grep -e ip_vs -e nf_conntrack_ipv4
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 141092  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack_ipv4      15053  6 
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
nf_conntrack          133387  9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

重启kube-proxy:删除现有的kube-proxy Pod,由DaemonSet自动重建,使新配置生效
[root@kubernetes bak4]# kubectl get pods -n kube-system |grep kube-proxy
kube-proxy-7ndzn                     1/1     Running   6          49d
kube-proxy-ft6wc                     1/1     Running   5          49d
kube-proxy-nvc4l                     1/1     Running   6          49d

[root@kubernetes bak4]# kubectl get pods -n kube-system |grep kube-proxy|awk '{print $1}'| xargs kubectl delete pod  -n kube-system
pod "kube-proxy-7ndzn" deleted
pod "kube-proxy-ft6wc" deleted
pod "kube-proxy-nvc4l" deleted

已经无报错
[root@kubernetes-node2 ~]# kubectl logs -n kube-system kube-proxy-h6kwp
I1110 15:30:58.565092       1 server_others.go:189] Using ipvs Proxier.
W1110 15:30:58.565309       1 proxier.go:381] IPVS scheduler not specified, use rr by default
I1110 15:30:58.565420       1 server_others.go:216] Tearing down inactive rules.
I1110 15:30:58.603102       1 server.go:464] Version: v1.13.3
I1110 15:30:58.608057       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 15:30:58.608802       1 config.go:202] Starting service config controller
I1110 15:30:58.608813       1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 15:30:58.608910       1 config.go:102] Starting endpoints config controller
I1110 15:30:58.608915       1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 15:30:58.709075       1 controller_utils.go:1034] Caches are synced for endpoints config controller
I1110 15:30:58.709124       1 controller_utils.go:1034] Caches are synced for service config controller

[root@kubernetes-node2 ~]# kubectl logs -n kube-system kube-proxy-kbxcr
I1110 15:30:55.564636       1 server_others.go:189] Using ipvs Proxier.
W1110 15:30:55.564845       1 proxier.go:381] IPVS scheduler not specified, use rr by default
I1110 15:30:55.565141       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.1:443/TCP/192.168.73.133:6443
I1110 15:30:55.565179       1 graceful_termination.go:174] Deleting rs: 10.96.0.1:443/TCP/192.168.73.133:6443
I1110 15:30:55.565208       1 graceful_termination.go:160] Trying to delete rs: 192.168.73.172:31247/TCP/10.244.1.30:80
I1110 15:30:55.565220       1 graceful_termination.go:174] Deleting rs: 192.168.73.172:31247/TCP/10.244.1.30:80
I1110 15:30:55.565247       1 graceful_termination.go:160] Trying to delete rs: 192.168.73.168:31247/TCP/10.244.1.30:80
I1110 15:30:55.565259       1 graceful_termination.go:174] Deleting rs: 192.168.73.168:31247/TCP/10.244.1.30:80
I1110 15:30:55.565277       1 graceful_termination.go:160] Trying to delete rs: 192.168.73.133:31247/TCP/10.244.1.30:80
I1110 15:30:55.565288       1 graceful_termination.go:174] Deleting rs: 192.168.73.133:31247/TCP/10.244.1.30:80
I1110 15:30:55.565310       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:9153/TCP/10.244.2.22:9153
I1110 15:30:55.565325       1 graceful_termination.go:174] Deleting rs: 10.96.0.10:9153/TCP/10.244.2.22:9153
I1110 15:30:55.565335       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:9153/TCP/10.244.1.24:9153
I1110 15:30:55.565346       1 graceful_termination.go:174] Deleting rs: 10.96.0.10:9153/TCP/10.244.1.24:9153
I1110 15:30:55.565364       1 graceful_termination.go:160] Trying to delete rs: 192.168.73.101:31247/TCP/10.244.1.30:80
I1110 15:30:55.565376       1 graceful_termination.go:174] Deleting rs: 192.168.73.101:31247/TCP/10.244.1.30:80
I1110 15:30:55.565394       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/TCP/10.244.2.22:53
I1110 15:30:55.565428       1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/TCP/10.244.2.22:53
I1110 15:30:55.565441       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/TCP/10.244.1.24:53
I1110 15:30:55.565455       1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/TCP/10.244.1.24:53
I1110 15:30:55.565474       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/UDP/10.244.2.22:53
I1110 15:30:55.565487       1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/UDP/10.244.2.22:53
I1110 15:30:55.565497       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.10:53/UDP/10.244.1.24:53
I1110 15:30:55.565509       1 graceful_termination.go:174] Deleting rs: 10.96.0.10:53/UDP/10.244.1.24:53
I1110 15:30:55.565558       1 graceful_termination.go:160] Trying to delete rs: 10.97.64.43:80/TCP/10.244.1.30:80
I1110 15:30:55.565592       1 graceful_termination.go:174] Deleting rs: 10.97.64.43:80/TCP/10.244.1.30:80
I1110 15:30:55.565616       1 graceful_termination.go:160] Trying to delete rs: 10.244.0.0:31247/TCP/10.244.1.30:80
I1110 15:30:55.565629       1 graceful_termination.go:174] Deleting rs: 10.244.0.0:31247/TCP/10.244.1.30:80
I1110 15:30:55.565648       1 graceful_termination.go:160] Trying to delete rs: 127.0.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565664       1 graceful_termination.go:174] Deleting rs: 127.0.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565682       1 graceful_termination.go:160] Trying to delete rs: 172.17.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565693       1 graceful_termination.go:174] Deleting rs: 172.17.0.1:31247/TCP/10.244.1.30:80
I1110 15:30:55.565713       1 graceful_termination.go:160] Trying to delete rs: 10.96.0.3:10051/TCP/10.244.2.26:10051
I1110 15:30:55.565726       1 graceful_termination.go:174] Deleting rs: 10.96.0.3:10051/TCP/10.244.2.26:10051
I1110 15:30:55.565750       1 server_others.go:216] Tearing down inactive rules.
E1110 15:30:55.594545       1 proxier.go:432] Failed to execute iptables-restore for nat: exit status 1 (iptables-restore: line 7 failed
)
I1110 15:30:55.597338       1 server.go:464] Version: v1.13.3
I1110 15:30:55.602835       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 15:30:55.605060       1 config.go:102] Starting endpoints config controller
I1110 15:30:55.605073       1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 15:30:55.605293       1 config.go:202] Starting service config controller
I1110 15:30:55.605300       1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 15:30:55.705688       1 controller_utils.go:1034] Caches are synced for service config controller
I1110 15:30:55.705688       1 controller_utils.go:1034] Caches are synced for endpoints config controller

[root@kubernetes-node2 ~]# kubectl logs -n kube-system kube-proxy-s86dr 
I1110 15:31:00.779612       1 server_others.go:189] Using ipvs Proxier.
W1110 15:31:00.779923       1 proxier.go:381] IPVS scheduler not specified, use rr by default
I1110 15:31:00.779999       1 server_others.go:216] Tearing down inactive rules.
I1110 15:31:00.820185       1 server.go:464] Version: v1.13.3
I1110 15:31:00.824642       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1110 15:31:00.825227       1 config.go:202] Starting service config controller
I1110 15:31:00.825237       1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1110 15:31:00.825247       1 config.go:102] Starting endpoints config controller
I1110 15:31:00.825249       1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1110 15:31:00.925362       1 controller_utils.go:1034] Caches are synced for service config controller
I1110 15:31:00.925368       1 controller_utils.go:1034] Caches are synced for endpoints config controller

在容器内部测试成功:ClusterIP与Service域名均可ping通
bash-5.0$ ping 10.96.0.10
PING 10.96.0.10 (10.96.0.10) 56(84) bytes of data.
64 bytes from 10.96.0.10: icmp_seq=1 ttl=64 time=0.051 ms
64 bytes from 10.96.0.10: icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from 10.96.0.10: icmp_seq=3 ttl=64 time=0.052 ms
64 bytes from 10.96.0.10: icmp_seq=4 ttl=64 time=0.055 ms
^C
--- 10.96.0.10 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.051/0.052/0.055/0.007 ms

bash-5.0$ ping zabbix-server
PING zabbix-server.default.svc.cluster.local (10.96.0.3) 56(84) bytes of data.
64 bytes from zabbix-server.default.svc.cluster.local (10.96.0.3): icmp_seq=1 ttl=64 time=0.037 ms
64 bytes from zabbix-server.default.svc.cluster.local (10.96.0.3): icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from zabbix-server.default.svc.cluster.local (10.96.0.3): icmp_seq=3 ttl=64 time=0.047 ms
^C
--- zabbix-server.default.svc.cluster.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.037/0.045/0.052/0.008 ms

bash-5.0$ ping zabbix-web
PING zabbix-web.default.svc.cluster.local (10.97.64.43) 56(84) bytes of data.
64 bytes from zabbix-web.default.svc.cluster.local (10.97.64.43): icmp_seq=1 ttl=64 time=0.056 ms
64 bytes from zabbix-web.default.svc.cluster.local (10.97.64.43): icmp_seq=2 ttl=64 time=0.050 ms
64 bytes from zabbix-web.default.svc.cluster.local (10.97.64.43): icmp_seq=3 ttl=64 time=0.047 ms
^C
--- zabbix-web.default.svc.cluster.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 4ms
rtt min/avg/max/mdev = 0.047/0.051/0.056/0.003 ms

bash-5.0$ ping mysql-server
PING mysql-server.default.svc.cluster.local (10.99.100.149) 56(84) bytes of data.
64 bytes from mysql-server.default.svc.cluster.local (10.99.100.149): icmp_seq=1 ttl=64 time=0.046 ms
64 bytes from mysql-server.default.svc.cluster.local (10.99.100.149): icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from mysql-server.default.svc.cluster.local (10.99.100.149): icmp_seq=3 ttl=64 time=0.065 ms
^C
--- mysql-server.default.svc.cluster.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.046/0.054/0.065/0.010 ms

bash-5.0$ ping 10.99.100.149
PING 10.99.100.149 (10.99.100.149) 56(84) bytes of data.
64 bytes from 10.99.100.149: icmp_seq=1 ttl=64 time=0.137 ms
64 bytes from 10.99.100.149: icmp_seq=2 ttl=64 time=0.044 ms
^C
--- 10.99.100.149 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.044/0.090/0.137/0.047 ms

bash-5.0$ ping 10.96.0.3
PING 10.96.0.3 (10.96.0.3) 56(84) bytes of data.
64 bytes from 10.96.0.3: icmp_seq=1 ttl=64 time=0.057 ms
64 bytes from 10.96.0.3: icmp_seq=2 ttl=64 time=0.053 ms
^C
--- 10.96.0.3 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1ms
rtt min/avg/max/mdev = 0.053/0.055/0.057/0.002 ms
bash-5.0$

bash-5.0$ ping 10.97.64.43
PING 10.97.64.43 (10.97.64.43) 56(84) bytes of data.
64 bytes from 10.97.64.43: icmp_seq=1 ttl=64 time=0.051 ms
64 bytes from 10.97.64.43: icmp_seq=2 ttl=64 time=0.044 ms
^C
--- 10.97.64.43 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 2ms
rtt min/avg/max/mdev = 0.044/0.047/0.051/0.007 ms

k8s集群ClusterIP不能使用相关推荐

  1. 手把手从零开始搭建k8s集群超详细教程

    本教程根据B站课程云原生Java架构师的第一课K8s+Docker+KubeSphere+DevOps同步所做笔记教程 k8s集群搭建超详细教程 1. 基本环境搭建 1. 创建私有网络 2. 创建服务 ...

  2. 2、使用 kubeadm 方式快速部署K8S集群

    文章目录 一.kubernetes 官方提供的三种部署方式 二.使用kubeadm搭建k8s集群 2.1 基础环境设置 2.2 安装Docker 2.3 添加kubernetes软件源 2.4 安装k ...

  3. 【好文收藏】K8S集群部署CoreDNS服务

    K8S集群部署CoreDNS服务 k8s集群中的应用通常是通过ingress实现微服务发布的,前文介绍过在K8S集群中使用traefik实现服务的自动发布,其实现方式是traefik通过集群的DNS服 ...

  4. kubeadmin 安装 k8s集群

    文章目录 一 环境部署 1.1 服务器规划 1.2 环境准备 二: 所有节点安装docker 2.1 配置yum源,安装docker 2.2 配置daemon.json文件 三:所有节点安装kubea ...

  5. vmware 搭建k8s无法ping通子节点_一波四折 —— 记一次K8S集群应用故障排查

    一波四折--记一次K8S集群应用故障排查Part1 初露端倪 一个周四的下午,客户的报障打破了微信群的平静. "我们部署在自建K8S集群上的应用突然无法正常访问了,现在业务受到了影响!&qu ...

  6. centos8搭建k8s集群

    1. 系统初始化 关闭防火墙 systemctl disable firewalld 关闭swap sed -ri 's/.*swap.*/#&/' /etc/fstab 关闭selinux ...

  7. k8s集群PHP环境使用

    一.环境介绍 k8s版本: 1.15.2 存储: 阿里云NAS 测试代码: wordpress 二.下载wordpress和创建好数据库等 1.下载wordpress wget https://cn. ...

  8. g hub安装失败_树莓派k8s集群安装mysql及监控

    安装准备 树莓派k8s集群 root@pi4-master01:~# kubectl get nodes -o wideNAME STATUS ROLES AGE VERSION INTERNAL-I ...

  9. 备份k8s_树莓派k8s集群安装kafka集群及监控

    安装准备 树莓派k8s集群 root@pi4-master01:~# kubectl get nodes -o wideNAME STATUS ROLES AGE VERSION INTERNAL-I ...

最新文章

  1. 战地1如何修改服务器地址,《战地1》服务器加入方法一览
  2. shell 下的运算表达
  3. 上海张江又一款AI芯发布!性能超英伟达T4,AMD背景团队创办,快手投资
  4. 2015.08.15冒泡排序
  5. PAT-1124. Raffle for Weibo Followers (20)
  6. mysql查询两个日期之间的数据
  7. STP端口状态特点、STP端口角色特点、链路状态发生变化,STP如何重新收敛? TCN何时发?uplinkfast技术、Portfast技术、backbonefast技术、常见的STP调整命令:
  8. php精准函数,PHP常用函数大全
  9. c:递归算法的三个demo:八皇后问题、台阶问题、汉诺塔
  10. 2059 mysql
  11. 深山红叶袖珍PE系统工具箱说明 V22 正式版
  12. c语言 标准正态分布表,标准正态分布表
  13. python 拼接 遥感影像_如何用Python| 制作遥感影像拼接
  14. 已解决urllib.error.HTTPError: HTTP Error 403: Forbidden
  15. Qt模仿QQ登录界面(一)
  16. python第一天----爬取优美图库的图片
  17. Android在中国的发展及就业前景解析
  18. Android手机投屏后没有声音,乐播投屏电视没有声音怎么办_乐播投屏电视没有声音手机有声音解决办法_3DM手游...
  19. 2020秋季甲级PAT 7-4 Professional Ability Test (30 分)
  20. JS实现随机抽奖功能

热门文章

  1. 提示Could not calculate build plan Plugin org.apache.maven.pluginsmaven-resources
  2. 数据架构:数据中心 主备、双活
  3. 域用户如何更改计算机名,加入域后的计算机,怎么改计算机名?
  4. Atitit 手机号码选号 规范 流程 attilax总结 v2 r99.docx
  5. 网络安全特训之——网络信息安全攻防学习平台(选择题)
  6. 南京大学交叉培养计算机与金融招生人数,教务处组织召开计算机与金融工程实验班师生见面会...
  7. 蓝牙也宽带 诺基亚3230蓝牙共享宽带教程(内网用户设置或网关无响应的解决)
  8. 获取crumbIssuer
  9. 高仿微信上划取消录音
  10. matlab批量处理程序设计,Matlab实现批量处理图像的两种方法