k8s & Docker Installation

# Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
# Configure kernel parameters:
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
# For offline installation, the packages below can be pre-downloaded with: yum reinstall --downloadonly --downloaddir=~
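# To apply the kernel parameters above immediately (the bridge settings assume the br_netfilter module is loaded), something like this should work:
modprobe br_netfilter
sysctl --system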
#wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo
#sudo sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
wget -O /etc/yum.repos.d/aliyun-docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast
sudo yum remove docker docker-common docker-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Install Docker; before installing, you can list the versions available in the repo with: yum list docker-ce --showduplicates | sort -r
sudo yum install -y  docker-ce
sudo systemctl enable docker
sudo systemctl start docker
sudo systemctl status docker
docker --version
# Configure the cgroup driver
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Stop all containers
docker stop $(docker ps -q)
# Remove all containers
docker rm $(docker ps -aq)
# Remove all images
docker rmi `docker images -q`
# Prerequisites for enabling IPVS in kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Load the modules
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Install the ipset package
sudo yum install ipset -y
# Install the ipvsadm management tool
sudo yum install ipvsadm -y
---------------------------------------------------------------------------------------------------
# Add the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# If you hit "signature could not be verified for kubernetes", adjust gpgcheck: with gpgcheck=1 the signature is verified and that error appears; setting it to 0 skips the check.
sudo yum list kubelet --showduplicates | sort -r
sudo yum install kubelet-1.23.1-0 kubeadm-1.23.1-0 kubectl-1.23.1-0 -y
sudo systemctl enable --now kubelet
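# Optional sanity check that the pinned 1.23.1 versions landed (the upstream docs also suggest adding exclude=kubelet kubeadm kubectl to the repo file so a later yum update does not bump them):
kubeadm version -o short
kubectl version --client --short
kubelet --version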
---------------------------------------------------------------------------------------------------
# List the k8s images that need to be downloaded
kubeadm config images list
# Pull them offline via a script
#!/bin/bash
for i in k8s.gcr.io/kube-apiserver:v1.23.1 k8s.gcr.io/kube-controller-manager:v1.23.1 k8s.gcr.io/kube-scheduler:v1.23.1 k8s.gcr.io/kube-proxy:v1.23.1 k8s.gcr.io/pause:3.6 k8s.gcr.io/etcd:3.5.1-0 k8s.gcr.io/coredns:v1.8.6; do
  temp=${i#k8s.gcr.io/}
  docker pull registry.aliyuncs.com/google_containers/${temp}
  docker tag registry.aliyuncs.com/google_containers/${temp} k8s.gcr.io/${temp}
  docker rmi registry.aliyuncs.com/google_containers/${temp}
done
# Or configure a Docker registry mirror for pulling the images
vim /etc/sysconfig/docker
OPTIONS='--selinux-enabled --log-driver=journald --registry-mirror=http://xxxx.mirror.aliyuncs.com'
#!/bin/bash
images=(kube-apiserver:v1.23.1 kube-controller-manager:v1.23.1 kube-scheduler:v1.23.1 kube-proxy:v1.23.1 pause:3.6 etcd:3.5.1-0 coredns/coredns:v1.8.6)
for imageName in ${images[@]} ; do
  docker pull keveon/$imageName
  docker tag keveon/$imageName k8s.gcr.io/$imageName
  docker rmi keveon/$imageName
done
# Or do it manually:
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1  k8s.gcr.io/kube-apiserver:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1  k8s.gcr.io/kube-controller-manager:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1  k8s.gcr.io/kube-scheduler:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1  k8s.gcr.io/kube-proxy:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6  k8s.gcr.io/pause:3.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0  k8s.gcr.io/etcd:3.5.1-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6  k8s.gcr.io/coredns/coredns:v1.8.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/elasticsearch:7.16.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/elasticsearch:7.16.2  google_containers/elasticsearch:7.16.2
# Set Docker's cgroup driver
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Set the kubelet cgroup driver
cat <<EOF>> /etc/sysconfig/kubelet
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
EOF
sudo systemctl restart docker
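# After the restart it is worth confirming Docker picked up the systemd cgroup driver configured above:
docker info | grep -i "cgroup driver"    # expected: Cgroup Driver: systemd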
# On the master, run kubeadm init to bootstrap the cluster: the pod network is 10.244.0.0/16 and the service network is 10.1.0.0/16 (both cluster-internal); the API server advertise address is the master node IP
kubeadm init \
--kubernetes-version=1.23.1 \
--apiserver-advertise-address=10.0.20.1 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
---------------------------------------------------------------------------------------------------
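# When init succeeds, kubeadm prints follow-up steps; for a regular (non-root) user the kubeconfig is normally set up like this (the export KUBECONFIG line later in this section is the root-shell alternative):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config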
# Flannel network configuration
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Or use Calico networking instead
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
wget https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
vim calico.yaml
- name: CALICO_IPV4POOL_IPIP
  value: "off"
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens.*"
replicas: 1
revisionHistoryLimit: 2
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
kubectl apply -f calico.yaml
---------------------------------------------------------------------------------------------------
# Join worker (slave) nodes to the cluster with kubeadm join
kubeadm join 10.0.20.1:6443 --token kjgine.g0fafdff1ro505wj \
    --discovery-token-ca-cert-hash sha256:da5a7952ef25b8a4eb77d46aa4765009fd5d9a4f1ced493d5698af361ba5d07d
# If you forget this command later, regenerate it with:
kubeadm token create --print-join-command
# To remove a node from the cluster
kubectl delete node demo01
# Run on the node being removed
kubeadm reset -f
# If kubectl reports "The connection to the server localhost:8080 was refused - did you specify the right host or port?"
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
source /etc/profile
# Check node status
kubectl get nodes
# Check pod status
kubectl get pods --all-namespaces
kubectl get ns
# Check K8s cluster component status
kubectl get cs
kubectl get pods -n kube-system
# Configure cluster node roles
kubectl label node <node-name> node-role.kubernetes.io/worker=worker
kubectl label node --all node-role.kubernetes.io/worker=worker
# To also use the master node as a worker, remove its taint:
kubectl taint nodes <node-name> node-role.kubernetes.io/master-
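# Optional check that kube-proxy actually switched to IPVS once the cluster is up (k8s-app=kube-proxy is the default kubeadm label):
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs
ipvsadm -Ln    # should list virtual servers for the service network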

Kuboard Dashboard Deployment

kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
# Alternatively, use the command below; the only difference is that it pulls the Kuboard images from the Huawei Cloud registry instead of Docker Hub
# kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3-swr.yaml
watch kubectl get pods -n kuboard
# Open http://your-node-ip-address:30080 in a browser
Log in with the initial username and password:
Username: admin
Password: Kuboard123
# To uninstall:
kubectl delete -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
# Run on the master node and on any nodes labeled k8s.kuboard.cn/role=etcd
rm -rf /usr/share/kuboard

KubeSphere Dashboard Deployment

kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
#kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml
#kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml
# Run "watch kubectl get pods -n kuboard" and wait until all Pods in the kuboard namespace are Ready.
# Access the web console via NodePort (IP:30880) with the default account and password (admin/P@88w0rd).
# To install kubernetes_v1.23.1 with KuboardSpray instead:
docker run -d \
  --restart=unless-stopped \
  --name=kuboard-spray \
  -p 80:80/tcp \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v ~/kuboard-spray-data:/data \
  eipwork/kuboard-spray:latest-amd64
# If the image cannot be pulled, try this mirror instead:
# swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard-spray:latest-amd64

Kubernetes Dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
kubectl apply -f https://kuboard.cn/install-script/k8s-dashboard/v2.0.0-beta5.yaml

ES StatefulSet Local Deployment

# Using an NFS share for the data directory is recommended
#mkdir -p /data/share/pv/es
mkdir -p /data/es
# Create the namespace
cat <<EOF>> elastic.namespace.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch
---
EOF
kubectl apply -f elastic.namespace.yaml
---------------------------------------------------------------------------------------------------
# Configure the StorageClass used to create PVCs and bind them to PVs
cat <<EOF>> sc.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-storage-pv-1
  namespace: elasticsearch
  labels:
    name: local-storage-pv-1
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/es
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - demo01
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-storage-pv-2
  namespace: elasticsearch
  labels:
    name: local-storage-pv-2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/es
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - demo02
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-storage-pv-3
  namespace: elasticsearch
  labels:
    name: local-storage-pv-3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/es
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - demo03
#---
#apiVersion: v1
#kind: PersistentVolume
#metadata:
#  name: local-storage-pv-4
#  namespace: elasticsearch
#  labels:
#    name: local-storage-pv-4
#spec:
#  capacity:
#    storage: 1Gi
#  accessModes:
#  - ReadWriteOnce
#  persistentVolumeReclaimPolicy: Retain
#  storageClassName: local-storage
#  local:
#    path: /data/es
#  nodeAffinity:
#    required:
#      nodeSelectorTerms:
#      - matchExpressions:
#        - key: kubernetes.io/hostname
#          operator: In
#          values:
#          - node1
#---
#apiVersion: v1
#kind: PersistentVolume
#metadata:
#  name: local-storage-pv-5
#  namespace: elasticsearch
#  labels:
#    name: local-storage-pv-5
#spec:
#  capacity:
#    storage: 1Gi
#  accessModes:
#  - ReadWriteOnce
#  persistentVolumeReclaimPolicy: Retain
#  storageClassName: local-storage
#  local:
#    path: /data/es
#  nodeAffinity:
#    required:
#      nodeSelectorTerms:
#      - matchExpressions:
#        - key: kubernetes.io/hostname
#          operator: In
#          values:
#          - node2
EOF
# There are five PVs in total; each is pinned to a specific k8s node via nodeSelectorTerms.
---------------------------------------------------------------------------------------------------
# Create the StatefulSet; ES is a database-style application, for which the StatefulSet workload type is appropriate
cat <<EOF>> sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es7-cluster
  namespace: elasticsearch
spec:
  serviceName: elasticsearch7
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch7
  template:
    metadata:
      labels:
        app: elasticsearch7
    spec:
      containers:
      - name: elasticsearch7
        image: elasticsearch:7.16.2
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200
          name: rest
          protocol: TCP
        - containerPort: 9300
          name: inter-node
          protocol: TCP
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: k8s-logs
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: discovery.zen.minimum_master_nodes
          value: "2"
        - name: discovery.seed_hosts
          value: "es7-cluster-0.elasticsearch7,es7-cluster-1.elasticsearch7,es7-cluster-2.elasticsearch7"
        - name: cluster.initial_master_nodes
          value: "es7-cluster-0,es7-cluster-1,es7-cluster-2"
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
      initContainers:
      - name: fix-permissions
        image: busybox:1.35.0
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
      - name: increase-vm-max-map
        image: busybox:1.35.0
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox:1.35.0
        command: ["sh", "-c", "ulimit -n 65536"]
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "local-storage"
      resources:
        requests:
          storage: 10Gi
EOF
# The ES cluster references the StorageClass through volumeClaimTemplates and binds the matching PVs automatically.
---------------------------------------------------------------------------------------------------
# Create a NodePort Service to expose the ES cluster
cat <<EOF>> svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch7
  namespace: elasticsearch
spec:
  selector:
    app: elasticsearch7
  type: NodePort
  ports:
  - port: 9200
    nodePort: 30002
    targetPort: 9200
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f sc.yaml
kubectl apply -f pv.yaml
kubectl apply -f sts.yaml
kubectl apply -f svc.yaml
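# Once all three pods are Running, a quick smoke test through the NodePort defined above (30002; 10.0.20.1 is just an example node IP):
curl http://10.0.20.1:30002/_cluster/health?pretty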
---------------------------------------------------------------------------------------------------
# Current resource status
kubectl get sc
[root@demo01 ~]# kubectl get sc
NAME            PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-storage   kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  4m45s
#PV
[root@demo01 ~]# kubectl get pv
NAME                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS    REASON   AGE
local-storage-pv-1   1Gi        RWO            Retain           Available           local-storage            3m28s
local-storage-pv-2   1Gi        RWO            Retain           Available           local-storage            3m28s
local-storage-pv-3   1Gi        RWO            Retain           Available           local-storage            3m28s
local-storage-pv-4   1Gi        RWO            Retain           Available           local-storage            3m28s
local-storage-pv-5   1Gi        RWO            Retain           Available           local-storage            3m28s
#StatefulSet
[root@demo01 elk]# kubectl get pods -n elasticsearch
[root@demo01 ~]# kubectl get statefulset -n elasticsearch
NAME          READY   AGE
es7-cluster   3/3     57m
[root@master1 tmp]# watch kubectl get pod -n elasticsearch
NAME            READY   STATUS    RESTARTS   AGE
es7-cluster-0   1/1     Running   0          18m
es7-cluster-1   1/1     Running   0          18m
es7-cluster-2   1/1     Running   0          54m

NFS Deployment

yum -y install nfs-utils rpcbind
mkdir -p /data/k8s
chmod 755 /data/k8s
vim /etc/exports
/data/k8s  10.0.0.0/8(rw,sync,no_root_squash)
systemctl start rpcbind.service
systemctl start nfs.service
journalctl -xlu nfs
# On the client (accessing) nodes, install the NFS client
yum -y install nfs-utils
systemctl start nfs && systemctl enable nfs
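# Optional check from a client node that the export is visible (10.0.21.1 is the NFS server IP used in the manifests below):
showmount -e 10.0.21.1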

ElasticSearch NFS Deployment

# Create the namespace
cat <<EOF>> elastic.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch-ns
EOF
kubectl apply -f elastic.namespace.yaml
kubectl get ns
---------------------------------------------------------------------------------------------------
#svc
# If only cluster-internal access is needed
cat <<EOF>> elasticsearch-svc.yaml
#kind: Service
#apiVersion: v1
#metadata:
#  name: elasticsearch
#  namespace: elasticsearch-ns
#  labels:
#    app: elasticsearch
#spec:
#  selector:
#    app: elasticsearch
#  clusterIP: None
#  ports:
#    - port: 9200
#      name: rest
#    - port: 9300
#      name: inter-node
# If the service needs to be exposed externally:
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: elasticsearch-ns
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  type: NodePort
  ports:
    - port: 9200
      name: rest
      targetPort: 9200
      nodePort: 31200
    - port: 9300
      targetPort: 9300
      nodePort: 31300
      name: inter-node
EOF
kubectl apply -f elasticsearch-svc.yaml
kubectl get svc -n elasticsearch-ns
kubectl edit svc elasticsearch  -n elasticsearch-ns
---------------------------------------------------------------------------------------------------
# Create the StatefulSet manifest
cat <<EOF>> elasticsearch-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es
  namespace: elasticsearch-ns
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      nodeSelector:
        es: log
      initContainers:
      - name: increase-vm-max-map
        image: busybox:1.35.0
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox:1.35.0
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.16.2
        ports:
        - name: rest
          containerPort: 9200
        - name: inter
          containerPort: 9300
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 1000m
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: k8s-logs
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: cluster.initial_master_nodes
          value: "es-0,es-1,es-2"
        - name: discovery.zen.minimum_master_nodes
          value: "2"
        - name: discovery.seed_hosts
          value: "elasticsearch"
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        - name: network.host
          value: "0.0.0.0"
  volumeClaimTemplates:
  - metadata:
      name: data
      labels:
        app: elasticsearch
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: es-data-db
      resources:
        requests:
          storage: 50Gi
EOF
# Note the nodeSelector: every node that should run ES must carry the label es=log, otherwise the cluster will not schedule; apply the label with the command below.
# Alternatively, change "es: log" to kubernetes.io/worker=worker
kubectl label nodes <node-name> es=log
kubectl get nodes --show-labels
---------------------------------------------------------------------------------------------------
# Create the provisioner, using the nfs-client automatic provisioner
#kubectl explain DaemonSet.apiVersion
cat <<EOF>> nfs-client.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  #namespace: elasticsearch-ns
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        # Because the SelfLink field was removed (it carries no new information), switch to nfs-subdir-external-provisioner
        #image: quay.io/external_storage/nfs-client-provisioner:latest
        image: registry.cn-shenzhen.aliyuncs.com/shuhui/nfs-subdir-external-provisioner:v4.0.2
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: fuseim.pri/ifs
        - name: NFS_SERVER
          value: 10.0.21.1
        - name: NFS_PATH
          value: /data/k8s
      volumes:
      - name: nfs-client-root
        nfs:
          server: 10.0.21.1
          path: /data/k8s
EOF
---------------------------------------------------------------------------------------------------
# Create the ServiceAccount and bind the required RBAC permissions
cat <<EOF>>  nfs-client-sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  #namespace: elasticsearch-ns
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
EOF
---------------------------------------------------------------------------------------------------
# Create the StorageClass
cat <<EOF>> elasticsearch-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-data-db
  #namespace: elasticsearch-ns
provisioner: fuseim.pri/ifs
EOF
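# Once the provisioner and this StorageClass are applied (next step), dynamic provisioning can be sanity-checked with a throwaway PVC; the claim name here is made up for the test:
cat <<EOF> test-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-nfs-claim
spec:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: es-data-db
  resources:
    requests:
      storage: 1Gi
EOF
kubectl apply -f test-pvc.yaml
kubectl get pvc test-nfs-claim    # should reach Bound if the provisioner works
kubectl delete -f test-pvc.yaml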
---------------------------------------------------------------------------------------------------
# Deploy
kubectl create -f nfs-client.yaml
kubectl create -f nfs-client-sa.yaml
kubectl create -f elasticsearch-storageclass.yaml
kubectl get po -n elasticsearch-ns
kubectl describe pvc data-es-0  -n elasticsearch-ns
kubectl logs nfs-client-provisioner-5c66746f46-hrlqm
---------------------------------------------------------------------------------------------------
# Now deploy the elasticsearch StatefulSet resource directly with kubectl
kubectl create -f elasticsearch-statefulset.yaml
kubectl get sts -n elasticsearch-ns
kubectl get po -n elasticsearch-ns
kubectl get pv
kubectl get pods
kubectl describe pods
kubectl describe pod nfs-client-provisioner-5c66746f46-w277s
# For remote access, use the command below to forward local port 9200 to the corresponding port of an Elasticsearch pod (e.g. es-0)
kubectl port-forward es-0 9200:9200 --namespace=elasticsearch-ns
# Test
curl http://localhost:9200/
curl http://localhost:9200/_cluster/state?pretty
#docker pull quay.io/external_storage/nfs-client-provisioner:latest --registry-mirror=https://docker.mirrors.ustc.edu.cn
---------------------------------------------------------------------------------------------------
kubectl apply -f elastic.namespace.yaml
kubectl apply -f elasticsearch-svc.yaml
kubectl create -f nfs-client.yaml
kubectl create -f nfs-client-sa.yaml
kubectl create -f elasticsearch-storageclass.yaml
kubectl create -f elasticsearch-statefulset.yaml
# Teardown:
kubectl delete -f elasticsearch-statefulset.yaml
kubectl delete -f elasticsearch-storageclass.yaml
kubectl delete -f nfs-client-sa.yaml
kubectl delete -f nfs-client.yaml
kubectl delete -f elasticsearch-svc.yaml
kubectl delete -f elastic.namespace.yaml

Kibana Deployment

cat <<EOF>> kibana.yaml
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: elasticsearch
  labels:
    app: kibana
spec:
  ports:
  - port: 5601
    targetPort: 5601
    nodePort: 30001
  type: NodePort
  selector:
    app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: elasticsearch
  labels:
    app: kibana
spec:
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      nodeSelector:
        node: node2
      containers:
      - name: kibana
        image: kibana:7.16.2
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 1000m
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch7:9200
        - name: SERVER_PUBLICBASEURL
          value: "0.0.0.0:5601"
        - name: I18N.LOCALE
          value: zh-CN
        ports:
        - containerPort: 5601
EOF
kubectl apply -f kibana.yaml
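# Once the Deployment is Ready, Kibana should answer on NodePort 30001 of any node (10.0.20.1 is just an example IP):
kubectl get pods -n elasticsearch -l app=kibana
curl -I http://10.0.20.1:30001/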

zookeeper & kafka Deployment (leolee32 image)

# Create the namespace
cat <<EOF>> zk-kafka.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: zk-kafka
  labels:
    name: zk-kafka
EOF
kubectl apply -f zk-kafka.namespace.yaml
---------------------------------------------------------------------------------------------------
# Configure the PVs
cat <<EOF>>  zk_pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:namespace: zk-kafkaname: zk-data1
spec:capacity:storage: 10GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: /data/k8s/zk/data1
---
apiVersion: v1
kind: PersistentVolume
metadata:namespace: zk-kafkaname: zk-data2
spec:capacity:storage: 10GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: /data/k8s/zk/data2
---
apiVersion: v1
kind: PersistentVolume
metadata:namespace: zk-kafkaname: zk-data3
spec:capacity:storage: 10GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: /data/k8s/zk/data3
EOF
mkdir /data/k8s/zk/{data1,data2,data3}
kubectl apply -f zk_pv.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> zk.ymal
apiVersion: v1
kind: Service
metadata:
  namespace: zk-kafka
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  namespace: zk-kafka
  name: zk-cs
  labels:
    app: zk
spec:
  type: NodePort
  ports:
  - port: 2181
    targetPort: 2181
    name: client
    nodePort: 32181
  selector:
    app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  namespace: zk-kafka
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: zk-kafka
  name: zok
spec:
  serviceName: zk-hs
  replicas: 3
  selector:
    matchLabels:
      app: zk
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: Always
        image: leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi
EOF
kubectl apply -f zk.ymal
---------------------------------------------------------------------------------------------------
mkdir -p /data/k8s/kafka/{data1,data2,data3}
cat <<EOF>> kafka_pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:namespace: zk-kafkaname: kafka-data1
spec:capacity:storage: 10GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: /data/k8s/kafka/data1
---
apiVersion: v1
kind: PersistentVolume
metadata:namespace: zk-kafkaname: kafka-data2
spec:capacity:storage: 10GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: /data/k8s/kafka/data2
---
apiVersion: v1
kind: PersistentVolume
metadata:namespace: zk-kafkaname: kafka-data3
spec:capacity:storage: 10GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: /data/k8s/kafka/data3
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka.yaml
apiVersion: v1
kind: Service
metadata:namespace: zk-kafkaname: kafka-hslabels:app: kafka
spec:ports:- port: 1099name: jmxclusterIP: Noneselector:app: kafka
---
apiVersion: v1
kind: Service
metadata:namespace: zk-kafkaname: kafka-cslabels:app: kafka
spec:type: NodePortports:- port: 9092targetPort: 9092name: clientnodePort: 9092selector:app: kafka
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:namespace: zk-kafkaname: kafka-pdb
spec:selector:matchLabels:app: kafkamaxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:namespace: zk-kafkaname: kafoka
spec:serviceName: kafka-hsreplicas: 3selector:matchLabels:app: kafkatemplate:metadata:labels:app: kafkaspec:affinity:podAntiAffinity:requiredDuringSchedulingIgnoredDuringExecution:- labelSelector:matchExpressions:- key: "app"operator: Invalues:- kafkatopologyKey: "kubernetes.io/hostname"containers:- name: k8skafkaimagePullPolicy: Alwaysimage: leey18/k8skafkaresources:requests:memory: "1Gi"cpu: "0.5"ports:- containerPort: 9092name: client- containerPort: 1099name: jmxcommand:- sh- -c- "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \--override listeners=PLAINTEXT://:9092 \--override zookeeper.connect=zok-0.zk-hs.zk-kafka.svc.cluster.local:2181,zok-1.zk-hs.zk-kafka.svc.cluster.local:2181,zok-2.zk-hs.zk-kafka.svc.cluster.local:2181 \--override log.dirs=/var/lib/kafka \--override auto.create.topics.enable=true \--override auto.leader.rebalance.enable=true \--override background.threads=10 \--override compression.type=producer \--override delete.topic.enable=false \--override leader.imbalance.check.interval.seconds=300 \--override leader.imbalance.per.broker.percentage=10 \--override log.flush.interval.messages=9223372036854775807 \--override log.flush.offset.checkpoint.interval.ms=60000 \--override log.flush.scheduler.interval.ms=9223372036854775807 \--override log.retention.bytes=-1 \--override log.retention.hours=168 \--override log.roll.hours=168 \--override log.roll.jitter.hours=0 \--override log.segment.bytes=1073741824 \--override log.segment.delete.delay.ms=60000 \--override message.max.bytes=1000012 \--override min.insync.replicas=1 \--override num.io.threads=8 \--override num.network.threads=3 \--override num.recovery.threads.per.data.dir=1 \--override num.replica.fetchers=1 \--override offset.metadata.max.bytes=4096 \--override offsets.commit.required.acks=-1 \--override offsets.commit.timeout.ms=5000 \--override offsets.load.buffer.size=5242880 \--override offsets.retention.check.interval.ms=600000 \--override offsets.retention.minutes=1440 \--override offsets.topic.compression.codec=0 \--override offsets.topic.num.partitions=50 \--override offsets.topic.replication.factor=3 \--override offsets.topic.segment.bytes=104857600 \--override queued.max.requests=500 \--override quota.consumer.default=9223372036854775807 \--override quota.producer.default=9223372036854775807 \--override replica.fetch.min.bytes=1 \--override replica.fetch.wait.max.ms=500 \--override replica.high.watermark.checkpoint.interval.ms=5000 \--override replica.lag.time.max.ms=10000 \--override replica.socket.receive.buffer.bytes=65536 \--override replica.socket.timeout.ms=30000 \--override request.timeout.ms=30000 \--override socket.receive.buffer.bytes=102400 \--override socket.request.max.bytes=104857600 \--override socket.send.buffer.bytes=102400 \--override unclean.leader.election.enable=true \--override zookeeper.session.timeout.ms=6000 \--override zookeeper.set.acl=false \--override broker.id.generation.enable=true \--override connections.max.idle.ms=600000 \--override controlled.shutdown.enable=true \--override controlled.shutdown.max.retries=3 \--override controlled.shutdown.retry.backoff.ms=5000 \--override controller.socket.timeout.ms=30000 \--override default.replication.factor=1 \--override fetch.purgatory.purge.interval.requests=1000 \--override group.max.session.timeout.ms=300000 \--override group.min.session.timeout.ms=6000 \--override inter.broker.protocol.version=0.10.2-IV0 \--override log.cleaner.backoff.ms=15000 \--override 
log.cleaner.dedupe.buffer.size=134217728 \--override log.cleaner.delete.retention.ms=86400000 \--override log.cleaner.enable=true \--override log.cleaner.io.buffer.load.factor=0.9 \--override log.cleaner.io.buffer.size=524288 \--override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \--override log.cleaner.min.cleanable.ratio=0.5 \--override log.cleaner.min.compaction.lag.ms=0 \--override log.cleaner.threads=1 \--override log.cleanup.policy=delete \--override log.index.interval.bytes=4096 \--override log.index.size.max.bytes=10485760 \--override log.message.timestamp.difference.max.ms=9223372036854775807 \--override log.message.timestamp.type=CreateTime \--override log.preallocate=false \--override log.retention.check.interval.ms=300000 \--override max.connections.per.ip=2147483647 \--override num.partitions=1 \--override producer.purgatory.purge.interval.requests=1000 \--override replica.fetch.backoff.ms=1000 \--override replica.fetch.max.bytes=1048576 \--override replica.fetch.response.max.bytes=10485760 \--override reserved.broker.max.id=1000 "env:- name: KAFKA_HEAP_OPTSvalue : "-Xmx512M -Xms512M"- name: KAFKA_OPTSvalue: "-Dlogging.level=INFO"volumeMounts:- name: kafkadatadirmountPath: /var/lib/kafkareadinessProbe:exec:command:- sh- -c- "/opt/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server=localhost:9092"volumeClaimTemplates:- metadata:name: kafkadatadirspec:accessModes: [ "ReadWriteOnce" ]resources:requests:storage: 10Gi
EOF
---------------------------------------------------------------------------------------------------

zk & kafka Deployment

# Create the namespace
cat <<EOF>> zookeeper.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: zk-kafka
  labels:
    name: zk-kafka
EOF
kubectl apply -f zookeeper.namespace.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> zookeeper-svc.yaml
apiVersion: v1
kind: Service
metadata:name: zoo1namespace: zk-kafkalabels:app: zookeeper-1
spec:ports:- name: clientport: 2181protocol: TCP- name: followerport: 2888protocol: TCP- name: leaderport: 3888protocol: TCPselector:app: zookeeper-1
---
apiVersion: v1
kind: Service
metadata:name: zoo2namespace: zk-kafkalabels:app: zookeeper-2
spec:ports:- name: clientport: 2181protocol: TCP- name: followerport: 2888protocol: TCP- name: leaderport: 3888protocol: TCPselector:app: zookeeper-2
---
apiVersion: v1
kind: Service
metadata:name: zoo3namespace: zk-kafkalabels:app: zookeeper-3
spec:ports:- name: clientport: 2181protocol: TCP- name: followerport: 2888protocol: TCP- name: leaderport: 3888protocol: TCPselector:app: zookeeper-3
EOF
kubectl apply -f zookeeper-svc.yaml
---------------------------------------------------------------------------------------------------
cat > zookeeper-sts.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-deployment-1
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper-1
      name: zookeeper-1
  template:
    metadata:
      labels:
        app: zookeeper-1
        name: zookeeper-1
    spec:
      containers:
      - name: zoo1
        image: zookeeper:3.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "1"
        - name: ZOO_SERVERS
          value: "server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-deployment-2
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper-2
      name: zookeeper-2
  template:
    metadata:
      labels:
        app: zookeeper-2
        name: zookeeper-2
    spec:
      containers:
      - name: zoo2
        image: zookeeper:3.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "2"
        - name: ZOO_SERVERS
          value: "server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-deployment-3
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper-3
      name: zookeeper-3
  template:
    metadata:
      labels:
        app: zookeeper-3
        name: zookeeper-3
    spec:
      containers:
      - name: zoo3
        image: zookeeper:3.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "3"
        - name: ZOO_SERVERS
          value: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181"
EOF
kubectl apply -f zookeeper-sts.yaml
---------------------------------------------------------------------------------------------------
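# Once the three ZooKeeper Deployments are Running, the ensemble can be checked; this assumes zkServer.sh is on PATH in the official zookeeper:3.7.0 image, and one member should report "Mode: leader":
kubectl get pods -n zk-kafka
kubectl -n zk-kafka exec deploy/zookeeper-deployment-1 -- zkServer.sh status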
cat <<EOF>> kafka-svc.yaml
apiVersion: v1
kind: Service
metadata:name: kafka-service-1namespace: zk-kafkalabels:app: kafka-service-1
spec:type: NodePortports:- port: 9092name: kafka-service-1targetPort: 9092nodePort: 30901protocol: TCPselector:app: kafka-service-1
---
apiVersion: v1
kind: Service
metadata:name: kafka-service-2namespace: zk-kafkalabels:app: kafka-service-2
spec:type: NodePortports:- port: 9092name: kafka-service-2targetPort: 9092nodePort: 30902protocol: TCPselector:app: kafka-service-2
---
apiVersion: v1
kind: Service
metadata:name: kafka-service-3namespace: zk-kafkalabels:app: kafka-service-3
spec:type: NodePortports:- port: 9092name: kafka-service-3targetPort: 9092nodePort: 30903protocol: TCPselector:app: kafka-service-3
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: kafka-deployment-1namespace: zk-kafka
spec:replicas: 1selector:matchLabels:name: kafka-service-1template:metadata:labels:name: kafka-service-1app: kafka-service-1spec:containers:- name: kafka-1image: wurstmeister/kafka:2.12-2.4.1imagePullPolicy: IfNotPresentports:- containerPort: 9092env:- name: KAFKA_ADVERTISED_PORTvalue: "9092"- name: KAFKA_ADVERTISED_HOST_NAMEvalue: <kafka-svc1-CLUSTER-IP>- name: KAFKA_ZOOKEEPER_CONNECTvalue: zoo1:2181,zoo2:2181,zoo3:2181- name: KAFKA_BROKER_IDvalue: "1"- name: KAFKA_CREATE_TOPICSvalue: mytopic:2:1- name: KAFKA_ADVERTISED_LISTENERSvalue: PLAINTEXT://10.0.22.1:30901#value: PLAINTEXT://<master-ip例如192.168.128.52>:30903- name: KAFKA_LISTENERSvalue: PLAINTEXT://0.0.0.0:9092
---
apiVersion: apps/v1
kind: Deployment
metadata:name: kafka-deployment-2namespace: zk-kafka
spec:replicas: 1selector:matchLabels:name: kafka-service-2template:metadata:labels:name: kafka-service-2app: kafka-service-2spec:containers:- name: kafka-2image: wurstmeister/kafka:2.12-2.4.1imagePullPolicy: IfNotPresentports:- containerPort: 9092env:- name: KAFKA_ADVERTISED_PORTvalue: "9092"- name: KAFKA_ADVERTISED_HOST_NAMEvalue: <kafka-svc2-CLUSTER-IP>- name: KAFKA_ZOOKEEPER_CONNECTvalue: zoo1:2181,zoo2:2181,zoo3:2181- name: KAFKA_BROKER_IDvalue: "2"- name: KAFKA_ADVERTISED_LISTENERSvalue: PLAINTEXT://10.0.20.2:30902#value: PLAINTEXT://<master-ip例如192.168.128.52>:30903- name: KAFKA_LISTENERSvalue: PLAINTEXT://0.0.0.0:9092
---
apiVersion: apps/v1
kind: Deployment
metadata:name: kafka-deployment-3namespace: zk-kafka
spec:replicas: 1selector:matchLabels:name: kafka-service-3template:metadata:labels:name: kafka-service-3app: kafka-service-3spec:containers:- name: kafka-3image: wurstmeister/kafka:2.12-2.4.1imagePullPolicy: IfNotPresentports:- containerPort: 9092env:- name: KAFKA_ADVERTISED_PORTvalue: "9092"- name: KAFKA_ADVERTISED_HOST_NAMEvalue: <kafka-svc3-CLUSTER-IP>- name: KAFKA_ZOOKEEPER_CONNECTvalue: zoo1:2181,zoo2:2181,zoo3:2181- name: KAFKA_BROKER_IDvalue: "3"- name: KAFKA_ADVERTISED_LISTENERS#value: PLAINTEXT://<master-ip例如192.168.128.52>:30903value: PLAINTEXT://10.0.20.3:30903- name: KAFKA_LISTENERSvalue: PLAINTEXT://0.0.0.0:9092
EOF
kubectl apply -f kafka-svc.yaml
kubectl apply -f kafka-deployment.yaml
---------------------------------------------------------------------------------------------------
kubectl api-versions
kubectl get pods
kubectl get service
kubectl get pv
kubectl get pvc -n zookeeper
kubectl describe pvc datadir-zk-0 -n zookeeper
# Force-delete a stuck PV
kubectl patch pv zk-data3 -p '{"metadata":{"finalizers":null}}'
# Test
kubectl exec -it kafka-deployment-1-xxxxxxxxxxx -n zk-kafka -- /bin/bash
cd /opt/kafka
# List topics
bin/kafka-topics.sh --list --zookeeper <any-zookeeper-svc-clusterIP>:2181
# Manually create a topic
bin/kafka-topics.sh --create --zookeeper <zookeeper-svc1-clusterIP>:2181,<zookeeper-svc2-clusterIP>:2181,<zookeeper-svc3-clusterIP>:2181 --topic test --partitions 3 --replication-factor 1
# Produce messages (end with CTRL+D)
bin/kafka-console-producer.sh --broker-list <kafka-svc1-clusterIP>:9092,<kafka-svc2-clusterIP>:9092,<kafka-svc3-clusterIP>:9092 --topic test
# Consume messages (stop with CTRL+C)
bin/kafka-console-consumer.sh --bootstrap-server <any-kafka-svc-clusterIP>:9092 --topic test --from-beginning

zk & kafka NFS Deployment

# Create the namespace
cat <<EOF>> zookeeper.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: zk-kafka
  labels:
    name: zk-kafka
EOF
kubectl apply -f zookeeper.namespace.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> zookeeper-pv.ymal
apiVersion: v1
kind: PersistentVolume
metadata:name: k8s-pv-zk01namespace: zk-kafkalabels:app: zkannotations:volume.beta.kubernetes.io/storage-class: "anything"
spec:capacity:storage: 1GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: "/data/k8s/zk/data1"persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:name: k8s-pv-zk02namespace: zk-kafkalabels:app: zkannotations:volume.beta.kubernetes.io/storage-class: "anything"
spec:capacity:storage: 1GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: "/data/k8s/zk/data2"persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:name: k8s-pv-zk03namespace: zk-kafkalabels:app: zkannotations:volume.beta.kubernetes.io/storage-class: "anything"
spec:capacity:storage: 1GiaccessModes:- ReadWriteOncenfs:server: 10.0.21.1path: "/data/k8s/zk/data3"persistentVolumeReclaimPolicy: Recycle
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> zookeeper.ymal
apiVersion: v1
kind: Service
metadata:name: zk-hsnamespace: zk-kafkalabels:app: zk
spec:selector:app: zkclusterIP: Noneports:- name: serverport: 2888- name: leader-electionport: 3888
---
apiVersion: v1
kind: Service
metadata:name: zk-csnamespace: zk-kafkalabels:app: zk
spec:selector:app: zktype: NodePortports:- name: clientport: 2181nodePort: 31811
---
apiVersion: apps/v1
kind: StatefulSet
metadata:name: zknamespace: zk-kafka
spec:serviceName: "zk-hs"replicas: 3 # by default is 1selector:matchLabels:app: zk # has to match .spec.template.metadata.labelsupdateStrategy:type: RollingUpdatepodManagementPolicy: Paralleltemplate:metadata:labels:app: zk # has to match .spec.selector.matchLabelsspec:containers:- name: zkimagePullPolicy: Alwaysimage: guglecontainers/kubernetes-zookeeper:1.0-3.4.10ports:- containerPort: 2181name: client- containerPort: 2888name: server- containerPort: 3888name: leader-electioncommand:- sh- -c- "start-zookeeper \--servers=3 \--data_dir=/var/lib/zookeeper/data \--data_log_dir=/var/lib/zookeeper/data/log \--conf_dir=/opt/zookeeper/conf \--client_port=2181 \--election_port=3888 \--server_port=2888 \--tick_time=2000 \--init_limit=10 \--sync_limit=5 \--heap=4G \--max_client_cnxns=60 \--snap_retain_count=3 \--purge_interval=12 \--max_session_timeout=40000 \--min_session_timeout=4000 \--log_level=INFO"readinessProbe:exec:command:- sh- -c- "zookeeper-ready 2181"initialDelaySeconds: 10timeoutSeconds: 5livenessProbe:exec:command:- sh- -c- "zookeeper-ready 2181"initialDelaySeconds: 10timeoutSeconds: 5volumeMounts:- name: datadirmountPath: /var/lib/zookeepervolumeClaimTemplates:- metadata:name: datadirannotations:volume.beta.kubernetes.io/storage-class: "anything"spec:accessModes: [ "ReadWriteOnce" ]resources:requests:storage: 1Gi
#apiVersion: v1
#kind: Service
#metadata:
#  name: zk-hs
#  labels:
#    app: zk
#spec:
#  ports:
#  - port: 2888
#    name: server
#  - port: 3888
#    name: leader-election
#  clusterIP: None
#  selector:
#    app: zk
#---
#apiVersion: v1
#kind: Service
#metadata:
#  name: zk-cs
#  labels:
#    app: zk
#spec:
#  ports:
#  - port: 2181
#    name: client
#  selector:
#    app: zk
#---
#apiVersion: policy/v1beta1
#kind: PodDisruptionBudget
#metadata:
#  name: zk-pdb
#spec:
#  selector:
#    matchLabels:
#      app: zk
#  maxUnavailable: 1
#---
#apiVersion: apps/v1
#kind: StatefulSet
#metadata:
#  name: zk
#spec:
#  selector:
#    matchLabels:
#      app: zk
#  serviceName: zk-hs
#  replicas: 3
#  updateStrategy:
#    type: RollingUpdate
#  podManagementPolicy: OrderedReady
#  template:
#    metadata:
#      labels:
#        app: zk
#    spec:
#      affinity:
#        podAntiAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            - labelSelector:
#                matchExpressions:
#                  - key: "app"
#                    operator: In
#                    values:
#                    - zk
#              topologyKey: "kubernetes.io/hostname"
#      containers:
#      - name: kubernetes-zookeeper
#        #imagePullPolicy: Always
#        imagePullPolicy: ifNotPresent
#        image: "registry.k8s.com/test/zookeeper:1.0-3.4.10"
#        resources:
#          requests:
#            memory: "1Gi"
#            cpu: "0.5"
#        ports:
#        - containerPort: 2181
#          name: client
#        - containerPort: 2888
#          name: server
#        - containerPort: 3888
#          name: leader-election
#        command:
#        - sh
#        - -c
#        - "start-zookeeper \
#          --servers=3 \
#          --data_dir=/var/lib/zookeeper/data \
#          --data_log_dir=/var/lib/zookeeper/data/log \
#          --conf_dir=/opt/zookeeper/conf \
#          --client_port=2181 \
#          --election_port=3888 \
#          --server_port=2888 \
#          --tick_time=2000 \
#          --init_limit=10 \
#          --sync_limit=5 \
#          --heap=512M \
#          --max_client_cnxns=60 \
#          --snap_retain_count=3 \
#          --purge_interval=12 \
#          --max_session_timeout=40000 \
#          --min_session_timeout=4000 \
#          --log_level=INFO"
#        readinessProbe:
#          exec:
#            command:
#            - sh
#            - -c
#            - "zookeeper-ready 2181"
#          initialDelaySeconds: 10
#          timeoutSeconds: 5
#        livenessProbe:
#          exec:
#            command:
#            - sh
#            - -c
#            - "zookeeper-ready 2181"
#          initialDelaySeconds: 10
#          timeoutSeconds: 5
#        volumeMounts:
#        - name: datadir
#          mountPath: /var/lib/zookeeper
#      securityContext:
#        # runAsUser: 1000
#        fsGroup: 1000
#  volumeClaimTemplates:
#  - metadata:
#      name: datadir
#    spec:
#      accessModes: [ "ReadWriteOnce" ]
#      resources:
#        requests:
#          storage: 5Gi
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f zookeeper-pv.ymal
kubectl apply -f zookeeper.ymal
kubectl get pods
kubectl get service
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka.yaml
apiVersion: v1
kind: Service
metadata:name: kafka-service-1namespace: zk-kafkalabels:app: kafka-service-1
spec:type: NodePortports:- port: 9092name: kafka-service-1targetPort: 9092nodePort: 30901protocol: TCPselector:app: kafka-1
---
apiVersion: v1
kind: Service
metadata:name: kafka-service-2namespace: zk-kafkalabels:app: kafka-service-2
spec:type: NodePortports:- port: 9092name: kafka-service-2targetPort: 9092nodePort: 30902protocol: TCPselector:app: kafka-2
---
apiVersion: v1
kind: Service
metadata:name: kafka-service-3namespace: zk-kafkalabels:app: kafka-service-3
spec:type: NodePortports:- port: 9092name: kafka-service-3targetPort: 9092nodePort: 30903protocol: TCPselector:app: kafka-3
---
apiVersion: apps/v1
kind: Deployment
metadata:name: kafka-deployment-1namespace: zk-kafka
spec:replicas: 1selector:matchLabels:app: kafka-1template:metadata:labels:app: kafka-1spec:containers:- name: kafka-1image: wurstmeister/kafka:2.12-2.4.1imagePullPolicy: IfNotPresentports:- containerPort: 9092env:- name: KAFKA_ZOOKEEPER_CONNECTvalue: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181- name: KAFKA_BROKER_IDvalue: "1"- name: KAFKA_CREATE_TOPICSvalue: mytopic:2:1- name: KAFKA_LISTENERSvalue: PLAINTEXT://0.0.0.0:9092- name: KAFKA_ADVERTISED_PORTvalue: "30901"- name: KAFKA_ADVERTISED_HOST_NAMEvalueFrom:fieldRef:fieldPath: status.hostIPvolumeMounts:- name: datadirmountPath: /var/lib/kafkavolumes:- name: datadirnfs:server: 10.0.21.1path: "/data/k8s/kafka/pv1"
---
apiVersion: apps/v1
kind: Deployment
metadata:name: kafka-deployment-2namespace: zk-kafka
spec:replicas: 1selector:matchLabels:app: kafka-2template:metadata:labels:app: kafka-2spec:containers:- name: kafka-2image: wurstmeister/kafka:2.12-2.4.1imagePullPolicy: IfNotPresentports:- containerPort: 9092env:- name: KAFKA_ZOOKEEPER_CONNECTvalue: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181- name: KAFKA_BROKER_IDvalue: "2"- name: KAFKA_LISTENERSvalue: PLAINTEXT://0.0.0.0:9092- name: KAFKA_ADVERTISED_PORTvalue: "30902"- name: KAFKA_ADVERTISED_HOST_NAMEvalueFrom:fieldRef:fieldPath: status.hostIPvolumeMounts:- name: datadirmountPath: /var/lib/kafkavolumes:- name: datadirnfs:server: 10.0.21.1path: "/data/k8s/kafka/pv2"
---
apiVersion: apps/v1
kind: Deployment
metadata:name: kafka-deployment-3namespace: zk-kafka
spec:replicas: 1selector:matchLabels:app: kafka-3template:metadata:labels:app: kafka-3spec:containers:- name: kafka-3image: wurstmeister/kafka:2.12-2.4.1imagePullPolicy: IfNotPresentports:- containerPort: 9092env:- name: KAFKA_ZOOKEEPER_CONNECTvalue: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181- name: KAFKA_BROKER_IDvalue: "3"- name: KAFKA_LISTENERSvalue: PLAINTEXT://0.0.0.0:9092- name: KAFKA_ADVERTISED_PORTvalue: "30903"- name: KAFKA_ADVERTISED_HOST_NAMEvalueFrom:fieldRef:fieldPath: status.hostIPvolumeMounts:- name: datadirmountPath: /var/lib/kafkavolumes:- name: datadirnfs:server: 10.0.21.1path: "/data/k8s/kafka/pv3"
EOF
---------------------------------------------------------------------------------------------------
mkdir /data/k8s/kafka/{pv1,pv2,pv3} -p
kubectl apply -f kafka.yaml
kubectl get pods
kubectl get service
---------------------------------------------------------------------------------------------------

mysql

# Create the namespace
cat <<EOF>> mysql.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
  labels:
    name: mysql
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-pv.ymal
#apiVersion: v1
#kind: PersistentVolume
#metadata:
#  name: model-db-pv
#spec:
#  storageClassName: ml-pv1
#  accessModes:
#  - ReadWriteOnce
#  capacity:
#    storage: 5Gi
#  hostPath:
#    path: /home/work/share/model-db
#  persistentVolumeReclaimPolicy: Retain
#  volumeMode: Filesystem
apiVersion: v1
kind: PersistentVolume
metadata:
  name: model-db-pv
  namespace: mysql
spec:
  storageClassName: ml-pv1
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 5Gi
  persistentVolumeReclaimPolicy: Retain
  #storageClassName: nfs
  nfs:
    path: /data/k8s/mysql
    server: 10.0.21.1
  volumeMode: Filesystem
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-pvc.ymal
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: model-db-pv-claim
  namespace: mysql
spec:
  storageClassName: ml-pv1
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-configMap.ymal
apiVersion: v1
kind: ConfigMap
metadata:
  name: model-db-config
  namespace: mysql
  labels:
    app: model-db
data:
  my.cnf: |-
    [client]
    default-character-set=utf8mb4
    [mysql]
    default-character-set=utf8mb4
    [mysqld]
    max_connections = 2000
    secure_file_priv=/var/lib/mysql
    sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-deployment.ymal
apiVersion: apps/v1
kind: Deployment
metadata:
  name: model-db
  namespace: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: model-mysql
  template:
    metadata:
      labels:
        app: model-mysql
      namespace: mysql
    spec:
      containers:
      - args:
        - --datadir
        - /var/lib/mysql/datadir
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: root
        - name: MYSQL_USER
          value: user
        - name: MYSQL_PASSWORD
          value: user
        image: mysql:8.0.27
        name: model-db-container
        ports:
        - containerPort: 3306
          name: dbapi
        volumeMounts:
        - mountPath: /var/lib/mysql
          name: model-db-storage
        - name: config
          mountPath: /etc/mysql/conf.d/my.cnf
          subPath: my.cnf
      volumes:
      - name: model-db-storage
        persistentVolumeClaim:
          claimName: model-db-pv-claim
      - name: config
        configMap:
          name: model-db-config
      - name: localtime
        hostPath:
          type: File
          path: /etc/localtime
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-svc.ymal
#ClusterIP: visible only inside the cluster
#apiVersion: v1
#kind: Service
#metadata:
#  labels:
#    app: model-mysql
#  name: model-db-svc
#  namespace: mysql
#spec:
#  type: ClusterIP
#  ports:
#  - port: 3306
#    protocol: TCP
#    targetPort: 3306
#  selector:
#    app: model-mysql
apiVersion: v1
kind: Service
metadata:
  labels:
    app: model-mysql
  name: model-db-svc
  namespace: mysql
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    nodePort: 30336
    protocol: TCP
    targetPort: 3306
  selector:
    app: model-mysql
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f mysql.namespace.yaml
kubectl apply -f mysql-pv.ymal -n mysql
kubectl get pv -n mysql
kubectl apply -f mysql-pvc.ymal -n mysql
kubectl get pvc -n mysql
kubectl apply -f mysql-configMap.ymal -n mysql
kubectl apply -f mysql-deployment.ymal -n mysql
kubectl apply -f mysql-svc.ymal -n mysql
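# Quick connectivity check through the NodePort from a machine with a mysql client (10.0.20.1 is just an example node IP; credentials come from the Deployment env above):
mysql -h 10.0.20.1 -P 30336 -uroot -proot -e 'SELECT VERSION();'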
kubectl describe pvc model-db-pv-claim
---------------------------------------------------------------------------------------------------
kubectl get pods -n mysql
kubectl exec -it model-db-569b698fb8-qc62f -n mysql -- bash
# After creating the test_db database in MySQL and changing the environment variables, restarting test_db produced "Access denied for user 'root'@'172.17.0.1' (using password: NO)", even though Navicat could still connect to MySQL.
# Anyone familiar with MySQL will spot this quickly: starting with MySQL 8.0 the default authentication plugin is caching_sha2_password (see MySQL 8.0.4: New Default Authentication Plugin: caching_sha2_password).
# Option 1: set spring.datasource.password:*** in the application config
# Option 2: switch the authentication plugin to mysql_native_password
mysql -uroot -proot
USE mysql;
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'root';
FLUSH PRIVILEGES;

FROM kamisamak.com
