emptyDir Volumes

emptyDir Example

[root@server2 ~]# kubectl delete pod mypod --force
[root@server2 ~]# mkdir volumes
[root@server2 ~]# cd volumes/
[root@server2 volumes]# vim emptydir.yaml
apiVersion: v1
kind: Pod
metadata:
  name: vol1
spec:
  containers:
  - image: busyboxplus
    name: vm1
    stdin: true
    tty: true
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  - name: vm2
    image: myapp:v1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir:
      medium: Memory
      sizeLimit: 100Mi
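Note: with medium: Memory the volume is backed by tmpfs, so reads and writes are fast, but the data is lost whenever the pod is removed and the usage counts against the containers' memory.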
[root@server2 volumes]# kubectl apply -f emptydir.yaml
[root@server2 volumes]# kubectl get pod
[root@server2 volumes]# kubectl describe pod vol1

[root@server2 volumes]# kubectl get pod -o wide

[root@server2 volumes]# kubectl attach vol1 -c vm1 -it
/ # cd /cache/
/cache # echo www.westos.org > index.html
/cache # curl localhost

Drawbacks of emptyDir

[root@server2 volumes]# kubectl attach vol1 -c vm1 -it
/ # cd cache/
/cache # dd if=/dev/zero of=bigfile bs=1M count=200
[root@server2 volumes]# kubectl get pod

Once the files exceed the sizeLimit, the pod is evicted by the kubelet after a while. The eviction is not immediate because the kubelet checks volume usage only periodically, so there is a delay.
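To confirm the eviction, check the pod status and events (the exact event wording varies by Kubernetes version):

[root@server2 volumes]# kubectl get pod vol1
[root@server2 volumes]# kubectl describe pod vol1 | grep -i evict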

[root@server2 volumes]# kubectl delete pod vol1

hostPath Volumes

  • Besides the required path attribute, you can optionally specify a type for a hostPath volume; the common values are listed below.
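Common type values (from the Kubernetes documentation):

  ""                  no check is performed before the volume is mounted (default)
  DirectoryOrCreate   an empty directory (mode 0755) is created at the path if nothing exists
  Directory           a directory must already exist at the path
  FileOrCreate        an empty file (mode 0644) is created if nothing exists
  File                a file must already exist at the path
  Socket              a UNIX socket must exist at the path
  CharDevice          a character device must exist at the path
  BlockDevice         a block device must exist at the path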

hostPath Example

[root@server2 volumes]# vim hostpath.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: myapp:v1
    name: vm1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /webdata
      type: DirectoryOrCreate
[root@server2 volumes]# kubectl apply -f hostpath.yaml
[root@server2 volumes]# kubectl get pod -o wide
[root@server2 volumes]# curl 10.244.22.25


Write the content on node server4 (where the pod was scheduled):

[root@server4 ~]# cd /webdata/
[root@server4 webdata]# echo www.westos.org > index.html

Access again:

[root@server2 volumes]# curl 10.244.22.25

[root@server2 volumes]# kubectl delete pod test-pd

After the pod is deleted, the content written on server4 is not removed.

NFS

[root@server2 volumes]# vim nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nfs-pd
spec:
  containers:
  - image: myapp:v1
    name: vm1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    nfs:
      server: 192.168.3.201
      path: /nfsdata
[root@server1 ~]# yum install -y nfs-utils
Install nfs-utils on every node if possible (each kubelet needs it to mount the export).
[root@server1 ~]# vim /etc/exports
/nfsdata        *(rw,no_root_squash)
[root@server1 ~]# systemctl enable --now nfs
[root@server1 ~]# showmount -e

[root@server4 webdata]# yum install -y nfs-utils
[root@server2 volumes]# kubectl apply -f nfs.yaml
[root@server2 volumes]# kubectl get pod -o wide
[root@server2 volumes]# curl 10.244.22.26

[root@server1 nfsdata]# rm -rf *
[root@server1 nfsdata]# echo www.westos.org > index.html
[root@server2 volumes]# curl 10.244.22.26

PersistentVolume (PV)
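A PersistentVolume is a piece of cluster storage whose lifecycle is independent of any pod; it is created by an administrator (statically) or by a provisioner (dynamically). A PersistentVolumeClaim is a user's request for storage, and Kubernetes binds each claim to a PV whose capacity, access modes, and storageClassName satisfy it.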




NFS PV Example

Clean up the experiment environment:

[root@server2 volumes]# kubectl delete -f nfs.yaml

Write the manifest files:

[root@server2 volumes]# vim pv1.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv1
    server: 192.168.3.201
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv2
    server: 192.168.3.201
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
spec:
  capacity:
    storage: 20Gi
  volumeMode: Filesystem
  accessModes:
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv3
    server: 192.168.3.201
[root@server2 volumes]# vim pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
spec:
  storageClassName: nfs
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc2
spec:
  storageClassName: nfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: myapp:v1
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs-pv
  volumes:
  - name: nfs-pv
    persistentVolumeClaim:
      claimName: pvc1
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pd-2
spec:
  containers:
  - image: myapp:v1
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs-pv-2
  volumes:
  - name: nfs-pv-2
    persistentVolumeClaim:
      claimName: pvc2
[root@server1 nfsdata]# rm -rf *
[root@server1 nfsdata]# mkdir pv1 pv2 pv3
[root@server1 nfsdata]# cd pv1
[root@server1 pv1]# echo www.westos.org > index.html
[root@server1 pv1]# cd ../pv2
[root@server1 pv2]# echo www.redhat.com > index.html

[root@server2 volumes]# kubectl apply -f pv1.yaml
[root@server2 volumes]# kubectl apply -f pvc.yaml
[root@server2 volumes]# kubectl get pv
[root@server2 volumes]# kubectl get pvc

[root@server2 volumes]# kubectl get pod -o wide


After deleting one PVC:

[root@server2 volumes]# kubectl delete pvc pvc2
[root@server2 volumes]# kubectl get pv

The files here are reclaimed as well: the Recycle policy scrubs the volume's contents so the PV can return to Available.

[root@server1 pv2]# ll

Clean up:

[root@server2 volumes]# kubectl delete -f pvc.yaml
[root@server2 volumes]# kubectl delete -f pv1.yaml

Likewise, the files here are cleaned up:

[root@server1 nfsdata]# rm -rf *

Dynamic Volumes
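With dynamic provisioning, PVs no longer have to be created by hand: a StorageClass names a provisioner, and whenever a PVC requests that class the provisioner creates a matching PV on demand. Here the NFS external provisioner (nfs-subdir-external-provisioner) is used, which backs every PV with a subdirectory of the NFS export.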



Upload the required image:

[root@server1 ~]# docker load -i nfs-client-provisioner-v4.0.0.tar
[root@server1 harbor]# docker push reg.westos.org/library/nfs-subdir-external-provisioner:v4.0.0

Create the experiment directory:

[root@server2 ~]# cd volumes/
[root@server2 volumes]# mkdir nfs-client
[root@server2 ~]# cd volumes/nfs-client/

Write the manifests:


[root@server2 nfs-client]# vim nfs-client-provisioner.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-client-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-client-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: nfs-subdir-external-provisioner:v4.0.0
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.3.201
            - name: NFS_PATH
              value: /nfsdata
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.3.201
            path: /nfsdata
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"
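Because archiveOnDelete is "true", the provisioner does not wipe a released volume's data; it renames the backing directory on the NFS export with an archived- prefix instead.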
[root@server2 ~]# vim pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: myapp:v1
    volumeMounts:
    - name: nfs-pvc
      mountPath: "/usr/share/nginx/html"
  volumes:
  - name: nfs-pvc
    persistentVolumeClaim:
      claimName: test-claim

Create a dedicated namespace:

[root@server2 nfs-client]# kubectl create namespace nfs-client-provisioner
[root@server2 nfs-client]# kubectl get ns


Deploy:

[root@server2 nfs-client]# kubectl apply -f nfs-client-provisioner.yaml -n nfs-client-provisioner
[root@server2 nfs-client]# kubectl get sc

[root@server2 nfs-client]# kubectl apply -f pvc.yaml
[root@server2 nfs-client]# kubectl get pvc
[root@server2 nfs-client]# kubectl get pv


The directory for the claim is generated automatically (the provisioner names it <namespace>-<pvcName>-<pvName>); create an index page in it:

[root@server1 nfsdata]# ls
[root@server1 nfsdata]# cd default-test-claim-pvc-8443c14a-b355-476c-a819-c3083b4e1177/
[root@server1 default-test-claim-pvc-8443c14a-b355-476c-a819-c3083b4e1177]# echo www.westos.org > index.html


Check the pod IP and access it:

[root@server2 nfs-client]# kubectl get pod -o wide

StatefulSet

Clean up the experiment environment, then set up a working directory:

[root@server2 ~]# cd volumes/
[root@server2 volumes]# mkdir statefulset
[root@server2 volumes]# cd statefulset/
[root@server2 statefulset]# vim service.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx-svc"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: myapp:v1
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      storageClassName: managed-nfs-storage
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
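volumeClaimTemplates gives every replica its own PVC, named <templateName>-<podName> (here www-web-0 and www-web-1), so each pod keeps the same volume across rescheduling; that is why two directories show up under /nfsdata below.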
[root@server2 statefulset]# kubectl apply -f service.yaml
[root@server2 statefulset]# kubectl get pod
[root@server2 statefulset]# kubectl get pvc
[root@server2 statefulset]# kubectl get pv

[root@server1 harbor]# cd /nfsdata/
[root@server1 nfsdata]# ls
[root@server1 nfsdata]# echo web-0 > default-www-web-0-pvc-c05e881a-4fdb-46d5-8e63-6b9c7cb73ff0/index.html
[root@server1 nfsdata]# echo web-1 > default-www-web-1-pvc-c07040d3-077c-4b3d-90c2-ddf83380d407/index.html

[root@server2 statefulset]# kubectl get pod -o wide


Pod IPs change when pods are rebuilt, so access them from inside a container by their stable DNS names:

[root@server2 statefulset]# kubectl run demo --image=busyboxplus -it
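Inside the demo container the pods are reachable through the headless service (a quick check; the <pod>.<service> names come from serviceName in the StatefulSet):

/ # nslookup nginx-svc
/ # curl web-0.nginx-svc
/ # curl web-1.nginx-svc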


Scale down

[root@server2 statefulset]# vim service.yaml

[root@server2 statefulset]# kubectl apply -f service.yaml
[root@server2 statefulset]# kubectl get pod

The pods are shut down one at a time, from the highest ordinal down to 0.
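Assuming the edit above lowers replicas, the same scale-down can be done without touching the file:

[root@server2 statefulset]# kubectl scale statefulset web --replicas=0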

Scale back up

[root@server2 statefulset]# vim service.yaml

[root@server2 statefulset]# kubectl apply -f service.yaml
[root@server2 statefulset]# kubectl get pod

The pods are started in order, from ordinal 0 upward.

Access is unchanged (the DNS names still resolve to the rebuilt pods):

[root@server2 statefulset]# kubectl attach demo -it

Deploying a MySQL Master-Slave Cluster with StatefulSet


Clean up the experiment environment:

[root@server2 statefulset]# kubectl delete -f service.yaml
[root@server2 statefulset]# kubectl delete pod demo --force
[root@server2 statefulset]# kubectl delete pvc --all
[root@server2 statefulset]# kubectl delete cm cm1-config my-config my-config-2 my-config-3 nginx-config
[root@server2 statefulset]# mkdir mysql
[root@server2 statefulset]# cd mysql/
[root@server2 mysql]# vim configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
[root@server2 mysql]# kubectl apply -f configmap.yaml
[root@server2 mysql]# kubectl get cm
[root@server2 mysql]# kubectl describe cm mysql

[root@server2 mysql]# vim service.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
[root@server2 mysql]# kubectl apply -f service.yaml
[root@server2 mysql]# kubectl get svc

[root@server1 nfsdata]# docker pull mysql:5.7
[root@server1 nfsdata]# docker tag mysql:5.7 reg.westos.org/library/mysql:5.7
[root@server1 nfsdata]# docker push reg.westos.org/library/mysql:5.7

The gcr.io/google-samples/xtrabackup:1.0 image cannot be pulled directly, so it has to be fetched from a mirror and pushed to the local registry as xtrabackup:1.0 (the name referenced in the StatefulSet below).

[root@server2 mysql]# vim statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 512Mi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave. (Need to remove the trailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 5Gi
[root@server2 mysql]# kubectl apply -f statefulset.yaml
[root@server2 mysql]# yum install -y mariadb
[root@server2 mysql]# kubectl get svc
[root@server2 mysql]# kubectl get pod -o wide
[root@server2 mysql]# mysql -h <pod IP>
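To verify that replication works, write through the master (mysql-0.mysql) and read through the mysql-read service; a sketch following the upstream tutorial, run from a throwaway client pod so the cluster DNS names resolve:

[root@server2 mysql]# kubectl run mysql-client --image=mysql:5.7 -it --rm --restart=Never -- \
  mysql -h mysql-0.mysql -e "CREATE DATABASE test; CREATE TABLE test.messages (message VARCHAR(250)); INSERT INTO test.messages VALUES ('hello')"
[root@server2 mysql]# kubectl run mysql-client-read --image=mysql:5.7 -it --rm --restart=Never -- \
  mysql -h mysql-read -e "SELECT * FROM test.messages"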


The directories are generated automatically:

[root@server1 ~]# cd /nfsdata/
[root@server1 nfsdata]# ls
