1 Environment Introduction and Configuration

1.1 Introduction to Ceph

#Ceph architecture
#The three interfaces Ceph supports:
1 Object: has a native API (librados) and is also compatible with the Swift and S3 APIs.
2 Block: supports thin provisioning, snapshots, and cloning.
3 File: POSIX-compliant interface with snapshot support.
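
#A quick sketch of how each interface is typically exercised (the pool, image, and mount-point names are hypothetical; assumes a configured client with an admin keyring):
rados put myobject ./file.txt --pool=testpool   #Object: store a file as an object
rbd create testimg --size 1024 --pool testpool   #Block: create a 1 GiB RBD image
mount -t ceph 172.16.2.135:6789:/ /mnt/cephfs -o name=admin,secret=<admin-key>   #File: kernel-mount CephFS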

#Pros and cons of the three interface types:
Object: scales well and is reachable over plain HTTP, but applications must use the API rather than a filesystem.
Block: the best performance, well suited to VMs and databases, but a volume is normally attached to one client at a time.
File: shared POSIX access for many clients, at the cost of the extra MDS (metadata) layer.

1.2 Environment Overview

[root@ceph135 ~]# cat /etc/redhat-release
CentOS Linux release 8.1.1911 (Core)

#Ceph release: Octopus 15.2.3
#Operating systems supported by this release:
CentOS 8
CentOS 7 (partial; see below)
Ubuntu 18.04 (Bionic)
Debian Buster
Container image (based on CentOS 8)
Note: the dashboard, prometheus, and restful mgr modules cannot be used on CentOS 7 because it lacks the required Python 3 module dependencies.

#Network design
172.16.1.0/24 #Management Network (optional)
172.16.2.0/24 #Public Network
172.16.3.0/24 #Cluster Network

#Each Ceph node is 1 CPU / 1 GB RAM (1c1g) and has two 32G data disks attached in addition to the system disk.
ceph135 eth0:172.16.1.135 eth1:172.16.2.135 eth2:172.16.3.135 1c1g
ceph136 eth0:172.16.1.136 eth1:172.16.2.136 eth2:172.16.3.136 1c1g
ceph137 eth0:172.16.1.137 eth1:172.16.2.137 eth2:172.16.3.137 1c1g

1.3 Basic Environment Preparation

1.3.1 Disable SELinux and the firewall
#Disable the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state
#Disable SELinux
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0
reboot
1.3.2 Set the hostname (run on every node)
hostnamectl set-hostname ceph135
su -
1.3.3 Configure each NIC's IP address (substitute your own NIC names and IPs)

#vim /etc/sysconfig/network-scripts/ifcfg-eth0

NetName=eth0
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet \
ip4 172.16.1.135/24 ipv4.dns "114.114.114.114" ipv4.gateway "172.16.1.254"
#After configuring, reload the network
nmcli c reload

#(Optional) To pin the default route to a specific NIC, add the following to that NIC's config file, e.g.:
#vim /etc/sysconfig/network-scripts/ifcfg-eth0

IPV4_ROUTE_METRIC=0
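
#The same can likely be done with nmcli instead of editing the file (the connection name eth0 follows the example above):
nmcli con mod eth0 ipv4.route-metric 0 && nmcli con up eth0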
1.3.4 Add the Ceph node entries to /etc/hosts

#vim /etc/hosts

#[ceph]
172.16.2.135 ceph135
172.16.2.136 ceph136
172.16.2.137 ceph137
1.3.5 Add the Octopus yum repository

#vim /etc/yum.repos.d/ceph.repo

[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/$basearch
enabled=1
gpgcheck=0
type=rpm-md

[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/noarch
enabled=1
gpgcheck=0
type=rpm-md

[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/SRPMS
enabled=1
gpgcheck=0
type=rpm-md

#Switch the system yum repos to the Aliyun mirror and rebuild the yum cache

wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-8.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all && yum makecache
1.3.6 Time synchronization

#I personally prefer the following way to sync time

rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
dnf install wntp
ntpdate ntp3.aliyun.com
echo "*/3 * * * * ntpdate ntp3.aliyun.com &> /dev/null" > /tmp/crontab
crontab /tmp/crontab
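
#Note: cephadm's bootstrap pre-flight check looks for an active chronyd service (see the bootstrap output in 2.2.1), so a minimal chrony-based alternative would be:
dnf install -y chrony
systemctl enable --now chronyd
chronyc sources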
1.3.7 (Optional) Install basic tools
yum install net-tools wget vim bash-completion lrzsz unzip zip -y

2 Ceph Installation and Configuration

2.1 Deploying with the cephadm tool

#Octopus (v15) supports deployment with the cephadm tool; ceph-deploy is only supported for releases up to v14
#Pull the latest cephadm and make it executable

curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
chmod +x cephadm

#Use cephadm to fetch and install the latest Octopus release

[root@ceph135 ~]# dnf install python3 podman #install on every node
[root@ceph135 ~]# ./cephadm add-repo --release octopus #optional, since we already added a domestic Ceph repo earlier
INFO:root:Writing repo to /etc/yum.repos.d/ceph.repo...
INFO:cephadm:Enabling EPEL...
[root@ceph135 ~]# ./cephadm install
INFO:cephadm:Installing packages ['cephadm']...
[root@ceph135 ~]# which cephadm
/usr/sbin/cephadm
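
#(Optional) As a sanity check, cephadm can report the version it will deploy:
[root@ceph135 ~]# cephadm version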

2.2 Creating a new Ceph cluster

2.2.1 Designate the admin node

#Bootstrap the cluster on a network reachable by every host that will access Ceph: specify the mon IP, and write the generated config files into /etc/ceph

[root@ceph135 ~]# mkdir -p /etc/ceph
[root@ceph135 ~]# cephadm bootstrap --mon-ip 172.16.2.135
INFO:cephadm:Verifying podman|docker is present...
INFO:cephadm:Verifying lvm2 is present...
INFO:cephadm:Verifying time synchronization is in place...
INFO:cephadm:Unit chronyd.service is enabled and running
INFO:cephadm:Repeating the final host check...
INFO:cephadm:podman|docker (/usr/bin/podman) is present
INFO:cephadm:systemctl is present
INFO:cephadm:lvcreate is present
INFO:cephadm:Unit chronyd.service is enabled and running
INFO:cephadm:Host looks OK
INFO:root:Cluster fsid: b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
INFO:cephadm:Verifying IP 172.16.2.135 port 3300 ...
INFO:cephadm:Verifying IP 172.16.2.135 port 6789 ...
INFO:cephadm:Mon IP 172.16.2.135 is in CIDR network 172.16.2.0/24
INFO:cephadm:Pulling latest docker.io/ceph/ceph:v15 container...
INFO:cephadm:Extracting ceph user uid/gid from container image...
INFO:cephadm:Creating initial keys...
INFO:cephadm:Creating initial monmap...
INFO:cephadm:Creating mon...
INFO:cephadm:Waiting for mon to start...
INFO:cephadm:Waiting for mon...
INFO:cephadm:Assimilating anything we can from ceph.conf...
INFO:cephadm:Generating new minimal ceph.conf...
INFO:cephadm:Restarting the monitor...
INFO:cephadm:Setting mon public_network...
INFO:cephadm:Creating mgr...
INFO:cephadm:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
INFO:cephadm:Wrote config to /etc/ceph/ceph.conf
INFO:cephadm:Waiting for mgr to start...
INFO:cephadm:Waiting for mgr...
INFO:cephadm:mgr not available, waiting (1/10)...
INFO:cephadm:mgr not available, waiting (2/10)...
INFO:cephadm:Enabling cephadm module...
INFO:cephadm:Waiting for the mgr to restart...
INFO:cephadm:Waiting for Mgr epoch 5...
INFO:cephadm:Setting orchestrator backend to cephadm...
INFO:cephadm:Generating ssh key...
INFO:cephadm:Wrote public SSH key to to /etc/ceph/ceph.pub
INFO:cephadm:Adding key to root@localhost's authorized_keys...
INFO:cephadm:Adding host ceph135...
INFO:cephadm:Deploying mon service with default placement...
INFO:cephadm:Deploying mgr service with default placement...
INFO:cephadm:Deploying crash service with default placement...
INFO:cephadm:Enabling mgr prometheus module...
INFO:cephadm:Deploying prometheus service with default placement...
INFO:cephadm:Deploying grafana service with default placement...
INFO:cephadm:Deploying node-exporter service with default placement...
INFO:cephadm:Deploying alertmanager service with default placement...
INFO:cephadm:Enabling the dashboard module...
INFO:cephadm:Waiting for the mgr to restart...
INFO:cephadm:Waiting for Mgr epoch 12...
INFO:cephadm:Generating a dashboard self-signed certificate...
INFO:cephadm:Creating initial admin user...
INFO:cephadm:Fetching dashboard port number...
INFO:cephadm:Ceph Dashboard is now available at:

             URL: https://ceph135:8443/
            User: admin
        Password: vcxbz7cubp

INFO:cephadm:You can access the Ceph CLI with:

        sudo /usr/sbin/cephadm shell --fsid b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

INFO:cephadm:Please consider enabling telemetry to help improve Ceph:

        ceph telemetry on

For more information see:

        https://docs.ceph.com/docs/master/mgr/telemetry/

INFO:cephadm:Bootstrap complete.

#At this point you can log in at URL: https://ceph135:8443/ to verify; the password must be changed on first login

2.2.2 Make the ceph command available locally

#Cephadm does not require any Ceph packages to be installed on the host, but it is recommended to enable easy access to the ceph command.
#The cephadm shell command launches a bash shell in a container with all the Ceph packages installed. By default, if config and keyring files are found in /etc/ceph on the host, they are passed into the container environment so the shell is fully functional.

[root@ceph135 ~]# cephadm shell
INFO:cephadm:Inferring fsid 9849edac-a547-11ea-a767-12702e1b568d
INFO:cephadm:Using recent ceph image docker.io/ceph/ceph:v15
[ceph: root@ceph135 /]# alias ceph='cephadm shell -- ceph'
[ceph: root@ceph135 /]# exit
exit
[root@ceph135 ~]# cephadm install ceph-common
INFO:cephadm:Installing packages ['ceph-common']...
[root@ceph135 ~]# ceph -v
ceph version 15.2.3 (d289bbdec69ed7c1f516e0a093594580a76b78d0) octopus (stable)
[root@ceph135 ~]# ceph status
  cluster:
    id:     b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph135 (age 19m)
    mgr: ceph135.omlfxo(active, since 15m)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     100.000% pgs unknown
             1 unknown

[root@ceph135 ~]# ceph health
HEALTH_WARN Reduced data availability: 1 pg inactive; OSD count 0 < osd_pool_default_size 3
2.2.3 Add new hosts to the Ceph cluster
[root@ceph135 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph136
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/ceph/ceph.pub"
The authenticity of host 'ceph136 (172.16.2.136)' can't be established.
ECDSA key fingerprint is SHA256:UiF5sLefJuaY6uueUxyu0t0Xdeha8BPZXGvQHZrco1M.
ECDSA key fingerprint is MD5:87:59:6e:b5:42:6d:c4:02:d8:ef:29:56:4e:0d:1d:09.
Are you sure you want to continue connecting (yes/no)? yes
root@ceph136's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@ceph136'"
and check to make sure that only the key(s) you wanted were added.

[root@ceph135 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph137
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/ceph/ceph.pub"
The authenticity of host 'ceph137 (172.16.2.137)' can't be established.
ECDSA key fingerprint is SHA256:UiF5sLefJuaY6uueUxyu0t0Xdeha8BPZXGvQHZrco1M.
ECDSA key fingerprint is MD5:87:59:6e:b5:42:6d:c4:02:d8:ef:29:56:4e:0d:1d:09.
Are you sure you want to continue connecting (yes/no)? yes
root@ceph137's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@ceph137'"
and check to make sure that only the key(s) you wanted were added.

[root@ceph135 ~]# ceph orch host add ceph136
Added host 'ceph136'
[root@ceph135 ~]# ceph orch host add ceph137
Added host 'ceph137'
2.2.4 Deploy additional monitors

#Set the public_network CIDR that clients will use to access the cluster

ceph config set mon public_network 172.16.2.0/24
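
#(Optional) Confirm the setting took effect:
ceph config get mon public_network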

#Choose the nodes that should run mons; here I select all of them

[root@ceph135 ~]# ceph orch host label add ceph135 mon
Added label mon to host ceph135
[root@ceph135 ~]# ceph orch host label add ceph136 mon
Added label mon to host ceph136
[root@ceph135 ~]# ceph orch host label add ceph137 mon
Added label mon to host ceph137
[root@ceph135 ~]# ceph orch host ls
HOST     ADDR     LABELS  STATUS
ceph135  ceph135  mon
ceph136  ceph136  mon
ceph137  ceph137  mon

#Tell cephadm to deploy mons according to the label; this step waits while each node pulls the image and starts its container

[root@ceph135 ~]# ceph orch apply mon label:mon
Scheduled mon update...
#To verify the deployment completed, check the containers on the other two nodes:
[root@ceph136 ~]# podman ps -a
CONTAINER ID  IMAGE                                COMMAND               CREATED         STATUS             PORTS  NAMES
a24ab51b5f62  docker.io/prom/node-exporter:latest  --no-collector.ti...  5 minutes ago   Up 5 minutes ago          ceph-b3add0aa-ae
37ef832554fd  docker.io/ceph/ceph:v15              -n mon.ceph136 -f...  6 minutes ago   Up 6 minutes ago          ceph-b3add0aa-ae
10122c06ad1a  docker.io/ceph/ceph:v15              -n mgr.ceph136.iy...  7 minutes ago   Up 7 minutes ago          ceph-b3add0aa-ae
df5275a6684f  docker.io/ceph/ceph:v15              -n client.crash.c...  12 minutes ago  Up 12 minutes ago         ceph-b3add0aa-ae
[root@ceph136 ~]# podman images
REPOSITORY                     TAG      IMAGE ID       CREATED       SIZE
docker.io/ceph/ceph            v15      d72755c420bc   2 weeks ago   1.13 GB
docker.io/prom/node-exporter   latest   14191dbfb45b   2 weeks ago   27.7 MB
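
#The same can be checked from the admin node via the orchestrator:
[root@ceph135 ~]# ceph orch ps --daemon-type mon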
2.2.5 Deploy OSDs

#List the usable disks

[root@ceph135 ~]# ceph orch device ls
HOST     PATH      TYPE   SIZE  DEVICE                     AVAIL  REJECT REASONS
ceph135  /dev/sdb  hdd   32.0G  QEMU_HARDDISK_drive-scsi1  True
ceph135  /dev/sdc  hdd   32.0G  QEMU_HARDDISK_drive-scsi2  True
ceph135  /dev/sda  hdd   20.0G  QEMU_HARDDISK_drive-scsi0  False  locked
ceph136  /dev/sdb  hdd   32.0G  QEMU_HARDDISK_drive-scsi1  True
ceph136  /dev/sdc  hdd   32.0G  QEMU_HARDDISK_drive-scsi2  True
ceph136  /dev/sda  hdd   20.0G  QEMU_HARDDISK_drive-scsi0  False  locked
ceph137  /dev/sdb  hdd   32.0G  QEMU_HARDDISK_drive-scsi2  True
ceph137  /dev/sdc  hdd   32.0G  QEMU_HARDDISK_drive-scsi1  True
ceph137  /dev/sda  hdd   20.0G  QEMU_HARDDISK_drive-scsi0  False  locked

#For convenience, I simply use all available disks here

[root@ceph135 ~]# ceph orch apply osd --all-available-devices
NAME                  HOST    DATA     DB WAL
all-available-devices ceph135 /dev/sdb -  -
all-available-devices ceph135 /dev/sdc -  -
all-available-devices ceph136 /dev/sdb -  -
all-available-devices ceph136 /dev/sdc -  -
all-available-devices ceph137 /dev/sdb -  -
all-available-devices ceph137 /dev/sdc -  -
#To add a single disk instead:
ceph orch daemon add osd ceph135:/dev/sdb
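
#If a previously used disk does not show AVAIL=True, it can be wiped first (this destroys all data on that disk):
ceph orch device zap ceph135 /dev/sdb --force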

#Verify the deployment

[root@ceph135 ~]# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE     RAW USE  DATA     OMAP  META   AVAIL    %USE  VAR   PGS  STATUS
 0    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    1      up
 1    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    0      up
 2    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    0      up
 3    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    1      up
 4    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    0      up
 5    hdd  0.03119   1.00000   32 GiB  1.0 GiB  5.4 MiB   0 B  1 GiB   31 GiB  3.14  1.00    1      up
                       TOTAL  192 GiB  6.0 GiB   32 MiB   0 B  6 GiB  186 GiB  3.14
MIN/MAX VAR: 1.00/1.00  STDDEV: 0

3 Storage Deployment

3.1 CephFS Deployment

#Deploy the MDS service for CephFS, specifying the service (filesystem) name and the number of MDS daemons

[root@ceph135 ~]# ceph orch apply mds fs-cluster --placement=3
Scheduled mds.fs-cluster update...

#Verify:

[root@ceph135 ~]# ceph -s
  cluster:
    id:     b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph135,ceph136,ceph137 (age 47s)
    mgr: ceph135.omlfxo(active, since 89m), standbys: ceph136.iyehke, ceph137.fywkvw
    mds:  3 up:standby
    osd: 6 osds: 6 up (since 20m), 6 in (since 20m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 186 GiB / 192 GiB avail
    pgs:     1 active+clean
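
#Note that all three mds daemons are standby because no filesystem exists yet. A minimal sketch to create one that the fs-cluster MDS daemons will serve (pool names and PG counts are my choice):
ceph osd pool create cephfs-metadata 32
ceph osd pool create cephfs-data 64
ceph fs new fs-cluster cephfs-metadata cephfs-data
ceph fs status fs-cluster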

3.2 Deploy RGW

#Create a realm:

[root@ceph135 ~]# radosgw-admin realm create --rgw-realm=rgw-org --default
{"id": "31424ff4-38a1-48d9-bab4-fcfe8d75efcc","name": "rgw-org","current_period": "06f0511d-58cd-4acd-aac1-da25ea785454","epoch": 1
}

#Create a zonegroup

[root@ceph135 ~]# radosgw-admin zonegroup create --rgw-zonegroup=rgwgroup --master --default
{"id": "35dcfee7-fa47-4e53-b41d-9718fd029782","name": "rgwgroup","api_name": "rgwgroup","is_master": "true","endpoints": [],"hostnames": [],"hostnames_s3website": [],"master_zone": "","zones": [],"placement_targets": [],"default_placement": "","realm_id": "31424ff4-38a1-48d9-bab4-fcfe8d75efcc","sync_policy": {"groups": []}
}

#Create a zone

[root@ceph135 ~]# radosgw-admin zone create --rgw-zonegroup=rgwgroup --rgw-zone=zone-dc1 --master --default
{"id": "ec441ad3-1167-459d-9d1c-cf21e5625cbf","name": "zone-dc1","domain_root": "zone-dc1.rgw.meta:root","control_pool": "zone-dc1.rgw.control","gc_pool": "zone-dc1.rgw.log:gc","lc_pool": "zone-dc1.rgw.log:lc","log_pool": "zone-dc1.rgw.log","intent_log_pool": "zone-dc1.rgw.log:intent","usage_log_pool": "zone-dc1.rgw.log:usage","roles_pool": "zone-dc1.rgw.meta:roles","reshard_pool": "zone-dc1.rgw.log:reshard","user_keys_pool": "zone-dc1.rgw.meta:users.keys","user_email_pool": "zone-dc1.rgw.meta:users.email","user_swift_pool": "zone-dc1.rgw.meta:users.swift","user_uid_pool": "zone-dc1.rgw.meta:users.uid","otp_pool": "zone-dc1.rgw.otp","system_key": {"access_key": "","secret_key": ""},"placement_pools": [{"key": "default-placement","val": {"index_pool": "zone-dc1.rgw.buckets.index","storage_classes": {"STANDARD": {"data_pool": "zone-dc1.rgw.buckets.data"}},"data_extra_pool": "zone-dc1.rgw.buckets.non-ec","index_type": 0}}],"realm_id": "31424ff4-38a1-48d9-bab4-fcfe8d75efcc"
}

#Deploy a set of radosgw daemons for the given realm and zone; here RGW is enabled on only two nodes

[root@ceph135 ~]# ceph orch apply rgw rgw-org zone-dc1 --placement="2 ceph136 ceph137"
Scheduled rgw.rgw-org.zone-dc1 update...

#Verify

[root@ceph135 ~]# ceph -s
  cluster:
    id:     b3add0aa-aee7-11ea-a3e4-5e7ce92c6bef
    health: HEALTH_WARN
            1 daemons have recently crashed

  services:
    mon: 3 daemons, quorum ceph135,ceph136,ceph137 (age 9m)
    mgr: ceph135.omlfxo(active, since 108m), standbys: ceph136.iyehke, ceph137.fywkvw
    mds:  3 up:standby
    osd: 6 osds: 6 up (since 39m), 6 in (since 39m)
    rgw: 2 daemons active (rgw-org.zone-dc1.ceph136.ddujbi, rgw-org.zone-dc1.ceph137.mnfhhp)

  task status:

  data:
    pools:   5 pools, 129 pgs
    objects: 105 objects, 5.4 KiB
    usage:   6.1 GiB used, 186 GiB / 192 GiB avail
    pgs:     1.550% pgs not active
             127 active+clean
             2   peering

  io:
    client:   7.9 KiB/s rd, 0 B/s wr, 8 op/s rd, 4 op/s wr

  progress:
    PG autoscaler decreasing pool 5 PGs from 32 to 8 (0s)
      [............................]
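
#A quick smoke test (assuming the RGW daemons listen on the default port 80): an anonymous request should return a ListAllMyBuckets XML document.
curl http://ceph136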

#Enable the dashboard for RGW
#Create an RGW admin user

[root@ceph135 ~]# radosgw-admin user create --uid=admin --display-name=admin --system
{"user_id": "admin","display_name": "admin","email": "","suspended": 0,"max_buckets": 1000,"subusers": [],"keys": [{"user": "admin","access_key": "XY518C4I2RO51D4S2JGT","secret_key": "e9akFxQwOM8Y9zxDum4CLCQEOXaImVomGiqIsutC"}],"swift_keys": [],"caps": [],"op_mask": "read, write, delete","system": "true","default_placement": "","default_storage_class": "","placement_tags": [],"bucket_quota": {"enabled": false,"check_on_raw": false,"max_size": -1,"max_size_kb": 0,"max_objects": -1},"user_quota": {"enabled": false,"check_on_raw": false,"max_size": -1,"max_size_kb": 0,"max_objects": -1},"temp_url_keys": [],"type": "rgw","mfa_ids": []
}

#Set the dashboard credentials

[root@ceph135 ~]# ceph dashboard set-rgw-api-access-key XY518C4I2RO51D4S2JGT
Option RGW_API_ACCESS_KEY updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-secret-key e9akFxQwOM8Y9zxDum4CLCQEOXaImVomGiqIsutC
Option RGW_API_SECRET_KEY updated

#Disable SSL certificate verification, set HTTP as the access scheme, and use the admin account

[root@ceph135 ~]# ceph dashboard set-rgw-api-ssl-verify False
Option RGW_API_SSL_VERIFY updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-scheme http
Option RGW_API_SCHEME updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-host 172.16.2.137
Option RGW_API_HOST updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-port 80
Option RGW_API_PORT updated
[root@ceph135 ~]# ceph dashboard set-rgw-api-user-id admin
Option RGW_API_USER_ID updated

#Restart the RGW daemons

[root@ceph135 ~]# ceph orch restart rgw
restart rgw.rgw-org.zone-dc1.ceph136.ddujbi from host 'ceph136'
restart rgw.rgw-org.zone-dc1.ceph137.mnfhhp from host 'ceph137'
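
#After the restart, the Object Gateway pages in the dashboard should become available. A quick check that both daemons came back up:
ceph orch ps --daemon-type rgw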

X. Problems encountered during deployment

eg1.
[root@ceph135 ~]# cephadm shell
ERROR: Cannot infer an fsid, one must be specified: ['00482894-a564-11ea-8617-12702e1b568d', '9849edac-a547-11ea-a767-12702e1b568d']
Solution: delete the old cluster's data, keeping only the new cluster's directory
[root@ceph135 ceph]# cd /var/lib/ceph
[root@ceph135 ceph]# ls
00482894-a564-11ea-8617-12702e1b568d  9849edac-a547-11ea-a767-12702e1b568d
[root@ceph135 ceph]# rm -rf 9849edac-a547-11ea-a767-12702e1b568d/
[root@ceph135 ceph]# ll

eg2.
[root@ceph135 ~]# ./cephadm add-repo --release octopus
-bash: ./cephadm: /usr/bin/python3: bad interpreter: No such file or directory
Solution: dnf install python3

eg3.
[root@ceph135 ~]# ./cephadm install
Unable to locate any of ['podman', 'docker']
Solution: dnf install -y podman

eg4.
ERROR: lvcreate binary does not appear to be installed
Solution: yum install lvm2
