1. Create a pool

ses01:~ # ceph osd pool create test_pool 10240 10240

pool 'test_pool' created
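As a quick sanity check, the PG count and replica count of the new pool can be read back (a minimal sketch using the pool name created above):

```
ceph osd pool get test_pool pg_num    # should report the 10240 PGs requested above
ceph osd pool get test_pool size      # current replica count of the pool
```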

2. View and modify the pool's replica count

ceph osd dump|grep 'replicated size'

pool 0 'rbd' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 2048 pgp_num 2048 last_change 424 flags hashpspool stripe_width 0

pool 1 'cephfs_metadata' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 175 flags hashpspool stripe_width 0

pool 2 'cephfs_data' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 256 pgp_num 256 last_change 464 flags hashpspool crash_replay_interval 45 stripe_width 0

pool 4 'test_pool' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 10240 pgp_num 10240 last_change 11562 flags hashpspool stripe_width 0

ses01:~ # ceph osd pool set test_pool size 3

set pool 4 size to 3

ses01:~ # ceph osd dump|grep 'replicated size'

pool 0 'rbd' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 2048 pgp_num 2048 last_change 424 flags hashpspool stripe_width 0

pool 1 'cephfs_metadata' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 175 flags hashpspool stripe_width 0

pool 2 'cephfs_data' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 256 pgp_num 256 last_change 464 flags hashpspool crash_replay_interval 45 stripe_width 0

pool 4 'test_pool' replicated size 3 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 10240 pgp_num 10240 last_change 11564 flags hashpspool stripe_width 0
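min_size (the minimum number of replicas that must be available for I/O to proceed) can be changed the same way; a sketch, where the value 2 is an assumption rather than something taken from the transcript:

```
ceph osd pool set test_pool min_size 2
```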

3. Delete a pool

ses01:~ # ceph osd pool delete test_pool test_pool --yes-i-really-really-mean-it

pool 'test_pool' removed
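On newer Ceph releases pool deletion is refused by default; a sketch of allowing it first (assumes the mon_allow_pool_delete option is available on the cluster):

```
# temporarily allow pool deletion on all monitors, then delete the pool
ceph tell mon.* injectargs '--mon-allow-pool-delete=true'
ceph osd pool delete test_pool test_pool --yes-i-really-really-mean-it
```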

4. Create an RBD image and map it to a block device

ses01:~ # rbd map test_rbd --pool rbd --id admin

/dev/rbd0
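The map above assumes the image already exists; a minimal sketch of the create step (the 100 GB size is an assumption inferred from the df output in step 8):

```
# create a 100 GB image named test_rbd in the rbd pool (size in MB; value assumed)
rbd create test_rbd --pool rbd --size 102400
```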

5. Check the mapping

ses01:~ # rbd showmapped

id pool image snap device

0 rbd test_rbd - /dev/rbd0

6. Unmap the image

ses01:~ # rbd unmap /dev/rbd0
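Since the following steps format and mount /dev/rbd0, the image has to be mapped again first, with the same command as in step 4:

```
rbd map test_rbd --pool rbd --id admin
```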

7. Format the device and create a mount point

ses01:~ # mkfs.ext4 -q /dev/rbd0

ses01:~ # mkdir -p /mnt/ceph-rbd0

8. Mount the filesystem

ses01:/mnt/ceph-rbd0 # mount /dev/rbd0 /mnt/ceph-rbd0

ses01:/mnt/ceph-rbd0 # df

Filesystem 1K-blocks Used Available Use% Mounted on

devtmpfs 131642164 8 131642156 1% /dev

tmpfs 131650892 144 131650748 1% /dev/shm

tmpfs 131650892 1968928 129681964 2% /run

tmpfs 131650892 0 131650892 0% /sys/fs/cgroup

/dev/sdg2 32900924 8505272 22717676 28% /

/dev/sdg1 151380 4612 146768 4% /boot/efi

/dev/sdg5 32900924 49172 31173776 1% /var/backup

/dev/sdg4 153428 0 153428 0% /var/backup/boot/efi

/dev/sdf1 11242668012 2618768324 8623899688 24% /var/lib/ceph/osd/ceph-5

/dev/sdc1 11242668012 2927767068 8314900944 27% /var/lib/ceph/osd/ceph-2

/dev/sdb1 11242668012 2295717280 8946950732 21% /var/lib/ceph/osd/ceph-1

/dev/sda1 11242668012 3100207472 8142460540 28% /var/lib/ceph/osd/ceph-0

/dev/sde1 11242668012 2510867344 8731800668 23% /var/lib/ceph/osd/ceph-4

/dev/sdd1 11242668012 2356968620 8885699392 21% /var/lib/ceph/osd/ceph-3

tmpfs 26330180 16 26330164 1% /run/user/1000

tmpfs 26330180 0 26330180 0% /run/user/0

/dev/rbd0 103081248 61044 97760940 1% /mnt/ceph-rbd0

9. Write test data

ses01:/mnt/ceph-rbd0 # touch ceph_rbd_test

ses01:/mnt/ceph-rbd0 # ls -l

total 0

-rw-r--r-- 1 root root 0 Dec 14 09:42 ceph_rbd_test

ses01:/mnt/ceph-rbd0 # echo "this is a ceph rbd test" > ceph_rbd_test

ses01:/mnt/ceph-rbd0 # cat ceph_rbd_test

this is a ceph rbd test

10. Check the auth entries (permissions)

ses01:/etc/ceph # ceph auth list

installed auth entries:

mds.mgt01

key: AQBzyh5ZMA+FHhAAnJ/VEaUIOvBB5kYxUpjHBg==

caps: [mds] allow

caps: [mon] allow profile mds

caps: [osd] allow rwx

mds.mgt02

key: AQB0yh5ZAr0pNBAAXWlY33eQVSdl71coRTfIQA==

caps: [mds] allow

caps: [mon] allow profile mds

caps: [osd] allow rwx

osd.0

key: AQDCxR5ZgjrjDBAAfFynqg6Qhyhyo1CvReZtOg==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.1

key: AQDzxR5ZG6zKNBAAnPZeDGuKV3orhIYu0Q0Teg==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.10

key: AQDGxx5Z9mM7JBAAU8FA4i9AVnEv+zvaSuKgNQ==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.11

key: AQD5xx5ZbLLbJBAANK1ym3kwkaweZM95FOpSIg==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.12

key: AQAwyB5ZKC/WGxAA0AWbvYGxg2nFK+doq2KbiA==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.13

key: AQBQyB5Z5o1mDRAAYRfe4OFgOELnNNnZxwjFjg==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.14

key: AQBwyB5ZJlewExAAj++/jffRhJseim69vDySjg==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.15

key: AQCPyB5Z1YKgMBAAU2neiP07igfqOCWjnM4ZsQ==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.16

key: AQCuyB5ZQJZOBBAALchJAsBk2T++OGCtqiAN8g==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.17

key: AQDNyB5ZatYoEBAAotu9ezxssPXzyomCMzl6qQ==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.18

key: AQD3yB5ZxMAIBhAAVmhneS6YV6tIdM1eG1I3ig==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.19

key: AQAVyR5Z8F4ZKRAAzdKXQ2DOZA+ThNQ6aRhNsA==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.2

key: AQAmxh5ZT5XoLBAAZKZtusZWxIopBXTCj8nQ8A==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.20

key: AQA6yR5ZaMVNHBAABw2KQNSjQDwnA0b7JCBixg==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.21

key: AQBfyR5ZIY83MRAAZVrtz3/aLJdBCB+xdJ0G2w==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.22

key: AQCAyR5ZupXSBhAAcV8JkQmzESkOOHC4nTX70Q==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.23

key: AQCfyR5ZQZXXGBAAGN1lRWzRi36Kn9XYHhSrLA==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.3

key: AQBZxh5ZhSF2IhAAxvVlD09D++9Tuxle+MT6PQ==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.4

key: AQCMxh5Zxo1qOxAAjuxaS5gKMhhlQYxlOd3pbQ==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.5

key: AQDAxh5Zl7qwBxAAApphv87tZRJ4pR/6JonT2w==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.6

key: AQD2xh5ZUEVbJBAAMLgApEeRRsy//jgqTzWyQw==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.7

key: AQArxx5ZRiTbAxAAjpDp0wTMFZmkIc0NJHwMmQ==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.8

key: AQBfxx5ZasIHGxAA+30qRwS78PlW4/o17aUv7w==

caps: [mon] allow profile osd

caps: [osd] allow *

osd.9

key: AQCSxx5Z/X4VLhAAzA+OExfY66loRKSpmJWP2g==

caps: [mon] allow profile osd

caps: [osd] allow *

client.admin

key: AQBLxR5Z9A8OIRAABLWw92AbW1+wv09MPjWWNg==

caps: [mds] allow *

caps: [mon] allow *

caps: [osd] allow *

client.bootstrap-mds

key: AQBLxR5Zl/P3KhAAqwRevxUiv7XttA+DEa/VQg==

caps: [mon] allow profile bootstrap-mds

client.bootstrap-osd

key: AQBLxR5Z7KzMNBAAWLt9blr1BJ0h0stXjvJPDg==

caps: [mon] allow profile bootstrap-osd

client.bootstrap-rgw

key: AQBMxR5ZV55kAxAAq2RYh5EdBl16Xxyh6dhAIw==

caps: [mon] allow profile bootstrap-rgw
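Rather than mapping images with the admin key, a dedicated client with pool-scoped caps can be created; a sketch, where client.rbduser and the cap strings are assumptions, not entries from the listing above:

```
# create a client restricted to the rbd pool and export its keyring
ceph auth get-or-create client.rbduser mon 'allow r' osd 'allow rwx pool=rbd' \
    -o /etc/ceph/ceph.client.rbduser.keyring
```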

11. Configure automatic mounting (cd /etc/ceph and edit the files below), then reboot the node to verify

ses01:/etc/ceph # vi rbdmap

# RbdDevice Parameters

#poolname/imagename id=client,keyring=/etc/ceph/ceph.client.keyring

rbd/test_rbd id=admin,keyring=/etc/ceph/ceph.client.keyring

ses01:~ # vi /etc/fstab

UUID=5bf92f41-031c-4adb-95b6-6b050331c1ac / ext3 acl,user_xattr 1 1

UUID=fc0c26ed-be46-45df-ab05-96154d348dae swap swap defaults 0 0

UUID=01dfa59c-f8ad-492f-85d3-87dafa8f3bab /var/backup ext3 acl,user_xattr 1 2

UUID=6EC8-E532 /boot/efi vfat umask=0002,utf8=true 0 0

UUID=6EF0-4367 /var/backup/boot/efi vfat umask=0002,utf8=true 0 0

/dev/rbd/rbd/test_rbd /mnt/ceph-rbd0 ext4 defaults,noatime,_netdev 0 0
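For the rbdmap entry and the _netdev fstab line to take effect at boot, the rbdmap service generally has to be enabled as well; a sketch assuming a systemd-based node with ceph-common installed:

```
systemctl enable rbdmap    # map the images listed in /etc/ceph/rbdmap at boot
systemctl start rbdmap
```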

12. Resize the RBD image online; the filesystem capacity stays unchanged afterwards, so the filesystem must also be grown, then check the original data

ses01:~ # rbd resize rbd/test_rbd --size 204800

Resizing image: 100% complete...done.

ses01:~ # resize2fs /dev/rbd0

resize2fs 1.42.11 (09-Jul-2014)

Filesystem at /dev/rbd0 is mounted on /mnt/ceph-rbd0; on-line resizing required

old_desc_blocks = 7, new_desc_blocks = 13

The filesystem on /dev/rbd0 is now 52428800 blocks long.

ses01:~ # df

Filesystem 1K-blocks Used Available Use% Mounted on

devtmpfs 131642164 8 131642156 1% /dev

tmpfs 131650892 144 131650748 1% /dev/shm

tmpfs 131650892 1968928 129681964 2% /run

tmpfs 131650892 0 131650892 0% /sys/fs/cgroup

/dev/sdg2 32900924 8464808 22758140 28% /

/dev/sdg1 151380 4612 146768 4% /boot/efi

/dev/sdg5 32900924 49172 31173776 1% /var/backup

/dev/sdg4 153428 0 153428 0% /var/backup/boot/efi

/dev/sdf1 11242668012 2618758360 8623909652 24% /var/lib/ceph/osd/ceph-5

/dev/sdc1 11242668012 2927768812 8314899200 27% /var/lib/ceph/osd/ceph-2

/dev/sdb1 11242668012 2295713364 8946954648 21% /var/lib/ceph/osd/ceph-1

/dev/sda1 11242668012 3100194064 8142473948 28% /var/lib/ceph/osd/ceph-0

/dev/sde1 11242668012 2510869496 8731798516 23% /var/lib/ceph/osd/ceph-4

/dev/sdd1 11242668012 2356943764 8885724248 21% /var/lib/ceph/osd/ceph-3

tmpfs 26330180 16 26330164 1% /run/user/1000

tmpfs 26330180 0 26330180 0% /run/user/0

/dev/rbd0 206293688 60684 196758568 1% /mnt/ceph-rbd0
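The heading also calls for checking the original data; a minimal check using the file written in step 9:

```
cat /mnt/ceph-rbd0/ceph_rbd_test    # should still print "this is a ceph rbd test"
```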

Supplement: growing the filesystem

Check the mapped RBD image's info:

```

root@node-1:~# rbd -p test_pool info test_image

rbd image 'test_image':

size 102400 kB in 25 objects

order 22 (4096 kB objects)

block_name_prefix: rbd_data.de582a238e1f29

format: 2

features: layering

flags:

```

Check the mounted filesystems:

```

root@node-1:~# df

Filesystem 1K-blocks Used Available Use% Mounted on

udev 65998452 0 65998452 0% /dev

tmpfs 13203516 1331572 11871944 11% /run

/dev/mapper/ubuntu14--x8664--vg-root 311973248 160247404 135980820 55% /

tmpfs 66017572 54636 65962936 1% /dev/shm

tmpfs 5120 0 5120 0% /run/lock

tmpfs 66017572 0 66017572 0% /sys/fs/cgroup

/dev/vda1 515929528 89790944 399907852 19% /data

/dev/sda1 240972 124359 104172 55% /boot

cgmfs 100 0 100 0% /run/cgmanager/fs

tmpfs 13203516 0 13203516 0% /run/user/0

10.122.19.9:/vmdata/image-create 4260974592 3407704064 638505984 85% /image-create

tmpfs 13203516 0 13203516 0% /run/user/1002

tmpfs 13203516 0 13203516 0% /run/user/111

tmpfs 13203516 0 13203516 0% /run/user/1004

tmpfs 13203516 0 13203516 0% /run/user/1001

/dev/rbd0 99150 47569 46461 51% /ceph-rbd0

```

Resize the RBD image:

```

root@node-1:~# rbd resize test_pool/test_image -s 10G

Resizing image: 100% complete...done.

```

blockdev --getsize64 /dev/rbd0 can also be used to check the size of the RBD block device.

```

root@node-1:~# rbd -p test_pool info test_image

rbd image 'test_image':

size 10240 MB in 2560 objects

order 22 (4096 kB objects)

block_name_prefix: rbd_data.de582a238e1f29

format: 2

features: layering

flags:

```

At this point the filesystem capacity has not changed; use resize2fs to grow the ext4 filesystem online:

```

root@node-1:~# resize2fs /dev/rbd0

resize2fs 1.42.13 (17-May-2015)

Filesystem at /dev/rbd0 is mounted on /ceph-rbd0; on-line resizing required

old_desc_blocks = 1, new_desc_blocks = 40

The filesystem on /dev/rbd0 is now 10485760 (1k) blocks long.

```

Check the filesystem mount and capacity again:

```

root@node-1:~# df

Filesystem 1K-blocks Used Available Use% Mounted on

udev 65998452 0 65998452 0% /dev

tmpfs 13203516 1331544 11871972 11% /run

/dev/mapper/ubuntu14--x8664--vg-root 311973248 160255728 135972496 55% /

tmpfs 66017572 54636 65962936 1% /dev/shm

tmpfs 5120 0 5120 0% /run/lock

tmpfs 66017572 0 66017572 0% /sys/fs/cgroup

/dev/vda1 515929528 89790944 399907852 19% /data

/dev/sda1 240972 124359 104172 55% /boot

cgmfs 100 0 100 0% /run/cgmanager/fs

tmpfs 13203516 0 13203516 0% /run/user/0

10.122.19.9:/vmdata/image-create 4260974592 3407704064 638505984 85% /image-create

tmpfs 13203516 0 13203516 0% /run/user/1002

tmpfs 13203516 0 13203516 0% /run/user/111

tmpfs 13203516 0 13203516 0% /run/user/1004

tmpfs 13203516 0 13203516 0% /run/user/1001

/dev/rbd0 10166424 49288 9697871 1% /ceph-rbd0

```

For XFS, run the following after the resize instead of resize2fs:

# xfs_growfs /mnt

13. Unmount, update the automatic mount configuration, and delete the RBD image

umount /mnt/ceph-rbd0
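Before the image can be removed, it also has to be unmapped, and the auto-mount entries from step 11 should be taken out; a sketch assuming the paths configured above:

```
rbd unmap /dev/rbd0        # release the kernel mapping so the image has no watchers left
vi /etc/ceph/rbdmap        # remove the rbd/test_rbd line added in step 11
vi /etc/fstab              # remove the /mnt/ceph-rbd0 line added in step 11
```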

ses01:~ # rbd rm test_rbd

Removing image: 100% complete...done.

14. Create a snapshot and view its information

ses01:~ # rbd snap create rbd/image1@image1_snap

ses01:~ # rbd snap list rbd/image1

SNAPID NAME SIZE

4 image1_snap 1024 MB

ses01:~ # rbd ls rbd -l

NAME SIZE PARENT FMT PROT LOCK

hana01_data 24576G 2

hana01_log 6144G 2

hana02_data 24576G 2

hana02_log 6144G 2

hana03_data 24576G 2

hana03_log 6144G 2

hana04_data 8192G 2

image1 1024M 2

image1@image1_snap 1024M 2

ses01:~ # rbd info rbd/image1

rbd image 'image1':

size 1024 MB in 256 objects

order 22 (4096 kB objects)

block_name_prefix: rbd_data.9c444238e1f29

format: 2

features: layering   (snapshots and clones are supported)

flags:

15. Snapshot rollback test on image1: create file kkk under /mnt/image1 and take snapshot image1_snap; then create file aaa, write data, and take snapshot image1_snap1 (a sketch of these preparation steps follows)
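A minimal sketch of those preparation steps, assuming the image is mapped at /dev/rbd0 and mounted at /mnt/image1 as in the transcript below:

```
mount /dev/rbd0 /mnt/image1
echo "this a good test" > /mnt/image1/kkk     # file that should exist in the first snapshot
sync                                          # flush page cache before snapshotting
rbd snap create rbd/image1@image1_snap
touch /mnt/image1/aaa                         # file that should exist only in the second snapshot
sync
rbd snap create rbd/image1@image1_snap1
umount /mnt/image1                            # unmount before rolling back
```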

I. Roll back to the image1_snap state

ses01:~ # rbd snap rollback rbd/image1@image1_snap

Rolling back to snapshot: 100% complete...done.

Check the files:

ses01:~ # mount /dev/rbd0 /mnt/image1

ses01:~ # cd /mnt/image1/

ses01:/mnt/image1 # ls

kkk lost+found

ses01:/mnt/image1 # more kkk

this a good test

II. Then roll back to the image1_snap1 state

ses01:/mnt/image1 # rbd snap rollback rbd/image1@image1_snap1

Rolling back to snapshot: 100% complete...done.

ses01:/mnt/image1 # ls

aaa kkk lost+found

After each rollback, the filesystem must be remounted (unmount before rolling back and mount again afterwards) for the rolled-back contents to be visible.

16. Delete a snapshot and purge all snapshots

Delete a single snapshot:

rbd snap rm rbd/image1@image1_snap

Purge all snapshots of the image:

ses01:~ # rbd snap purge rbd/image1

Removing all snapshots: 100% complete...done.

17. Protect and unprotect a snapshot

Protect the snapshot:

ses01:~ # rbd snap protect rbd/image1@image_snap1

ses01:~ # rbd snap rm rbd/image1@image_snap1

rbd: snapshot 'image_snap1' is protected from removal.

2017-12-14 17:10:46.858343 7f0965e69e80 -1 librbd::Operations: snapshot is protected

ses01:~ # rbd snap ls rbd/image1

SNAPID NAME SIZE

14 image_snap1 1024 MB

Unprotect it:

ses01:~ # rbd snap unprotect rbd/image1@image_snap1
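After unprotecting, the removal that was refused above goes through:

```
rbd snap rm rbd/image1@image_snap1
```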

18. Clone an image from a snapshot (the source snapshot must be protected first, as in step 17)

ses01:~ # rbd clone rbd/image1@image1_snap rbd/image1-snap_clone

ses01:~ # rbd list

hana01_data

hana01_log

hana02_data

hana02_log

hana03_data

hana03_log

hana04_data

image1

image1-snap_clone
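A clone stays dependent on its protected parent snapshot; if a fully independent image is wanted, the clone can be flattened (a sketch, not part of the original transcript):

```
rbd flatten rbd/image1-snap_clone    # copy all parent data into the clone, detaching it from the snapshot
```

Once flattened, the parent snapshot can be unprotected and removed without affecting the clone.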
