
Creating a new OSD: carve out logical volumes with LVM and specify them at creation

Posted by 穆琪 on 2018-10-16 23:05:42



Environment

Tested: the OSD was created successfully in my virtual machine cluster, running:

    CentOS 7

    Ceph Mimic (13.2.2)

Steps

1. Attach a new disk to the host.

2. Wipe it with ceph-deploy disk zap ceph01 /dev/sdc.

3. Run the following commands to carve out the logical volumes (one each for the WAL, the DB, and the data):

# pvcreate /dev/sdc

 Physical volume "/dev/sdc" successfully created.

# vgcreate ceph-pool /dev/sdc

 Volume group "ceph-pool" successfully created

# lvcreate -n osd4.wal -L 1G ceph-pool

Logical volume "osd4.wal" created.

# lvcreate -n osd4.db -L 1G ceph-pool

Logical volume "osd4.db" created.

# lvcreate -n osd4 -l 100%FREE ceph-pool

Logical volume "osd4" created.



# ceph-deploy osd create --data ceph-pool/osd4 --block-db ceph-pool/osd4.db --block-wal ceph-pool/osd4.wal --bluestore ceph01

[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf

[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data ceph-pool/osd4 --block-db ceph-pool/osd4.db --block-wal ceph-pool/osd4.wal --bluestore ceph01

[ceph_deploy.cli][INFO  ] ceph-deploy options:

[ceph_deploy.cli][INFO  ]  verbose                       : False

[ceph_deploy.cli][INFO  ]  bluestore                     : True

[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f398ad1b8c0>

[ceph_deploy.cli][INFO  ]  cluster                       : ceph

[ceph_deploy.cli][INFO  ]  fs_type                       : xfs

[ceph_deploy.cli][INFO  ]  block_wal                     : ceph-pool/osd4.wal

[ceph_deploy.cli][INFO  ]  default_release               : False

[ceph_deploy.cli][INFO  ]  username                      : None

[ceph_deploy.cli][INFO  ]  journal                       : None

[ceph_deploy.cli][INFO  ]  subcommand                    : create

[ceph_deploy.cli][INFO  ]  host                          : ceph01

[ceph_deploy.cli][INFO  ]  filestore                     : None

[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f398b166c80>

[ceph_deploy.cli][INFO  ]  ceph_conf                     : None

[ceph_deploy.cli][INFO  ]  zap_disk                      : False

[ceph_deploy.cli][INFO  ]  data                          : ceph-pool/osd4

[ceph_deploy.cli][INFO  ]  block_db                      : ceph-pool/osd4.db

[ceph_deploy.cli][INFO  ]  dmcrypt                       : False

[ceph_deploy.cli][INFO  ]  overwrite_conf                : False

[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys

[ceph_deploy.cli][INFO  ]  quiet                         : False

[ceph_deploy.cli][INFO  ]  debug                         : False

[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device ceph-pool/osd4

[ceph01][DEBUG ] connected to host: ceph01

[ceph01][DEBUG ] detect platform information from remote host

[ceph01][DEBUG ] detect machine type

[ceph01][DEBUG ] find the location of an executable

[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.5.1804 Core

[ceph_deploy.osd][DEBUG ] Deploying osd to ceph01

[ceph01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf

[ceph01][DEBUG ] find the location of an executable

[ceph01][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data ceph-pool/osd4 --block.wal ceph-pool/osd4.wal --block.db ceph-pool/osd4.db

[ceph01][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key

[ceph01][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 9c4f077d-35fd-47ce-b882-378a27c43e8d

[ceph01][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key

[ceph01][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4

[ceph01][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-4

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-pool/osd4

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-5

[ceph01][DEBUG ] Running command: /bin/ln -s /dev/ceph-pool/osd4 /var/lib/ceph/osd/ceph-4/block

[ceph01][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap

[ceph01][DEBUG ]  stderr: got monmap epoch 1

[ceph01][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-4/keyring --create-keyring --name osd.4 --add-key AQAyWsVbXVDXGxAAabksGl6ZyjtMP28y3jYpkw==

[ceph01][DEBUG ]  stdout: creating /var/lib/ceph/osd/ceph-4/keyring

[ceph01][DEBUG ] added entity osd.4 auth auth(auid = 18446744073709551615 key=AQAyWsVbXVDXGxAAabksGl6ZyjtMP28y3jYpkw== with 0 caps)

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-pool/osd4.wal

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-3

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-pool/osd4.db

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-4

[ceph01][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --bluestore-block-wal-path /dev/ceph-pool/osd4.wal --bluestore-block-db-path /dev/ceph-pool/osd4.db --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 9c4f077d-35fd-47ce-b882-378a27c43e8d --setuser ceph --setgroup ceph

[ceph01][DEBUG ] --> ceph-volume lvm prepare successful for: ceph-pool/osd4

[ceph01][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-pool/osd4 --path /var/lib/ceph/osd/ceph-4 --no-mon-config

[ceph01][DEBUG ] Running command: /bin/ln -snf /dev/ceph-pool/osd4 /var/lib/ceph/osd/ceph-4/block

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-5

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4

[ceph01][DEBUG ] Running command: /bin/ln -snf /dev/ceph-pool/osd4.db /var/lib/ceph/osd/ceph-4/block.db

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-pool/osd4.db

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-4

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block.db

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-4

[ceph01][DEBUG ] Running command: /bin/ln -snf /dev/ceph-pool/osd4.wal /var/lib/ceph/osd/ceph-4/block.wal

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-pool/osd4.wal

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-3

[ceph01][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block.wal

[ceph01][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-3

[ceph01][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-4-9c4f077d-35fd-47ce-b882-378a27c43e8d

[ceph01][DEBUG ]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-4-9c4f077d-35fd-47ce-b882-378a27c43e8d.service to /usr/lib/systemd/system/ceph-volume@.service.

[ceph01][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@4

[ceph01][DEBUG ]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@4.service to /usr/lib/systemd/system/ceph-osd@.service.

[ceph01][DEBUG ] Running command: /bin/systemctl start ceph-osd@4

[ceph01][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 4

[ceph01][DEBUG ] --> ceph-volume lvm create successful for: ceph-pool/osd4

[ceph01][INFO  ] checking OSD status...

[ceph01][DEBUG ] find the location of an executable

[ceph01][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json

[ceph01][WARNIN] there is 1 OSD down

[ceph01][WARNIN] there is 1 OSD out

[ceph_deploy.osd][DEBUG ] Host ceph01 is now ready for osd use.
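

The "there is 1 OSD down / 1 OSD out" warning appears to refer to an OSD that was already down in this cluster (osd.3 in the tree further down), not to the newly created OSD. To confirm the new daemon actually started on ceph01 (the OSD id 4 comes from the log above), a quick check:

# systemctl status ceph-osd@4

# ceph osd stat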



# cd /var/lib/ceph/osd/ceph-4

# ll -a

total 48

drwxrwxrwt 2 ceph ceph 340 Oct 16 11:25 .

drwxr-x--- 4 ceph ceph  34 Oct 16 11:25 ..

-rw-r--r-- 1 ceph ceph 402 Oct 16 11:25 activate.monmap

lrwxrwxrwx 1 ceph ceph  19 Oct 16 11:25 block -> /dev/ceph-pool/osd4

lrwxrwxrwx 1 ceph ceph  22 Oct 16 11:25 block.db -> /dev/ceph-pool/osd4.db

lrwxrwxrwx 1 ceph ceph  23 Oct 16 11:25 block.wal -> /dev/ceph-pool/osd4.wal

-rw-r--r-- 1 ceph ceph   2 Oct 16 11:25 bluefs

-rw-r--r-- 1 ceph ceph  37 Oct 16 11:25 ceph_fsid

-rw-r--r-- 1 ceph ceph  37 Oct 16 11:25 fsid

-rw------- 1 ceph ceph  55 Oct 16 11:25 keyring

-rw-r--r-- 1 ceph ceph   8 Oct 16 11:25 kv_backend

-rw-r--r-- 1 ceph ceph  21 Oct 16 11:25 magic

-rw-r--r-- 1 ceph ceph   4 Oct 16 11:25 mkfs_done

-rw-r--r-- 1 ceph ceph  41 Oct 16 11:25 osd_key

-rw-r--r-- 1 ceph ceph   6 Oct 16 11:25 ready

-rw-r--r-- 1 ceph ceph  10 Oct 16 11:25 type

-rw-r--r-- 1 ceph ceph   2 Oct 16 11:25 whoami
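

The block, block.db, and block.wal symlinks show that the data, DB, and WAL each landed on their own logical volume. Another way to see the same mapping, as recorded by ceph-volume on the OSD host, is:

# ceph-volume lvm list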



# lsblk

NAME                                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda                                                                                                     8:0    0   50G  0 disk

├─sda1                                                                                                  8:1    0    1G  0 part /boot

└─sda2                                                                                                  8:2    0   49G  0 part

  ├─centos-root                                                                                       253:0    0   47G  0 lvm  /

  └─centos-swap                                                                                       253:1    0    2G  0 lvm  [SWAP]

sdb                                                                                                     8:16   0   10G  0 disk

sdc                                                                                                     8:32   0   10G  0 disk

├─ceph--pool-osd4.wal                                                                                 253:3    0    1G  0 lvm

├─ceph--pool-osd4.db                                                                                  253:4    0    1G  0 lvm

└─ceph--pool-osd4                                                                                     253:5    0    8G  0 lvm

sdd                                                                                                     8:48   0   20G  0 disk

└─ceph--4e49b940--c5b1--4cef--83d2--1219d29d48b2-osd--block--24c3b06d--3325--45de--8240--f94ef4587bc8 253:2    0   20G  0 lvm

sr0

4. Check the OSD tree:

# ceph osd tree

ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF

-1       0.07214 root default

-3       0.03317     host ceph01

 0   hdd 0.01949         osd.0       up  1.00000 1.00000

 3   hdd 0.00490         osd.3     down        0 1.00000

 4   hdd 0.00879         osd.4       up  1.00000 1.00000

-5       0.01949     host ceph02

 1   hdd 0.01949         osd.1       up  1.00000 1.00000

-7       0.01949     host ceph03

 2   hdd 0.01949         osd.2       up  1.00000 1.00000
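

osd.4 is now up, with a CRUSH weight derived from the size of its block device. To double-check its raw capacity and utilization alongside the rest of the cluster:

# ceph osd df tree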