Outsourcing Skills -- Manually Removing and Adding an OSD in Ceph (Luminous)

2023-08-02 18:28:05

Manually removing and adding an OSD in Ceph

Code language: shell
[root@server1 ceph-ansible]# ceph --version
ceph version 12.2.13 (584a20eb0237c657dc0567da126be145106aa47e) luminous (stable)
[root@server1 ceph-ansible]# 

Enter maintenance mode

Code language: shell
ceph osd set noout
ceph osd set nobackfill
ceph osd set norecover
ceph osd set norebalance
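
To confirm the flags actually took effect before any OSD is stopped, the osdmap flags can be checked first (a quick sanity check, not part of the original procedure):

Code language: shell
# all four flags should appear in the osdmap flags line
ceph osd dump | grep flags
# while they are set, cluster health will also report an OSDMAP_FLAGS warning
ceph health detail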

Manually stop the OSD

Code language: shell
# First, gather the OSD information
[root@server3 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME        STATUS REWEIGHT PRI-AFF 
-1       0.14575 root default                             
-3       0.04858     host server1                         
 1   hdd 0.02429         osd.1        up  1.00000 1.00000 
 2   hdd 0.02429         osd.2        up  1.00000 1.00000 
-7       0.04858     host server2                         
 4   hdd 0.02429         osd.4        up  1.00000 1.00000 
 5   hdd 0.02429         osd.5        up  1.00000 1.00000 
-5       0.04858     host server3                         
 0   hdd 0.02429         osd.0        up  1.00000 1.00000 
 3   hdd 0.02429         osd.3        up  1.00000 1.00000 
[root@server3 ~]# 

# Check the OSD mounts
[root@server3 ~]# df -TH
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root xfs        19G  1.5G   17G   9% /
devtmpfs                devtmpfs  969M     0  969M   0% /dev
tmpfs                   tmpfs     982M     0  982M   0% /dev/shm
tmpfs                   tmpfs     982M   11M  972M   2% /run
tmpfs                   tmpfs     982M     0  982M   0% /sys/fs/cgroup
/dev/sda1               xfs       1.1G  149M  915M  14% /boot
tmpfs                   tmpfs     197M     0  197M   0% /run/user/0
/dev/sdb1               xfs       102M  5.6M   96M   6% /var/lib/ceph/osd/ceph-0
/dev/sdc1               xfs       102M  5.6M   96M   6% /var/lib/ceph/osd/ceph-3
[root@server3 ~]# 
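
If the mount point alone is not conclusive, the whoami file inside each OSD data directory records the id of the OSD it belongs to (a small optional check; the path matches the mounts shown above):

Code language: shell
# prints the numeric OSD id stored in the data directory, e.g. 3
cat /var/lib/ceph/osd/ceph-3/whoami
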
Code language: shell
[root@server1 ceph-ansible]# ceph-disk list
/dev/dm-0 other, xfs, mounted on /
/dev/dm-1 other, swap
/dev/sda :
 /dev/sda1 other, xfs, mounted on /boot
 /dev/sda2 other, LVM2_member
/dev/sdb :
 /dev/sdb1 ceph data, active, cluster ceph, osd.1, block /dev/sdb2, block.db /dev/sdd2
 /dev/sdb2 ceph block, for /dev/sdb1
/dev/sdc :
 /dev/sdc1 ceph data, active, cluster ceph, osd.2, block /dev/sdc2, block.db /dev/sdd3
 /dev/sdc2 ceph block, for /dev/sdc1
/dev/sdd :
 /dev/sdd2 ceph block.db, for /dev/sdb1
 /dev/sdd3 ceph block.db, for /dev/sdc1
/dev/sr0 other, unknown
[root@server1 ceph-ansible]#
Code language: shell
systemctl stop ceph-osd@3
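
After stopping the daemon, it is worth confirming that the OSD now reports as down before removing it (an optional check; osd.3 is the example used throughout):

Code language: shell
# the stopped OSD should show STATUS "down" in the tree
ceph osd tree | grep osd.3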

Manually remove the OSD

Code language: shell
[root@server3 ~]# systemctl stop ceph-osd@3
[root@server3 ~]# ceph osd out 3
marked out osd.3. 
[root@server3 ~]# ceph osd crush remove osd.3
removed item id 3 name 'osd.3' from crush map
[root@server3 ~]# ceph auth del osd.3
updated
[root@server3 ~]# ceph osd rm osd.3
removed osd.3
[root@server3 ~]# umount /var/lib/ceph/osd/ceph-3
[root@server3 ~]#
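
On Luminous, the crush remove / auth del / osd rm sequence above can also be collapsed into a single command (an equivalent alternative, shown only for reference):

Code language: shell
# removes osd.3 from the CRUSH map, deletes its auth key and removes it from the osdmap in one step
ceph osd purge 3 --yes-i-really-mean-it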

Exit maintenance mode

Code language: shell
ceph osd unset noout
ceph osd unset nobackfill
ceph osd unset norecover
ceph osd unset norebalance
Code language: shell
[root@server3 ~]# ceph osd unset noout
noout is unset
[root@server3 ~]# ceph osd unset nobackfill
nobackfill is unset
[root@server3 ~]# ceph osd unset norecover
norecover is unset
[root@server3 ~]# ceph osd unset norebalance
norebalance is unset
[root@server3 ~]# 
Code language: shell
[root@server1 ceph-ansible]# ceph -s
  cluster:
    id:     33852410-b6bd-46bc-a163-88a816221ecc
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum server1,server2,server3
    mgr: server1(active), standbys: server3, server2
    mds: cephfs-1/1/1 up  {0=server1=up:active}, 2 up:standby
    osd: 5 osds: 5 up, 5 in
    rgw: 3 daemons active
 
  data:
    pools:   6 pools, 48 pgs
    objects: 210 objects, 4.63KiB
    usage:   5.04GiB used, 119GiB / 125GiB avail
    pgs:     48 active+clean
 
  io:
    recovery: 57B/s, 7objects/s
 
[root@server1 ceph-ansible]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME        STATUS REWEIGHT PRI-AFF 
-1       0.12146 root default                             
-3       0.04858     host server1                         
 1   hdd 0.02429         osd.1        up  1.00000 1.00000 
 2   hdd 0.02429         osd.2        up  1.00000 1.00000 
-7       0.04858     host server2                         
 4   hdd 0.02429         osd.4        up  1.00000 1.00000 
 5   hdd 0.02429         osd.5        up  1.00000 1.00000 
-5       0.02429     host server3                         
 0   hdd 0.02429         osd.0        up  1.00000 1.00000 
[root@server1 ceph-ansible]# 
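
The tree above confirms osd.3 is gone and server3's host weight has dropped from 0.04858 to 0.02429. To put the freed disk back in as a new OSD on this ceph-disk/BlueStore deployment, a minimal sketch would look like the following (the device name /dev/sdc is an assumption taken from the earlier mount listing on server3; verify it before wiping anything):

Code language: shell
# WARNING: zap destroys the partition table and all data on the device (assumed /dev/sdc)
ceph-disk zap /dev/sdc
# prepare a new BlueStore OSD; udev normally activates it automatically afterwards
ceph-disk prepare --bluestore /dev/sdc
# if it does not come up on its own, activate the data partition manually
ceph-disk activate /dev/sdc1
# the new OSD should then appear up/in
ceph osd tree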
