Deploying Ceph on Rocky Linux 8.5

2024-08-07 12:55:37

Environment

192.168.0.14	ceph-01
192.168.0.15	ceph-02
192.168.0.16	ceph-03
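
cephadm and the later ssh-copy-id / ceph orch host add steps assume these names resolve on every node. A minimal sketch (an added step, not spelled out in the original) that appends the mappings to /etc/hosts on each host:

# Run on all three nodes so the hostnames resolve
cat >> /etc/hosts <<'EOF'
192.168.0.14	ceph-01
192.168.0.15	ceph-02
192.168.0.16	ceph-03
EOF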

Configure hostnames

[root@ceph-01 ~]# hostnamectl set-hostname ceph-01
[root@ceph-02 ~]# hostnamectl set-hostname ceph-02
[root@ceph-03 ~]# hostnamectl set-hostname ceph-03

Configure passwordless SSH

[root@ceph-01 ~]# ssh-keygen -t rsa
[root@ceph-01 ~]# ssh-copy-id 192.168.0.14
[root@ceph-01 ~]# ssh-copy-id 192.168.0.15
[root@ceph-01 ~]# ssh-copy-id 192.168.0.16
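
A quick optional check (assuming the keys were accepted above) that every node can now be reached without a password prompt:

for ip in 192.168.0.14 192.168.0.15 192.168.0.16; do
  ssh -o BatchMode=yes root@"$ip" hostname
done
# Each iteration should print the remote hostname without prompting for a password.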

Install Docker and dependencies

# Install on all three nodes
[root@ceph-01 ~]# dnf install epel* -y
[root@ceph-01 ~]# dnf install ceph-mon ceph-osd ceph-mds ceph-radosgw -y
[root@ceph-01 ~]# curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo
[root@ceph-01 ~]# dnf install docker-ce lvm2 -y
[root@ceph-01 ~]# systemctl enable --now docker
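
The comment above says all three nodes need these packages. One way to avoid repeating the commands by hand, assuming the passwordless SSH configured earlier, is to push the repo file and install from ceph-01 (a sketch covering only the Docker-related packages, not part of the original steps):

for host in ceph-02 ceph-03; do
  # reuse the repo file already downloaded on ceph-01
  scp /etc/yum.repos.d/docker-ce.repo root@"$host":/etc/yum.repos.d/
  ssh root@"$host" "dnf install -y epel-release docker-ce lvm2 && systemctl enable --now docker"
done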

Time synchronization

# Server side (192.168.0.14)
[root@ceph-01 ~]# dnf install chrony -y

# Edit the configuration
[root@ceph-01 ~]# vim /etc/chrony.conf

pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.0.0/24
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC

[root@ceph-01 ~]# systemctl enable --now chronyd 

# Clients
dnf install chrony -y
vim /etc/chrony.conf

pool 192.168.0.14 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony

systemctl enable --now chronyd

# Verify on the clients
chronyc sources -v
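
Besides listing the sources, chronyc tracking reports the current offset from the selected server; cephadm refuses to bootstrap unless time synchronization is in place, so it is worth confirming the offset is small (a quick optional check):

chronyc tracking
# "System time" and "Last offset" should be in the millisecond range.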

Install the Ceph cluster

# Install the cephadm tool
[root@ceph-01 ~]# curl --silent --remote-name --location https://mirrors.chenby.cn/https://github.com/ceph/ceph/raw/quincy/src/cephadm/cephadm
[root@ceph-01 ~]# chmod +x cephadm

# Set up the repository
[root@ceph-01 ~]# ./cephadm add-repo --release 17.2.5
[root@ceph-01 ~]# sed -i 's#download.ceph.com#mirrors.ustc.edu.cn/ceph#' /etc/yum.repos.d/ceph.repo 
[root@ceph-01 ~]# ./cephadm install

# Bootstrap the cluster
[root@ceph-01 ~]# cephadm bootstrap --mon-ip 192.168.0.14
Creating directory /etc/ceph for ceph.conf
Verifying podman|docker is present...
Verifying lvm2 is present...
Verifying time synchronization is in place...
Unit chronyd.service is enabled and running
Repeating the final host check...
docker (/usr/bin/docker) is present
systemctl is present
lvcreate is present
Unit chronyd.service is enabled and running
Host looks OK
Cluster fsid: 642858bc-9714-11ed-9e73-fa163e075f6d
Verifying IP 192.168.0.14 port 3300 ...
Verifying IP 192.168.0.14 port 6789 ...
Mon IP `192.168.0.14` is in CIDR network `192.168.0.0/24`
Mon IP `192.168.0.14` is in CIDR network `192.168.0.0/24`
Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
Pulling container image quay.io/ceph/ceph:v17...
Ceph version: ceph version 17.2.5 (98318ae89f1a893a6ded3a640405cdbb33e08757) quincy (stable)
Extracting ceph user uid/gid from container image...
Creating initial keys...
Creating initial monmap...
Creating mon...
Waiting for mon to start...
Waiting for mon...
mon is available
Assimilating anything we can from ceph.conf...
Generating new minimal ceph.conf...
Restarting the monitor...
Setting mon public_network to 192.168.0.0/24
Wrote config to /etc/ceph/ceph.conf
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Creating mgr...
Verifying port 9283 ...
Waiting for mgr to start...
Waiting for mgr...
mgr not available, waiting (1/15)...
mgr not available, waiting (2/15)...
mgr not available, waiting (3/15)...
mgr not available, waiting (4/15)...
mgr is available
Enabling cephadm module...
Waiting for the mgr to restart...
Waiting for mgr epoch 5...
mgr epoch 5 is available
Setting orchestrator backend to cephadm...
Generating ssh key...
Wrote public SSH key to /etc/ceph/ceph.pub
Adding key to root@localhost authorized_keys...
Adding host ceph-01...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
Enabling the dashboard module...
Waiting for the mgr to restart...
Waiting for mgr epoch 9...
mgr epoch 9 is available
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:

	     URL: https://ceph-01:8443/
	    User: admin
	Password: k6qfw0g8l8

Enabling client.admin keyring and conf on hosts with "admin" label
Saving cluster configuration to /var/lib/ceph/642858bc-9714-11ed-9e73-fa163e075f6d/config directory
Enabling autotune for osd_memory_target
You can access the Ceph CLI as following in case of multi-cluster or non-default config:

	sudo /usr/sbin/cephadm shell --fsid 642858bc-9714-11ed-9e73-fa163e075f6d -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

Or, if you are only running a single cluster on this host:

	sudo /usr/sbin/cephadm shell 

Please consider enabling telemetry to help improve Ceph:

	ceph telemetry on

For more information see:

	https://docs.ceph.com/docs/master/mgr/telemetry/

Bootstrap complete.
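
The dashboard credentials are only printed once during bootstrap. If the URL needs to be looked up again later, the mgr can report its service endpoints (a hedged aside; the generated admin password cannot be recovered this way):

[root@ceph-01 ~]# cephadm shell -- ceph mgr services
# Prints the dashboard and prometheus endpoints as JSON.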

View the containers

[root@ceph-01 ~]# docker images
REPOSITORY                        TAG       IMAGE ID       CREATED         SIZE
quay.io/ceph/ceph                 v17       cc65afd6173a   3 months ago    1.36GB
quay.io/prometheus/alertmanager   v0.23.0   ba2b418f427c   17 months ago   57.5MB

[root@ceph-01 ~]# docker ps
CONTAINER ID   IMAGE                                     COMMAND                  CREATED              STATUS              PORTS     NAMES
9126eb49b75d   quay.io/ceph/ceph                         "/usr/bin/ceph-crash…"   About a minute ago   Up About a minute             ceph-642858bc-9714-11ed-9e73-fa163e075f6d-crash-ceph-01
5793f4280687   quay.io/prometheus/alertmanager:v0.23.0   "/bin/alertmanager -…"   About a minute ago   Up About a minute             ceph-642858bc-9714-11ed-9e73-fa163e075f6d-alertmanager-ceph-01
36aa03ac5393   quay.io/ceph/ceph:v17                     "/usr/bin/ceph-mgr -…"   3 minutes ago        Up 3 minutes                  ceph-642858bc-9714-11ed-9e73-fa163e075f6d-mgr-ceph-01-fsikhk
78345de76bca   quay.io/ceph/ceph:v17                     "/usr/bin/ceph-mon -…"   3 minutes ago        Up 3 minutes                  ceph-642858bc-9714-11ed-9e73-fa163e075f6d-mon-ceph-01

Use the cephadm shell

# Enter the containerized shell
[root@ceph-01 ~]# cephadm shell
Inferring fsid 642858bc-9714-11ed-9e73-fa163e075f6d
Inferring config /var/lib/ceph/642858bc-9714-11ed-9e73-fa163e075f6d/mon.ceph-01/config
Using ceph image with id 'cc65afd6173a' and tag 'v17' created on 2022-10-18 07:41:41 +0800 CST
quay.io/ceph/ceph@sha256:0560b16bec6e84345f29fb6693cd2430884e6efff16a95d5bdd0bb06d7661c45

[ceph: root@ceph-01 /]# ceph -s
  cluster:
    id:     642858bc-9714-11ed-9e73-fa163e075f6d
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-01 (age 6m)
    mgr: ceph-01.fsikhk(active, since 99s)
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
      
# List the components running in the cluster (including other nodes)
[ceph: root@ceph-01 /]# ceph orch ps
NAME                   HOST     PORTS        STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION  IMAGE ID      CONTAINER ID  
alertmanager.ceph-01   ceph-01  *:9093,9094  running (20s)    11s ago   5m    16.2M        -           ba2b418f427c  171435b3d2fd  
crash.ceph-01          ceph-01               running (5m)     11s ago   5m    6920k        -  17.2.5   cc65afd6173a  9126eb49b75d  
grafana.ceph-01        ceph-01  *:3000       running (17s)    11s ago   2m    37.9M        -  8.3.5    dad864ee21e9  19b27ffbe53d  
mgr.ceph-01.fsikhk     ceph-01  *:9283       running (6m)     11s ago   6m     455M        -  17.2.5   cc65afd6173a  36aa03ac5393  
mon.ceph-01            ceph-01               running (6m)     11s ago   6m    43.4M    2048M  17.2.5   cc65afd6173a  78345de76bca  
node-exporter.ceph-01  ceph-01  *:9100       running (2m)     11s ago   2m    9748k        -           1dbe0e931976  fc01fd2a4a22  
prometheus.ceph-01     ceph-01  *:9095       running (29s)    11s ago  29s    27.5M        -           514e6a882f6e  f0f6a7be2cf8  

# Check the status of a particular component
[ceph: root@ceph-01 /]# ceph orch ps --daemon-type mon
NAME         HOST     PORTS  STATUS        REFRESHED  AGE  MEM USE  MEM LIM  VERSION  IMAGE ID      CONTAINER ID  
mon.ceph-01  ceph-01         running (7m)    44s ago   7m    43.4M    2048M  17.2.5   cc65afd6173a  78345de76bca  

# Exit the shell
[ceph: root@ceph-01 /]# exit
exit

# A second way to run ceph commands
[root@ceph-01 ~]# cephadm shell -- ceph -s
Inferring fsid 642858bc-9714-11ed-9e73-fa163e075f6d
Inferring config /var/lib/ceph/642858bc-9714-11ed-9e73-fa163e075f6d/mon.ceph-01/config
Using ceph image with id 'cc65afd6173a' and tag 'v17' created on 2022-10-18 07:41:41 +0800 CST
quay.io/ceph/ceph@sha256:0560b16bec6e84345f29fb6693cd2430884e6efff16a95d5bdd0bb06d7661c45
  cluster:
    id:     642858bc-9714-11ed-9e73-fa163e075f6d
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-01 (age 8m)
    mgr: ceph-01.fsikhk(active, since 3m)
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
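
The HEALTH_WARN shown above is expected at this stage: no OSDs have been added yet. To see the detailed reason behind any warning, the same containerized invocation can be used (a quick check using only commands already shown):

[root@ceph-01 ~]# cephadm shell -- ceph health detail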

Install the ceph-common package

# Install
[root@ceph-01 ~]# cephadm install ceph-common
Installing packages ['ceph-common']...

# Check the version
[root@ceph-01 ~]# ceph -v
ceph version 17.2.5 (98318ae89f1a893a6ded3a640405cdbb33e08757) quincy (stable)

# Distribute the cluster's public SSH key so the other nodes can be managed
[root@ceph-01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-02
[root@ceph-01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-03

Create mon and mgr

# Add the other hosts to the cluster
[root@ceph-01 ~]# ceph orch host add ceph-02
[root@ceph-01 ~]# ceph orch host add ceph-03

# List the hosts in the cluster
[root@ceph-01 ~]# ceph orch host ls
HOST     ADDR          LABELS  STATUS  
ceph-01  192.168.0.14  _admin          
ceph-02  192.168.0.15                  
ceph-03  192.168.0.16                  
3 hosts in cluster

# By default a Ceph cluster allows up to 5 mons and 2 mgrs; this can be adjusted manually with ceph orch apply
[root@ceph-01 ~]# ceph orch apply mon --placement="3 ceph-01 ceph-02 ceph-03"
[root@ceph-01 ~]# ceph orch apply mgr --placement="3 ceph-01 ceph-02 ceph-03"

[root@ceph-01 ~]# ceph orch ls
NAME           PORTS        RUNNING  REFRESHED  AGE  PLACEMENT                        
alertmanager   ?:9093,9094      1/1  4m ago     21m  count:1                          
crash                           1/2  4m ago     21m  *                                
grafana        ?:3000           1/1  4m ago     21m  count:1                          
mgr                             1/3  4m ago     70s  ceph-01;ceph-02;ceph-03;count:3  
mon                             1/3  4m ago     82s  ceph-01;ceph-02;ceph-03;count:3  
node-exporter  ?:9100           1/2  4m ago     21m  *                                
prometheus     ?:9095           1/1  4m ago     21m  count:1                     
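
The RUNNING column still shows 1/3 for mon and mgr because the new daemons are being scheduled onto ceph-02 and ceph-03. To follow the rollout, re-run the listings until the placements are satisfied (uses only commands already introduced):

watch -n 5 "ceph orch ls"
ceph orch ps --daemon-type mon
ceph orch ps --daemon-type mgr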

Create OSDs

[root@ceph-01 ~]# ceph orch daemon add osd ceph-01:/dev/vdb 
Created osd(s) 0 on host 'ceph-01'
[root@ceph-01 ~]# ceph orch daemon add osd ceph-02:/dev/vdb 
Created osd(s) 1 on host 'ceph-02'
[root@ceph-01 ~]# ceph orch daemon add osd ceph-03:/dev/vdb 
Created osd(s) 2 on host 'ceph-03'
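
ceph osd tree confirms the three OSDs are up and mapped to their hosts. As an alternative to adding devices one by one, the orchestrator can also consume every eligible empty disk automatically; treat the second command as optional, since it applies to all hosts:

[root@ceph-01 ~]# ceph osd tree
[root@ceph-01 ~]# ceph orch apply osd --all-available-devices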

Create MDS

# First create the CephFS pools; if the PG count is not specified, it is auto-tuned by default
# The PG count of a single pool can be estimated as: total PGs = (number of OSDs * 100) / max replica size / number of pools
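# As a worked example (an illustration added here, not from the original): with 3 OSDs, a replica
# size of 3 and 2 CephFS pools, (3 * 100) / 3 / 2 = 50 PGs per pool, which is then rounded to a
# nearby power of two; 16 is used below as a small value suitable for this test cluster.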
[root@ceph-01 ~]# ceph osd pool create cephfs_data 16
pool 'cephfs_data' created

[root@ceph-01 ~]# ceph osd pool create cephfs_metadata 16
pool 'cephfs_metadata' created

[root@ceph-01 ~]# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 3 and data pool 2

[root@ceph-01 ~]# ceph df
--- RAW STORAGE ---
CLASS    SIZE   AVAIL     USED  RAW USED  %RAW USED
hdd    30 GiB  30 GiB  133 MiB   133 MiB       0.43
TOTAL  30 GiB  30 GiB  133 MiB   133 MiB       0.43
 
--- POOLS ---
POOL                         ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr                          1    1  449 KiB        2  1.3 MiB      0    9.5 GiB
cephfs_data                   2   32      0 B        0      0 B      0    9.5 GiB
cephfs_metadata               3   16  4.3 KiB       22   96 KiB      0    9.5 GiB
.rgw.root                     4   32  2.2 KiB       13  144 KiB      0    9.5 GiB
cn-east-1.rgw.log             5   32   23 KiB      306  1.9 MiB      0    9.5 GiB
cn-east-1.rgw.control         6   32      0 B        8      0 B      0    9.5 GiB
cn-east-1.rgw.meta            7   32  1.3 KiB        7   72 KiB      0    9.5 GiB
cn-east-1.rgw.buckets.index   8   32      0 B       11      0 B      0    9.5 GiB

# Deploy the mds component
# cephfs: the file system name
# --placement: how many mds daemons the cluster needs, followed by the host names
[root@ceph-01 ~]# ceph orch apply mds cephfs --placement="3 ceph-01 ceph-02 ceph-03"
Scheduled mds.cephfs update...

# Check that each node has started its mds container; ceph orch ps can also show the containers running on a specific node
[root@ceph-01 ~]# ceph orch ps --daemon-type mds
NAME                       HOST     PORTS  STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION  IMAGE ID      CONTAINER ID  
mds.cephfs.ceph-01.wvhlor  ceph-01         running (80s)    72s ago  81s    11.8M        -  17.2.5   cc65afd6173a  cf05e87d5f3d  
mds.cephfs.ceph-02.idubtl  ceph-02         running (79s)    73s ago  79s    11.5M        -  17.2.5   cc65afd6173a  b766479a4ef2  
mds.cephfs.ceph-03.woyaku  ceph-03         running (82s)    73s ago  82s    13.5M        -  17.2.5   cc65afd6173a  9db7d1a0fedf  
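
With the mds daemons up, the file system itself can be inspected; one mds should be active and the others standby (standard Ceph CLI commands):

[root@ceph-01 ~]# ceph fs status cephfs
[root@ceph-01 ~]# ceph mds stat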

Create RGW

# Create a realm
[root@ceph-01 ~]# radosgw-admin realm create --rgw-realm=myorg --default
{
    "id": "4cc868f8-462a-4491-845b-9e91d6a66564",
    "name": "myorg",
    "current_period": "f560e7b3-7b1b-45f5-adb6-57c91a2ae522",
    "epoch": 1
}

# Create a zonegroup
[root@ceph-01 ~]# radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
{
    "id": "6a1f0262-d134-4d16-bb08-f818feb42695",
    "name": "default",
    "api_name": "default",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "",
    "zones": [],
    "placement_targets": [],
    "default_placement": "",
    "realm_id": "4cc868f8-462a-4491-845b-9e91d6a66564",
    "sync_policy": {
        "groups": []
    }
}

# Create a zone
[root@ceph-01 ~]# radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=cn-east-1 --master --default
{
    "id": "6fc134ed-8f14-4c8d-a315-b2948d0a06cb",
    "name": "cn-east-1",
    "domain_root": "cn-east-1.rgw.meta:root",
    "control_pool": "cn-east-1.rgw.control",
    "gc_pool": "cn-east-1.rgw.log:gc",
    "lc_pool": "cn-east-1.rgw.log:lc",
    "log_pool": "cn-east-1.rgw.log",
    "intent_log_pool": "cn-east-1.rgw.log:intent",
    "usage_log_pool": "cn-east-1.rgw.log:usage",
    "roles_pool": "cn-east-1.rgw.meta:roles",
    "reshard_pool": "cn-east-1.rgw.log:reshard",
    "user_keys_pool": "cn-east-1.rgw.meta:users.keys",
    "user_email_pool": "cn-east-1.rgw.meta:users.email",
    "user_swift_pool": "cn-east-1.rgw.meta:users.swift",
    "user_uid_pool": "cn-east-1.rgw.meta:users.uid",
    "otp_pool": "cn-east-1.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "cn-east-1.rgw.buckets.index",
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "cn-east-1.rgw.buckets.data"
                    }
                },
                "data_extra_pool": "cn-east-1.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "realm_id": "4cc868f8-462a-4491-845b-9e91d6a66564",
    "notif_pool": "cn-east-1.rgw.log:notif"
}

# Deploy radosgw daemons for the given realm and zone
[root@ceph-01 ~]# ceph orch apply rgw myorg cn-east-1 --placement="3 ceph-01 ceph-02 ceph-03"
Scheduled rgw.myorg update...

# Verify that the rgw containers have started on each node
[root@ceph-01 ~]# ceph orch ps --daemon-type rgw
NAME                      HOST     PORTS  STATUS        REFRESHED  AGE  MEM USE  MEM LIM  VERSION  IMAGE ID      CONTAINER ID  
rgw.myorg.ceph-01.ijvgni  ceph-01  *:80   running (8m)    33s ago   8m    90.8M        -  17.2.5   cc65afd6173a  9cc08bcf3ff1  
rgw.myorg.ceph-02.nbxmae  ceph-02  *:80   running (8m)    48s ago   8m    97.1M        -  17.2.5   cc65afd6173a  2afe3832e810  
rgw.myorg.ceph-03.gpxahk  ceph-03  *:80   running (8m)    50s ago   8m    92.5M        -  17.2.5   cc65afd6173a  66ca66f2f50d
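
To exercise the gateway over S3, a user with access and secret keys is required. A minimal sketch (testuser is a hypothetical name) followed by a quick check that the gateway answers on port 80:

[root@ceph-01 ~]# radosgw-admin user create --uid=testuser --display-name="Test User"
# The JSON output includes the generated access_key and secret_key.
[root@ceph-01 ~]# curl -s http://ceph-01
# An anonymous request should return a small ListAllMyBucketsResult XML document.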

Install the ceph-common package on all nodes

[root@ceph-01 ~]# scp /etc/yum.repos.d/ceph.repo ceph-02:/etc/yum.repos.d/
[root@ceph-01 ~]# scp /etc/yum.repos.d/ceph.repo ceph-03:/etc/yum.repos.d/

[root@ceph-02 ~]# yum install ceph-common -y
[root@ceph-03 ~]# yum install ceph-common -y

[root@ceph-01 ~]# scp /etc/ceph/ceph.conf ceph-02:/etc/ceph/
[root@ceph-01 ~]# scp /etc/ceph/ceph.conf ceph-03:/etc/ceph/

[root@ceph-01 ~]# scp /etc/ceph/ceph.client.admin.keyring ceph-02:/etc/ceph/
[root@ceph-01 ~]# scp /etc/ceph/ceph.client.admin.keyring ceph-03:/etc/ceph/
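
With the config and admin keyring copied, the other nodes can query the cluster directly; a quick check run from ceph-01 over the passwordless SSH set up earlier:

[root@ceph-01 ~]# ssh root@ceph-02 ceph -s
[root@ceph-01 ~]# ssh root@ceph-03 ceph -s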

Test

[root@ceph-01 ~]# ceph -s
  cluster:
    id:     642858bc-9714-11ed-9e73-fa163e075f6d
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-01,ceph-03,ceph-02 (age 10m)
    mgr: ceph-03.dkhqhc(active, since 10m), standbys: ceph-02.vokncq, ceph-01.fsikhk
    mds: 1/1 daemons up, 2 standby
    osd: 3 osds: 3 up (since 10m), 3 in (since 60m)
    rgw: 3 daemons active (3 hosts, 1 zones)
 
  data:
    volumes: 1/1 healthy
    pools:   7 pools, 177 pgs
    objects: 224 objects, 459 KiB
    usage:   118 MiB used, 30 GiB / 30 GiB avail
    pgs:     177 active+clean

Common commands

# List the services running in the cluster
ceph orch ls

# List the hosts in the cluster
ceph orch host ls

# List detailed information for the containers in the cluster
ceph orch ps

# Adjust the number of daemons for a component
ceph orch apply mon --placement="3 node1 node2 node3"

# --daemon-type: specify which component to show
ceph orch ps --daemon-type rgw

# Add a label to a host
ceph orch host label add node1 mon

# Tell cephadm to deploy mons according to the label; afterwards only hosts carrying the mon label will run mons, although mons that are already running are not shut down immediately
ceph orch apply mon label:mon

# List the storage devices in the cluster
ceph orch device ls

# For example, to deploy a second monitor on newhost1 at IP address 10.1.2.123, and a third monitor on newhost2 somewhere in the 10.1.2.0/24 network:
ceph orch apply mon --unmanaged    # disable automated mon deployment
ceph orch daemon add mon newhost1:10.1.2.123
ceph orch daemon add mon newhost2:10.1.2.0/24

Enable CephFS

# Make sure at least one node has the mds service enabled. The PG count of a single pool can be estimated as: total PGs = (number of OSDs * 100) / max replica size / number of pools
# Run this on any one node of the Ceph cluster.
# First create the CephFS pools; if the PG count is not specified, it is auto-tuned by default
[root@ceph-01 ~]# ceph osd pool create cephfs_data 16
pool 'cephfs_data' created

[root@ceph-01 ~]# ceph osd pool create cephfs_metadata 16
pool 'cephfs_metadata' created

[root@ceph-01 ~]# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 3 and data pool 2

[root@ceph-01 ~]# ceph df
--- RAW STORAGE ---
CLASS    SIZE   AVAIL     USED  RAW USED  %RAW USED
hdd    30 GiB  30 GiB  133 MiB   133 MiB       0.43
TOTAL  30 GiB  30 GiB  133 MiB   133 MiB       0.43
 
--- POOLS ---
POOL                         ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr                          1    1  449 KiB        2  1.3 MiB      0    9.5 GiB
cephfs_data                   2   32      0 B        0      0 B      0    9.5 GiB
cephfs_metadata               3   16  4.3 KiB       22   96 KiB      0    9.5 GiB
.rgw.root                     4   32  2.2 KiB       13  144 KiB      0    9.5 GiB
cn-east-1.rgw.log             5   32   23 KiB      306  1.9 MiB      0    9.5 GiB
cn-east-1.rgw.control         6   32      0 B        8      0 B      0    9.5 GiB
cn-east-1.rgw.meta            7   32  1.3 KiB        7   72 KiB      0    9.5 GiB
cn-east-1.rgw.buckets.index   8   32      0 B       11      0 B      0    9.5 GiB


# Mount CephFS
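# A minimal sketch (an addition, not from the original): kernel-client mount using the admin user.
# The monitor address and mount point here are assumptions; adjust them for your environment.
[root@ceph-01 ~]# mkdir -p /mnt/cephfs
[root@ceph-01 ~]# mount -t ceph 192.168.0.14:6789:/ /mnt/cephfs -o name=admin,secret=$(ceph auth get-key client.admin)
[root@ceph-01 ~]# df -h /mnt/cephfs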

Enable block storage

# Run these commands on any one node of the Ceph cluster
# Create the rbd pool
[root@ceph-01 ~]# ceph osd pool create rbd_storage 16 16 replicated
pool 'rbd_storage' created

# Create a block device image
[root@ceph-01 ~]# rbd create --size 1024 rbd_image -p rbd_storage
[root@ceph-01 ~]# rbd ls rbd_storage
rbd_image

# To delete the image (shown for reference; keep it for the mapping steps below)
[root@ceph-01 ~]# rbd rm rbd_storage/rbd_image


# Mount the rbd block device
# Map the block device into the kernel
[root@ceph-01 ~]# rbd map rbd_storage/rbd_image
/dev/rbd0
[root@ceph-01 ~]# lsblk | grep rbd
rbd0                                                               251:0    0   1G  0 disk 

# Format the rbd device
[root@ceph-01 ~]# mkfs.ext4 -m0 /dev/rbd/rbd_storage/rbd_image 
mke2fs 1.45.6 (20-Mar-2020)
Discarding device blocks: done                            
Creating filesystem with 262144 4k blocks and 65536 inodes
Filesystem UUID: edd56aad-cbd4-4f69-b950-de46d9098f1c
Superblock backups stored on blocks: 
	32768, 98304, 163840, 229376

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done

# Mount the rbd device
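# A minimal sketch (an addition, not from the original): mount the formatted device on a hypothetical mount point.
[root@ceph-01 ~]# mkdir -p /mnt/rbd
[root@ceph-01 ~]# mount /dev/rbd0 /mnt/rbd
[root@ceph-01 ~]# df -h /mnt/rbd
# Remember to umount /mnt/rbd before unmapping below.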

# Unmap the device from the kernel
rbd unmap /dev/rbd0
