[root@ceph141 ~]# cephadm shell   # Note: this command starts a new container, which exits once you leave the shell!
Inferring fsid c153209c-d8a0-11ef-a0ed-bdb84668ed01
Inferring config /var/lib/ceph/c153209c-d8a0-11ef-a0ed-bdb84668ed01/mon.ceph141/config
Using ceph image with id '2bc0b0f4375d' and tag 'v18' created on 2024-07-24 06:19:35 +0800 CST
quay.io/ceph/ceph@sha256:6ac7f923aa1d23b43248ce0ddec7e1388855ee3d00813b52c3172b0b23b37906
root@ceph141:/# ceph -s
  cluster:
    id:     c153209c-d8a0-11ef-a0ed-bdb84668ed01
    health: HEALTH_WARN
            mon ceph141 is low on available space
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph141 (age 17h)
    mgr: ceph141.iphxbv (active, since 17h)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
[root@ceph141 ~]# cephadm shell -- ceph -s
Inferring fsid c153209c-d8a0-11ef-a0ed-bdb84668ed01
Inferring config /var/lib/ceph/c153209c-d8a0-11ef-a0ed-bdb84668ed01/mon.ceph141/config
Using ceph image with id '2bc0b0f4375d' and tag 'v18' created on 2024-07-24 06:19:35 +0800 CST
quay.io/ceph/ceph@sha256:6ac7f923aa1d23b43248ce0ddec7e1388855ee3d00813b52c3172b0b23b37906
  cluster:
    id:     c153209c-d8a0-11ef-a0ed-bdb84668ed01
    health: HEALTH_WARN
            mon ceph141 is low on available space
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph141 (age 17h)
    mgr: ceph141.iphxbv (active, since 17h)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
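Tip: the `--` form above runs a single command in a throwaway container without dropping you into a shell. If you would rather run ceph directly on the host, cephadm can also install the native client packages; a minimal sketch, assuming the node can reach a Ceph package repository:

[root@ceph141 ~]# cephadm add-repo --release reef   # configure the Reef repo (assumption: internet or a mirror is reachable)
[root@ceph141 ~]# cephadm install ceph-common       # install the ceph CLI on the host itself
[root@ceph141 ~]# ceph -v                           # now works outside any container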
Change the Ceph dashboard password from the command line

Set the admin user's password to 1
[root@ceph141 ~]# echo 1 | ceph dashboard set-login-credentials admin -i -
******************************************************************
***          WARNING: this command is deprecated.              ***
*** Please use the ac-user-* related commands to manage users. ***
******************************************************************
Username and password updated
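As the warning notes, set-login-credentials is deprecated in favor of the ac-user-* commands. A minimal sketch of the equivalent (the /tmp/passwd.txt path is only an illustration, and a trivial password like 1 may be rejected by the password policy unless forced):

[root@ceph141 ~]# echo 1 > /tmp/passwd.txt
[root@ceph141 ~]# ceph dashboard ac-user-set-password admin -i /tmp/passwd.txt --force-password
[root@ceph141 ~]# rm -f /tmp/passwd.txt   # don't leave the password lying around on disk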
II. Adding or removing hosts in a Ceph cluster

1. View the current list of cluster hosts
[root@ceph141 ~]# ceph orch host ls
HOST     ADDR        LABELS  STATUS
ceph141  10.0.0.141  _admin
1 hosts in cluster
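Note that the device listing below already reports disks on ceph142 and ceph143, so those hosts have to join the cluster first. A minimal sketch of the usual cephadm procedure (the 10.0.0.142/143 addresses follow this lab's numbering and are assumptions):

# Push the cluster's SSH public key so cephadm can manage the new nodes
[root@ceph141 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph142
[root@ceph141 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph143

# Register the hosts with the orchestrator; removal is the reverse (ceph orch host rm <host>)
[root@ceph141 ~]# ceph orch host add ceph142 10.0.0.142
[root@ceph141 ~]# ceph orch host add ceph143 10.0.0.143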
[root@ceph141 ~]# ceph orch device ls
HOST     PATH      TYPE  DEVICE ID                                            SIZE   AVAILABLE  REFRESHED  REJECT REASONS
ceph141  /dev/sdc  hdd                                                        200G   Yes        10m ago
ceph141  /dev/sr0  hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  2006M  No         10m ago    Has a FileSystem, Insufficient space (<5GB)
ceph142  /dev/sdb  hdd                                                        100G   Yes        4m ago
ceph142  /dev/sdc  hdd                                                        200G   Yes        4m ago
ceph142  /dev/sr0  hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  2006M  No         4m ago     Has a FileSystem, Insufficient space (<5GB)
ceph143  /dev/sdb  hdd                                                        100G   Yes        3m ago
ceph143  /dev/sdc  hdd                                                        200G   Yes        3m ago
ceph143  /dev/sr0  hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  2006M  No         3m ago     Has a FileSystem, Insufficient space (<5GB)
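The REJECT REASONS column explains why a device is not AVAILABLE; the sr0 CD-ROM drives can simply be ignored. If a real data disk were ever rejected because it still carried an old filesystem, it could be wiped for reuse; a destructive sketch (not a step taken here):

# DESTRUCTIVE: erases everything on the named device
[root@ceph141 ~]# ceph orch device zap ceph142 /dev/sdb --force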
View the free devices on each node
[root@ceph141 ~]# lsblk
...
sdb    8:16   0  200G  0 disk
sdc    8:32   0  100G  0 disk
...

[root@ceph142 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
...
sdb    8:16   0  200G  0 disk
sdc    8:32   0  100G  0 disk
...

[root@ceph143 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
...
sdb    8:16   0  200G  0 disk
sdc    8:32   0  100G  0 disk
View the OSD list
[root@ceph141 ~]# ceph osd tree
ID  CLASS  WEIGHT  TYPE NAME     STATUS  REWEIGHT  PRI-AFF
-1         0       root default
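The tree is still empty because no OSDs exist yet. With the disks above marked AVAILABLE, the natural next step is to turn them into OSDs; a sketch of the two common approaches (device paths taken from the listing above):

# Option 1: let cephadm consume every eligible device on every managed host
[root@ceph141 ~]# ceph orch apply osd --all-available-devices

# Option 2: add specific devices one at a time
[root@ceph141 ~]# ceph orch daemon add osd ceph141:/dev/sdc
[root@ceph141 ~]# ceph orch daemon add osd ceph142:/dev/sdb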