$ sudo podman exec ceph-mon-controller-0 ceph -s
  cluster:
    id:     670dc288-cd36-4772-a4fc-47287f8e2ebf
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum controller-1,controller-2,controller-0 (age 14h)
    mgr: controller-1(active, since 8w), standbys: controller-0, controller-2
    mds: cephfs:1 {0=controller-2=up:active} 2 up:standby
    osd: 15 osds: 15 up (since 8w), 15 in (since 8w)

  task status:
    scrub status:
        mds.controller-2: idle

  data:
    pools:   6 pools, 192 pgs
    objects: 309 objects, 1.6 GiB
    usage:   21 GiB used, 144 GiB / 165 GiB avail
    pgs:     192 active+clean
$ sudo podman exec ceph-mon-controller-0 ceph -s
  cluster:
    id:     670dc288-cd36-4772-a4fc-47287f8e2ebf
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum controller-1,controller-2,controller-0 (age 14h)
    mgr: controller-1(active, since 8w), standbys: controller-0, controller-2
    mds: cephfs:1 {0=controller-2=up:active} 2 up:standby
    osd: 15 osds: 15 up (since 8w), 15 in (since 8w)

  task status:
    scrub status:
        mds.controller-2: idle

  data:
    pools:   6 pools, 192 pgs
    objects: 309 objects, 1.6 GiB
    usage:   21 GiB used, 144 GiB / 165 GiB avail
    pgs:     192 active+clean