root@controller2:~# ceph -s
  cluster:
    id:     4a158d27-f750-41d5-9e7f-26ce4c9d2d45
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum controller1,controller2,controller3
    mgr: controller2(active)
    osd: 1404 osds: 1404 up, 1404 in
    rgw: 10 daemons active

  data:
    pools:   17 pools, 34260 pgs
    objects: 27.16 M objects, 1.0 PiB
    usage:   3.1 PiB used, 5.9 PiB / 9.1 PiB avail
    pgs:     34233 active+clean
             24    active+clean+scrubbing+deep
             3     active+clean+scrubbing

  io:
    client:   637 MiB/s rd, 563 KiB/s wr, 2.20 kop/s rd, 54 op/s wr

root@controller2:~# ceph versions
{
    "mon": {
        "ceph version 13.2.6 (7b695f835b03642f85998b2ae7b6dd093d9fbce4) mimic (stable)": 3
    },
    "mgr": {
        "ceph version 13.2.6 (7b695f835b03642f85998b2ae7b6dd093d9fbce4) mimic (stable)": 1
    },
    "osd": {
        "ceph version 13.2.6 (7b695f835b03642f85998b2ae7b6dd093d9fbce4) mimic (stable)": 1404
    },
    "mds": {},
    "rgw": {
        "ceph version 13.2.6 (7b695f835b03642f85998b2ae7b6dd093d9fbce4) mimic (stable)": 10
    },
    "overall": {
        "ceph version 13.2.6 (7b695f835b03642f85998b2ae7b6dd093d9fbce4) mimic (stable)": 1418
    }
}