cat /etc/ceph/ceph.conf
[global]
fsid = 9fc34be0-eedd-4d49-80db-e0cfbe875f0d
mon_initial_members = test-luminous-1, test-luminous-2, test-luminous-3
mon_host = 192.168.1.9,192.168.1.10,192.168.1.15
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

[client]
rbd cache = true
rbd cache writethrough until flush = true
rbd concurrent management ops = 40
rbd cache max_dirty = 100663296
rbd cache size = 134217728
rbd cache target dirty = 67108864
log file = /var/log/qemu/qemu-guest-$pid.log
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
rbd default features = 61

[root@test-pike-aio ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME                 STATUS REWEIGHT PRI-AFF
-1       9.41469 root default
-3       3.40489     host test-luminous-1
 6   hdd 0.39999         osd.6                 up        0 1.00000
12   hdd 1.00000         osd.12                up  1.00000 1.00000
15   hdd 1.00000         osd.15                up        0 1.00000
 3   ssd 1.00000         osd.3                 up  1.00000 1.00000
 9   ssd 0.00490         osd.9                 up        0 1.00000
-5       3.00490     host test-luminous-2
 1   hdd 0.50000         osd.1                 up        0 1.00000
 7   hdd 0.50000         osd.7                 up        0 1.00000
13   hdd 1.00000         osd.13                up  1.00000 1.00000
 4   ssd 1.00000         osd.4                 up  1.00000 1.00000
10   ssd 0.00490         osd.10                up        0 1.00000
-7       3.00490     host test-luminous-3
 2   hdd 0.50000         osd.2                 up        0 1.00000
 8   hdd 0.50000         osd.8                 up        0 1.00000
14   hdd 1.00000         osd.14                up  1.00000 1.00000
 5   ssd 1.00000         osd.5                 up  1.00000 1.00000
11   ssd 0.00490         osd.11                up        0 1.00000

[root@test-pike-aio ~]# ceph osd df
ID CLASS WEIGHT  REWEIGHT SIZE    USE     AVAIL   %USE  VAR  PGS
 6   hdd 0.39999        0      0B      0B      0B     0    0   0
12   hdd 1.00000  1.00000 20.0GiB 13.5GiB 6.46GiB 67.72 1.16  48
15   hdd 1.00000        0      0B      0B      0B     0    0   0
 3   ssd 1.00000  1.00000 5.00GiB 1.09GiB 3.91GiB 21.82 0.37  16
 9   ssd 0.00490        0      0B      0B      0B     0    0   0
 1   hdd 0.50000        0      0B      0B      0B     0    0   0
 7   hdd 0.50000        0      0B      0B      0B     0    0   0
13   hdd 1.00000  1.00000 20.0GiB 13.5GiB 6.46GiB 67.72 1.16  48
 4   ssd 1.00000  1.00000 5.00GiB 1.09GiB 3.90GiB 21.85 0.37  16
10   ssd 0.00490        0      0B      0B      0B     0    0   0
 2   hdd 0.50000        0      0B      0B      0B     0    0   0
 8   hdd 0.50000        0      0B      0B      0B     0    0   0
14   hdd 1.00000  1.00000 20.0GiB 13.5GiB 6.46GiB 67.72 1.16  48
 5   ssd 1.00000  1.00000 5.00GiB 1.09GiB 3.90GiB 21.84 0.37  16
11   ssd 0.00490        0      0B      0B      0B     0    0   0
                   TOTAL  135GiB 53.7GiB 81.3GiB 58.55
MIN/MAX VAR: 0.37/1.16  STDDEV: 26.75

[root@test-pike-aio ~]# ceph osd pool ls detail
pool 3 'images' replicated size 3 min_size 2 crush_rule 2 object_hash rjenkins pg_num 16 pgp_num 16 last_change 1219 flags hashpspool stripe_width 0 application rbd
        removed_snaps [1~3]
pool 4 'vms' replicated size 3 min_size 2 crush_rule 2 object_hash rjenkins pg_num 16 pgp_num 16 last_change 1219 flags hashpspool stripe_width 0 application rbd
        removed_snaps [1~3]
pool 11 'volumes' replicated size 3 min_size 2 crush_rule 2 object_hash rjenkins pg_num 16 pgp_num 16 last_change 1718 lfor 1718/1718 flags hashpspool max_bytes 5368709120 tiers 14 read_tier 14 write_tier 14 stripe_width 0
pool 14 'cache_volumes' replicated size 3 min_size 2 crush_rule 1 object_hash rjenkins pg_num 16 pgp_num 16 last_change 1728 lfor 1718/1718 flags hashpspool,incomplete_clones max_bytes 5368709120 tier_of 11 cache_mode writeback target_bytes 5368709120 hit_set bloom{false_positive_probability: 0.05, target_size: 0, seed: 0} 1800s x6 decay_rate 0 search_last_n 0 stripe_width 0

[root@test-pike-aio ~]# ceph osd crush rule dump
[
    {
        "rule_id": 0,
        "rule_name": "replicated_rule",
        "ruleset": 0,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -1,
                "item_name": "default"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    },
    {
        "rule_id": 1,
        "rule_name": "hot-storage",
        "ruleset": 1,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -12,
                "item_name": "default~ssd"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    },
    {
        "rule_id": 2,
        "rule_name": "cold-storage",
        "ruleset": 2,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -2,
                "item_name": "default~hdd"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    }
]