# HELP ceph_health_status Cluster health status
# TYPE ceph_health_status untyped
ceph_health_status 1.0
# HELP ceph_mon_quorum_status Monitors in quorum
# TYPE ceph_mon_quorum_status gauge
ceph_mon_quorum_status{ceph_daemon="mon.ceph03"} 1.0
ceph_mon_quorum_status{ceph_daemon="mon.ceph04"} 1.0
ceph_mon_quorum_status{ceph_daemon="mon.ceph07"} 1.0
ceph_mon_quorum_status{ceph_daemon="mon.ceph05"} 1.0
ceph_mon_quorum_status{ceph_daemon="mon.ceph06"} 1.0
# HELP ceph_fs_metadata FS Metadata
# TYPE ceph_fs_metadata untyped
ceph_fs_metadata{data_pools="40,41,42,49",fs_id="6",metadata_pool="39",name="infinoid.oi"} 1.0
# HELP ceph_mds_metadata MDS Metadata
# TYPE ceph_mds_metadata untyped
ceph_mds_metadata{ceph_daemon="mds.infinoid.oi.ceph10.habnyr",fs_id="-1",hostname="ceph10",public_addr="172.22.0.110:6801/801673083",rank="-1",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mds_metadata{ceph_daemon="mds.infinoid.oi.ceph09.mrpucx",fs_id="-1",hostname="ceph09",public_addr="172.22.0.109:6801/3702517795",rank="-1",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mds_metadata{ceph_daemon="mds.infinoid.oi.ceph11.dfqlct",fs_id="-1",hostname="ceph11",public_addr="172.22.0.111:6801/189227323",rank="-1",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mds_metadata{ceph_daemon="mds.infinoid.oi.ceph08.dddqvc",fs_id="6",hostname="ceph08",public_addr="172.22.0.108:6801/4215673398",rank="0",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
# HELP ceph_mon_metadata MON Metadata
# TYPE ceph_mon_metadata untyped
ceph_mon_metadata{ceph_daemon="mon.ceph03",hostname="ceph03",public_addr="172.22.0.103",rank="0",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mon_metadata{ceph_daemon="mon.ceph04",hostname="ceph04",public_addr="172.22.0.104",rank="1",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mon_metadata{ceph_daemon="mon.ceph07",hostname="ceph07",public_addr="172.22.0.107",rank="2",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mon_metadata{ceph_daemon="mon.ceph05",hostname="ceph05",public_addr="172.22.0.105",rank="3",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mon_metadata{ceph_daemon="mon.ceph06",hostname="ceph06",public_addr="172.22.0.106",rank="4",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
# HELP ceph_mgr_metadata MGR metadata
# TYPE ceph_mgr_metadata gauge
ceph_mgr_metadata{ceph_daemon="mgr.ceph00.cjoafg",hostname="ceph00",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mgr_metadata{ceph_daemon="mgr.ceph01.ohaker",hostname="ceph01",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mgr_metadata{ceph_daemon="mgr.ceph02.yqrmmw",hostname="ceph02",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
# HELP ceph_mgr_status MGR status (0=standby, 1=active)
# TYPE ceph_mgr_status gauge
ceph_mgr_status{ceph_daemon="mgr.ceph00.cjoafg"} 0.0
ceph_mgr_status{ceph_daemon="mgr.ceph01.ohaker"} 0.0
ceph_mgr_status{ceph_daemon="mgr.ceph02.yqrmmw"} 1.0
# HELP ceph_mgr_module_status MGR module status (0=disabled, 1=enabled, 2=auto-enabled)
# TYPE ceph_mgr_module_status gauge
ceph_mgr_module_status{name="alerts"} 0.0
ceph_mgr_module_status{name="balancer"} 2.0
ceph_mgr_module_status{name="cephadm"} 1.0
ceph_mgr_module_status{name="crash"} 2.0
ceph_mgr_module_status{name="dashboard"} 1.0
ceph_mgr_module_status{name="devicehealth"} 2.0
ceph_mgr_module_status{name="diskprediction_local"} 1.0
ceph_mgr_module_status{name="influx"} 0.0
ceph_mgr_module_status{name="insights"} 0.0
ceph_mgr_module_status{name="iostat"} 1.0
ceph_mgr_module_status{name="k8sevents"} 0.0
ceph_mgr_module_status{name="localpool"} 0.0
ceph_mgr_module_status{name="mds_autoscaler"} 0.0
ceph_mgr_module_status{name="mirroring"} 0.0
ceph_mgr_module_status{name="nfs"} 1.0
ceph_mgr_module_status{name="orchestrator"} 2.0
ceph_mgr_module_status{name="osd_perf_query"} 0.0
ceph_mgr_module_status{name="osd_support"} 0.0
ceph_mgr_module_status{name="pg_autoscaler"} 2.0
ceph_mgr_module_status{name="progress"} 2.0
ceph_mgr_module_status{name="prometheus"} 1.0
ceph_mgr_module_status{name="rbd_support"} 2.0
ceph_mgr_module_status{name="restful"} 1.0
ceph_mgr_module_status{name="rgw"} 0.0
ceph_mgr_module_status{name="rook"} 0.0
ceph_mgr_module_status{name="selftest"} 0.0
ceph_mgr_module_status{name="snap_schedule"} 0.0
ceph_mgr_module_status{name="stats"} 0.0
ceph_mgr_module_status{name="status"} 2.0
ceph_mgr_module_status{name="telegraf"} 0.0
ceph_mgr_module_status{name="telemetry"} 2.0
ceph_mgr_module_status{name="test_orchestrator"} 0.0
ceph_mgr_module_status{name="volumes"} 2.0
ceph_mgr_module_status{name="zabbix"} 0.0
# HELP ceph_mgr_module_can_run MGR module runnable state i.e. can it run (0=no, 1=yes)
# TYPE ceph_mgr_module_can_run gauge
ceph_mgr_module_can_run{name="alerts"} 1.0
ceph_mgr_module_can_run{name="balancer"} 1.0
ceph_mgr_module_can_run{name="cephadm"} 1.0
ceph_mgr_module_can_run{name="crash"} 1.0
ceph_mgr_module_can_run{name="dashboard"} 1.0
ceph_mgr_module_can_run{name="devicehealth"} 1.0
ceph_mgr_module_can_run{name="diskprediction_local"} 1.0
ceph_mgr_module_can_run{name="influx"} 0.0
ceph_mgr_module_can_run{name="insights"} 1.0
ceph_mgr_module_can_run{name="iostat"} 1.0
ceph_mgr_module_can_run{name="k8sevents"} 1.0
ceph_mgr_module_can_run{name="localpool"} 1.0
ceph_mgr_module_can_run{name="mds_autoscaler"} 1.0
ceph_mgr_module_can_run{name="mirroring"} 1.0
ceph_mgr_module_can_run{name="nfs"} 1.0
ceph_mgr_module_can_run{name="orchestrator"} 1.0
ceph_mgr_module_can_run{name="osd_perf_query"} 1.0
ceph_mgr_module_can_run{name="osd_support"} 1.0
ceph_mgr_module_can_run{name="pg_autoscaler"} 1.0
ceph_mgr_module_can_run{name="progress"} 1.0
ceph_mgr_module_can_run{name="prometheus"} 1.0
ceph_mgr_module_can_run{name="rbd_support"} 1.0
ceph_mgr_module_can_run{name="restful"} 1.0
ceph_mgr_module_can_run{name="rgw"} 1.0
ceph_mgr_module_can_run{name="rook"} 1.0
ceph_mgr_module_can_run{name="selftest"} 1.0
ceph_mgr_module_can_run{name="snap_schedule"} 1.0
ceph_mgr_module_can_run{name="stats"} 1.0
ceph_mgr_module_can_run{name="status"} 1.0
ceph_mgr_module_can_run{name="telegraf"} 1.0
ceph_mgr_module_can_run{name="telemetry"} 1.0
ceph_mgr_module_can_run{name="test_orchestrator"} 1.0
ceph_mgr_module_can_run{name="volumes"} 1.0
ceph_mgr_module_can_run{name="zabbix"} 1.0
# HELP ceph_osd_metadata OSD Metadata
# TYPE ceph_osd_metadata untyped
ceph_osd_metadata{back_iface="",ceph_daemon="osd.0",cluster_addr="172.22.0.100",device_class="ssd",front_iface="",hostname="ceph00",objectstore="bluestore",public_addr="172.22.0.100",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.1",cluster_addr="172.22.0.101",device_class="ssd",front_iface="",hostname="ceph01",objectstore="bluestore",public_addr="172.22.0.101",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.2",cluster_addr="172.22.0.102",device_class="ssd",front_iface="",hostname="ceph02",objectstore="bluestore",public_addr="172.22.0.102",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.3",cluster_addr="172.22.0.103",device_class="ssd",front_iface="",hostname="ceph03",objectstore="bluestore",public_addr="172.22.0.103",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.4",cluster_addr="172.22.0.104",device_class="ssd",front_iface="",hostname="ceph04",objectstore="bluestore",public_addr="172.22.0.104",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.5",cluster_addr="172.22.0.105",device_class="ssd",front_iface="",hostname="ceph05",objectstore="bluestore",public_addr="172.22.0.105",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.6",cluster_addr="172.22.0.106",device_class="ssd",front_iface="",hostname="ceph06",objectstore="bluestore",public_addr="172.22.0.106",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.7",cluster_addr="172.22.0.107",device_class="ssd",front_iface="",hostname="ceph07",objectstore="bluestore",public_addr="172.22.0.107",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.8",cluster_addr="172.22.0.108",device_class="ssd",front_iface="",hostname="ceph08",objectstore="bluestore",public_addr="172.22.0.108",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.9",cluster_addr="172.22.0.109",device_class="ssd",front_iface="",hostname="ceph09",objectstore="bluestore",public_addr="172.22.0.109",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.10",cluster_addr="172.22.0.110",device_class="ssd",front_iface="",hostname="ceph10",objectstore="bluestore",public_addr="172.22.0.110",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.11",cluster_addr="172.22.0.111",device_class="ssd",front_iface="",hostname="ceph11",objectstore="bluestore",public_addr="172.22.0.111",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.12",cluster_addr="172.22.0.112",device_class="ssd",front_iface="",hostname="ceph12",objectstore="bluestore",public_addr="172.22.0.112",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.13",cluster_addr="172.22.0.113",device_class="ssd",front_iface="",hostname="ceph13",objectstore="bluestore",public_addr="172.22.0.113",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.14",cluster_addr="172.22.0.114",device_class="ssd",front_iface="",hostname="ceph14",objectstore="bluestore",public_addr="172.22.0.114",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.15",cluster_addr="172.22.0.115",device_class="ssd",front_iface="",hostname="ceph15",objectstore="bluestore",public_addr="172.22.0.115",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.16",cluster_addr="172.22.0.116",device_class="ssd",front_iface="",hostname="ceph16",objectstore="bluestore",public_addr="172.22.0.116",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.17",cluster_addr="172.22.0.117",device_class="ssd",front_iface="",hostname="ceph17",objectstore="bluestore",public_addr="172.22.0.117",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.18",cluster_addr="172.22.0.118",device_class="ssd",front_iface="",hostname="ceph18",objectstore="bluestore",public_addr="172.22.0.118",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.19",cluster_addr="172.22.0.119",device_class="ssd",front_iface="",hostname="ceph19",objectstore="bluestore",public_addr="172.22.0.119",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
# HELP ceph_disk_occupation Associate Ceph daemon with disk used
# TYPE ceph_disk_occupation untyped
ceph_disk_occupation{ceph_daemon="osd.0",device="/dev/dm-0",db_device="",wal_device="",instance="ceph00",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010489"} 1.0
ceph_disk_occupation{ceph_daemon="osd.1",device="/dev/dm-0",db_device="",wal_device="",instance="ceph01",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010925"} 1.0
ceph_disk_occupation{ceph_daemon="osd.2",device="/dev/dm-0",db_device="",wal_device="",instance="ceph02",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010915"} 1.0
ceph_disk_occupation{ceph_daemon="osd.3",device="/dev/dm-0",db_device="",wal_device="",instance="ceph03",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010474"} 1.0
ceph_disk_occupation{ceph_daemon="osd.4",device="/dev/dm-0",db_device="",wal_device="",instance="ceph04",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010462"} 1.0
ceph_disk_occupation{ceph_daemon="osd.5",device="/dev/dm-0",db_device="",wal_device="",instance="ceph05",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010492"} 1.0
ceph_disk_occupation{ceph_daemon="osd.6",device="/dev/dm-0",db_device="",wal_device="",instance="ceph06",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010491"} 1.0
ceph_disk_occupation{ceph_daemon="osd.7",device="/dev/dm-0",db_device="",wal_device="",instance="ceph07",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010466"} 1.0
ceph_disk_occupation{ceph_daemon="osd.8",device="/dev/dm-0",db_device="",wal_device="",instance="ceph08",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112207280560030"} 1.0
ceph_disk_occupation{ceph_daemon="osd.9",device="/dev/dm-0",db_device="",wal_device="",instance="ceph09",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112205030010917"} 1.0
ceph_disk_occupation{ceph_daemon="osd.10",device="/dev/dm-0",db_device="",wal_device="",instance="ceph10",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2312E6BE24FE"} 1.0
ceph_disk_occupation{ceph_daemon="osd.11",device="/dev/dm-0",db_device="",wal_device="",instance="ceph11",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2312E6BE21D0"} 1.0
ceph_disk_occupation{ceph_daemon="osd.12",device="/dev/dm-0",db_device="",wal_device="",instance="ceph12",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2324E6E2553B"} 1.0
ceph_disk_occupation{ceph_daemon="osd.13",device="/dev/dm-0",db_device="",wal_device="",instance="ceph13",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2312E6BE248E"} 1.0
ceph_disk_occupation{ceph_daemon="osd.14",device="/dev/dm-0",db_device="",wal_device="",instance="ceph14",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2312E6BE21F5"} 1.0
ceph_disk_occupation{ceph_daemon="osd.15",device="/dev/dm-0",db_device="",wal_device="",instance="ceph15",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2307E6AB5DC8"} 1.0
ceph_disk_occupation{ceph_daemon="osd.16",device="/dev/dm-0",db_device="",wal_device="",instance="ceph16",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2312E6BE21EE"} 1.0
ceph_disk_occupation{ceph_daemon="osd.17",device="/dev/dm-0",db_device="",wal_device="",instance="ceph17",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2324E6E25525"} 1.0
ceph_disk_occupation{ceph_daemon="osd.18",device="/dev/dm-0",db_device="",wal_device="",instance="ceph18",devices="nvme0n1",device_ids="nvme0n1=TEAM_TM8FP4004T_112203020640001"} 1.0
ceph_disk_occupation{ceph_daemon="osd.19",device="/dev/dm-0",db_device="",wal_device="",instance="ceph19",devices="nvme0n1",device_ids="nvme0n1=CT4000P3SSD8_2324E6E25483"} 1.0
# HELP ceph_disk_occupation_human Associate Ceph daemon with disk used
# TYPE ceph_disk_occupation_human untyped
ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/dm-0",instance="ceph00"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/dm-0",instance="ceph01"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.2",device="/dev/dm-0",instance="ceph02"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.3",device="/dev/dm-0",instance="ceph03"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.4",device="/dev/dm-0",instance="ceph04"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.5",device="/dev/dm-0",instance="ceph05"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.6",device="/dev/dm-0",instance="ceph06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.7",device="/dev/dm-0",instance="ceph07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.8",device="/dev/dm-0",instance="ceph08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.9",device="/dev/dm-0",instance="ceph09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.10",device="/dev/dm-0",instance="ceph10"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.11",device="/dev/dm-0",instance="ceph11"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.12",device="/dev/dm-0",instance="ceph12"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.13",device="/dev/dm-0",instance="ceph13"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.14",device="/dev/dm-0",instance="ceph14"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.15",device="/dev/dm-0",instance="ceph15"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.16",device="/dev/dm-0",instance="ceph16"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.17",device="/dev/dm-0",instance="ceph17"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.18",device="/dev/dm-0",instance="ceph18"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.19",device="/dev/dm-0",instance="ceph19"} 1.0
# HELP ceph_pool_metadata POOL Metadata
# TYPE ceph_pool_metadata untyped
ceph_pool_metadata{pool_id="1",name=".mgr",type="replicated",description="replica:5",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="39",name="cephfs.infinoid.oi.meta",type="replicated",description="replica:5",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="40",name="cephfs.infinoid.oi.data",type="replicated",description="replica:5",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="41",name="cephfs.infinoid.oi.longterm",type="erasure",description="ec:12+4",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="42",name="cephfs.infinoid.oi.shortterm",type="erasure",description="ec:8+2",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="43",name="rbd.ec124",type="erasure",description="ec:12+4",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="44",name="rbd",type="replicated",description="replica:5",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="45",name=".rgw.root",type="replicated",description="replica:3",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="46",name="default.rgw.log",type="replicated",description="replica:3",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="47",name="default.rgw.control",type="replicated",description="replica:3",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="48",name="default.rgw.meta",type="replicated",description="replica:3",compression_mode="none"} 1.0
ceph_pool_metadata{pool_id="49",name="cephfs.infinoid.oi.k8s",type="erasure",description="ec:12+4",compression_mode="none"} 1.0
# HELP ceph_rgw_metadata RGW Metadata
# TYPE ceph_rgw_metadata untyped
# HELP ceph_rbd_mirror_metadata RBD Mirror Metadata
# TYPE ceph_rbd_mirror_metadata untyped
# HELP ceph_pg_total PG Total Count per Pool
# TYPE ceph_pg_total gauge
ceph_pg_total{pool_id="1"} 1.0
ceph_pg_total{pool_id="39"} 16.0
ceph_pg_total{pool_id="40"} 32.0
ceph_pg_total{pool_id="41"} 32.0
ceph_pg_total{pool_id="42"} 32.0
ceph_pg_total{pool_id="43"} 32.0
ceph_pg_total{pool_id="44"} 32.0
ceph_pg_total{pool_id="45"} 32.0
ceph_pg_total{pool_id="46"} 32.0
ceph_pg_total{pool_id="47"} 32.0
ceph_pg_total{pool_id="48"} 32.0
ceph_pg_total{pool_id="49"} 32.0
# HELP ceph_health_detail healthcheck status by type (0=inactive, 1=active)
# TYPE ceph_health_detail gauge
ceph_health_detail{name="TOO_FEW_OSDS",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MON_DOWN",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MDS_ALL_DOWN",severity="HEALTH_ERR"} 0.0
ceph_health_detail{name="MDS_UP_LESS_THAN_MAX",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="FS_DEGRADED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MGR_MODULE_ERROR",severity="HEALTH_ERR"} 0.0
ceph_health_detail{name="OSD_DOWN",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_HOST_DOWN",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_AVAILABILITY",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_FAILED_DAEMON",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_DEGRADED",severity="HEALTH_WARN"} 1.0
ceph_health_detail{name="MDS_SLOW_METADATA_IO",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MON_CLOCK_SKEW",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_REFRESH_FAILED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MDS_CLIENT_RECALL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="SLOW_OPS",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_APPLY_SPEC_FAIL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="TELEMETRY_CHANGED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_UPGRADE_FINISHED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_HOST_CHECK_FAILED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MDS_SLOW_REQUEST",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MDS_TRIM",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSDMAP_FLAGS",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_STRAY_DAEMON",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="RECENT_CRASH",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="FS_WITH_FAILED_MDS",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_SCRUB_ERRORS",severity="HEALTH_ERR"} 0.0
ceph_health_detail{name="PG_DAMAGED",severity="HEALTH_ERR"} 0.0
ceph_health_detail{name="MDS_CLIENT_LATE_RELEASE",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="POOL_APP_NOT_ENABLED",severity="HEALTH_WARN"} 0.0
# HELP ceph_pool_objects_repaired Number of objects repaired in a pool
# TYPE ceph_pool_objects_repaired counter
ceph_pool_objects_repaired{pool_id="49"} 0.0
ceph_pool_objects_repaired{pool_id="48"} 0.0
ceph_pool_objects_repaired{pool_id="47"} 0.0
ceph_pool_objects_repaired{pool_id="46"} 0.0
ceph_pool_objects_repaired{pool_id="45"} 0.0
ceph_pool_objects_repaired{pool_id="44"} 0.0
ceph_pool_objects_repaired{pool_id="39"} 0.0
ceph_pool_objects_repaired{pool_id="1"} 0.0
ceph_pool_objects_repaired{pool_id="43"} 0.0
ceph_pool_objects_repaired{pool_id="40"} 0.0
ceph_pool_objects_repaired{pool_id="41"} 1.0
ceph_pool_objects_repaired{pool_id="42"} 0.0
# HELP ceph_daemon_health_metrics Health metrics for Ceph daemons
# TYPE ceph_daemon_health_metrics gauge
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.0"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.0"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.1"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.1"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.2"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.2"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.ceph03"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.3"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.3"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.ceph04"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.4"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.4"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.ceph05"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.5"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.5"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.ceph06"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.6"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.6"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.ceph07"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.7"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.7"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.9"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.9"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.10"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.10"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.11"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.11"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.12"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.12"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.13"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.13"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.14"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.14"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.15"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.15"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.16"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.16"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.17"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.17"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.18"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.18"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.19"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.19"} 0.0
# HELP ceph_osd_flag_noup OSD Flag noup
# TYPE ceph_osd_flag_noup untyped
ceph_osd_flag_noup 0.0
# HELP ceph_osd_flag_nodown OSD Flag nodown
# TYPE ceph_osd_flag_nodown untyped
ceph_osd_flag_nodown 0.0
# HELP ceph_osd_flag_noout OSD Flag noout
# TYPE ceph_osd_flag_noout untyped
ceph_osd_flag_noout 0.0
# HELP ceph_osd_flag_noin OSD Flag noin
# TYPE ceph_osd_flag_noin untyped
ceph_osd_flag_noin 0.0
# HELP ceph_osd_flag_nobackfill OSD Flag nobackfill
# TYPE ceph_osd_flag_nobackfill untyped
ceph_osd_flag_nobackfill 0.0
# HELP ceph_osd_flag_norebalance OSD Flag norebalance
# TYPE ceph_osd_flag_norebalance untyped
ceph_osd_flag_norebalance 0.0
# HELP ceph_osd_flag_norecover OSD Flag norecover
# TYPE ceph_osd_flag_norecover untyped
ceph_osd_flag_norecover 0.0
# HELP ceph_osd_flag_noscrub OSD Flag noscrub
# TYPE ceph_osd_flag_noscrub untyped
ceph_osd_flag_noscrub 0.0
# HELP ceph_osd_flag_nodeep_scrub OSD Flag nodeep-scrub
# TYPE ceph_osd_flag_nodeep_scrub untyped
ceph_osd_flag_nodeep_scrub 0.0
# HELP ceph_osd_weight OSD status weight
# TYPE ceph_osd_weight untyped
ceph_osd_weight{ceph_daemon="osd.0"} 1.0
ceph_osd_weight{ceph_daemon="osd.1"} 1.0
ceph_osd_weight{ceph_daemon="osd.2"} 1.0
ceph_osd_weight{ceph_daemon="osd.3"} 1.0
ceph_osd_weight{ceph_daemon="osd.4"} 1.0
ceph_osd_weight{ceph_daemon="osd.5"} 0.0
ceph_osd_weight{ceph_daemon="osd.6"} 1.0
ceph_osd_weight{ceph_daemon="osd.7"} 1.0
ceph_osd_weight{ceph_daemon="osd.8"} 1.0
ceph_osd_weight{ceph_daemon="osd.9"} 1.0
ceph_osd_weight{ceph_daemon="osd.10"} 1.0
ceph_osd_weight{ceph_daemon="osd.11"} 1.0
ceph_osd_weight{ceph_daemon="osd.12"} 1.0
ceph_osd_weight{ceph_daemon="osd.13"} 1.0
ceph_osd_weight{ceph_daemon="osd.14"} 1.0
ceph_osd_weight{ceph_daemon="osd.15"} 1.0
ceph_osd_weight{ceph_daemon="osd.16"} 1.0
ceph_osd_weight{ceph_daemon="osd.17"} 1.0
ceph_osd_weight{ceph_daemon="osd.18"} 1.0
ceph_osd_weight{ceph_daemon="osd.19"} 1.0
# HELP ceph_osd_up OSD status up
# TYPE ceph_osd_up untyped
ceph_osd_up{ceph_daemon="osd.0"} 1.0
ceph_osd_up{ceph_daemon="osd.1"} 1.0
ceph_osd_up{ceph_daemon="osd.2"} 1.0
ceph_osd_up{ceph_daemon="osd.3"} 1.0
ceph_osd_up{ceph_daemon="osd.4"} 1.0
ceph_osd_up{ceph_daemon="osd.5"} 1.0
ceph_osd_up{ceph_daemon="osd.6"} 1.0
ceph_osd_up{ceph_daemon="osd.7"} 1.0
ceph_osd_up{ceph_daemon="osd.8"} 1.0
ceph_osd_up{ceph_daemon="osd.9"} 1.0
ceph_osd_up{ceph_daemon="osd.10"} 1.0
ceph_osd_up{ceph_daemon="osd.11"} 1.0
ceph_osd_up{ceph_daemon="osd.12"} 1.0
ceph_osd_up{ceph_daemon="osd.13"} 1.0
ceph_osd_up{ceph_daemon="osd.14"} 1.0
ceph_osd_up{ceph_daemon="osd.15"} 1.0
ceph_osd_up{ceph_daemon="osd.16"} 1.0
ceph_osd_up{ceph_daemon="osd.17"} 1.0
ceph_osd_up{ceph_daemon="osd.18"} 1.0
ceph_osd_up{ceph_daemon="osd.19"} 1.0
# HELP ceph_osd_in OSD status in
# TYPE ceph_osd_in untyped
ceph_osd_in{ceph_daemon="osd.0"} 1.0
ceph_osd_in{ceph_daemon="osd.1"} 1.0
ceph_osd_in{ceph_daemon="osd.2"} 1.0
ceph_osd_in{ceph_daemon="osd.3"} 1.0
ceph_osd_in{ceph_daemon="osd.4"} 1.0
ceph_osd_in{ceph_daemon="osd.5"} 0.0
ceph_osd_in{ceph_daemon="osd.6"} 1.0
ceph_osd_in{ceph_daemon="osd.7"} 1.0
ceph_osd_in{ceph_daemon="osd.8"} 1.0
ceph_osd_in{ceph_daemon="osd.9"} 1.0
ceph_osd_in{ceph_daemon="osd.10"} 1.0
ceph_osd_in{ceph_daemon="osd.11"} 1.0
ceph_osd_in{ceph_daemon="osd.12"} 1.0
ceph_osd_in{ceph_daemon="osd.13"} 1.0
ceph_osd_in{ceph_daemon="osd.14"} 1.0
ceph_osd_in{ceph_daemon="osd.15"} 1.0
ceph_osd_in{ceph_daemon="osd.16"} 1.0
ceph_osd_in{ceph_daemon="osd.17"} 1.0
ceph_osd_in{ceph_daemon="osd.18"} 1.0
ceph_osd_in{ceph_daemon="osd.19"} 1.0
# HELP ceph_osd_apply_latency_ms OSD stat apply_latency_ms
# TYPE ceph_osd_apply_latency_ms gauge
ceph_osd_apply_latency_ms{ceph_daemon="osd.14"} 9.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.3"} 3.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.16"} 9.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.0"} 2.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.10"} 58.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.18"} 2.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.1"} 4.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.15"} 58.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.13"} 8.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.12"} 55.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.5"} 0.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.19"} 9.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.2"} 3.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.17"} 10.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.7"} 2.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.9"} 5.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.11"} 10.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.6"} 3.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.4"} 2.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.8"} 2.0
# HELP ceph_osd_commit_latency_ms OSD stat commit_latency_ms
# TYPE ceph_osd_commit_latency_ms gauge
ceph_osd_commit_latency_ms{ceph_daemon="osd.14"} 9.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.3"} 3.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.16"} 9.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.0"} 2.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.10"} 58.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.18"} 2.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.1"} 4.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.15"} 58.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.13"} 8.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.12"} 55.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.5"} 0.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.19"} 9.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.2"} 3.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.17"} 10.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.7"} 2.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.9"} 5.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.11"} 10.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.6"} 3.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.4"} 2.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.8"} 2.0
# HELP ceph_pool_recovering_objects_per_sec OSD pool stats: recovering_objects_per_sec
# TYPE ceph_pool_recovering_objects_per_sec gauge
ceph_pool_recovering_objects_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="39"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="40"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="41"} 97.0
ceph_pool_recovering_objects_per_sec{pool_id="42"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="43"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="44"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="45"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="46"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="47"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="48"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="49"} 0.0
# HELP ceph_pool_recovering_bytes_per_sec OSD pool stats: recovering_bytes_per_sec
# TYPE ceph_pool_recovering_bytes_per_sec gauge
ceph_pool_recovering_bytes_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="39"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="40"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="41"} 326516464.0
ceph_pool_recovering_bytes_per_sec{pool_id="42"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="43"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="44"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="45"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="46"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="47"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="48"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="49"} 0.0
# HELP ceph_pool_recovering_keys_per_sec OSD pool stats: recovering_keys_per_sec
# TYPE ceph_pool_recovering_keys_per_sec gauge
ceph_pool_recovering_keys_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="39"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="40"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="41"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="42"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="43"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="44"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="45"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="46"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="47"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="48"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="49"} 0.0
# HELP ceph_pool_num_objects_recovered OSD pool stats: num_objects_recovered
# TYPE ceph_pool_num_objects_recovered gauge
ceph_pool_num_objects_recovered{pool_id="1"} 0.0
ceph_pool_num_objects_recovered{pool_id="39"} 0.0
ceph_pool_num_objects_recovered{pool_id="40"} 0.0
ceph_pool_num_objects_recovered{pool_id="41"} 1174.0
ceph_pool_num_objects_recovered{pool_id="42"} 0.0
ceph_pool_num_objects_recovered{pool_id="43"} 0.0
ceph_pool_num_objects_recovered{pool_id="44"} 0.0
ceph_pool_num_objects_recovered{pool_id="45"} 0.0
ceph_pool_num_objects_recovered{pool_id="46"} 0.0
ceph_pool_num_objects_recovered{pool_id="47"} 0.0
ceph_pool_num_objects_recovered{pool_id="48"} 0.0
ceph_pool_num_objects_recovered{pool_id="49"} 0.0
# HELP ceph_pool_num_bytes_recovered OSD pool stats: num_bytes_recovered
# TYPE ceph_pool_num_bytes_recovered gauge
ceph_pool_num_bytes_recovered{pool_id="1"} 0.0
ceph_pool_num_bytes_recovered{pool_id="39"} 0.0
ceph_pool_num_bytes_recovered{pool_id="40"} 0.0
ceph_pool_num_bytes_recovered{pool_id="41"} 3933451366.0
ceph_pool_num_bytes_recovered{pool_id="42"} 0.0
ceph_pool_num_bytes_recovered{pool_id="43"} 0.0
ceph_pool_num_bytes_recovered{pool_id="44"} 0.0
ceph_pool_num_bytes_recovered{pool_id="45"} 0.0
ceph_pool_num_bytes_recovered{pool_id="46"} 0.0
ceph_pool_num_bytes_recovered{pool_id="47"} 0.0
ceph_pool_num_bytes_recovered{pool_id="48"} 0.0
ceph_pool_num_bytes_recovered{pool_id="49"} 0.0
# HELP ceph_pg_active PG active per pool
# TYPE ceph_pg_active gauge
ceph_pg_active{pool_id="1"} 1.0
ceph_pg_active{pool_id="39"} 16.0
ceph_pg_active{pool_id="40"} 32.0
ceph_pg_active{pool_id="41"} 32.0
ceph_pg_active{pool_id="42"} 32.0
ceph_pg_active{pool_id="43"} 32.0
ceph_pg_active{pool_id="44"} 32.0
ceph_pg_active{pool_id="45"} 32.0
ceph_pg_active{pool_id="46"} 32.0
ceph_pg_active{pool_id="47"} 32.0
ceph_pg_active{pool_id="48"} 32.0
ceph_pg_active{pool_id="49"} 32.0
# HELP ceph_pg_clean PG clean per pool
# TYPE ceph_pg_clean gauge
ceph_pg_clean{pool_id="1"} 1.0
ceph_pg_clean{pool_id="39"} 13.0
ceph_pg_clean{pool_id="40"} 22.0
ceph_pg_clean{pool_id="41"} 5.0
ceph_pg_clean{pool_id="42"} 22.0
ceph_pg_clean{pool_id="43"} 10.0
ceph_pg_clean{pool_id="44"} 27.0
ceph_pg_clean{pool_id="45"} 32.0
ceph_pg_clean{pool_id="46"} 32.0
ceph_pg_clean{pool_id="47"} 32.0
ceph_pg_clean{pool_id="48"} 32.0
ceph_pg_clean{pool_id="49"} 32.0
# HELP ceph_pg_down PG down per pool
# TYPE ceph_pg_down gauge
ceph_pg_down{pool_id="1"} 0.0
ceph_pg_down{pool_id="39"} 0.0
ceph_pg_down{pool_id="40"} 0.0
ceph_pg_down{pool_id="41"} 0.0
ceph_pg_down{pool_id="42"} 0.0
ceph_pg_down{pool_id="43"} 0.0
ceph_pg_down{pool_id="44"} 0.0
ceph_pg_down{pool_id="45"} 0.0
ceph_pg_down{pool_id="46"} 0.0
ceph_pg_down{pool_id="47"} 0.0
ceph_pg_down{pool_id="48"} 0.0
ceph_pg_down{pool_id="49"} 0.0
# HELP ceph_pg_recovery_unfound PG recovery_unfound per pool
# TYPE ceph_pg_recovery_unfound gauge
ceph_pg_recovery_unfound{pool_id="1"} 0.0
ceph_pg_recovery_unfound{pool_id="39"} 0.0
ceph_pg_recovery_unfound{pool_id="40"} 0.0
ceph_pg_recovery_unfound{pool_id="41"} 0.0
ceph_pg_recovery_unfound{pool_id="42"} 0.0
ceph_pg_recovery_unfound{pool_id="43"} 0.0
ceph_pg_recovery_unfound{pool_id="44"} 0.0
ceph_pg_recovery_unfound{pool_id="45"} 0.0
ceph_pg_recovery_unfound{pool_id="46"} 0.0
ceph_pg_recovery_unfound{pool_id="47"} 0.0
ceph_pg_recovery_unfound{pool_id="48"} 0.0
ceph_pg_recovery_unfound{pool_id="49"} 0.0
# HELP ceph_pg_backfill_unfound PG backfill_unfound per pool
# TYPE ceph_pg_backfill_unfound gauge
ceph_pg_backfill_unfound{pool_id="1"} 0.0
ceph_pg_backfill_unfound{pool_id="39"} 0.0
ceph_pg_backfill_unfound{pool_id="40"} 0.0
ceph_pg_backfill_unfound{pool_id="41"} 0.0
ceph_pg_backfill_unfound{pool_id="42"} 0.0
ceph_pg_backfill_unfound{pool_id="43"} 0.0
ceph_pg_backfill_unfound{pool_id="44"} 0.0
ceph_pg_backfill_unfound{pool_id="45"} 0.0
ceph_pg_backfill_unfound{pool_id="46"} 0.0
ceph_pg_backfill_unfound{pool_id="47"} 0.0
ceph_pg_backfill_unfound{pool_id="48"} 0.0
ceph_pg_backfill_unfound{pool_id="49"} 0.0
# HELP ceph_pg_scrubbing PG scrubbing per pool
# TYPE ceph_pg_scrubbing gauge
ceph_pg_scrubbing{pool_id="1"} 0.0
ceph_pg_scrubbing{pool_id="39"} 0.0
ceph_pg_scrubbing{pool_id="40"} 0.0
ceph_pg_scrubbing{pool_id="41"} 0.0
ceph_pg_scrubbing{pool_id="42"} 0.0
ceph_pg_scrubbing{pool_id="43"} 0.0
ceph_pg_scrubbing{pool_id="44"} 0.0
ceph_pg_scrubbing{pool_id="45"} 0.0
ceph_pg_scrubbing{pool_id="46"} 0.0
ceph_pg_scrubbing{pool_id="47"} 0.0
ceph_pg_scrubbing{pool_id="48"} 0.0
ceph_pg_scrubbing{pool_id="49"} 0.0
# HELP ceph_pg_degraded PG degraded per pool
# TYPE ceph_pg_degraded gauge
ceph_pg_degraded{pool_id="1"} 0.0
ceph_pg_degraded{pool_id="39"} 0.0
ceph_pg_degraded{pool_id="40"} 10.0
ceph_pg_degraded{pool_id="41"} 27.0
ceph_pg_degraded{pool_id="42"} 10.0
ceph_pg_degraded{pool_id="43"} 15.0
ceph_pg_degraded{pool_id="44"} 0.0
ceph_pg_degraded{pool_id="45"} 0.0
ceph_pg_degraded{pool_id="46"} 0.0
ceph_pg_degraded{pool_id="47"} 0.0
ceph_pg_degraded{pool_id="48"} 0.0
ceph_pg_degraded{pool_id="49"} 0.0
# HELP ceph_pg_inconsistent PG inconsistent per pool
# TYPE ceph_pg_inconsistent gauge
ceph_pg_inconsistent{pool_id="1"} 0.0
ceph_pg_inconsistent{pool_id="39"} 0.0
ceph_pg_inconsistent{pool_id="40"} 0.0
ceph_pg_inconsistent{pool_id="41"} 0.0
ceph_pg_inconsistent{pool_id="42"} 0.0
ceph_pg_inconsistent{pool_id="43"} 0.0
ceph_pg_inconsistent{pool_id="44"} 0.0
ceph_pg_inconsistent{pool_id="45"} 0.0
ceph_pg_inconsistent{pool_id="46"} 0.0
ceph_pg_inconsistent{pool_id="47"} 0.0
ceph_pg_inconsistent{pool_id="48"} 0.0
ceph_pg_inconsistent{pool_id="49"} 0.0
# HELP ceph_pg_peering PG peering per pool
# TYPE ceph_pg_peering gauge
ceph_pg_peering{pool_id="1"} 0.0
ceph_pg_peering{pool_id="39"} 0.0
ceph_pg_peering{pool_id="40"} 0.0
ceph_pg_peering{pool_id="41"} 0.0
ceph_pg_peering{pool_id="42"} 0.0
ceph_pg_peering{pool_id="43"} 0.0
ceph_pg_peering{pool_id="44"} 0.0
ceph_pg_peering{pool_id="45"} 0.0
ceph_pg_peering{pool_id="46"} 0.0
ceph_pg_peering{pool_id="47"} 0.0
ceph_pg_peering{pool_id="48"} 0.0
ceph_pg_peering{pool_id="49"} 0.0
# HELP ceph_pg_repair PG repair per pool
# TYPE ceph_pg_repair gauge
ceph_pg_repair{pool_id="1"} 0.0
ceph_pg_repair{pool_id="39"} 0.0
ceph_pg_repair{pool_id="40"} 0.0
ceph_pg_repair{pool_id="41"} 0.0
ceph_pg_repair{pool_id="42"} 0.0
ceph_pg_repair{pool_id="43"} 0.0
ceph_pg_repair{pool_id="44"} 0.0
ceph_pg_repair{pool_id="45"} 0.0
ceph_pg_repair{pool_id="46"} 0.0
ceph_pg_repair{pool_id="47"} 0.0
ceph_pg_repair{pool_id="48"} 0.0
ceph_pg_repair{pool_id="49"} 0.0
# HELP ceph_pg_recovering PG recovering per pool
# TYPE ceph_pg_recovering gauge
ceph_pg_recovering{pool_id="1"} 0.0
ceph_pg_recovering{pool_id="39"} 0.0
ceph_pg_recovering{pool_id="40"} 0.0
ceph_pg_recovering{pool_id="41"} 0.0
ceph_pg_recovering{pool_id="42"} 0.0
ceph_pg_recovering{pool_id="43"} 0.0
ceph_pg_recovering{pool_id="44"} 0.0
ceph_pg_recovering{pool_id="45"} 0.0
ceph_pg_recovering{pool_id="46"} 0.0
ceph_pg_recovering{pool_id="47"} 0.0
ceph_pg_recovering{pool_id="48"} 0.0
ceph_pg_recovering{pool_id="49"} 0.0
# HELP ceph_pg_forced_recovery PG forced_recovery per pool
# TYPE ceph_pg_forced_recovery gauge
ceph_pg_forced_recovery{pool_id="1"} 0.0
ceph_pg_forced_recovery{pool_id="39"} 0.0
ceph_pg_forced_recovery{pool_id="40"} 0.0
ceph_pg_forced_recovery{pool_id="41"} 0.0
ceph_pg_forced_recovery{pool_id="42"} 0.0
ceph_pg_forced_recovery{pool_id="43"} 0.0
ceph_pg_forced_recovery{pool_id="44"} 0.0
ceph_pg_forced_recovery{pool_id="45"} 0.0
ceph_pg_forced_recovery{pool_id="46"} 0.0
ceph_pg_forced_recovery{pool_id="47"} 0.0
ceph_pg_forced_recovery{pool_id="48"} 0.0
ceph_pg_forced_recovery{pool_id="49"} 0.0
# HELP ceph_pg_backfill_wait PG backfill_wait per pool
# TYPE ceph_pg_backfill_wait gauge
ceph_pg_backfill_wait{pool_id="1"} 0.0
ceph_pg_backfill_wait{pool_id="39"} 3.0
ceph_pg_backfill_wait{pool_id="40"} 10.0
ceph_pg_backfill_wait{pool_id="41"} 19.0
ceph_pg_backfill_wait{pool_id="42"} 10.0
ceph_pg_backfill_wait{pool_id="43"} 22.0
ceph_pg_backfill_wait{pool_id="44"} 5.0
ceph_pg_backfill_wait{pool_id="45"} 0.0
ceph_pg_backfill_wait{pool_id="46"} 0.0
ceph_pg_backfill_wait{pool_id="47"} 0.0
ceph_pg_backfill_wait{pool_id="48"} 0.0
ceph_pg_backfill_wait{pool_id="49"} 0.0
# HELP ceph_pg_incomplete PG incomplete per pool
# TYPE ceph_pg_incomplete gauge
ceph_pg_incomplete{pool_id="1"} 0.0
ceph_pg_incomplete{pool_id="39"} 0.0
ceph_pg_incomplete{pool_id="40"} 0.0
ceph_pg_incomplete{pool_id="41"} 0.0
ceph_pg_incomplete{pool_id="42"} 0.0
ceph_pg_incomplete{pool_id="43"} 0.0
ceph_pg_incomplete{pool_id="44"} 0.0
ceph_pg_incomplete{pool_id="45"} 0.0
ceph_pg_incomplete{pool_id="46"} 0.0
ceph_pg_incomplete{pool_id="47"} 0.0
ceph_pg_incomplete{pool_id="48"} 0.0
ceph_pg_incomplete{pool_id="49"} 0.0
# HELP ceph_pg_stale PG stale per pool
# TYPE ceph_pg_stale gauge
ceph_pg_stale{pool_id="1"} 0.0
ceph_pg_stale{pool_id="39"} 0.0
ceph_pg_stale{pool_id="40"} 0.0
ceph_pg_stale{pool_id="41"} 0.0
ceph_pg_stale{pool_id="42"} 0.0
ceph_pg_stale{pool_id="43"} 0.0
ceph_pg_stale{pool_id="44"} 0.0
ceph_pg_stale{pool_id="45"} 0.0
ceph_pg_stale{pool_id="46"} 0.0
ceph_pg_stale{pool_id="47"} 0.0
ceph_pg_stale{pool_id="48"} 0.0
ceph_pg_stale{pool_id="49"} 0.0
# HELP ceph_pg_remapped PG remapped per pool
# TYPE ceph_pg_remapped gauge
ceph_pg_remapped{pool_id="1"} 0.0
ceph_pg_remapped{pool_id="39"} 3.0
ceph_pg_remapped{pool_id="40"} 10.0
ceph_pg_remapped{pool_id="41"} 27.0
ceph_pg_remapped{pool_id="42"} 10.0
ceph_pg_remapped{pool_id="43"} 22.0
ceph_pg_remapped{pool_id="44"} 5.0
ceph_pg_remapped{pool_id="45"} 0.0
ceph_pg_remapped{pool_id="46"} 0.0
ceph_pg_remapped{pool_id="47"} 0.0
ceph_pg_remapped{pool_id="48"} 0.0
ceph_pg_remapped{pool_id="49"} 0.0
# HELP ceph_pg_deep PG deep per pool
# TYPE ceph_pg_deep gauge
ceph_pg_deep{pool_id="1"} 0.0
ceph_pg_deep{pool_id="39"} 0.0
ceph_pg_deep{pool_id="40"} 0.0
ceph_pg_deep{pool_id="41"} 0.0
ceph_pg_deep{pool_id="42"} 0.0
ceph_pg_deep{pool_id="43"} 0.0
ceph_pg_deep{pool_id="44"} 0.0
ceph_pg_deep{pool_id="45"} 0.0
ceph_pg_deep{pool_id="46"} 0.0
ceph_pg_deep{pool_id="47"} 0.0
ceph_pg_deep{pool_id="48"} 0.0
ceph_pg_deep{pool_id="49"} 0.0
# HELP ceph_pg_backfilling PG backfilling per pool
# TYPE ceph_pg_backfilling gauge
ceph_pg_backfilling{pool_id="1"} 0.0
ceph_pg_backfilling{pool_id="39"} 0.0
ceph_pg_backfilling{pool_id="40"} 0.0
ceph_pg_backfilling{pool_id="41"} 8.0
ceph_pg_backfilling{pool_id="42"} 0.0
ceph_pg_backfilling{pool_id="43"} 0.0
ceph_pg_backfilling{pool_id="44"} 0.0
ceph_pg_backfilling{pool_id="45"} 0.0
ceph_pg_backfilling{pool_id="46"} 0.0
ceph_pg_backfilling{pool_id="47"} 0.0
ceph_pg_backfilling{pool_id="48"} 0.0
ceph_pg_backfilling{pool_id="49"} 0.0
# HELP ceph_pg_forced_backfill PG forced_backfill per pool
# TYPE ceph_pg_forced_backfill gauge
ceph_pg_forced_backfill{pool_id="1"} 0.0
ceph_pg_forced_backfill{pool_id="39"} 0.0
ceph_pg_forced_backfill{pool_id="40"} 0.0
ceph_pg_forced_backfill{pool_id="41"} 0.0
ceph_pg_forced_backfill{pool_id="42"} 0.0
ceph_pg_forced_backfill{pool_id="43"} 0.0
ceph_pg_forced_backfill{pool_id="44"} 0.0
ceph_pg_forced_backfill{pool_id="45"} 0.0
ceph_pg_forced_backfill{pool_id="46"} 0.0
ceph_pg_forced_backfill{pool_id="47"} 0.0
ceph_pg_forced_backfill{pool_id="48"} 0.0
ceph_pg_forced_backfill{pool_id="49"} 0.0
# HELP ceph_pg_backfill_toofull PG backfill_toofull per pool
# TYPE ceph_pg_backfill_toofull gauge
ceph_pg_backfill_toofull{pool_id="1"} 0.0
ceph_pg_backfill_toofull{pool_id="39"} 0.0
ceph_pg_backfill_toofull{pool_id="40"} 0.0
ceph_pg_backfill_toofull{pool_id="41"} 0.0
ceph_pg_backfill_toofull{pool_id="42"} 0.0
ceph_pg_backfill_toofull{pool_id="43"} 0.0
ceph_pg_backfill_toofull{pool_id="44"} 0.0
ceph_pg_backfill_toofull{pool_id="45"} 0.0
ceph_pg_backfill_toofull{pool_id="46"} 0.0
ceph_pg_backfill_toofull{pool_id="47"} 0.0
ceph_pg_backfill_toofull{pool_id="48"} 0.0
ceph_pg_backfill_toofull{pool_id="49"} 0.0
# HELP ceph_pg_recovery_wait PG recovery_wait per pool
# TYPE ceph_pg_recovery_wait gauge
ceph_pg_recovery_wait{pool_id="1"} 0.0
ceph_pg_recovery_wait{pool_id="39"} 0.0
ceph_pg_recovery_wait{pool_id="40"} 0.0
ceph_pg_recovery_wait{pool_id="41"} 0.0
ceph_pg_recovery_wait{pool_id="42"} 0.0
ceph_pg_recovery_wait{pool_id="43"} 0.0
ceph_pg_recovery_wait{pool_id="44"} 0.0
ceph_pg_recovery_wait{pool_id="45"} 0.0
ceph_pg_recovery_wait{pool_id="46"} 0.0
ceph_pg_recovery_wait{pool_id="47"} 0.0
ceph_pg_recovery_wait{pool_id="48"} 0.0
ceph_pg_recovery_wait{pool_id="49"} 0.0
# HELP ceph_pg_recovery_toofull PG recovery_toofull per pool
# TYPE ceph_pg_recovery_toofull gauge
ceph_pg_recovery_toofull{pool_id="1"} 0.0
ceph_pg_recovery_toofull{pool_id="39"} 0.0
ceph_pg_recovery_toofull{pool_id="40"} 0.0
ceph_pg_recovery_toofull{pool_id="41"} 0.0
ceph_pg_recovery_toofull{pool_id="42"} 0.0
ceph_pg_recovery_toofull{pool_id="43"} 0.0
ceph_pg_recovery_toofull{pool_id="44"} 0.0
ceph_pg_recovery_toofull{pool_id="45"} 0.0
ceph_pg_recovery_toofull{pool_id="46"} 0.0
ceph_pg_recovery_toofull{pool_id="47"} 0.0
ceph_pg_recovery_toofull{pool_id="48"} 0.0
ceph_pg_recovery_toofull{pool_id="49"} 0.0
# HELP ceph_pg_undersized PG undersized per pool
# TYPE ceph_pg_undersized gauge
ceph_pg_undersized{pool_id="1"} 0.0
ceph_pg_undersized{pool_id="39"} 0.0
ceph_pg_undersized{pool_id="40"} 10.0
ceph_pg_undersized{pool_id="41"} 27.0
ceph_pg_undersized{pool_id="42"} 10.0
ceph_pg_undersized{pool_id="43"} 15.0
ceph_pg_undersized{pool_id="44"} 0.0
ceph_pg_undersized{pool_id="45"} 0.0
ceph_pg_undersized{pool_id="46"} 0.0
ceph_pg_undersized{pool_id="47"} 0.0
ceph_pg_undersized{pool_id="48"} 0.0
ceph_pg_undersized{pool_id="49"} 0.0
# HELP ceph_pg_activating PG activating per pool
# TYPE ceph_pg_activating gauge
ceph_pg_activating{pool_id="1"} 0.0
ceph_pg_activating{pool_id="39"} 0.0
ceph_pg_activating{pool_id="40"} 0.0
ceph_pg_activating{pool_id="41"} 0.0
ceph_pg_activating{pool_id="42"} 0.0
ceph_pg_activating{pool_id="43"} 0.0
ceph_pg_activating{pool_id="44"} 0.0
ceph_pg_activating{pool_id="45"} 0.0
ceph_pg_activating{pool_id="46"} 0.0
ceph_pg_activating{pool_id="47"} 0.0
ceph_pg_activating{pool_id="48"} 0.0
ceph_pg_activating{pool_id="49"} 0.0
# HELP ceph_pg_peered PG peered per pool
# TYPE ceph_pg_peered gauge
ceph_pg_peered{pool_id="1"} 0.0
ceph_pg_peered{pool_id="39"} 0.0
ceph_pg_peered{pool_id="40"} 0.0
ceph_pg_peered{pool_id="41"} 0.0
ceph_pg_peered{pool_id="42"} 0.0
ceph_pg_peered{pool_id="43"} 0.0
ceph_pg_peered{pool_id="44"} 0.0
ceph_pg_peered{pool_id="45"} 0.0
ceph_pg_peered{pool_id="46"} 0.0
ceph_pg_peered{pool_id="47"} 0.0
ceph_pg_peered{pool_id="48"} 0.0
ceph_pg_peered{pool_id="49"} 0.0
# HELP ceph_pg_snaptrim PG snaptrim per pool
# TYPE ceph_pg_snaptrim gauge
ceph_pg_snaptrim{pool_id="1"} 0.0
ceph_pg_snaptrim{pool_id="39"} 0.0
ceph_pg_snaptrim{pool_id="40"} 0.0
ceph_pg_snaptrim{pool_id="41"} 0.0
ceph_pg_snaptrim{pool_id="42"} 0.0
ceph_pg_snaptrim{pool_id="43"} 0.0
ceph_pg_snaptrim{pool_id="44"} 0.0
ceph_pg_snaptrim{pool_id="45"} 0.0
ceph_pg_snaptrim{pool_id="46"} 0.0
ceph_pg_snaptrim{pool_id="47"} 0.0
ceph_pg_snaptrim{pool_id="48"} 0.0
ceph_pg_snaptrim{pool_id="49"} 0.0
# HELP ceph_pg_snaptrim_wait PG snaptrim_wait per pool
# TYPE ceph_pg_snaptrim_wait gauge
ceph_pg_snaptrim_wait{pool_id="1"} 0.0
ceph_pg_snaptrim_wait{pool_id="39"} 0.0
ceph_pg_snaptrim_wait{pool_id="40"} 0.0
ceph_pg_snaptrim_wait{pool_id="41"} 0.0
ceph_pg_snaptrim_wait{pool_id="42"} 0.0
ceph_pg_snaptrim_wait{pool_id="43"} 0.0
ceph_pg_snaptrim_wait{pool_id="44"} 0.0
ceph_pg_snaptrim_wait{pool_id="45"} 0.0
ceph_pg_snaptrim_wait{pool_id="46"} 0.0
ceph_pg_snaptrim_wait{pool_id="47"} 0.0
ceph_pg_snaptrim_wait{pool_id="48"} 0.0
ceph_pg_snaptrim_wait{pool_id="49"} 0.0
# HELP ceph_pg_snaptrim_error PG snaptrim_error per pool
# TYPE ceph_pg_snaptrim_error gauge
ceph_pg_snaptrim_error{pool_id="1"} 0.0
ceph_pg_snaptrim_error{pool_id="39"} 0.0
ceph_pg_snaptrim_error{pool_id="40"} 0.0
ceph_pg_snaptrim_error{pool_id="41"} 0.0
ceph_pg_snaptrim_error{pool_id="42"} 0.0
ceph_pg_snaptrim_error{pool_id="43"} 0.0
ceph_pg_snaptrim_error{pool_id="44"} 0.0
ceph_pg_snaptrim_error{pool_id="45"} 0.0
ceph_pg_snaptrim_error{pool_id="46"} 0.0
ceph_pg_snaptrim_error{pool_id="47"} 0.0
ceph_pg_snaptrim_error{pool_id="48"} 0.0
ceph_pg_snaptrim_error{pool_id="49"} 0.0
# HELP ceph_pg_creating PG creating per pool
# TYPE ceph_pg_creating gauge
ceph_pg_creating{pool_id="1"} 0.0
ceph_pg_creating{pool_id="39"} 0.0
ceph_pg_creating{pool_id="40"} 0.0
ceph_pg_creating{pool_id="41"} 0.0
ceph_pg_creating{pool_id="42"} 0.0
ceph_pg_creating{pool_id="43"} 0.0
ceph_pg_creating{pool_id="44"} 0.0
ceph_pg_creating{pool_id="45"} 0.0
ceph_pg_creating{pool_id="46"} 0.0
ceph_pg_creating{pool_id="47"} 0.0
ceph_pg_creating{pool_id="48"} 0.0
ceph_pg_creating{pool_id="49"} 0.0
# HELP ceph_pg_unknown PG unknown per pool
# TYPE ceph_pg_unknown gauge
ceph_pg_unknown{pool_id="1"} 0.0
ceph_pg_unknown{pool_id="39"} 0.0
ceph_pg_unknown{pool_id="40"} 0.0
ceph_pg_unknown{pool_id="41"} 0.0
ceph_pg_unknown{pool_id="42"} 0.0
ceph_pg_unknown{pool_id="43"} 0.0
ceph_pg_unknown{pool_id="44"} 0.0
ceph_pg_unknown{pool_id="45"} 0.0
ceph_pg_unknown{pool_id="46"} 0.0
ceph_pg_unknown{pool_id="47"} 0.0
ceph_pg_unknown{pool_id="48"} 0.0
ceph_pg_unknown{pool_id="49"} 0.0
# HELP ceph_pg_premerge PG premerge per pool
# TYPE ceph_pg_premerge gauge
ceph_pg_premerge{pool_id="1"} 0.0
ceph_pg_premerge{pool_id="39"} 0.0
ceph_pg_premerge{pool_id="40"} 0.0
ceph_pg_premerge{pool_id="41"} 0.0
ceph_pg_premerge{pool_id="42"} 0.0
ceph_pg_premerge{pool_id="43"} 0.0
ceph_pg_premerge{pool_id="44"} 0.0
ceph_pg_premerge{pool_id="45"} 0.0
ceph_pg_premerge{pool_id="46"} 0.0
ceph_pg_premerge{pool_id="47"} 0.0
ceph_pg_premerge{pool_id="48"} 0.0
ceph_pg_premerge{pool_id="49"} 0.0
# HELP ceph_pg_failed_repair PG failed_repair per pool
# TYPE ceph_pg_failed_repair gauge
ceph_pg_failed_repair{pool_id="1"} 0.0
ceph_pg_failed_repair{pool_id="39"} 0.0
ceph_pg_failed_repair{pool_id="40"} 0.0
ceph_pg_failed_repair{pool_id="41"} 0.0
ceph_pg_failed_repair{pool_id="42"} 0.0
ceph_pg_failed_repair{pool_id="43"} 0.0
ceph_pg_failed_repair{pool_id="44"} 0.0
ceph_pg_failed_repair{pool_id="45"} 0.0
ceph_pg_failed_repair{pool_id="46"} 0.0
ceph_pg_failed_repair{pool_id="47"} 0.0
ceph_pg_failed_repair{pool_id="48"} 0.0
ceph_pg_failed_repair{pool_id="49"} 0.0
# HELP ceph_pg_laggy PG laggy per pool
# TYPE ceph_pg_laggy gauge
ceph_pg_laggy{pool_id="1"} 0.0
ceph_pg_laggy{pool_id="39"} 0.0
ceph_pg_laggy{pool_id="40"} 0.0
ceph_pg_laggy{pool_id="41"} 0.0
ceph_pg_laggy{pool_id="42"} 0.0
ceph_pg_laggy{pool_id="43"} 0.0
ceph_pg_laggy{pool_id="44"} 0.0
ceph_pg_laggy{pool_id="45"} 0.0
ceph_pg_laggy{pool_id="46"} 0.0
ceph_pg_laggy{pool_id="47"} 0.0
ceph_pg_laggy{pool_id="48"} 0.0
ceph_pg_laggy{pool_id="49"} 0.0
# HELP ceph_pg_wait PG wait per pool
# TYPE ceph_pg_wait gauge
ceph_pg_wait{pool_id="1"} 0.0
ceph_pg_wait{pool_id="39"} 0.0
ceph_pg_wait{pool_id="40"} 0.0
ceph_pg_wait{pool_id="41"} 0.0
ceph_pg_wait{pool_id="42"} 0.0
ceph_pg_wait{pool_id="43"} 0.0
ceph_pg_wait{pool_id="44"} 0.0
ceph_pg_wait{pool_id="45"} 0.0
ceph_pg_wait{pool_id="46"} 0.0
ceph_pg_wait{pool_id="47"} 0.0
ceph_pg_wait{pool_id="48"} 0.0
ceph_pg_wait{pool_id="49"} 0.0
# HELP ceph_cluster_total_bytes DF total_bytes
# TYPE ceph_cluster_total_bytes gauge
ceph_cluster_total_bytes 76975079161856.0
# HELP ceph_cluster_by_class_total_bytes DF total_bytes
# TYPE ceph_cluster_by_class_total_bytes gauge
ceph_cluster_by_class_total_bytes{device_class="ssd"} 76975079161856.0
# HELP ceph_cluster_total_used_bytes DF total_used_bytes
# TYPE ceph_cluster_total_used_bytes gauge
ceph_cluster_total_used_bytes 35531428306944.0
# HELP ceph_cluster_by_class_total_used_bytes DF total_used_bytes
# TYPE ceph_cluster_by_class_total_used_bytes gauge
ceph_cluster_by_class_total_used_bytes{device_class="ssd"} 35531428306944.0
# HELP ceph_cluster_total_used_raw_bytes DF total_used_raw_bytes
# TYPE ceph_cluster_total_used_raw_bytes gauge
ceph_cluster_total_used_raw_bytes 35531428306944.0
# HELP ceph_cluster_by_class_total_used_raw_bytes DF total_used_raw_bytes
# TYPE ceph_cluster_by_class_total_used_raw_bytes gauge
ceph_cluster_by_class_total_used_raw_bytes{device_class="ssd"} 35531428306944.0
# HELP ceph_pool_max_avail DF pool max_avail
# TYPE ceph_pool_max_avail gauge
ceph_pool_max_avail{pool_id="1"} 6211199041536.0
ceph_pool_max_avail{pool_id="39"} 6211199041536.0
ceph_pool_max_avail{pool_id="40"} 6624926236672.0
ceph_pool_max_avail{pool_id="41"} 24518408011776.0
ceph_pool_max_avail{pool_id="42"} 25647357362176.0
ceph_pool_max_avail{pool_id="43"} 23965657464832.0
ceph_pool_max_avail{pool_id="44"} 6211199041536.0
ceph_pool_max_avail{pool_id="45"} 10351998402560.0
ceph_pool_max_avail{pool_id="46"} 10351998402560.0
ceph_pool_max_avail{pool_id="47"} 10351998402560.0
ceph_pool_max_avail{pool_id="48"} 10351998402560.0
ceph_pool_max_avail{pool_id="49"} 23291995619328.0
# HELP ceph_pool_avail_raw DF pool avail_raw
# TYPE ceph_pool_avail_raw gauge
ceph_pool_avail_raw{pool_id="1"} 31055996803224.0
ceph_pool_avail_raw{pool_id="39"} 31055996803224.0
ceph_pool_avail_raw{pool_id="40"} 31055996803224.0
ceph_pool_avail_raw{pool_id="41"} 31055996803224.0
ceph_pool_avail_raw{pool_id="42"} 31055996803224.0
ceph_pool_avail_raw{pool_id="43"} 31055996803224.0
ceph_pool_avail_raw{pool_id="44"} 31055996803224.0
ceph_pool_avail_raw{pool_id="45"} 31055996803224.0
ceph_pool_avail_raw{pool_id="46"} 31055996803224.0
ceph_pool_avail_raw{pool_id="47"} 31055996803224.0
ceph_pool_avail_raw{pool_id="48"} 31055996803224.0
ceph_pool_avail_raw{pool_id="49"} 31055996803224.0
# HELP ceph_pool_stored DF pool stored
# TYPE ceph_pool_stored gauge
ceph_pool_stored{pool_id="1"} 18350680.0
ceph_pool_stored{pool_id="39"} 1195342768.0
ceph_pool_stored{pool_id="40"} 271.0
ceph_pool_stored{pool_id="41"} 25660240166912.0
ceph_pool_stored{pool_id="42"} 2107008876544.0
ceph_pool_stored{pool_id="43"} 62720688128.0
ceph_pool_stored{pool_id="44"} 329771.0
ceph_pool_stored{pool_id="45"} 1323.0
ceph_pool_stored{pool_id="46"} 182.0
ceph_pool_stored{pool_id="47"} 0.0
ceph_pool_stored{pool_id="48"} 0.0
ceph_pool_stored{pool_id="49"} 49152.0
# HELP ceph_pool_stored_raw DF pool stored_raw
# TYPE ceph_pool_stored_raw gauge
ceph_pool_stored_raw{pool_id="1"} 91753400.0
ceph_pool_stored_raw{pool_id="39"} 5976713728.0
ceph_pool_stored_raw{pool_id="40"} 1270.0
ceph_pool_stored_raw{pool_id="41"} 32502284550144.0
ceph_pool_stored_raw{pool_id="42"} 2551345053696.0
ceph_pool_stored_raw{pool_id="43"} 81276862464.0
ceph_pool_stored_raw{pool_id="44"} 1648855.0
ceph_pool_stored_raw{pool_id="45"} 3969.0
ceph_pool_stored_raw{pool_id="46"} 546.0
ceph_pool_stored_raw{pool_id="47"} 0.0
ceph_pool_stored_raw{pool_id="48"} 0.0
ceph_pool_stored_raw{pool_id="49"} 65536.0
# HELP ceph_pool_objects DF pool objects
# TYPE ceph_pool_objects gauge
ceph_pool_objects{pool_id="1"} 6.0
ceph_pool_objects{pool_id="39"} 33536.0
ceph_pool_objects{pool_id="40"} 1730648.0
ceph_pool_objects{pool_id="41"} 7674794.0
ceph_pool_objects{pool_id="42"} 1011105.0
ceph_pool_objects{pool_id="43"} 17125.0
ceph_pool_objects{pool_id="44"} 53.0
ceph_pool_objects{pool_id="45"} 4.0
ceph_pool_objects{pool_id="46"} 2.0
ceph_pool_objects{pool_id="47"} 8.0
ceph_pool_objects{pool_id="48"} 0.0
ceph_pool_objects{pool_id="49"} 3.0
# HELP ceph_pool_dirty DF pool dirty
# TYPE ceph_pool_dirty gauge
ceph_pool_dirty{pool_id="1"} 0.0
ceph_pool_dirty{pool_id="39"} 0.0
ceph_pool_dirty{pool_id="40"} 0.0
ceph_pool_dirty{pool_id="41"} 0.0
ceph_pool_dirty{pool_id="42"} 0.0
ceph_pool_dirty{pool_id="43"} 0.0
ceph_pool_dirty{pool_id="44"} 0.0
ceph_pool_dirty{pool_id="45"} 0.0
ceph_pool_dirty{pool_id="46"} 0.0
ceph_pool_dirty{pool_id="47"} 0.0
ceph_pool_dirty{pool_id="48"} 0.0
ceph_pool_dirty{pool_id="49"} 0.0
# HELP ceph_pool_quota_bytes DF pool quota_bytes
# TYPE ceph_pool_quota_bytes gauge
ceph_pool_quota_bytes{pool_id="1"} 0.0
ceph_pool_quota_bytes{pool_id="39"} 0.0
ceph_pool_quota_bytes{pool_id="40"} 0.0
ceph_pool_quota_bytes{pool_id="41"} 0.0
ceph_pool_quota_bytes{pool_id="42"} 0.0
ceph_pool_quota_bytes{pool_id="43"} 0.0
ceph_pool_quota_bytes{pool_id="44"} 0.0
ceph_pool_quota_bytes{pool_id="45"} 0.0
ceph_pool_quota_bytes{pool_id="46"} 0.0
ceph_pool_quota_bytes{pool_id="47"} 0.0
ceph_pool_quota_bytes{pool_id="48"} 0.0
ceph_pool_quota_bytes{pool_id="49"} 0.0
# HELP ceph_pool_quota_objects DF pool quota_objects
# TYPE ceph_pool_quota_objects gauge
ceph_pool_quota_objects{pool_id="1"} 0.0
ceph_pool_quota_objects{pool_id="39"} 0.0
ceph_pool_quota_objects{pool_id="40"} 0.0
ceph_pool_quota_objects{pool_id="41"} 0.0
ceph_pool_quota_objects{pool_id="42"} 0.0
ceph_pool_quota_objects{pool_id="43"} 0.0
ceph_pool_quota_objects{pool_id="44"} 0.0
ceph_pool_quota_objects{pool_id="45"} 0.0
ceph_pool_quota_objects{pool_id="46"} 0.0
ceph_pool_quota_objects{pool_id="47"} 0.0
ceph_pool_quota_objects{pool_id="48"} 0.0 ceph_pool_quota_objects{pool_id="49"} 0.0 # HELP ceph_pool_rd DF pool rd # TYPE ceph_pool_rd counter ceph_pool_rd{pool_id="1"} 93926.0 ceph_pool_rd{pool_id="39"} 2514031.0 ceph_pool_rd{pool_id="40"} 51.0 ceph_pool_rd{pool_id="41"} 10171383.0 ceph_pool_rd{pool_id="42"} 6021249.0 ceph_pool_rd{pool_id="43"} 7199463.0 ceph_pool_rd{pool_id="44"} 17233242.0 ceph_pool_rd{pool_id="45"} 0.0 ceph_pool_rd{pool_id="46"} 0.0 ceph_pool_rd{pool_id="47"} 0.0 ceph_pool_rd{pool_id="48"} 0.0 ceph_pool_rd{pool_id="49"} 12.0 # HELP ceph_pool_rd_bytes DF pool rd_bytes # TYPE ceph_pool_rd_bytes counter ceph_pool_rd_bytes{pool_id="1"} 136045568.0 ceph_pool_rd_bytes{pool_id="39"} 250216634368.0 ceph_pool_rd_bytes{pool_id="40"} 52224.0 ceph_pool_rd_bytes{pool_id="41"} 18641421428736.0 ceph_pool_rd_bytes{pool_id="42"} 10420081667072.0 ceph_pool_rd_bytes{pool_id="43"} 33307242496.0 ceph_pool_rd_bytes{pool_id="44"} 165942605824.0 ceph_pool_rd_bytes{pool_id="45"} 0.0 ceph_pool_rd_bytes{pool_id="46"} 0.0 ceph_pool_rd_bytes{pool_id="47"} 0.0 ceph_pool_rd_bytes{pool_id="48"} 0.0 ceph_pool_rd_bytes{pool_id="49"} 12288.0 # HELP ceph_pool_wr DF pool wr # TYPE ceph_pool_wr counter ceph_pool_wr{pool_id="1"} 160031.0 ceph_pool_wr{pool_id="39"} 22187611.0 ceph_pool_wr{pool_id="40"} 5141748.0 ceph_pool_wr{pool_id="41"} 27301244.0 ceph_pool_wr{pool_id="42"} 13288211.0 ceph_pool_wr{pool_id="43"} 46755535.0 ceph_pool_wr{pool_id="44"} 58225909.0 ceph_pool_wr{pool_id="45"} 0.0 ceph_pool_wr{pool_id="46"} 0.0 ceph_pool_wr{pool_id="47"} 0.0 ceph_pool_wr{pool_id="48"} 0.0 ceph_pool_wr{pool_id="49"} 31.0 # HELP ceph_pool_wr_bytes DF pool wr_bytes # TYPE ceph_pool_wr_bytes counter ceph_pool_wr_bytes{pool_id="1"} 3608983552.0 ceph_pool_wr_bytes{pool_id="39"} 4039032205312.0 ceph_pool_wr_bytes{pool_id="40"} 2048.0 ceph_pool_wr_bytes{pool_id="41"} 44945586965504.0 ceph_pool_wr_bytes{pool_id="42"} 14688842105856.0 ceph_pool_wr_bytes{pool_id="43"} 315449363456.0 ceph_pool_wr_bytes{pool_id="44"} 382128504832.0 ceph_pool_wr_bytes{pool_id="45"} 0.0 ceph_pool_wr_bytes{pool_id="46"} 0.0 ceph_pool_wr_bytes{pool_id="47"} 0.0 ceph_pool_wr_bytes{pool_id="48"} 0.0 ceph_pool_wr_bytes{pool_id="49"} 13312.0 # HELP ceph_pool_compress_bytes_used DF pool compress_bytes_used # TYPE ceph_pool_compress_bytes_used gauge ceph_pool_compress_bytes_used{pool_id="1"} 0.0 ceph_pool_compress_bytes_used{pool_id="39"} 0.0 ceph_pool_compress_bytes_used{pool_id="40"} 0.0 ceph_pool_compress_bytes_used{pool_id="41"} 0.0 ceph_pool_compress_bytes_used{pool_id="42"} 0.0 ceph_pool_compress_bytes_used{pool_id="43"} 0.0 ceph_pool_compress_bytes_used{pool_id="44"} 0.0 ceph_pool_compress_bytes_used{pool_id="45"} 0.0 ceph_pool_compress_bytes_used{pool_id="46"} 0.0 ceph_pool_compress_bytes_used{pool_id="47"} 0.0 ceph_pool_compress_bytes_used{pool_id="48"} 0.0 ceph_pool_compress_bytes_used{pool_id="49"} 0.0 # HELP ceph_pool_compress_under_bytes DF pool compress_under_bytes # TYPE ceph_pool_compress_under_bytes gauge ceph_pool_compress_under_bytes{pool_id="1"} 0.0 ceph_pool_compress_under_bytes{pool_id="39"} 0.0 ceph_pool_compress_under_bytes{pool_id="40"} 0.0 ceph_pool_compress_under_bytes{pool_id="41"} 0.0 ceph_pool_compress_under_bytes{pool_id="42"} 0.0 ceph_pool_compress_under_bytes{pool_id="43"} 0.0 ceph_pool_compress_under_bytes{pool_id="44"} 0.0 ceph_pool_compress_under_bytes{pool_id="45"} 0.0 ceph_pool_compress_under_bytes{pool_id="46"} 0.0 ceph_pool_compress_under_bytes{pool_id="47"} 0.0 ceph_pool_compress_under_bytes{pool_id="48"} 0.0 
ceph_pool_compress_under_bytes{pool_id="49"} 0.0 # HELP ceph_pool_bytes_used DF pool bytes_used # TYPE ceph_pool_bytes_used gauge ceph_pool_bytes_used{pool_id="1"} 91770880.0 ceph_pool_bytes_used{pool_id="39"} 5976998909.0 ceph_pool_bytes_used{pool_id="40"} 40960.0 ceph_pool_bytes_used{pool_id="41"} 32502285082624.0 ceph_pool_bytes_used{pool_id="42"} 2551345065984.0 ceph_pool_bytes_used{pool_id="43"} 81277685760.0 ceph_pool_bytes_used{pool_id="44"} 2155301.0 ceph_pool_bytes_used{pool_id="45"} 49152.0 ceph_pool_bytes_used{pool_id="46"} 24576.0 ceph_pool_bytes_used{pool_id="47"} 0.0 ceph_pool_bytes_used{pool_id="48"} 0.0 ceph_pool_bytes_used{pool_id="49"} 65536.0 # HELP ceph_pool_percent_used DF pool percent_used # TYPE ceph_pool_percent_used gauge ceph_pool_percent_used{pool_id="1"} 2.9550044473580783e-06 ceph_pool_percent_used{pool_id="39"} 0.00019242173584643751 ceph_pool_percent_used{pool_id="40"} 1.318907982827966e-09 ceph_pool_percent_used{pool_id="41"} 0.5113776326179504 ceph_pool_percent_used{pool_id="42"} 0.07591630518436432 ceph_pool_percent_used{pool_id="43"} 0.0026103018317371607 ceph_pool_percent_used{pool_id="44"} 6.940047114767367e-08 ceph_pool_percent_used{pool_id="45"} 1.5826895349846382e-09 ceph_pool_percent_used{pool_id="46"} 7.913447674923191e-10 ceph_pool_percent_used{pool_id="47"} 0.0 ceph_pool_percent_used{pool_id="48"} 0.0 ceph_pool_percent_used{pool_id="49"} 2.1102526392979826e-09 # HELP ceph_cluster_osd_blocklist_count OSD Blocklist Count osd_blocklist_count # TYPE ceph_cluster_osd_blocklist_count gauge ceph_cluster_osd_blocklist_count 0.0 # HELP ceph_num_objects_degraded Number of degraded objects # TYPE ceph_num_objects_degraded gauge ceph_num_objects_degraded 7006796.0 # HELP ceph_num_objects_misplaced Number of misplaced objects # TYPE ceph_num_objects_misplaced gauge ceph_num_objects_misplaced 11451.0 # HELP ceph_num_objects_unfound Number of unfound objects # TYPE ceph_num_objects_unfound gauge ceph_num_objects_unfound 0.0 # HELP ceph_healthcheck_slow_ops OSD or Monitor requests taking a long time to process # TYPE ceph_healthcheck_slow_ops gauge ceph_healthcheck_slow_ops 0.0 # HELP ceph_prometheus_collect_duration_seconds_sum The sum of seconds took to collect all metrics of this exporter # TYPE ceph_prometheus_collect_duration_seconds_sum counter ceph_prometheus_collect_duration_seconds_sum{method="get_health"} 30.306828260421753 ceph_prometheus_collect_duration_seconds_sum{method="get_pool_stats"} 33.1092209815979 ceph_prometheus_collect_duration_seconds_sum{method="get_df"} 64.73373317718506 ceph_prometheus_collect_duration_seconds_sum{method="get_osd_blocklisted_entries"} 275.0508291721344 ceph_prometheus_collect_duration_seconds_sum{method="get_fs"} 189.1084861755371 ceph_prometheus_collect_duration_seconds_sum{method="get_quorum_status"} 142.97075939178467 ceph_prometheus_collect_duration_seconds_sum{method="get_mgr_status"} 1752.1374332904816 ceph_prometheus_collect_duration_seconds_sum{method="get_pg_status"} 186.63581323623657 ceph_prometheus_collect_duration_seconds_sum{method="get_osd_stats"} 175.25432705879211 ceph_prometheus_collect_duration_seconds_sum{method="get_metadata_and_osd_status"} 5389.169413328171 ceph_prometheus_collect_duration_seconds_sum{method="get_num_objects"} 131.23889112472534 ceph_prometheus_collect_duration_seconds_sum{method="get_rbd_stats"} 699.2813787460327 # HELP ceph_prometheus_collect_duration_seconds_count The amount of metrics gathered for this exporter # TYPE ceph_prometheus_collect_duration_seconds_count counter 
ceph_prometheus_collect_duration_seconds_count{method="get_health"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_pool_stats"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_df"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_osd_blocklisted_entries"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_fs"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_quorum_status"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_mgr_status"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_pg_status"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_osd_stats"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_metadata_and_osd_status"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_num_objects"} 42858.0 ceph_prometheus_collect_duration_seconds_count{method="get_rbd_stats"} 42858.0
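
The ceph_prometheus_collect_duration_seconds_sum / _count pair above is cumulative, so the average time a collector method spends per scrape is simply sum divided by count; with the figures shown here, get_metadata_and_osd_status works out to roughly 5389.17 s over 42858 scrapes, about 126 ms per scrape, the slowest of the twelve collectors. Below is a minimal Python sketch of that calculation against the module's text exposition endpoint (the mgr prometheus module listens on port 9283 by default); the host name in METRICS_URL is a placeholder and is not taken from this dump, and only the standard library is used.

import re
from urllib.request import urlopen

# Hypothetical endpoint; point this at one of your mgr hosts.
METRICS_URL = "http://ceph-mgr-host:9283/metrics"

# Matches lines such as:
# ceph_prometheus_collect_duration_seconds_sum{method="get_df"} 64.73373317718506
SAMPLE = re.compile(
    r'^ceph_prometheus_collect_duration_seconds_(sum|count)'
    r'\{method="([^"]+)"\}\s+(\S+)$'
)

def collect_duration_averages(url=METRICS_URL):
    """Return {method: average seconds per scrape} parsed from the exposition text."""
    sums, counts = {}, {}
    with urlopen(url, timeout=10) as resp:
        text = resp.read().decode()
    for line in text.splitlines():
        match = SAMPLE.match(line)
        if match:
            kind, method, value = match.groups()
            (sums if kind == "sum" else counts)[method] = float(value)
    # Average per-call latency; skip methods with a zero or missing count.
    return {m: sums[m] / counts[m] for m in sums if counts.get(m)}

if __name__ == "__main__":
    for method, avg in sorted(collect_duration_averages().items(),
                              key=lambda kv: kv[1], reverse=True):
        print(f"{method:30s} {avg * 1000:9.2f} ms per scrape")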