Bug #64051 » metrics.txt

Blake Klynsma, 01/16/2024 04:19 PM

# HELP ceph_health_status Cluster health status
# TYPE ceph_health_status untyped
ceph_health_status 0.0
# HELP ceph_mon_quorum_status Monitors in quorum
# TYPE ceph_mon_quorum_status gauge
ceph_mon_quorum_status{ceph_daemon="mon.mon01"} 1.0
ceph_mon_quorum_status{ceph_daemon="mon.mon02"} 1.0
ceph_mon_quorum_status{ceph_daemon="mon.mon03"} 1.0
# HELP ceph_fs_metadata FS Metadata
# TYPE ceph_fs_metadata untyped
# HELP ceph_mds_metadata MDS Metadata
# TYPE ceph_mds_metadata untyped
# HELP ceph_mon_metadata MON Metadata
# TYPE ceph_mon_metadata untyped
ceph_mon_metadata{ceph_daemon="mon.mon01",hostname="mon01",public_addr="172.16.0.52",rank="0",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mon_metadata{ceph_daemon="mon.mon02",hostname="mon02",public_addr="172.16.0.53",rank="1",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mon_metadata{ceph_daemon="mon.mon03",hostname="mon03",public_addr="172.16.0.54",rank="2",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
# HELP ceph_mgr_metadata MGR metadata
# TYPE ceph_mgr_metadata gauge
ceph_mgr_metadata{ceph_daemon="mgr.ceph-admin.rajvha",hostname="ceph-admin",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_mgr_metadata{ceph_daemon="mgr.mon01.vplxzp",hostname="mon01",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
# HELP ceph_mgr_status MGR status (0=standby, 1=active)
# TYPE ceph_mgr_status gauge
ceph_mgr_status{ceph_daemon="mgr.ceph-admin.rajvha"} 0.0
ceph_mgr_status{ceph_daemon="mgr.mon01.vplxzp"} 1.0
# HELP ceph_mgr_module_status MGR module status (0=disabled, 1=enabled, 2=auto-enabled)
# TYPE ceph_mgr_module_status gauge
ceph_mgr_module_status{name="alerts"} 0.0
ceph_mgr_module_status{name="balancer"} 2.0
ceph_mgr_module_status{name="cephadm"} 1.0
ceph_mgr_module_status{name="crash"} 2.0
ceph_mgr_module_status{name="dashboard"} 1.0
ceph_mgr_module_status{name="devicehealth"} 2.0
ceph_mgr_module_status{name="diskprediction_local"} 0.0
ceph_mgr_module_status{name="influx"} 0.0
ceph_mgr_module_status{name="insights"} 1.0
ceph_mgr_module_status{name="iostat"} 1.0
ceph_mgr_module_status{name="k8sevents"} 0.0
ceph_mgr_module_status{name="localpool"} 0.0
ceph_mgr_module_status{name="mds_autoscaler"} 0.0
ceph_mgr_module_status{name="mirroring"} 0.0
ceph_mgr_module_status{name="nfs"} 0.0
ceph_mgr_module_status{name="orchestrator"} 2.0
ceph_mgr_module_status{name="osd_perf_query"} 0.0
ceph_mgr_module_status{name="osd_support"} 0.0
ceph_mgr_module_status{name="pg_autoscaler"} 2.0
ceph_mgr_module_status{name="progress"} 2.0
ceph_mgr_module_status{name="prometheus"} 1.0
ceph_mgr_module_status{name="rbd_support"} 2.0
ceph_mgr_module_status{name="restful"} 1.0
ceph_mgr_module_status{name="rgw"} 0.0
ceph_mgr_module_status{name="rook"} 0.0
ceph_mgr_module_status{name="selftest"} 1.0
ceph_mgr_module_status{name="snap_schedule"} 0.0
ceph_mgr_module_status{name="stats"} 0.0
ceph_mgr_module_status{name="status"} 2.0
ceph_mgr_module_status{name="telegraf"} 0.0
ceph_mgr_module_status{name="telemetry"} 2.0
ceph_mgr_module_status{name="test_orchestrator"} 0.0
ceph_mgr_module_status{name="volumes"} 2.0
ceph_mgr_module_status{name="zabbix"} 0.0
# HELP ceph_mgr_module_can_run MGR module runnable state i.e. can it run (0=no, 1=yes)
# TYPE ceph_mgr_module_can_run gauge
ceph_mgr_module_can_run{name="alerts"} 1.0
ceph_mgr_module_can_run{name="balancer"} 1.0
ceph_mgr_module_can_run{name="cephadm"} 1.0
ceph_mgr_module_can_run{name="crash"} 1.0
ceph_mgr_module_can_run{name="dashboard"} 1.0
ceph_mgr_module_can_run{name="devicehealth"} 1.0
ceph_mgr_module_can_run{name="diskprediction_local"} 1.0
ceph_mgr_module_can_run{name="influx"} 0.0
ceph_mgr_module_can_run{name="insights"} 1.0
ceph_mgr_module_can_run{name="iostat"} 1.0
ceph_mgr_module_can_run{name="k8sevents"} 1.0
ceph_mgr_module_can_run{name="localpool"} 1.0
ceph_mgr_module_can_run{name="mds_autoscaler"} 1.0
ceph_mgr_module_can_run{name="mirroring"} 1.0
ceph_mgr_module_can_run{name="nfs"} 1.0
ceph_mgr_module_can_run{name="orchestrator"} 1.0
ceph_mgr_module_can_run{name="osd_perf_query"} 1.0
ceph_mgr_module_can_run{name="osd_support"} 1.0
ceph_mgr_module_can_run{name="pg_autoscaler"} 1.0
ceph_mgr_module_can_run{name="progress"} 1.0
ceph_mgr_module_can_run{name="prometheus"} 1.0
ceph_mgr_module_can_run{name="rbd_support"} 1.0
ceph_mgr_module_can_run{name="restful"} 1.0
ceph_mgr_module_can_run{name="rgw"} 1.0
ceph_mgr_module_can_run{name="rook"} 1.0
ceph_mgr_module_can_run{name="selftest"} 1.0
ceph_mgr_module_can_run{name="snap_schedule"} 1.0
ceph_mgr_module_can_run{name="stats"} 1.0
ceph_mgr_module_can_run{name="status"} 1.0
ceph_mgr_module_can_run{name="telegraf"} 1.0
ceph_mgr_module_can_run{name="telemetry"} 1.0
ceph_mgr_module_can_run{name="test_orchestrator"} 1.0
ceph_mgr_module_can_run{name="volumes"} 1.0
ceph_mgr_module_can_run{name="zabbix"} 1.0
# HELP ceph_osd_metadata OSD Metadata
# TYPE ceph_osd_metadata untyped
ceph_osd_metadata{back_iface="",ceph_daemon="osd.0",cluster_addr="10.10.0.15",device_class="hdd",front_iface="",hostname="01",objectstore="bluestore",public_addr="172.16.0.64",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.1",cluster_addr="10.10.0.15",device_class="hdd",front_iface="",hostname="01",objectstore="bluestore",public_addr="172.16.0.64",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.2",cluster_addr="10.10.0.15",device_class="hdd",front_iface="",hostname="01",objectstore="bluestore",public_addr="172.16.0.64",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.3",cluster_addr="10.10.0.15",device_class="hdd",front_iface="",hostname="01",objectstore="bluestore",public_addr="172.16.0.64",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.4",cluster_addr="10.10.0.16",device_class="hdd",front_iface="",hostname="02",objectstore="bluestore",public_addr="172.16.0.65",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.5",cluster_addr="10.10.0.16",device_class="hdd",front_iface="",hostname="02",objectstore="bluestore",public_addr="172.16.0.65",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.6",cluster_addr="10.10.0.16",device_class="hdd",front_iface="",hostname="02",objectstore="bluestore",public_addr="172.16.0.65",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.7",cluster_addr="10.10.0.16",device_class="hdd",front_iface="",hostname="02",objectstore="bluestore",public_addr="172.16.0.65",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.8",cluster_addr="10.10.0.17",device_class="hdd",front_iface="",hostname="03",objectstore="bluestore",public_addr="172.16.0.66",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.9",cluster_addr="10.10.0.17",device_class="hdd",front_iface="",hostname="03",objectstore="bluestore",public_addr="172.16.0.66",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.10",cluster_addr="10.10.0.17",device_class="hdd",front_iface="",hostname="03",objectstore="bluestore",public_addr="172.16.0.66",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.11",cluster_addr="10.10.0.17",device_class="hdd",front_iface="",hostname="03",objectstore="bluestore",public_addr="172.16.0.66",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.12",cluster_addr="10.10.0.18",device_class="hdd",front_iface="",hostname="04",objectstore="bluestore",public_addr="172.16.0.67",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.13",cluster_addr="10.10.0.18",device_class="hdd",front_iface="",hostname="04",objectstore="bluestore",public_addr="172.16.0.67",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.14",cluster_addr="10.10.0.18",device_class="hdd",front_iface="",hostname="04",objectstore="bluestore",public_addr="172.16.0.67",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.15",cluster_addr="10.10.0.18",device_class="hdd",front_iface="",hostname="04",objectstore="bluestore",public_addr="172.16.0.67",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.16",cluster_addr="10.10.0.19",device_class="hdd",front_iface="",hostname="05",objectstore="bluestore",public_addr="172.16.0.68",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.17",cluster_addr="10.10.0.19",device_class="hdd",front_iface="",hostname="05",objectstore="bluestore",public_addr="172.16.0.68",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.18",cluster_addr="10.10.0.19",device_class="hdd",front_iface="",hostname="05",objectstore="bluestore",public_addr="172.16.0.68",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.19",cluster_addr="10.10.0.19",device_class="hdd",front_iface="",hostname="05",objectstore="bluestore",public_addr="172.16.0.68",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.20",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.21",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.22",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.23",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.24",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.25",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.26",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.27",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.28",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.29",cluster_addr="10.10.0.20",device_class="hdd",front_iface="",hostname="06",objectstore="bluestore",public_addr="172.16.0.69",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.30",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.31",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.32",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.33",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.34",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.35",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.36",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.37",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.38",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.39",cluster_addr="10.10.0.21",device_class="hdd",front_iface="",hostname="07",objectstore="bluestore",public_addr="172.16.0.70",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.40",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.41",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.42",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.43",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.44",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.45",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.46",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.47",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.48",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.49",cluster_addr="10.10.0.22",device_class="hdd",front_iface="",hostname="08",objectstore="bluestore",public_addr="172.16.0.62",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.50",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.51",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.52",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.53",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.54",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.55",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.56",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.57",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.58",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
ceph_osd_metadata{back_iface="",ceph_daemon="osd.59",cluster_addr="10.10.0.23",device_class="hdd",front_iface="",hostname="09",objectstore="bluestore",public_addr="172.16.0.63",ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} 1.0
# HELP ceph_disk_occupation Associate Ceph daemon with disk used
# TYPE ceph_disk_occupation untyped
ceph_disk_occupation{ceph_daemon="osd.0",device="/dev/dm-4",db_device="",wal_device="",instance="01",devices="sda",device_ids="sda=ST12000NM0007-2A_ZCH0E87P"} 1.0
ceph_disk_occupation{ceph_daemon="osd.1",device="/dev/dm-2",db_device="",wal_device="",instance="01",devices="sdb",device_ids="sdb=ST12000NM0007-2A_ZJV06AWK"} 1.0
ceph_disk_occupation{ceph_daemon="osd.2",device="/dev/dm-3",db_device="",wal_device="",instance="01",devices="sdc",device_ids="sdc=ST12000NM0007-2A_ZJV062DC"} 1.0
ceph_disk_occupation{ceph_daemon="osd.3",device="/dev/dm-1",db_device="",wal_device="",instance="01",devices="sdd",device_ids="sdd=ST12000NM0007-2A_ZCH0E2M3"} 1.0
ceph_disk_occupation{ceph_daemon="osd.4",device="/dev/dm-1",db_device="",wal_device="",instance="02",devices="sda",device_ids="sda=ST12000NM001G-2M_ZL25JEHM"} 1.0
ceph_disk_occupation{ceph_daemon="osd.5",device="/dev/dm-2",db_device="",wal_device="",instance="02",devices="sdb",device_ids="sdb=ST12000NM0007-2A_ZCH0BXNB"} 1.0
ceph_disk_occupation{ceph_daemon="osd.6",device="/dev/dm-3",db_device="",wal_device="",instance="02",devices="sdc",device_ids="sdc=ST12000NM0007-2A_ZJV062LX"} 1.0
ceph_disk_occupation{ceph_daemon="osd.7",device="/dev/dm-4",db_device="",wal_device="",instance="02",devices="sdd",device_ids="sdd=ST12000NM0007-2A_ZJV062MG"} 1.0
ceph_disk_occupation{ceph_daemon="osd.8",device="/dev/dm-1",db_device="",wal_device="",instance="03",devices="sdb",device_ids="sdb=ST12000NM0007-2A_ZCH0E2N3"} 1.0
ceph_disk_occupation{ceph_daemon="osd.9",device="/dev/dm-0",db_device="",wal_device="",instance="03",devices="sdc",device_ids="sdc=ST12000NM0007-2A_ZJV068N3"} 1.0
ceph_disk_occupation{ceph_daemon="osd.10",device="/dev/dm-3",db_device="",wal_device="",instance="03",devices="sdd",device_ids="sdd=ST12000NM0007-2A_ZJV32SYK"} 1.0
ceph_disk_occupation{ceph_daemon="osd.11",device="/dev/dm-4",db_device="",wal_device="",instance="03",devices="sde",device_ids="sde=HGST_HUH721212AL_8DHG39MH"} 1.0
ceph_disk_occupation{ceph_daemon="osd.12",device="/dev/dm-2",db_device="",wal_device="",instance="04",devices="sdb",device_ids="sdb=ST12000NM0007-2A_ZJV062CK"} 1.0
ceph_disk_occupation{ceph_daemon="osd.13",device="/dev/dm-1",db_device="",wal_device="",instance="04",devices="sdc",device_ids="sdc=ST18000NM003J-2T_ZR55PKCC"} 1.0
ceph_disk_occupation{ceph_daemon="osd.14",device="/dev/dm-3",db_device="",wal_device="",instance="04",devices="sdd",device_ids="sdd=ST12000NM0007-2A_ZJV067WK"} 1.0
ceph_disk_occupation{ceph_daemon="osd.15",device="/dev/dm-0",db_device="",wal_device="",instance="04",devices="sde",device_ids="sde=ST12000NM0007-2A_ZCH0E2L6"} 1.0
ceph_disk_occupation{ceph_daemon="osd.16",device="/dev/dm-2",db_device="",wal_device="",instance="05",devices="sda",device_ids="sda=ST8000VX0022-2EJ_ZA16DCWK"} 1.0
ceph_disk_occupation{ceph_daemon="osd.17",device="/dev/dm-1",db_device="",wal_device="",instance="05",devices="sdb",device_ids="sdb=ST8000VX0022-2EJ_ZA16E72M"} 1.0
ceph_disk_occupation{ceph_daemon="osd.18",device="/dev/dm-4",db_device="",wal_device="",instance="05",devices="sdc",device_ids="sdc=HGST_HUS728T8TAL_VGG2M6WG"} 1.0
ceph_disk_occupation{ceph_daemon="osd.19",device="/dev/dm-3",db_device="",wal_device="",instance="05",devices="sdd",device_ids="sdd=HGST_HUS728T8TAL_VGG2M8EG"} 1.0
ceph_disk_occupation{ceph_daemon="osd.20",device="/dev/dm-3",db_device="",wal_device="",instance="06",devices="sda",device_ids="sda=HUH721010AL42C0_5000cca26ac5a8e0"} 1.0
ceph_disk_occupation{ceph_daemon="osd.21",device="/dev/dm-10",db_device="",wal_device="",instance="06",devices="sdg",device_ids="sdg=HUH721010AL42C0_5000cca26ac538d0"} 1.0
ceph_disk_occupation{ceph_daemon="osd.22",device="/dev/dm-9",db_device="",wal_device="",instance="06",devices="sdh",device_ids="sdh=HUH721010AL42C0_5000cca26aba82f8"} 1.0
ceph_disk_occupation{ceph_daemon="osd.23",device="/dev/dm-7",db_device="",wal_device="",instance="06",devices="sde",device_ids="sde=HUH721010AL42C0_5000cca26aca2c28"} 1.0
ceph_disk_occupation{ceph_daemon="osd.24",device="/dev/dm-6",db_device="",wal_device="",instance="06",devices="sdb",device_ids="sdb=HUH721010AL42C0_5000cca26ab5d810"} 1.0
ceph_disk_occupation{ceph_daemon="osd.25",device="/dev/dm-5",db_device="",wal_device="",instance="06",devices="sdc",device_ids="sdc=HUH721010AL42C0_5000cca26ab96670"} 1.0
ceph_disk_occupation{ceph_daemon="osd.26",device="/dev/dm-8",db_device="",wal_device="",instance="06",devices="sdd",device_ids="sdd=HUH721010AL42C0_5000cca26abbc31c"} 1.0
ceph_disk_occupation{ceph_daemon="osd.27",device="/dev/dm-4",db_device="",wal_device="",instance="06",devices="sdf",device_ids="sdf=HUH721010AL42C0_5000cca26ab9c538"} 1.0
ceph_disk_occupation{ceph_daemon="osd.28",device="/dev/dm-2",db_device="",wal_device="",instance="06",devices="sdi",device_ids="sdi=HUH721010AL42C0_5000cca26aa586f8"} 1.0
ceph_disk_occupation{ceph_daemon="osd.29",device="/dev/dm-1",db_device="",wal_device="",instance="06",devices="sdj",device_ids="sdj=HUH721010AL42C0_5000cca26ac57408"} 1.0
ceph_disk_occupation{ceph_daemon="osd.30",device="/dev/dm-3",db_device="",wal_device="",instance="07",devices="sda",device_ids="sda=HUH721010AL42C0_5000cca26ac7279c"} 1.0
ceph_disk_occupation{ceph_daemon="osd.31",device="/dev/dm-8",db_device="",wal_device="",instance="07",devices="sdd",device_ids="sdd=HUH721010AL42C0_5000cca26ab7073c"} 1.0
ceph_disk_occupation{ceph_daemon="osd.32",device="/dev/dm-9",db_device="",wal_device="",instance="07",devices="sdc",device_ids="sdc=HUH721010AL42C0_5000cca26aa8bb38"} 1.0
ceph_disk_occupation{ceph_daemon="osd.33",device="/dev/dm-4",db_device="",wal_device="",instance="07",devices="sdf",device_ids="sdf=HUH721010AL42C0_5000cca26ab71278"} 1.0
ceph_disk_occupation{ceph_daemon="osd.34",device="/dev/dm-10",db_device="",wal_device="",instance="07",devices="sdg",device_ids="sdg=HUH721010AL42C0_5000cca26ab9d618"} 1.0
ceph_disk_occupation{ceph_daemon="osd.35",device="/dev/dm-6",db_device="",wal_device="",instance="07",devices="sdh",device_ids="sdh=HUH721010AL42C0_5000cca26ad25e2c"} 1.0
ceph_disk_occupation{ceph_daemon="osd.36",device="/dev/dm-5",db_device="",wal_device="",instance="07",devices="sde",device_ids="sde=HUH721010AL42C0_5000cca26aab5d90"} 1.0
ceph_disk_occupation{ceph_daemon="osd.37",device="/dev/dm-7",db_device="",wal_device="",instance="07",devices="sdb",device_ids="sdb=HUH721010AL42C0_5000cca26ac56518"} 1.0
ceph_disk_occupation{ceph_daemon="osd.38",device="/dev/dm-2",db_device="",wal_device="",instance="07",devices="sdi",device_ids="sdi=HUH721010AL42C0_5000cca26a846fac"} 1.0
ceph_disk_occupation{ceph_daemon="osd.39",device="/dev/dm-1",db_device="",wal_device="",instance="07",devices="sdj",device_ids="sdj=HUH721010AL42C0_5000cca26a5b87b8"} 1.0
ceph_disk_occupation{ceph_daemon="osd.40",device="/dev/dm-3",db_device="",wal_device="",instance="08",devices="sda",device_ids="sda=HUH721010AL42C0_5000cca26a619b38"} 1.0
ceph_disk_occupation{ceph_daemon="osd.41",device="/dev/dm-2",db_device="",wal_device="",instance="08",devices="sdb",device_ids="sdb=HUH721010AL42C0_5000cca26a74ad00"} 1.0
ceph_disk_occupation{ceph_daemon="osd.42",device="/dev/dm-10",db_device="",wal_device="",instance="08",devices="sde",device_ids="sde=HUH721010AL42C0_5000cca26a662a08"} 1.0
ceph_disk_occupation{ceph_daemon="osd.43",device="/dev/dm-6",db_device="",wal_device="",instance="08",devices="sdf",device_ids="sdf=HUH721010AL42C0_5000cca26a643c8c"} 1.0
ceph_disk_occupation{ceph_daemon="osd.44",device="/dev/dm-9",db_device="",wal_device="",instance="08",devices="sdg",device_ids="sdg=HUH721010AL42C0_5000cca26a74cd90"} 1.0
ceph_disk_occupation{ceph_daemon="osd.45",device="/dev/dm-7",db_device="",wal_device="",instance="08",devices="sdd",device_ids="sdd=HUH721010AL42C0_5000cca26a706340"} 1.0
ceph_disk_occupation{ceph_daemon="osd.46",device="/dev/dm-5",db_device="",wal_device="",instance="08",devices="sdh",device_ids="sdh=HUH721010AL42C0_5000cca26abb9758"} 1.0
ceph_disk_occupation{ceph_daemon="osd.47",device="/dev/dm-4",db_device="",wal_device="",instance="08",devices="sdc",device_ids="sdc=HUH721010AL42C0_5000cca26a5ba750"} 1.0
ceph_disk_occupation{ceph_daemon="osd.48",device="/dev/dm-0",db_device="",wal_device="",instance="08",devices="sdi",device_ids="sdi=HUH721010AL42C0_5000cca26a6a0388"} 1.0
ceph_disk_occupation{ceph_daemon="osd.49",device="/dev/dm-1",db_device="",wal_device="",instance="08",devices="sdj",device_ids="sdj=HUH721010AL42C0_5000cca26a7312fc"} 1.0
ceph_disk_occupation{ceph_daemon="osd.50",device="/dev/dm-2",db_device="",wal_device="",instance="09",devices="sda",device_ids="sda=HUH721010AL42C0_5000cca26a979224"} 1.0
ceph_disk_occupation{ceph_daemon="osd.51",device="/dev/dm-7",db_device="",wal_device="",instance="09",devices="sdg",device_ids="sdg=HUH721010AL42C0_5000cca26a9ec9fc"} 1.0
ceph_disk_occupation{ceph_daemon="osd.52",device="/dev/dm-9",db_device="",wal_device="",instance="09",devices="sdb",device_ids="sdb=HUH721010AL42C0_5000cca26a9b65dc"} 1.0
ceph_disk_occupation{ceph_daemon="osd.53",device="/dev/dm-8",db_device="",wal_device="",instance="09",devices="sdc",device_ids="sdc=HUH721010AL42C0_5000cca26a990b98"} 1.0
ceph_disk_occupation{ceph_daemon="osd.54",device="/dev/dm-5",db_device="",wal_device="",instance="09",devices="sdd",device_ids="sdd=HUH721010AL42C0_5000cca26a96df74"} 1.0
ceph_disk_occupation{ceph_daemon="osd.55",device="/dev/dm-6",db_device="",wal_device="",instance="09",devices="sde",device_ids="sde=HUH721010AL42C0_5000cca26a952abc"} 1.0
ceph_disk_occupation{ceph_daemon="osd.56",device="/dev/dm-4",db_device="",wal_device="",instance="09",devices="sdf",device_ids="sdf=HUH721010AL42C0_5000cca26a989060"} 1.0
ceph_disk_occupation{ceph_daemon="osd.57",device="/dev/dm-3",db_device="",wal_device="",instance="09",devices="sdh",device_ids="sdh=HUH721010AL42C0_5000cca26a9d4a1c"} 1.0
ceph_disk_occupation{ceph_daemon="osd.58",device="/dev/dm-10",db_device="",wal_device="",instance="09",devices="sdi",device_ids="sdi=HUH721010AL42C0_5000cca26a97aff0"} 1.0
ceph_disk_occupation{ceph_daemon="osd.59",device="/dev/dm-1",db_device="",wal_device="",instance="09",devices="sdj",device_ids="sdj=HUH721010AL42C0_5000cca26a7c1884"} 1.0
# HELP ceph_disk_occupation_human Associate Ceph daemon with disk used
# TYPE ceph_disk_occupation_human untyped
ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/dm-4",instance="01"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/dm-2",instance="01"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.2",device="/dev/dm-3",instance="01"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.3",device="/dev/dm-1",instance="01"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.4",device="/dev/dm-1",instance="02"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.5",device="/dev/dm-2",instance="02"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.6",device="/dev/dm-3",instance="02"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.7",device="/dev/dm-4",instance="02"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.8",device="/dev/dm-1",instance="03"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.9",device="/dev/dm-0",instance="03"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.10",device="/dev/dm-3",instance="03"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.11",device="/dev/dm-4",instance="03"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.12",device="/dev/dm-2",instance="04"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.13",device="/dev/dm-1",instance="04"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.14",device="/dev/dm-3",instance="04"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.15",device="/dev/dm-0",instance="04"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.16",device="/dev/dm-2",instance="05"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.17",device="/dev/dm-1",instance="05"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.18",device="/dev/dm-4",instance="05"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.19",device="/dev/dm-3",instance="05"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.20",device="/dev/dm-3",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.21",device="/dev/dm-10",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.22",device="/dev/dm-9",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.23",device="/dev/dm-7",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.24",device="/dev/dm-6",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.25",device="/dev/dm-5",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.26",device="/dev/dm-8",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.27",device="/dev/dm-4",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.28",device="/dev/dm-2",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.29",device="/dev/dm-1",instance="06"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.30",device="/dev/dm-3",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.31",device="/dev/dm-8",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.32",device="/dev/dm-9",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.33",device="/dev/dm-4",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.34",device="/dev/dm-10",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.35",device="/dev/dm-6",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.36",device="/dev/dm-5",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.37",device="/dev/dm-7",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.38",device="/dev/dm-2",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.39",device="/dev/dm-1",instance="07"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.40",device="/dev/dm-3",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.41",device="/dev/dm-2",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.42",device="/dev/dm-10",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.43",device="/dev/dm-6",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.44",device="/dev/dm-9",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.45",device="/dev/dm-7",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.46",device="/dev/dm-5",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.47",device="/dev/dm-4",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.48",device="/dev/dm-0",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.49",device="/dev/dm-1",instance="08"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.50",device="/dev/dm-2",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.51",device="/dev/dm-7",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.52",device="/dev/dm-9",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.53",device="/dev/dm-8",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.54",device="/dev/dm-5",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.55",device="/dev/dm-6",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.56",device="/dev/dm-4",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.57",device="/dev/dm-3",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.58",device="/dev/dm-10",instance="09"} 1.0
ceph_disk_occupation_human{ceph_daemon="osd.59",device="/dev/dm-1",instance="09"} 1.0
# HELP ceph_rbd_mirror_metadata RBD Mirror Metadata
# TYPE ceph_rbd_mirror_metadata untyped
# HELP ceph_pg_total PG Total Count per Pool
# TYPE ceph_pg_total gauge
ceph_pg_total{pool_id="1"} 1024.0
ceph_pg_total{pool_id="33"} 16.0
ceph_pg_total{pool_id="34"} 16.0
ceph_pg_total{pool_id="35"} 16.0
ceph_pg_total{pool_id="40"} 16.0
ceph_pg_total{pool_id="44"} 16.0
ceph_pg_total{pool_id="45"} 1024.0
ceph_pg_total{pool_id="46"} 16.0
ceph_pg_total{pool_id="47"} 16.0
# HELP ceph_health_detail healthcheck status by type (0=inactive, 1=active)
# TYPE ceph_health_detail gauge
ceph_health_detail{name="CEPHADM_REFRESH_FAILED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_NEARFULL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_NOT_SCRUBBED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="POOL_NEARFULL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_DOWN",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_AVAILABILITY",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_DEGRADED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_HOST_DOWN",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="FS_DEGRADED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MON_DOWN",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="PG_BACKFILL_FULL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_BACKFILLFULL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="POOL_BACKFILLFULL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="SLOW_OPS",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MDS_ALL_DOWN",severity="HEALTH_ERR"} 0.0
ceph_health_detail{name="CEPHADM_FAILED_DAEMON",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_HOST_CHECK_FAILED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_TOO_MANY_REPAIRS",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSDMAP_FLAGS",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MON_CLOCK_SKEW",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="TELEMETRY_CHANGED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_UPGRADE_FINISHED",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_APPLY_SPEC_FAIL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MGR_MODULE_ERROR",severity="HEALTH_ERR"} 0.0
ceph_health_detail{name="MON_DISK_LOW",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="MANY_OBJECTS_PER_PG",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="CEPHADM_DAEMON_PLACE_FAIL",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="DASHBOARD_DEBUG",severity="HEALTH_WARN"} 0.0
ceph_health_detail{name="OSD_SCRUB_ERRORS",severity="HEALTH_ERR"} 0.0
ceph_health_detail{name="PG_DAMAGED",severity="HEALTH_ERR"} 0.0
# HELP ceph_pool_objects_repaired Number of objects repaired in a pool
# TYPE ceph_pool_objects_repaired counter
ceph_pool_objects_repaired{pool_id="47"} 0.0
ceph_pool_objects_repaired{pool_id="46"} 0.0
ceph_pool_objects_repaired{pool_id="45"} 0.0
ceph_pool_objects_repaired{pool_id="33"} 0.0
ceph_pool_objects_repaired{pool_id="40"} 0.0
ceph_pool_objects_repaired{pool_id="34"} 0.0
ceph_pool_objects_repaired{pool_id="1"} 21.0
ceph_pool_objects_repaired{pool_id="35"} 0.0
ceph_pool_objects_repaired{pool_id="44"} 0.0
# HELP ceph_daemon_health_metrics Health metrics for Ceph daemons
# TYPE ceph_daemon_health_metrics gauge
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.0"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.0"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.1"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.1"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.2"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.2"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.3"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.3"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.4"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.4"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.5"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.5"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.6"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.6"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.7"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.7"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.10"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.10"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.11"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.11"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.8"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.8"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.9"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.9"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.12"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.12"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.13"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.13"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.14"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.14"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.15"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.15"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.16"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.16"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.17"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.17"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.18"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.18"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.19"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.19"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.20"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.20"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.21"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.21"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.22"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.22"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.23"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.23"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.24"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.24"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.25"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.25"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.26"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.26"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.27"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.27"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.28"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.28"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.29"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.29"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.30"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.30"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.31"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.31"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.32"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.32"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.33"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.33"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.34"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.34"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.35"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.35"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.36"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.36"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.37"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.37"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.38"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.38"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.39"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.39"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.40"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.40"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.41"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.41"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.42"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.42"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.43"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.43"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.44"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.44"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.45"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.45"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.46"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.46"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.47"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.47"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.48"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.48"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.49"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.49"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.50"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.50"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.51"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.51"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.52"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.52"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.53"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.53"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.54"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.54"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.55"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.55"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.56"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.56"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.57"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.57"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.58"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.58"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="osd.59"} 0.0
ceph_daemon_health_metrics{type="PENDING_CREATING_PGS",ceph_daemon="osd.59"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.mon01"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.mon02"} 0.0
ceph_daemon_health_metrics{type="SLOW_OPS",ceph_daemon="mon.mon03"} 0.0
# HELP ceph_osd_flag_noup OSD Flag noup
# TYPE ceph_osd_flag_noup untyped
ceph_osd_flag_noup 0.0
# HELP ceph_osd_flag_nodown OSD Flag nodown
# TYPE ceph_osd_flag_nodown untyped
ceph_osd_flag_nodown 0.0
# HELP ceph_osd_flag_noout OSD Flag noout
# TYPE ceph_osd_flag_noout untyped
ceph_osd_flag_noout 0.0
# HELP ceph_osd_flag_noin OSD Flag noin
# TYPE ceph_osd_flag_noin untyped
ceph_osd_flag_noin 0.0
# HELP ceph_osd_flag_nobackfill OSD Flag nobackfill
# TYPE ceph_osd_flag_nobackfill untyped
ceph_osd_flag_nobackfill 0.0
# HELP ceph_osd_flag_norebalance OSD Flag norebalance
# TYPE ceph_osd_flag_norebalance untyped
ceph_osd_flag_norebalance 0.0
# HELP ceph_osd_flag_norecover OSD Flag norecover
# TYPE ceph_osd_flag_norecover untyped
ceph_osd_flag_norecover 0.0
# HELP ceph_osd_flag_noscrub OSD Flag noscrub
# TYPE ceph_osd_flag_noscrub untyped
ceph_osd_flag_noscrub 0.0
# HELP ceph_osd_flag_nodeep_scrub OSD Flag nodeep-scrub
# TYPE ceph_osd_flag_nodeep_scrub untyped
ceph_osd_flag_nodeep_scrub 0.0
# HELP ceph_osd_weight OSD status weight
# TYPE ceph_osd_weight untyped
ceph_osd_weight{ceph_daemon="osd.0"} 1.0
ceph_osd_weight{ceph_daemon="osd.1"} 1.0
ceph_osd_weight{ceph_daemon="osd.2"} 1.0
ceph_osd_weight{ceph_daemon="osd.3"} 1.0
ceph_osd_weight{ceph_daemon="osd.4"} 1.0
ceph_osd_weight{ceph_daemon="osd.5"} 1.0
ceph_osd_weight{ceph_daemon="osd.6"} 1.0
ceph_osd_weight{ceph_daemon="osd.7"} 1.0
ceph_osd_weight{ceph_daemon="osd.8"} 1.0
ceph_osd_weight{ceph_daemon="osd.9"} 1.0
ceph_osd_weight{ceph_daemon="osd.10"} 1.0
ceph_osd_weight{ceph_daemon="osd.11"} 1.0
ceph_osd_weight{ceph_daemon="osd.12"} 1.0
ceph_osd_weight{ceph_daemon="osd.13"} 1.0
ceph_osd_weight{ceph_daemon="osd.14"} 1.0
ceph_osd_weight{ceph_daemon="osd.15"} 1.0
ceph_osd_weight{ceph_daemon="osd.16"} 1.0
ceph_osd_weight{ceph_daemon="osd.17"} 1.0
ceph_osd_weight{ceph_daemon="osd.18"} 1.0
ceph_osd_weight{ceph_daemon="osd.19"} 1.0
ceph_osd_weight{ceph_daemon="osd.20"} 1.0
ceph_osd_weight{ceph_daemon="osd.21"} 1.0
ceph_osd_weight{ceph_daemon="osd.22"} 1.0
ceph_osd_weight{ceph_daemon="osd.23"} 1.0
ceph_osd_weight{ceph_daemon="osd.24"} 1.0
ceph_osd_weight{ceph_daemon="osd.25"} 1.0
ceph_osd_weight{ceph_daemon="osd.26"} 1.0
ceph_osd_weight{ceph_daemon="osd.27"} 1.0
ceph_osd_weight{ceph_daemon="osd.28"} 1.0
ceph_osd_weight{ceph_daemon="osd.29"} 1.0
ceph_osd_weight{ceph_daemon="osd.30"} 1.0
ceph_osd_weight{ceph_daemon="osd.31"} 1.0
ceph_osd_weight{ceph_daemon="osd.32"} 1.0
ceph_osd_weight{ceph_daemon="osd.33"} 1.0
ceph_osd_weight{ceph_daemon="osd.34"} 1.0
ceph_osd_weight{ceph_daemon="osd.35"} 1.0
ceph_osd_weight{ceph_daemon="osd.36"} 1.0
ceph_osd_weight{ceph_daemon="osd.37"} 1.0
ceph_osd_weight{ceph_daemon="osd.38"} 1.0
ceph_osd_weight{ceph_daemon="osd.39"} 1.0
ceph_osd_weight{ceph_daemon="osd.40"} 1.0
ceph_osd_weight{ceph_daemon="osd.41"} 1.0
ceph_osd_weight{ceph_daemon="osd.42"} 1.0
ceph_osd_weight{ceph_daemon="osd.43"} 1.0
ceph_osd_weight{ceph_daemon="osd.44"} 1.0
ceph_osd_weight{ceph_daemon="osd.45"} 1.0
ceph_osd_weight{ceph_daemon="osd.46"} 1.0
ceph_osd_weight{ceph_daemon="osd.47"} 1.0
ceph_osd_weight{ceph_daemon="osd.48"} 1.0
ceph_osd_weight{ceph_daemon="osd.49"} 1.0
ceph_osd_weight{ceph_daemon="osd.50"} 1.0
ceph_osd_weight{ceph_daemon="osd.51"} 1.0
ceph_osd_weight{ceph_daemon="osd.52"} 1.0
ceph_osd_weight{ceph_daemon="osd.53"} 1.0
ceph_osd_weight{ceph_daemon="osd.54"} 1.0
ceph_osd_weight{ceph_daemon="osd.55"} 1.0
ceph_osd_weight{ceph_daemon="osd.56"} 1.0
ceph_osd_weight{ceph_daemon="osd.57"} 1.0
ceph_osd_weight{ceph_daemon="osd.58"} 1.0
ceph_osd_weight{ceph_daemon="osd.59"} 1.0
# HELP ceph_osd_up OSD status up
# TYPE ceph_osd_up untyped
ceph_osd_up{ceph_daemon="osd.0"} 1.0
ceph_osd_up{ceph_daemon="osd.1"} 1.0
ceph_osd_up{ceph_daemon="osd.2"} 1.0
ceph_osd_up{ceph_daemon="osd.3"} 1.0
ceph_osd_up{ceph_daemon="osd.4"} 1.0
ceph_osd_up{ceph_daemon="osd.5"} 1.0
ceph_osd_up{ceph_daemon="osd.6"} 1.0
ceph_osd_up{ceph_daemon="osd.7"} 1.0
ceph_osd_up{ceph_daemon="osd.8"} 1.0
ceph_osd_up{ceph_daemon="osd.9"} 1.0
ceph_osd_up{ceph_daemon="osd.10"} 1.0
ceph_osd_up{ceph_daemon="osd.11"} 1.0
ceph_osd_up{ceph_daemon="osd.12"} 1.0
ceph_osd_up{ceph_daemon="osd.13"} 1.0
ceph_osd_up{ceph_daemon="osd.14"} 1.0
ceph_osd_up{ceph_daemon="osd.15"} 1.0
ceph_osd_up{ceph_daemon="osd.16"} 1.0
ceph_osd_up{ceph_daemon="osd.17"} 1.0
ceph_osd_up{ceph_daemon="osd.18"} 1.0
ceph_osd_up{ceph_daemon="osd.19"} 1.0
ceph_osd_up{ceph_daemon="osd.20"} 1.0
ceph_osd_up{ceph_daemon="osd.21"} 1.0
ceph_osd_up{ceph_daemon="osd.22"} 1.0
ceph_osd_up{ceph_daemon="osd.23"} 1.0
ceph_osd_up{ceph_daemon="osd.24"} 1.0
ceph_osd_up{ceph_daemon="osd.25"} 1.0
ceph_osd_up{ceph_daemon="osd.26"} 1.0
ceph_osd_up{ceph_daemon="osd.27"} 1.0
ceph_osd_up{ceph_daemon="osd.28"} 1.0
ceph_osd_up{ceph_daemon="osd.29"} 1.0
ceph_osd_up{ceph_daemon="osd.30"} 1.0
ceph_osd_up{ceph_daemon="osd.31"} 1.0
ceph_osd_up{ceph_daemon="osd.32"} 1.0
ceph_osd_up{ceph_daemon="osd.33"} 1.0
ceph_osd_up{ceph_daemon="osd.34"} 1.0
ceph_osd_up{ceph_daemon="osd.35"} 1.0
ceph_osd_up{ceph_daemon="osd.36"} 1.0
ceph_osd_up{ceph_daemon="osd.37"} 1.0
ceph_osd_up{ceph_daemon="osd.38"} 1.0
ceph_osd_up{ceph_daemon="osd.39"} 1.0
ceph_osd_up{ceph_daemon="osd.40"} 1.0
ceph_osd_up{ceph_daemon="osd.41"} 1.0
ceph_osd_up{ceph_daemon="osd.42"} 1.0
ceph_osd_up{ceph_daemon="osd.43"} 1.0
ceph_osd_up{ceph_daemon="osd.44"} 1.0
ceph_osd_up{ceph_daemon="osd.45"} 1.0
ceph_osd_up{ceph_daemon="osd.46"} 1.0
ceph_osd_up{ceph_daemon="osd.47"} 1.0
ceph_osd_up{ceph_daemon="osd.48"} 1.0
ceph_osd_up{ceph_daemon="osd.49"} 1.0
ceph_osd_up{ceph_daemon="osd.50"} 1.0
ceph_osd_up{ceph_daemon="osd.51"} 1.0
ceph_osd_up{ceph_daemon="osd.52"} 1.0
ceph_osd_up{ceph_daemon="osd.53"} 1.0
ceph_osd_up{ceph_daemon="osd.54"} 1.0
ceph_osd_up{ceph_daemon="osd.55"} 1.0
ceph_osd_up{ceph_daemon="osd.56"} 1.0
ceph_osd_up{ceph_daemon="osd.57"} 1.0
ceph_osd_up{ceph_daemon="osd.58"} 1.0
ceph_osd_up{ceph_daemon="osd.59"} 1.0
# HELP ceph_osd_in OSD status in
# TYPE ceph_osd_in untyped
ceph_osd_in{ceph_daemon="osd.0"} 1.0
ceph_osd_in{ceph_daemon="osd.1"} 1.0
ceph_osd_in{ceph_daemon="osd.2"} 1.0
ceph_osd_in{ceph_daemon="osd.3"} 1.0
ceph_osd_in{ceph_daemon="osd.4"} 1.0
ceph_osd_in{ceph_daemon="osd.5"} 1.0
ceph_osd_in{ceph_daemon="osd.6"} 1.0
ceph_osd_in{ceph_daemon="osd.7"} 1.0
ceph_osd_in{ceph_daemon="osd.8"} 1.0
ceph_osd_in{ceph_daemon="osd.9"} 1.0
ceph_osd_in{ceph_daemon="osd.10"} 1.0
ceph_osd_in{ceph_daemon="osd.11"} 1.0
ceph_osd_in{ceph_daemon="osd.12"} 1.0
ceph_osd_in{ceph_daemon="osd.13"} 1.0
ceph_osd_in{ceph_daemon="osd.14"} 1.0
ceph_osd_in{ceph_daemon="osd.15"} 1.0
ceph_osd_in{ceph_daemon="osd.16"} 1.0
ceph_osd_in{ceph_daemon="osd.17"} 1.0
ceph_osd_in{ceph_daemon="osd.18"} 1.0
ceph_osd_in{ceph_daemon="osd.19"} 1.0
ceph_osd_in{ceph_daemon="osd.20"} 1.0
ceph_osd_in{ceph_daemon="osd.21"} 1.0
ceph_osd_in{ceph_daemon="osd.22"} 1.0
ceph_osd_in{ceph_daemon="osd.23"} 1.0
ceph_osd_in{ceph_daemon="osd.24"} 1.0
ceph_osd_in{ceph_daemon="osd.25"} 1.0
ceph_osd_in{ceph_daemon="osd.26"} 1.0
ceph_osd_in{ceph_daemon="osd.27"} 1.0
ceph_osd_in{ceph_daemon="osd.28"} 1.0
ceph_osd_in{ceph_daemon="osd.29"} 1.0
ceph_osd_in{ceph_daemon="osd.30"} 1.0
ceph_osd_in{ceph_daemon="osd.31"} 1.0
ceph_osd_in{ceph_daemon="osd.32"} 1.0
ceph_osd_in{ceph_daemon="osd.33"} 1.0
ceph_osd_in{ceph_daemon="osd.34"} 1.0
ceph_osd_in{ceph_daemon="osd.35"} 1.0
ceph_osd_in{ceph_daemon="osd.36"} 1.0
ceph_osd_in{ceph_daemon="osd.37"} 1.0
ceph_osd_in{ceph_daemon="osd.38"} 1.0
ceph_osd_in{ceph_daemon="osd.39"} 1.0
ceph_osd_in{ceph_daemon="osd.40"} 1.0
ceph_osd_in{ceph_daemon="osd.41"} 1.0
ceph_osd_in{ceph_daemon="osd.42"} 1.0
ceph_osd_in{ceph_daemon="osd.43"} 1.0
ceph_osd_in{ceph_daemon="osd.44"} 1.0
ceph_osd_in{ceph_daemon="osd.45"} 1.0
ceph_osd_in{ceph_daemon="osd.46"} 1.0
ceph_osd_in{ceph_daemon="osd.47"} 1.0
ceph_osd_in{ceph_daemon="osd.48"} 1.0
ceph_osd_in{ceph_daemon="osd.49"} 1.0
ceph_osd_in{ceph_daemon="osd.50"} 1.0
ceph_osd_in{ceph_daemon="osd.51"} 1.0
ceph_osd_in{ceph_daemon="osd.52"} 1.0
ceph_osd_in{ceph_daemon="osd.53"} 1.0
ceph_osd_in{ceph_daemon="osd.54"} 1.0
ceph_osd_in{ceph_daemon="osd.55"} 1.0
ceph_osd_in{ceph_daemon="osd.56"} 1.0
ceph_osd_in{ceph_daemon="osd.57"} 1.0
ceph_osd_in{ceph_daemon="osd.58"} 1.0
ceph_osd_in{ceph_daemon="osd.59"} 1.0
# HELP ceph_osd_apply_latency_ms OSD stat apply_latency_ms
# TYPE ceph_osd_apply_latency_ms gauge
ceph_osd_apply_latency_ms{ceph_daemon="osd.59"} 175.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.58"} 17.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.57"} 11.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.56"} 38.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.55"} 7.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.54"} 37.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.53"} 18.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.52"} 69.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.51"} 22.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.50"} 6.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.49"} 5.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.48"} 6.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.47"} 104.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.46"} 43.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.45"} 33.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.44"} 8.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.43"} 28.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.42"} 20.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.41"} 8.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.40"} 9.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.39"} 10.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.38"} 13.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.37"} 22.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.36"} 41.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.15"} 17.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.14"} 13.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.13"} 8.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.12"} 11.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.11"} 23.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.10"} 20.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.9"} 13.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.8"} 12.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.7"} 4.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.6"} 12.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.1"} 19.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.0"} 2.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.2"} 9.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.3"} 11.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.4"} 8.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.5"} 4.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.16"} 5.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.17"} 13.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.18"} 11.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.19"} 6.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.20"} 6.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.21"} 14.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.22"} 32.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.23"} 32.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.24"} 6.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.25"} 3.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.26"} 7.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.27"} 23.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.28"} 12.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.29"} 16.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.30"} 58.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.31"} 19.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.32"} 21.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.33"} 30.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.34"} 7.0
ceph_osd_apply_latency_ms{ceph_daemon="osd.35"} 33.0
# HELP ceph_osd_commit_latency_ms OSD stat commit_latency_ms
# TYPE ceph_osd_commit_latency_ms gauge
ceph_osd_commit_latency_ms{ceph_daemon="osd.59"} 175.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.58"} 17.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.57"} 11.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.56"} 38.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.55"} 7.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.54"} 37.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.53"} 18.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.52"} 69.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.51"} 22.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.50"} 6.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.49"} 5.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.48"} 6.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.47"} 104.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.46"} 43.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.45"} 33.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.44"} 8.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.43"} 28.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.42"} 20.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.41"} 8.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.40"} 9.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.39"} 10.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.38"} 13.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.37"} 22.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.36"} 41.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.15"} 17.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.14"} 13.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.13"} 8.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.12"} 11.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.11"} 23.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.10"} 20.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.9"} 13.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.8"} 12.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.7"} 4.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.6"} 12.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.1"} 19.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.0"} 2.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.2"} 9.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.3"} 11.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.4"} 8.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.5"} 4.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.16"} 5.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.17"} 13.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.18"} 11.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.19"} 6.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.20"} 6.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.21"} 14.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.22"} 32.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.23"} 32.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.24"} 6.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.25"} 3.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.26"} 7.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.27"} 23.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.28"} 12.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.29"} 16.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.30"} 58.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.31"} 19.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.32"} 21.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.33"} 30.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.34"} 7.0
ceph_osd_commit_latency_ms{ceph_daemon="osd.35"} 33.0
# HELP ceph_pool_recovering_objects_per_sec OSD pool stats: recovering_objects_per_sec
# TYPE ceph_pool_recovering_objects_per_sec gauge
ceph_pool_recovering_objects_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="33"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="34"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="35"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="40"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="44"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="45"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="46"} 0.0
ceph_pool_recovering_objects_per_sec{pool_id="47"} 0.0
# HELP ceph_pool_recovering_bytes_per_sec OSD pool stats: recovering_bytes_per_sec
# TYPE ceph_pool_recovering_bytes_per_sec gauge
ceph_pool_recovering_bytes_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="33"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="34"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="35"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="40"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="44"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="45"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="46"} 0.0
ceph_pool_recovering_bytes_per_sec{pool_id="47"} 0.0
# HELP ceph_pool_recovering_keys_per_sec OSD pool stats: recovering_keys_per_sec
# TYPE ceph_pool_recovering_keys_per_sec gauge
ceph_pool_recovering_keys_per_sec{pool_id="1"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="33"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="34"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="35"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="40"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="44"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="45"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="46"} 0.0
ceph_pool_recovering_keys_per_sec{pool_id="47"} 0.0
# HELP ceph_pool_num_objects_recovered OSD pool stats: num_objects_recovered
# TYPE ceph_pool_num_objects_recovered gauge
ceph_pool_num_objects_recovered{pool_id="1"} 0.0
ceph_pool_num_objects_recovered{pool_id="33"} 0.0
ceph_pool_num_objects_recovered{pool_id="34"} 0.0
ceph_pool_num_objects_recovered{pool_id="35"} 0.0
ceph_pool_num_objects_recovered{pool_id="40"} 0.0
ceph_pool_num_objects_recovered{pool_id="44"} 0.0
ceph_pool_num_objects_recovered{pool_id="45"} 0.0
ceph_pool_num_objects_recovered{pool_id="46"} 0.0
ceph_pool_num_objects_recovered{pool_id="47"} 0.0
# HELP ceph_pool_num_bytes_recovered OSD pool stats: num_bytes_recovered
# TYPE ceph_pool_num_bytes_recovered gauge
ceph_pool_num_bytes_recovered{pool_id="1"} 0.0
ceph_pool_num_bytes_recovered{pool_id="33"} 0.0
ceph_pool_num_bytes_recovered{pool_id="34"} 0.0
ceph_pool_num_bytes_recovered{pool_id="35"} 0.0
ceph_pool_num_bytes_recovered{pool_id="40"} 0.0
ceph_pool_num_bytes_recovered{pool_id="44"} 0.0
ceph_pool_num_bytes_recovered{pool_id="45"} 0.0
ceph_pool_num_bytes_recovered{pool_id="46"} 0.0
ceph_pool_num_bytes_recovered{pool_id="47"} 0.0
# HELP ceph_pg_active PG active per pool
# TYPE ceph_pg_active gauge
ceph_pg_active{pool_id="1"} 1024.0
ceph_pg_active{pool_id="33"} 16.0
ceph_pg_active{pool_id="34"} 16.0
ceph_pg_active{pool_id="35"} 16.0
ceph_pg_active{pool_id="40"} 16.0
ceph_pg_active{pool_id="44"} 16.0
ceph_pg_active{pool_id="45"} 1024.0
ceph_pg_active{pool_id="46"} 16.0
ceph_pg_active{pool_id="47"} 16.0
# HELP ceph_pg_clean PG clean per pool
# TYPE ceph_pg_clean gauge
ceph_pg_clean{pool_id="1"} 1024.0
ceph_pg_clean{pool_id="33"} 16.0
ceph_pg_clean{pool_id="34"} 16.0
ceph_pg_clean{pool_id="35"} 16.0
ceph_pg_clean{pool_id="40"} 16.0
ceph_pg_clean{pool_id="44"} 16.0
ceph_pg_clean{pool_id="45"} 1024.0
ceph_pg_clean{pool_id="46"} 16.0
ceph_pg_clean{pool_id="47"} 16.0
# HELP ceph_pg_down PG down per pool
# TYPE ceph_pg_down gauge
ceph_pg_down{pool_id="1"} 0.0
ceph_pg_down{pool_id="33"} 0.0
ceph_pg_down{pool_id="34"} 0.0
ceph_pg_down{pool_id="35"} 0.0
ceph_pg_down{pool_id="40"} 0.0
ceph_pg_down{pool_id="44"} 0.0
ceph_pg_down{pool_id="45"} 0.0
ceph_pg_down{pool_id="46"} 0.0
ceph_pg_down{pool_id="47"} 0.0
# HELP ceph_pg_recovery_unfound PG recovery_unfound per pool
# TYPE ceph_pg_recovery_unfound gauge
ceph_pg_recovery_unfound{pool_id="1"} 0.0
ceph_pg_recovery_unfound{pool_id="33"} 0.0
ceph_pg_recovery_unfound{pool_id="34"} 0.0
ceph_pg_recovery_unfound{pool_id="35"} 0.0
ceph_pg_recovery_unfound{pool_id="40"} 0.0
ceph_pg_recovery_unfound{pool_id="44"} 0.0
ceph_pg_recovery_unfound{pool_id="45"} 0.0
ceph_pg_recovery_unfound{pool_id="46"} 0.0
ceph_pg_recovery_unfound{pool_id="47"} 0.0
# HELP ceph_pg_backfill_unfound PG backfill_unfound per pool
# TYPE ceph_pg_backfill_unfound gauge
ceph_pg_backfill_unfound{pool_id="1"} 0.0
ceph_pg_backfill_unfound{pool_id="33"} 0.0
ceph_pg_backfill_unfound{pool_id="34"} 0.0
ceph_pg_backfill_unfound{pool_id="35"} 0.0
ceph_pg_backfill_unfound{pool_id="40"} 0.0
ceph_pg_backfill_unfound{pool_id="44"} 0.0
ceph_pg_backfill_unfound{pool_id="45"} 0.0
ceph_pg_backfill_unfound{pool_id="46"} 0.0
ceph_pg_backfill_unfound{pool_id="47"} 0.0
# HELP ceph_pg_scrubbing PG scrubbing per pool
# TYPE ceph_pg_scrubbing gauge
ceph_pg_scrubbing{pool_id="1"} 20.0
ceph_pg_scrubbing{pool_id="33"} 0.0
ceph_pg_scrubbing{pool_id="34"} 0.0
ceph_pg_scrubbing{pool_id="35"} 0.0
ceph_pg_scrubbing{pool_id="40"} 0.0
ceph_pg_scrubbing{pool_id="44"} 0.0
ceph_pg_scrubbing{pool_id="45"} 31.0
ceph_pg_scrubbing{pool_id="46"} 0.0
ceph_pg_scrubbing{pool_id="47"} 0.0
# HELP ceph_pg_degraded PG degraded per pool
# TYPE ceph_pg_degraded gauge
ceph_pg_degraded{pool_id="1"} 0.0
ceph_pg_degraded{pool_id="33"} 0.0
ceph_pg_degraded{pool_id="34"} 0.0
ceph_pg_degraded{pool_id="35"} 0.0
ceph_pg_degraded{pool_id="40"} 0.0
ceph_pg_degraded{pool_id="44"} 0.0
ceph_pg_degraded{pool_id="45"} 0.0
ceph_pg_degraded{pool_id="46"} 0.0
ceph_pg_degraded{pool_id="47"} 0.0
# HELP ceph_pg_inconsistent PG inconsistent per pool
# TYPE ceph_pg_inconsistent gauge
ceph_pg_inconsistent{pool_id="1"} 0.0
ceph_pg_inconsistent{pool_id="33"} 0.0
ceph_pg_inconsistent{pool_id="34"} 0.0
ceph_pg_inconsistent{pool_id="35"} 0.0
ceph_pg_inconsistent{pool_id="40"} 0.0
ceph_pg_inconsistent{pool_id="44"} 0.0
ceph_pg_inconsistent{pool_id="45"} 0.0
ceph_pg_inconsistent{pool_id="46"} 0.0
ceph_pg_inconsistent{pool_id="47"} 0.0
# HELP ceph_pg_peering PG peering per pool
# TYPE ceph_pg_peering gauge
ceph_pg_peering{pool_id="1"} 0.0
ceph_pg_peering{pool_id="33"} 0.0
ceph_pg_peering{pool_id="34"} 0.0
ceph_pg_peering{pool_id="35"} 0.0
ceph_pg_peering{pool_id="40"} 0.0
ceph_pg_peering{pool_id="44"} 0.0
ceph_pg_peering{pool_id="45"} 0.0
ceph_pg_peering{pool_id="46"} 0.0
ceph_pg_peering{pool_id="47"} 0.0
# HELP ceph_pg_repair PG repair per pool
# TYPE ceph_pg_repair gauge
ceph_pg_repair{pool_id="1"} 0.0
ceph_pg_repair{pool_id="33"} 0.0
ceph_pg_repair{pool_id="34"} 0.0
ceph_pg_repair{pool_id="35"} 0.0
ceph_pg_repair{pool_id="40"} 0.0
ceph_pg_repair{pool_id="44"} 0.0
ceph_pg_repair{pool_id="45"} 0.0
ceph_pg_repair{pool_id="46"} 0.0
ceph_pg_repair{pool_id="47"} 0.0
# HELP ceph_pg_recovering PG recovering per pool
# TYPE ceph_pg_recovering gauge
ceph_pg_recovering{pool_id="1"} 0.0
ceph_pg_recovering{pool_id="33"} 0.0
ceph_pg_recovering{pool_id="34"} 0.0
ceph_pg_recovering{pool_id="35"} 0.0
ceph_pg_recovering{pool_id="40"} 0.0
ceph_pg_recovering{pool_id="44"} 0.0
ceph_pg_recovering{pool_id="45"} 0.0
ceph_pg_recovering{pool_id="46"} 0.0
ceph_pg_recovering{pool_id="47"} 0.0
# HELP ceph_pg_forced_recovery PG forced_recovery per pool
# TYPE ceph_pg_forced_recovery gauge
ceph_pg_forced_recovery{pool_id="1"} 0.0
ceph_pg_forced_recovery{pool_id="33"} 0.0
ceph_pg_forced_recovery{pool_id="34"} 0.0
ceph_pg_forced_recovery{pool_id="35"} 0.0
ceph_pg_forced_recovery{pool_id="40"} 0.0
ceph_pg_forced_recovery{pool_id="44"} 0.0
ceph_pg_forced_recovery{pool_id="45"} 0.0
ceph_pg_forced_recovery{pool_id="46"} 0.0
ceph_pg_forced_recovery{pool_id="47"} 0.0
# HELP ceph_pg_backfill_wait PG backfill_wait per pool
# TYPE ceph_pg_backfill_wait gauge
ceph_pg_backfill_wait{pool_id="1"} 0.0
ceph_pg_backfill_wait{pool_id="33"} 0.0
ceph_pg_backfill_wait{pool_id="34"} 0.0
ceph_pg_backfill_wait{pool_id="35"} 0.0
ceph_pg_backfill_wait{pool_id="40"} 0.0
ceph_pg_backfill_wait{pool_id="44"} 0.0
ceph_pg_backfill_wait{pool_id="45"} 0.0
ceph_pg_backfill_wait{pool_id="46"} 0.0
ceph_pg_backfill_wait{pool_id="47"} 0.0
# HELP ceph_pg_incomplete PG incomplete per pool
# TYPE ceph_pg_incomplete gauge
ceph_pg_incomplete{pool_id="1"} 0.0
ceph_pg_incomplete{pool_id="33"} 0.0
ceph_pg_incomplete{pool_id="34"} 0.0
ceph_pg_incomplete{pool_id="35"} 0.0
ceph_pg_incomplete{pool_id="40"} 0.0
ceph_pg_incomplete{pool_id="44"} 0.0
ceph_pg_incomplete{pool_id="45"} 0.0
ceph_pg_incomplete{pool_id="46"} 0.0
ceph_pg_incomplete{pool_id="47"} 0.0
# HELP ceph_pg_stale PG stale per pool
# TYPE ceph_pg_stale gauge
ceph_pg_stale{pool_id="1"} 0.0
ceph_pg_stale{pool_id="33"} 0.0
ceph_pg_stale{pool_id="34"} 0.0
ceph_pg_stale{pool_id="35"} 0.0
ceph_pg_stale{pool_id="40"} 0.0
ceph_pg_stale{pool_id="44"} 0.0
ceph_pg_stale{pool_id="45"} 0.0
ceph_pg_stale{pool_id="46"} 0.0
ceph_pg_stale{pool_id="47"} 0.0
# HELP ceph_pg_remapped PG remapped per pool
# TYPE ceph_pg_remapped gauge
ceph_pg_remapped{pool_id="1"} 0.0
ceph_pg_remapped{pool_id="33"} 0.0
ceph_pg_remapped{pool_id="34"} 0.0
ceph_pg_remapped{pool_id="35"} 0.0
ceph_pg_remapped{pool_id="40"} 0.0
ceph_pg_remapped{pool_id="44"} 0.0
ceph_pg_remapped{pool_id="45"} 0.0
ceph_pg_remapped{pool_id="46"} 0.0
ceph_pg_remapped{pool_id="47"} 0.0
# HELP ceph_pg_deep PG deep per pool
# TYPE ceph_pg_deep gauge
ceph_pg_deep{pool_id="1"} 16.0
ceph_pg_deep{pool_id="33"} 0.0
ceph_pg_deep{pool_id="34"} 0.0
ceph_pg_deep{pool_id="35"} 0.0
ceph_pg_deep{pool_id="40"} 0.0
ceph_pg_deep{pool_id="44"} 0.0
ceph_pg_deep{pool_id="45"} 26.0
ceph_pg_deep{pool_id="46"} 0.0
ceph_pg_deep{pool_id="47"} 0.0
# HELP ceph_pg_backfilling PG backfilling per pool
# TYPE ceph_pg_backfilling gauge
ceph_pg_backfilling{pool_id="1"} 0.0
ceph_pg_backfilling{pool_id="33"} 0.0
ceph_pg_backfilling{pool_id="34"} 0.0
ceph_pg_backfilling{pool_id="35"} 0.0
ceph_pg_backfilling{pool_id="40"} 0.0
ceph_pg_backfilling{pool_id="44"} 0.0
ceph_pg_backfilling{pool_id="45"} 0.0
ceph_pg_backfilling{pool_id="46"} 0.0
ceph_pg_backfilling{pool_id="47"} 0.0
# HELP ceph_pg_forced_backfill PG forced_backfill per pool
# TYPE ceph_pg_forced_backfill gauge
ceph_pg_forced_backfill{pool_id="1"} 0.0
ceph_pg_forced_backfill{pool_id="33"} 0.0
ceph_pg_forced_backfill{pool_id="34"} 0.0
ceph_pg_forced_backfill{pool_id="35"} 0.0
ceph_pg_forced_backfill{pool_id="40"} 0.0
ceph_pg_forced_backfill{pool_id="44"} 0.0
ceph_pg_forced_backfill{pool_id="45"} 0.0
ceph_pg_forced_backfill{pool_id="46"} 0.0
ceph_pg_forced_backfill{pool_id="47"} 0.0
# HELP ceph_pg_backfill_toofull PG backfill_toofull per pool
# TYPE ceph_pg_backfill_toofull gauge
ceph_pg_backfill_toofull{pool_id="1"} 0.0
ceph_pg_backfill_toofull{pool_id="33"} 0.0
ceph_pg_backfill_toofull{pool_id="34"} 0.0
ceph_pg_backfill_toofull{pool_id="35"} 0.0
ceph_pg_backfill_toofull{pool_id="40"} 0.0
ceph_pg_backfill_toofull{pool_id="44"} 0.0
ceph_pg_backfill_toofull{pool_id="45"} 0.0
ceph_pg_backfill_toofull{pool_id="46"} 0.0
ceph_pg_backfill_toofull{pool_id="47"} 0.0
# HELP ceph_pg_recovery_wait PG recovery_wait per pool
# TYPE ceph_pg_recovery_wait gauge
ceph_pg_recovery_wait{pool_id="1"} 0.0
ceph_pg_recovery_wait{pool_id="33"} 0.0
ceph_pg_recovery_wait{pool_id="34"} 0.0
ceph_pg_recovery_wait{pool_id="35"} 0.0
ceph_pg_recovery_wait{pool_id="40"} 0.0
ceph_pg_recovery_wait{pool_id="44"} 0.0
ceph_pg_recovery_wait{pool_id="45"} 0.0
ceph_pg_recovery_wait{pool_id="46"} 0.0
ceph_pg_recovery_wait{pool_id="47"} 0.0
# HELP ceph_pg_recovery_toofull PG recovery_toofull per pool
# TYPE ceph_pg_recovery_toofull gauge
ceph_pg_recovery_toofull{pool_id="1"} 0.0
ceph_pg_recovery_toofull{pool_id="33"} 0.0
ceph_pg_recovery_toofull{pool_id="34"} 0.0
ceph_pg_recovery_toofull{pool_id="35"} 0.0
ceph_pg_recovery_toofull{pool_id="40"} 0.0
ceph_pg_recovery_toofull{pool_id="44"} 0.0
ceph_pg_recovery_toofull{pool_id="45"} 0.0
ceph_pg_recovery_toofull{pool_id="46"} 0.0
ceph_pg_recovery_toofull{pool_id="47"} 0.0
# HELP ceph_pg_undersized PG undersized per pool
# TYPE ceph_pg_undersized gauge
ceph_pg_undersized{pool_id="1"} 0.0
ceph_pg_undersized{pool_id="33"} 0.0
ceph_pg_undersized{pool_id="34"} 0.0
ceph_pg_undersized{pool_id="35"} 0.0
ceph_pg_undersized{pool_id="40"} 0.0
ceph_pg_undersized{pool_id="44"} 0.0
ceph_pg_undersized{pool_id="45"} 0.0
ceph_pg_undersized{pool_id="46"} 0.0
ceph_pg_undersized{pool_id="47"} 0.0
# HELP ceph_pg_activating PG activating per pool
# TYPE ceph_pg_activating gauge
ceph_pg_activating{pool_id="1"} 0.0
ceph_pg_activating{pool_id="33"} 0.0
ceph_pg_activating{pool_id="34"} 0.0
ceph_pg_activating{pool_id="35"} 0.0
ceph_pg_activating{pool_id="40"} 0.0
ceph_pg_activating{pool_id="44"} 0.0
ceph_pg_activating{pool_id="45"} 0.0
ceph_pg_activating{pool_id="46"} 0.0
ceph_pg_activating{pool_id="47"} 0.0
# HELP ceph_pg_peered PG peered per pool
# TYPE ceph_pg_peered gauge
ceph_pg_peered{pool_id="1"} 0.0
ceph_pg_peered{pool_id="33"} 0.0
ceph_pg_peered{pool_id="34"} 0.0
ceph_pg_peered{pool_id="35"} 0.0
ceph_pg_peered{pool_id="40"} 0.0
ceph_pg_peered{pool_id="44"} 0.0
ceph_pg_peered{pool_id="45"} 0.0
ceph_pg_peered{pool_id="46"} 0.0
ceph_pg_peered{pool_id="47"} 0.0
# HELP ceph_pg_snaptrim PG snaptrim per pool
# TYPE ceph_pg_snaptrim gauge
ceph_pg_snaptrim{pool_id="1"} 0.0
ceph_pg_snaptrim{pool_id="33"} 0.0
ceph_pg_snaptrim{pool_id="34"} 0.0
ceph_pg_snaptrim{pool_id="35"} 0.0
ceph_pg_snaptrim{pool_id="40"} 0.0
ceph_pg_snaptrim{pool_id="44"} 0.0
ceph_pg_snaptrim{pool_id="45"} 0.0
ceph_pg_snaptrim{pool_id="46"} 0.0
ceph_pg_snaptrim{pool_id="47"} 0.0
# HELP ceph_pg_snaptrim_wait PG snaptrim_wait per pool
# TYPE ceph_pg_snaptrim_wait gauge
ceph_pg_snaptrim_wait{pool_id="1"} 0.0
ceph_pg_snaptrim_wait{pool_id="33"} 0.0
ceph_pg_snaptrim_wait{pool_id="34"} 0.0
ceph_pg_snaptrim_wait{pool_id="35"} 0.0
ceph_pg_snaptrim_wait{pool_id="40"} 0.0
ceph_pg_snaptrim_wait{pool_id="44"} 0.0
ceph_pg_snaptrim_wait{pool_id="45"} 0.0
ceph_pg_snaptrim_wait{pool_id="46"} 0.0
ceph_pg_snaptrim_wait{pool_id="47"} 0.0
# HELP ceph_pg_snaptrim_error PG snaptrim_error per pool
# TYPE ceph_pg_snaptrim_error gauge
ceph_pg_snaptrim_error{pool_id="1"} 0.0
ceph_pg_snaptrim_error{pool_id="33"} 0.0
ceph_pg_snaptrim_error{pool_id="34"} 0.0
ceph_pg_snaptrim_error{pool_id="35"} 0.0
ceph_pg_snaptrim_error{pool_id="40"} 0.0
ceph_pg_snaptrim_error{pool_id="44"} 0.0
ceph_pg_snaptrim_error{pool_id="45"} 0.0
ceph_pg_snaptrim_error{pool_id="46"} 0.0
ceph_pg_snaptrim_error{pool_id="47"} 0.0
# HELP ceph_pg_creating PG creating per pool
# TYPE ceph_pg_creating gauge
ceph_pg_creating{pool_id="1"} 0.0
ceph_pg_creating{pool_id="33"} 0.0
ceph_pg_creating{pool_id="34"} 0.0
ceph_pg_creating{pool_id="35"} 0.0
ceph_pg_creating{pool_id="40"} 0.0
ceph_pg_creating{pool_id="44"} 0.0
ceph_pg_creating{pool_id="45"} 0.0
ceph_pg_creating{pool_id="46"} 0.0
ceph_pg_creating{pool_id="47"} 0.0
# HELP ceph_pg_unknown PG unknown per pool
# TYPE ceph_pg_unknown gauge
ceph_pg_unknown{pool_id="1"} 0.0
ceph_pg_unknown{pool_id="33"} 0.0
ceph_pg_unknown{pool_id="34"} 0.0
ceph_pg_unknown{pool_id="35"} 0.0
ceph_pg_unknown{pool_id="40"} 0.0
ceph_pg_unknown{pool_id="44"} 0.0
ceph_pg_unknown{pool_id="45"} 0.0
ceph_pg_unknown{pool_id="46"} 0.0
ceph_pg_unknown{pool_id="47"} 0.0
# HELP ceph_pg_premerge PG premerge per pool
# TYPE ceph_pg_premerge gauge
ceph_pg_premerge{pool_id="1"} 0.0
ceph_pg_premerge{pool_id="33"} 0.0
ceph_pg_premerge{pool_id="34"} 0.0
ceph_pg_premerge{pool_id="35"} 0.0
ceph_pg_premerge{pool_id="40"} 0.0
ceph_pg_premerge{pool_id="44"} 0.0
ceph_pg_premerge{pool_id="45"} 0.0
ceph_pg_premerge{pool_id="46"} 0.0
ceph_pg_premerge{pool_id="47"} 0.0
# HELP ceph_pg_failed_repair PG failed_repair per pool
# TYPE ceph_pg_failed_repair gauge
ceph_pg_failed_repair{pool_id="1"} 0.0
ceph_pg_failed_repair{pool_id="33"} 0.0
ceph_pg_failed_repair{pool_id="34"} 0.0
ceph_pg_failed_repair{pool_id="35"} 0.0
ceph_pg_failed_repair{pool_id="40"} 0.0
ceph_pg_failed_repair{pool_id="44"} 0.0
ceph_pg_failed_repair{pool_id="45"} 0.0
ceph_pg_failed_repair{pool_id="46"} 0.0
ceph_pg_failed_repair{pool_id="47"} 0.0
# HELP ceph_pg_laggy PG laggy per pool
# TYPE ceph_pg_laggy gauge
ceph_pg_laggy{pool_id="1"} 0.0
ceph_pg_laggy{pool_id="33"} 0.0
ceph_pg_laggy{pool_id="34"} 0.0
ceph_pg_laggy{pool_id="35"} 0.0
ceph_pg_laggy{pool_id="40"} 0.0
ceph_pg_laggy{pool_id="44"} 0.0
ceph_pg_laggy{pool_id="45"} 0.0
ceph_pg_laggy{pool_id="46"} 0.0
ceph_pg_laggy{pool_id="47"} 0.0
# HELP ceph_pg_wait PG wait per pool
# TYPE ceph_pg_wait gauge
ceph_pg_wait{pool_id="1"} 0.0
ceph_pg_wait{pool_id="33"} 0.0
ceph_pg_wait{pool_id="34"} 0.0
ceph_pg_wait{pool_id="35"} 0.0
ceph_pg_wait{pool_id="40"} 0.0
ceph_pg_wait{pool_id="44"} 0.0
ceph_pg_wait{pool_id="45"} 0.0
ceph_pg_wait{pool_id="46"} 0.0
ceph_pg_wait{pool_id="47"} 0.0
# HELP ceph_cluster_total_bytes DF total_bytes
# TYPE ceph_cluster_total_bytes gauge
ceph_cluster_total_bytes 630041553666048.0
# HELP ceph_cluster_by_class_total_bytes DF total_bytes
# TYPE ceph_cluster_by_class_total_bytes gauge
ceph_cluster_by_class_total_bytes{device_class="hdd"} 630041553666048.0
# HELP ceph_cluster_total_used_bytes DF total_used_bytes
# TYPE ceph_cluster_total_used_bytes gauge
ceph_cluster_total_used_bytes 338797072535552.0
# HELP ceph_cluster_by_class_total_used_bytes DF total_used_bytes
# TYPE ceph_cluster_by_class_total_used_bytes gauge
ceph_cluster_by_class_total_used_bytes{device_class="hdd"} 338797072535552.0
# HELP ceph_cluster_total_used_raw_bytes DF total_used_raw_bytes
# TYPE ceph_cluster_total_used_raw_bytes gauge
ceph_cluster_total_used_raw_bytes 338797072535552.0
# HELP ceph_cluster_by_class_total_used_raw_bytes DF total_used_raw_bytes
# TYPE ceph_cluster_by_class_total_used_raw_bytes gauge
ceph_cluster_by_class_total_used_raw_bytes{device_class="hdd"} 338797072535552.0
# HELP ceph_pool_max_avail DF pool max_avail
# TYPE ceph_pool_max_avail gauge
ceph_pool_max_avail{pool_id="1"} 75601457184768.0
ceph_pool_max_avail{pool_id="33"} 113402185777152.0
ceph_pool_max_avail{pool_id="34"} 113402185777152.0
ceph_pool_max_avail{pool_id="35"} 113402185777152.0
ceph_pool_max_avail{pool_id="40"} 113402185777152.0
ceph_pool_max_avail{pool_id="44"} 113402185777152.0
ceph_pool_max_avail{pool_id="45"} 75601457184768.0
ceph_pool_max_avail{pool_id="46"} 113402185777152.0
ceph_pool_max_avail{pool_id="47"} 113402185777152.0
# HELP ceph_pool_avail_raw DF pool avail_raw
# TYPE ceph_pool_avail_raw gauge
ceph_pool_avail_raw{pool_id="1"} 226804364781063.0
ceph_pool_avail_raw{pool_id="33"} 226804364781063.0
ceph_pool_avail_raw{pool_id="34"} 226804364781063.0
ceph_pool_avail_raw{pool_id="35"} 226804364781063.0
ceph_pool_avail_raw{pool_id="40"} 226804364781063.0
ceph_pool_avail_raw{pool_id="44"} 226804364781063.0
ceph_pool_avail_raw{pool_id="45"} 226804364781063.0
ceph_pool_avail_raw{pool_id="46"} 226804364781063.0
ceph_pool_avail_raw{pool_id="47"} 226804364781063.0
# HELP ceph_pool_stored DF pool stored
# TYPE ceph_pool_stored gauge
ceph_pool_stored{pool_id="1"} 67252984507029.0
ceph_pool_stored{pool_id="33"} 0.0
ceph_pool_stored{pool_id="34"} 83881.0
ceph_pool_stored{pool_id="35"} 95463.0
ceph_pool_stored{pool_id="40"} 8398.0
ceph_pool_stored{pool_id="44"} 148350730240.0
ceph_pool_stored{pool_id="45"} 43733781315584.0
ceph_pool_stored{pool_id="46"} 820588.0
ceph_pool_stored{pool_id="47"} 297338752.0
# HELP ceph_pool_stored_raw DF pool stored_raw
# TYPE ceph_pool_stored_raw gauge
ceph_pool_stored_raw{pool_id="1"} 201758957633536.0
ceph_pool_stored_raw{pool_id="33"} 0.0
ceph_pool_stored_raw{pool_id="34"} 167762.0
ceph_pool_stored_raw{pool_id="35"} 190926.0
ceph_pool_stored_raw{pool_id="40"} 16796.0
ceph_pool_stored_raw{pool_id="44"} 296701460480.0
ceph_pool_stored_raw{pool_id="45"} 131201343946752.0
ceph_pool_stored_raw{pool_id="46"} 1641176.0
ceph_pool_stored_raw{pool_id="47"} 594677504.0
# HELP ceph_pool_objects DF pool objects
# TYPE ceph_pool_objects gauge
ceph_pool_objects{pool_id="1"} 16035672.0
ceph_pool_objects{pool_id="33"} 8.0
ceph_pool_objects{pool_id="34"} 133.0
ceph_pool_objects{pool_id="35"} 214.0
ceph_pool_objects{pool_id="40"} 21.0
ceph_pool_objects{pool_id="44"} 3790.0
ceph_pool_objects{pool_id="45"} 183910108.0
ceph_pool_objects{pool_id="46"} 1.0
ceph_pool_objects{pool_id="47"} 72.0
# HELP ceph_pool_dirty DF pool dirty
# TYPE ceph_pool_dirty gauge
ceph_pool_dirty{pool_id="1"} 0.0
ceph_pool_dirty{pool_id="33"} 0.0
ceph_pool_dirty{pool_id="34"} 0.0
ceph_pool_dirty{pool_id="35"} 0.0
ceph_pool_dirty{pool_id="40"} 0.0
ceph_pool_dirty{pool_id="44"} 0.0
ceph_pool_dirty{pool_id="45"} 0.0
ceph_pool_dirty{pool_id="46"} 0.0
ceph_pool_dirty{pool_id="47"} 0.0
# HELP ceph_pool_quota_bytes DF pool quota_bytes
# TYPE ceph_pool_quota_bytes gauge
ceph_pool_quota_bytes{pool_id="1"} 0.0
ceph_pool_quota_bytes{pool_id="33"} 0.0
ceph_pool_quota_bytes{pool_id="34"} 0.0
ceph_pool_quota_bytes{pool_id="35"} 0.0
ceph_pool_quota_bytes{pool_id="40"} 0.0
ceph_pool_quota_bytes{pool_id="44"} 0.0
ceph_pool_quota_bytes{pool_id="45"} 0.0
ceph_pool_quota_bytes{pool_id="46"} 0.0
ceph_pool_quota_bytes{pool_id="47"} 0.0
# HELP ceph_pool_quota_objects DF pool quota_objects
# TYPE ceph_pool_quota_objects gauge
ceph_pool_quota_objects{pool_id="1"} 0.0
ceph_pool_quota_objects{pool_id="33"} 0.0
ceph_pool_quota_objects{pool_id="34"} 0.0
ceph_pool_quota_objects{pool_id="35"} 0.0
ceph_pool_quota_objects{pool_id="40"} 0.0
ceph_pool_quota_objects{pool_id="44"} 0.0
ceph_pool_quota_objects{pool_id="45"} 0.0
ceph_pool_quota_objects{pool_id="46"} 0.0
ceph_pool_quota_objects{pool_id="47"} 0.0
# HELP ceph_pool_rd DF pool rd
# TYPE ceph_pool_rd counter
ceph_pool_rd{pool_id="1"} 3934324342.0
ceph_pool_rd{pool_id="33"} 0.0
ceph_pool_rd{pool_id="34"} 61299733.0
ceph_pool_rd{pool_id="35"} 232311079.0
ceph_pool_rd{pool_id="40"} 1871231.0
ceph_pool_rd{pool_id="44"} 17604542111.0
ceph_pool_rd{pool_id="45"} 1660672362.0
ceph_pool_rd{pool_id="46"} 604546.0
ceph_pool_rd{pool_id="47"} 989238.0
# HELP ceph_pool_rd_bytes DF pool rd_bytes
# TYPE ceph_pool_rd_bytes counter
ceph_pool_rd_bytes{pool_id="1"} 842376959649792.0
ceph_pool_rd_bytes{pool_id="33"} 0.0
ceph_pool_rd_bytes{pool_id="34"} 51572443136.0
ceph_pool_rd_bytes{pool_id="35"} 247293477888.0
ceph_pool_rd_bytes{pool_id="40"} 1915851776.0
ceph_pool_rd_bytes{pool_id="44"} 53668984848384.0
ceph_pool_rd_bytes{pool_id="45"} 8991147509760.0
ceph_pool_rd_bytes{pool_id="46"} 393598976.0
ceph_pool_rd_bytes{pool_id="47"} 3835064320.0
# HELP ceph_pool_wr DF pool wr
# TYPE ceph_pool_wr counter
ceph_pool_wr{pool_id="1"} 1933146127.0
ceph_pool_wr{pool_id="33"} 0.0
ceph_pool_wr{pool_id="34"} 3205831.0
ceph_pool_wr{pool_id="35"} 150910076.0
ceph_pool_wr{pool_id="40"} 110.0
ceph_pool_wr{pool_id="44"} 3243189010.0
ceph_pool_wr{pool_id="45"} 5474280179.0
ceph_pool_wr{pool_id="46"} 157892.0
ceph_pool_wr{pool_id="47"} 1015158.0
# HELP ceph_pool_wr_bytes DF pool wr_bytes
# TYPE ceph_pool_wr_bytes counter
ceph_pool_wr_bytes{pool_id="1"} 1032698105611264.0
ceph_pool_wr_bytes{pool_id="33"} 0.0
ceph_pool_wr_bytes{pool_id="34"} 1637237760.0
ceph_pool_wr_bytes{pool_id="35"} 1164502016.0
ceph_pool_wr_bytes{pool_id="40"} 64512.0
ceph_pool_wr_bytes{pool_id="44"} 2142465670144.0
ceph_pool_wr_bytes{pool_id="45"} 87839206533120.0
ceph_pool_wr_bytes{pool_id="46"} 148935680.0
ceph_pool_wr_bytes{pool_id="47"} 19457269760.0
# HELP ceph_pool_compress_bytes_used DF pool compress_bytes_used
# TYPE ceph_pool_compress_bytes_used gauge
ceph_pool_compress_bytes_used{pool_id="1"} 0.0
ceph_pool_compress_bytes_used{pool_id="33"} 0.0
ceph_pool_compress_bytes_used{pool_id="34"} 0.0
ceph_pool_compress_bytes_used{pool_id="35"} 0.0
ceph_pool_compress_bytes_used{pool_id="40"} 0.0
ceph_pool_compress_bytes_used{pool_id="44"} 0.0
ceph_pool_compress_bytes_used{pool_id="45"} 0.0
ceph_pool_compress_bytes_used{pool_id="46"} 0.0
ceph_pool_compress_bytes_used{pool_id="47"} 0.0
# HELP ceph_pool_compress_under_bytes DF pool compress_under_bytes
# TYPE ceph_pool_compress_under_bytes gauge
ceph_pool_compress_under_bytes{pool_id="1"} 0.0
ceph_pool_compress_under_bytes{pool_id="33"} 0.0
ceph_pool_compress_under_bytes{pool_id="34"} 0.0
ceph_pool_compress_under_bytes{pool_id="35"} 0.0
ceph_pool_compress_under_bytes{pool_id="40"} 0.0
ceph_pool_compress_under_bytes{pool_id="44"} 0.0
ceph_pool_compress_under_bytes{pool_id="45"} 0.0
ceph_pool_compress_under_bytes{pool_id="46"} 0.0
ceph_pool_compress_under_bytes{pool_id="47"} 0.0
# HELP ceph_pool_bytes_used DF pool bytes_used
# TYPE ceph_pool_bytes_used gauge
ceph_pool_bytes_used{pool_id="1"} 201758909063105.0
ceph_pool_bytes_used{pool_id="33"} 0.0
ceph_pool_bytes_used{pool_id="34"} 1100649.0
ceph_pool_bytes_used{pool_id="35"} 687094.0
ceph_pool_bytes_used{pool_id="40"} 163840.0
ceph_pool_bytes_used{pool_id="44"} 296701447170.0
ceph_pool_bytes_used{pool_id="45"} 132565484404736.0
ceph_pool_bytes_used{pool_id="46"} 1649315.0
ceph_pool_bytes_used{pool_id="47"} 594681856.0
# HELP ceph_pool_percent_used DF pool percent_used
# TYPE ceph_pool_percent_used gauge
ceph_pool_percent_used{pool_id="1"} 0.47077974677085876
ceph_pool_percent_used{pool_id="33"} 0.0
ceph_pool_percent_used{pool_id="34"} 4.852856338999345e-09
ceph_pool_percent_used{pool_id="35"} 3.0294566855104677e-09
ceph_pool_percent_used{pool_id="40"} 7.223846520965083e-10
ceph_pool_percent_used{pool_id="44"} 0.0013064731610938907
ceph_pool_percent_used{pool_id="45"} 0.3688831627368927
ceph_pool_percent_used{pool_id="46"} 7.271971913525022e-09
ceph_pool_percent_used{pool_id="47"} 2.6219966002827277e-06
# HELP ceph_cluster_osd_blocklist_count OSD Blocklist Count osd_blocklist_count
# TYPE ceph_cluster_osd_blocklist_count gauge
ceph_cluster_osd_blocklist_count 0.0
# HELP ceph_num_objects_degraded Number of degraded objects
# TYPE ceph_num_objects_degraded gauge
ceph_num_objects_degraded 0.0
# HELP ceph_num_objects_misplaced Number of misplaced objects
# TYPE ceph_num_objects_misplaced gauge
ceph_num_objects_misplaced 0.0
# HELP ceph_num_objects_unfound Number of unfound objects
# TYPE ceph_num_objects_unfound gauge
ceph_num_objects_unfound 0.0
# HELP ceph_healthcheck_slow_ops OSD or Monitor requests taking a long time to process
# TYPE ceph_healthcheck_slow_ops gauge
ceph_healthcheck_slow_ops 0.0
# HELP ceph_prometheus_collect_duration_seconds_sum The sum of seconds taken to collect all metrics of this exporter
# TYPE ceph_prometheus_collect_duration_seconds_sum counter
ceph_prometheus_collect_duration_seconds_sum{method="get_health"} 42.1791136264801
ceph_prometheus_collect_duration_seconds_sum{method="get_pool_stats"} 22.007351875305176
ceph_prometheus_collect_duration_seconds_sum{method="get_df"} 39.1889591217041
ceph_prometheus_collect_duration_seconds_sum{method="get_osd_blocklisted_entries"} 226.58358502388
ceph_prometheus_collect_duration_seconds_sum{method="get_fs"} 116.95380568504333
ceph_prometheus_collect_duration_seconds_sum{method="get_quorum_status"} 129.25314927101135
ceph_prometheus_collect_duration_seconds_sum{method="get_mgr_status"} 607.5676686763763
ceph_prometheus_collect_duration_seconds_sum{method="get_pg_status"} 308.7933542728424
ceph_prometheus_collect_duration_seconds_sum{method="get_osd_stats"} 219.10668897628784
ceph_prometheus_collect_duration_seconds_sum{method="get_metadata_and_osd_status"} 5938.0899522304535
ceph_prometheus_collect_duration_seconds_sum{method="get_num_objects"} 243.41758704185486
ceph_prometheus_collect_duration_seconds_sum{method="get_rbd_stats"} 2249.9423191547394
# HELP ceph_prometheus_collect_duration_seconds_count The number of metrics gathered for this exporter
# TYPE ceph_prometheus_collect_duration_seconds_count counter
ceph_prometheus_collect_duration_seconds_count{method="get_health"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_pool_stats"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_df"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_osd_blocklisted_entries"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_fs"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_quorum_status"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_mgr_status"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_pg_status"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_osd_stats"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_metadata_and_osd_status"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_num_objects"} 44236.0
ceph_prometheus_collect_duration_seconds_count{method="get_rbd_stats"} 44236.0