Feature #58421

OSD metadata should show the min_alloc_size that each OSD was built with

Added by Anthony D'Atri over 1 year ago. Updated 10 months ago.

Status:
Resolved
Priority:
Normal
Assignee:
-
Target version:
-
% Done:
0%

Source:
Tags:
low-hanging-fruit backport_processed
Backport:
quincy, pacific, reef
Reviewed:
Affected Versions:
Pull request ID:

Description

To be very clear: this should report the value the OSD was built with at `mkfs` time, not the prevailing value in `ceph.conf` or the central config database. BlueStore persists `min_alloc_size` on disk when the OSD is created, so later config changes do not affect existing OSDs, and the two values can silently diverge.

```
{
"id": 239,
"arch": "x86_64",
"back_addr": "[v2:10.233.94.74:6802/96,v1:10.233.94.74:6803/96]",
"back_iface": "",
"bluefs": "1",
"bluefs_dedicated_db": "0",
"bluefs_dedicated_wal": "0",
"bluefs_single_shared_device": "1",
"bluestore_bdev_access_mode": "blk",
"bluestore_bdev_block_size": "4096",
"bluestore_bdev_dev_node": "/dev/dm-15",
"bluestore_bdev_devices": "nvme6n1",
"bluestore_bdev_driver": "KernelDevice",
"bluestore_bdev_optimal_io_size": "0",
"bluestore_bdev_partition_path": "/dev/dm-15",
"bluestore_bdev_rotational": "0",
"bluestore_bdev_size": "1600311525376",
"bluestore_bdev_support_discard": "1",
"bluestore_bdev_type": "ssd",
"ceph_release": "quincy",
"ceph_version": "ceph version 17.2.5 (98318ae89f1a893a6ded3a640405cdbb33e08757) quincy (stable)",
"ceph_version_short": "17.2.5",
"container_hostname": "rook-ceph-osd-239-5b4ff945fc-95m5h",
"container_image": "registry.indexexchange.com:5000/ceph/ceph:v17.2.5",
"cpu": "Intel(R) Xeon(R) Platinum 8358 CPU @ 2.60GHz",
"default_device_class": "ssd",
"device_ids": "nvme6n1=Dell_Ent_NVMe_P5600_MU_U.2_3.2TB_PHAB133101JB3P8EGN",
"device_paths": "nvme6n1=/dev/disk/by-path/pci-0000:e5:00.0-nvme-1",
"devices": "nvme6n1",
"distro": "centos",
"distro_description": "CentOS Stream 8",
"distro_version": "8",
"front_addr": "[v2:10.233.94.74:6800/96,v1:10.233.94.74:6801/96]",
"front_iface": "",
"hb_back_addr": "[v2:10.233.94.74:6806/96,v1:10.233.94.74:6807/96]",
"hb_front_addr": "[v2:10.233.94.74:6804/96,v1:10.233.94.74:6805/96]",
"hostname": "ceph9.tor3.indexww.com",
"journal_rotational": "0",
"kernel_description": "#1 SMP PREEMPT_DYNAMIC Tue Jun 14 17:07:38 EDT 2022",
"kernel_version": "5.18.5-1.el8.elrepo.x86_64",
"mem_cgroup_limit": "8589934592",
"mem_swap_kb": "0",
"mem_total_kb": "263554920",
"network_numa_unknown_ifaces": "back_iface,front_iface",
"objectstore_numa_node": "1",
"objectstore_numa_nodes": "1",
"os": "Linux",
"osd_data": "/var/lib/ceph/osd/ceph-239",
"osd_objectstore": "bluestore",
"osdspec_affinity": "",
"pod_name": "rook-ceph-osd-239-5b4ff945fc-95m5h",
"pod_namespace": "rook-ceph",
"rotational": "0"
}
```

I'm a bit surprised that `bluestore_bdev_optimal_io_size` here reports 0. This is a cluster deployed with Rook.
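For illustration, here is a minimal sketch of how an operator script might consume the requested field once it lands. The metadata key name `bluestore_min_alloc_size` is my guess at what the field might be called, not a confirmed name from a merged PR; the `ceph osd metadata` and `ceph config get` commands themselves are existing CLI. Note that a configured `bluestore_min_alloc_size` of 0 means the device-class default (`bluestore_min_alloc_size_ssd` or `bluestore_min_alloc_size_hdd`) applies, so a raw string comparison is only a first approximation.

```
#!/usr/bin/env python3
"""Hypothetical check: compare the min_alloc_size an OSD was built with
(from its metadata, per this feature request) against the current value
in the central config db."""
import json
import subprocess

OSD_ID = 239  # the OSD from the dump above

# `ceph osd metadata <id>` returns the JSON object shown in the description.
raw = subprocess.check_output(["ceph", "osd", "metadata", str(OSD_ID)])
metadata = json.loads(raw)

# Assumed key name for the new field; the actual name depends on the
# implementation that gets merged.
built_with = metadata.get("bluestore_min_alloc_size")

# The prevailing config-db value, which may differ from what is on disk.
configured = subprocess.check_output(
    ["ceph", "config", "get", f"osd.{OSD_ID}", "bluestore_min_alloc_size"]
).decode().strip()

if built_with is None:
    print(f"osd.{OSD_ID} metadata does not (yet) report min_alloc_size")
elif built_with != configured:
    print(f"osd.{OSD_ID} was built with min_alloc_size {built_with}, "
          f"but the config db now says {configured}")
else:
    print(f"osd.{OSD_ID}: on-disk and configured min_alloc_size agree")
```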


Related issues 3 (0 open, 3 closed)

Copied to bluestore - Backport #59009: quincy: OSD metadata should show the min_alloc_size that each OSD was built with (Resolved)
Copied to bluestore - Backport #59010: pacific: OSD metadata should show the min_alloc_size that each OSD was built with (Resolved)
Copied to bluestore - Backport #59011: reef: OSD metadata should show the min_alloc_size that each OSD was built with (Resolved)