Ceph : Issues
https://tracker.ceph.com/
https://tracker.ceph.com/favicon.ico
2024-03-28T15:00:23Z
Ceph
Redmine
crimson - Bug #65203 (New): ReplicatedRecoveryBackend::recalc_subsets(ObjectRecoveryInfo&, crimso...
https://tracker.ceph.com/issues/65203
2024-03-28T15:00:23Z
Matan Breizman
<p>osd.3: <a class="external" href="https://pulpito.ceph.com/matan-2024-03-27_13:02:57-crimson-rados-main-distro-crimson-smithi/7626294">https://pulpito.ceph.com/matan-2024-03-27_13:02:57-crimson-rados-main-distro-crimson-smithi/7626294</a></p>
<p>After adding OSD restarts to the thrash tests: <a class="external" href="https://github.com/ceph/ceph/pull/56511">https://github.com/ceph/ceph/pull/56511</a></p>
<pre><code class="text syntaxhl"><span class="CodeRay">DEBUG 2024-03-27 13:26:06,805 [shard 0:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): starting start_pg_operation
DEBUG 2024-03-27 13:26:06,805 [shard 0:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): start_pg_operation in await_active stage
DEBUG 2024-03-27 13:26:06,805 [shard 0:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): start_pg_operation active, entering await_map
DEBUG 2024-03-27 13:26:06,805 [shard 0:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): start_pg_operation await_map stage
DEBUG 2024-03-27 13:26:06,806 [shard 0:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): got map 26, entering get_pg_mapping
DEBUG 2024-03-27 13:26:06,806 [shard 0:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): can_create=false, target-core=2
DEBUG 2024-03-27 13:26:06,806 [shard 0:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): send 37 to the remote pg core 2
DEBUG 2024-03-27 13:26:06,806 [shard 2:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): entering create_or_wait_pg
DEBUG 2024-03-27 13:26:06,806 [shard 2:main] osd - background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))})): have_pg
DEBUG 2024-03-27 13:26:06,806 [shard 2:main] osd - 0x603000429b00 RecoverySubRequest::with_pg: RecoverySubRequest::with_pg: background_recovery_sub(id=362, detail=MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))}))
DEBUG 2024-03-27 13:26:06,806 [shard 2:main] osd - handle_pull_response: MOSDPGPush(3.d 26/25 {PushOp(3:bd1211d5:::smithi05531420-40:1, version: 18'16, data_included: [655473~716476,2099033~332100], data_size: 1048576, omap_header_size: 0, omap_entries_size: 0, attrset_size: 2, recovery_info: ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false), after_progress: ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false), before_progress: ObjectRecoveryProgress(first, data_recovered_to: 0, data_complete: false, omap_recovered_to: , omap_complete: false, error: false))}) v4
DEBUG 2024-03-27 13:26:06,806 [shard 2:main] osd - handle_pull_response ObjectRecoveryInfo(3:bd1211d5:::smithi05531420-40:1@0'0, size: 2655473, copy_subset: [(0, 2655473)], clone_subset: {}, snapset: 1=[]:{1: [1]}, object_exist: false) ObjectRecoveryProgress(!first, data_recovered_to: 2431133, data_complete: false, omap_recovered_to: , omap_complete: true, error: false) data.size() is 1048576 data_included: [(655473, 716476), (2099033, 332100)]
DEBUG 2024-03-27 13:26:06,807 [shard 2:main] osd - pg_epoch 26 pg[3.d( v 26'20 lc 17'15 (0'0,26'20] local-lis/les=25/26 n=0 ec=14/14 lis/c=25/14 les/c/f=26/15/0 sis=25) [3,0] r=0 lpr=25 pi=[14,25)/1 luod=26'21 lua=21'18 crt=26'21 mlcod 17'15 active+recovering+undersized+degraded ObjectContextLoader::with_head_obc: object 3:bd1211d5:::smithi05531420-40:head
DEBUG 2024-03-27 13:26:06,807 [shard 2:main] osd - pg_epoch 26 pg[3.d( v 26'20 lc 17'15 (0'0,26'20] local-lis/les=25/26 n=0 ec=14/14 lis/c=25/14 les/c/f=26/15/0 sis=25) [3,0] r=0 lpr=25 pi=[14,25)/1 luod=26'21 lua=21'18 crt=26'21 mlcod 17'15 active+recovering+undersized+degraded ObjectContextLoader::get_or_load_obc: cache hit on 3:bd1211d5:::smithi05531420-40:head
DEBUG 2024-03-27 13:26:06,807 [shard 2:main] osd - resolve_oid oid.snap=1,head snapset.seq=1
DEBUG 2024-03-27 13:26:06,807 [shard 2:main] osd - pg_epoch 26 pg[3.d( v 26'20 lc 17'15 (0'0,26'20] local-lis/les=25/26 n=0 ec=14/14 lis/c=25/14 les/c/f=26/15/0 sis=25) [3,0] r=0 lpr=25 pi=[14,25)/1 luod=26'21 lua=21'18 crt=26'21 mlcod 17'15 active+recovering+undersized+degraded ObjectContextLoader::get_or_load_obc: cache miss on 3:bd1211d5:::smithi05531420-40:1
DEBUG 2024-03-27 13:26:06,807 [shard 2:main] osd - load_metadata: object 3:bd1211d5:::smithi05531420-40:1 doesn't exist, returning empty metadata
DEBUG 2024-03-27 13:26:06,807 [shard 2:main] osd - pg_epoch 26 pg[3.d( v 26'20 lc 17'15 (0'0,26'20] local-lis/les=25/26 n=0 ec=14/14 lis/c=25/14 les/c/f=26/15/0 sis=25) [3,0] r=0 lpr=25 pi=[14,25)/1 luod=26'21 lua=21'18 crt=26'21 mlcod 17'15 active+recovering+undersized+degraded ObjectContextLoader::load_obc: loaded obs 3:bd1211d5:::smithi05531420-40:1(0'0 unknown.0.0:0 s 0 uv 0 alloc_hint [0 0 0]) for 3:bd1211d5:::smithi05531420-40:1
DEBUG 2024-03-27 13:26:06,807 [shard 2:main] osd - pg_epoch 26 pg[3.d( v 26'20 lc 17'15 (0'0,26'20] local-lis/les=25/26 n=0 ec=14/14 lis/c=25/14 les/c/f=26/15/0 sis=25) [3,0] r=0 lpr=25 pi=[14,25)/1 luod=26'21 lua=21'18 crt=26'21 mlcod 17'15 active+recovering+undersized+degraded ObjectContextLoader::load_obc: returning obc 3:bd1211d5:::smithi05531420-40:1(0'0 unknown.0.0:0 s 0 uv 0 alloc_hint [0 0 0]) for 3:bd1211d5:::smithi05531420-40:1
ceph-osd: /home/jenkins-build/build/workspace/ceph-dev-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/19.0.0-2476-g56e21662/rpm/el9/BUILD/ceph-19.0.0-2476-g56e21662/src/crimson/osd/replicated_recovery_backend.cc:886: void ReplicatedRecoveryBackend::recalc_subsets(ObjectRecoveryInfo&, crimson::osd::SnapSetContextRef): Assertion `ssc' failed.
Aborting on shard 2.
Backtrace:
0# 0x00007F182BAA154C in /lib64/libc.so.6
1# raise in /lib64/libc.so.6
2# abort in /lib64/libc.so.6
3# 0x00007F182BA2871B in /lib64/libc.so.6
4# 0x00007F182BA4DCA6 in /lib64/libc.so.6
5# ReplicatedRecoveryBackend::recalc_subsets(ObjectRecoveryInfo&, boost::intrusive_ptr<crimson::osd::SnapSetContext>) in ceph-osd
</span></code></pre>
crimson - Bug #65201 (New): ReplicatedRecoveryBackend::prep_push_to_replica(const hobject_t&, eve...
https://tracker.ceph.com/issues/65201
2024-03-28T14:55:47Z
Matan Breizman
<p>osd.3: <a class="external" href="https://pulpito.ceph.com/matan-2024-03-27_13:02:57-crimson-rados-main-distro-crimson-smithi/7626293">https://pulpito.ceph.com/matan-2024-03-27_13:02:57-crimson-rados-main-distro-crimson-smithi/7626293</a></p>
<p>After adding OSD restarts to the thrash tests: <a class="external" href="https://github.com/ceph/ceph/pull/56511">https://github.com/ceph/ceph/pull/56511</a></p>
<pre><code class="text syntaxhl"><span class="CodeRay">DEBUG 2024-03-27 13:27:01,678 [shard 0:main] osd - pg_epoch 45 pg[3.0( v 37'19 (0'0,37'19] local-lis/les=44/45 n=6 ec=14/14 lis/c=44/14 les/c/f=45/15/0 sis=44) [3,2,1] r=0 lpr=44 pi=[14,44)/1 crt=37'19 lcod 0'0 mlcod 0'0 active+recovering+degraded ObjectContextLoader::load_obc: returning obc 3:0254ed2b:::smithi01231316-5:8(37'18 client.4225.0:19 s 2067228 uv 3 alloc_hint [0 0 0]) for 3:0254ed2b:::smithi01231316-5:8
DEBUG 2024-03-27 13:27:01,678 [shard 0:main] osd - recover_object: loaded obc: 3:0254ed2b:::smithi01231316-5:8
DEBUG 2024-03-27 13:27:01,678 [shard 0:main] osd - prep_push_to_replica: 3:0254ed2b:::smithi01231316-5:8, 37'18
ERROR 2024-03-27 13:27:01,678 [shard 0:main] none - /home/jenkins-build/build/workspace/ceph-dev-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/19.0.0-2476-g56e21662/rpm/el9/BUILD/ceph-19.0.0-2476-g56e21662/src/crimson/osd/replicated_recovery_backend.cc:347 : In function 'RecoveryBackend::interruptible_future<PushOp> ReplicatedRecoveryBackend::prep_push_to_replica(const hobject_t&, eversion_t, pg_shard_t)', ceph_assert(%s)
ssc
Aborting on shard 0.
Backtrace:
0# 0x00007F96396A154C in /lib64/libc.so.6
1# raise in /lib64/libc.so.6
2# abort in /lib64/libc.so.6
3# ceph::__ceph_assert_fail(ceph::assert_data const&) in ceph-osd
4# ReplicatedRecoveryBackend::prep_push_to_replica(hobject_t const&, eversion_t, pg_shard_t) in ceph-osd
</span></code></pre>
crimson - Bug #65200 (New): PeeringState::get_peer_info(pg_shard_t) const: Assertion `it != peer_...
https://tracker.ceph.com/issues/65200
2024-03-28T14:54:17Z
Matan Breizman
<p>osd.1: <a class="external" href="https://pulpito.ceph.com/matan-2024-03-27_13:02:57-crimson-rados-main-distro-crimson-smithi/7626293">https://pulpito.ceph.com/matan-2024-03-27_13:02:57-crimson-rados-main-distro-crimson-smithi/7626293</a></p>
<p>After adding OSD restarts to the thrash tests: <a class="external" href="https://github.com/ceph/ceph/pull/56511">https://github.com/ceph/ceph/pull/56511</a></p>
<pre><code class="text syntaxhl"><span class="CodeRay">INFO 2024-03-27 13:27:01,801 [shard 0:main] osd - start_primary_recovery_ops recovering 0 in pg pg_epoch 45 pg[3.2( v 40'55 lc 36'54 (0'0,40'55] local-lis/les=44/45 n=0 ec=14/14 lis/c=44/14 les/c/f=45/15/0 sis=44) [1,0,3] r=0 lpr=44 pi=[14,44)/2 crt=40'55 mlcod 0'0 active+recovering , missing missing(1 may_include_deletes = 1)
INFO 2024-03-27 13:27:01,801 [shard 0:main] osd - start_primary_recovery_ops 3:48a442ac:::smithi01231316-12:head item.need 40'55 (missing) (missing head)
INFO 2024-03-27 13:27:01,801 [shard 0:main] osd - recover_missing 3:48a442ac:::smithi01231316-12:head v 40'55
INFO 2024-03-27 13:27:01,801 [shard 0:main] osd - recover_missing 3:48a442ac:::smithi01231316-12:head v 40'55, new recovery
DEBUG 2024-03-27 13:27:01,801 [shard 0:main] osd - recover_object: 3:48a442ac:::smithi01231316-12:head, 40'55
DEBUG 2024-03-27 13:27:01,801 [shard 0:main] osd - maybe_pull_missing_obj: 3:48a442ac:::smithi01231316-12:head, 40'55
DEBUG 2024-03-27 13:27:01,802 [shard 0:main] osd - pg_epoch 45 pg[3.2( v 40'55 lc 36'54 (0'0,40'55] local-lis/les=44/45 n=0 ec=14/14 lis/c=44/14 les/c/f=45/15/0 sis=44) [1,0,3] r=0 lpr=44 pi=[14,44)/2 crt=40'55 mlcod 0'0 active+recovering ObjectContextLoader::with_head_obc: object 3:48a442ac:::smithi01231316-12:head
INFO 2024-03-27 13:27:01,802 [shard 0:main] osd - start_primary_recovery_ops started 1 skipped 1
DEBUG 2024-03-27 13:27:01,802 [shard 0:main] osd - pg_epoch 45 pg[3.2( v 40'55 lc 36'54 (0'0,40'55] local-lis/les=44/45 n=0 ec=14/14 lis/c=44/14 les/c/f=45/15/0 sis=44) [1,0,3] r=0 lpr=44 pi=[14,44)/2 crt=40'55 mlcod 0'0 active+recovering ObjectContextLoader::get_or_load_obc: cache hit on 3:48a442ac:::smithi01231316-12:head
DEBUG 2024-03-27 13:27:01,802 [shard 0:main] osd - prepare_pull: 3:48a442ac:::smithi01231316-12:head, 40'55
ceph-osd: /home/jenkins-build/build/workspace/ceph-dev-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/19.0.0-2476-g56e21662/rpm/el9/BUILD/ceph-19.0.0-2476-g56e21662/src/osd/PeeringState.h:2349: const pg_info_t& PeeringState::get_peer_info(pg_shard_t) const: Assertion `it != peer_info.end()' failed.
Aborting on shard 0.
Backtrace:
Reactor stalled for 159 ms on shard 0. Backtrace: 0x6bddd 0xb99f089 0xb871b50 0xb8730cc 0xb8732e2 0xb873438 0xb873901 0x54daf 0x118a06 0x118829 0x6efa70b 0x6efca58 0x6efd993 0x6efde84 0x6efe723 0x6efedaf 0x6efef6b 0x6ef9294 0x6ef9685 0x6ef994c 0x54daf 0xa154b 0x54d05 0x287f2 0x2871a 0x4dca5 0x3f96f7c 0x4f4503f 0x4f5601c 0x4f5771b 0x4f578fb 0x4f57a79 0x3f5e56c 0x460952c 0x4609732 0x46098e6 0x461333a 0x4613515 0x4613839 0x4613b44 0x4613caf 0x4613d38 0x46175d6 0x461789a 0x461792e 0x46179e6 0x462b282 0x462b4fa 0x462b76d 0x4688845 0xb8847d5 0xb89ea6f 0xb93fa6d 0xb9410bb 0xb61d823 0xb61e19f 0x368057a 0x3feaf 0x3ff5f 0x346c434
kernel callstack: 0xffffffffffffff80 0xffffffff8e781dc1 0xffffffff8e782126 0xffffffff8e505d94 0xffffffff8e505f31 0xffffffff8e50733f 0xffffffff8e50801b 0xffffffff8e5084d0 0xffffffff8f07e45c 0xffffffff8f2000ea
Reactor stalled for 303 ms on shard 0. Backtrace: 0x6bddd 0xb99f089 0xb871b50 0xb8730cc 0xb8732e2 0xb873438 0xb873901 0x54daf 0x195b59 0x6efa069 0x6efc6cb 0x6efd993 0x6efde84 0x6efe723 0x6efedaf 0x6efef6b 0x6ef9294 0x6ef9685 0x6ef994c 0x54daf 0xa154b 0x54d05 0x287f2 0x2871a 0x4dca5 0x3f96f7c 0x4f4503f 0x4f5601c 0x4f5771b 0x4f578fb 0x4f57a79 0x3f5e56c 0x460952c 0x4609732 0x46098e6 0x461333a 0x4613515 0x4613839 0x4613b44 0x4613caf 0x4613d38 0x46175d6 0x461789a 0x461792e 0x46179e6 0x462b282 0x462b4fa 0x462b76d 0x4688845 0xb8847d5 0xb89ea6f 0xb93fa6d 0xb9410bb 0xb61d823 0xb61e19f 0x368057a 0x3feaf 0x3ff5f 0x346c434
kernel callstack:
Reactor stalled for 539 ms on shard 0. Backtrace: 0x6bddd 0xb99f089 0xb871b50 0xb8730cc 0xb8732e2 0xb873438 0xb873901 0x54daf 0x195b53 0x6efa069 0x6efe1dd 0x6efe723 0x6efedaf 0x6efef6b 0x6ef9294 0x6ef9685 0x6ef994c 0x54daf 0xa154b 0x54d05 0x287f2 0x2871a 0x4dca5 0x3f96f7c 0x4f4503f 0x4f5601c 0x4f5771b 0x4f578fb 0x4f57a79 0x3f5e56c 0x460952c 0x4609732 0x46098e6 0x461333a 0x4613515 0x4613839 0x4613b44 0x4613caf 0x4613d38 0x46175d6 0x461789a 0x461792e 0x46179e6 0x462b282 0x462b4fa 0x462b76d 0x4688845 0xb8847d5 0xb89ea6f 0xb93fa6d 0xb9410bb 0xb61d823 0xb61e19f 0x368057a 0x3feaf 0x3ff5f 0x346c434
kernel callstack:
Reactor stalled for 975 ms on shard 0. Backtrace: 0x6bddd 0xb99f089 0xb871b50 0xb8730cc 0xb8732e2 0xb873438 0xb873901 0x54daf 0x195bc1 0x6efa069 0x6efc6cb 0x6efd006 0x6efd5f7 0x6efd7b2 0x6efdcdf 0x6efe723 0x6efedaf 0x6efef6b 0x6ef9294 0x6ef9685 0x6ef994c 0x54daf 0xa154b 0x54d05 0x287f2 0x2871a 0x4dca5 0x3f96f7c 0x4f4503f 0x4f5601c 0x4f5771b 0x4f578fb 0x4f57a79 0x3f5e56c 0x460952c 0x4609732 0x46098e6 0x461333a 0x4613515 0x4613839 0x4613b44 0x4613caf 0x4613d38 0x46175d6 0x461789a 0x461792e 0x46179e6 0x462b282 0x462b4fa 0x462b76d 0x4688845 0xb8847d5 0xb89ea6f 0xb93fa6d 0xb9410bb 0xb61d823 0xb61e19f 0x368057a 0x3feaf 0x3ff5f 0x346c434
kernel callstack:
0# 0x00007F0AE5EA154C in /lib64/libc.so.6
1# raise in /lib64/libc.so.6
2# abort in /lib64/libc.so.6
3# 0x00007F0AE5E2871B in /lib64/libc.so.6
4# 0x00007F0AE5E4DCA6 in /lib64/libc.so.6
5# PeeringState::get_peer_info(pg_shard_t) const in ceph-osd
6# ReplicatedRecoveryBackend::prepare_pull(boost::intrusive_ptr<crimson::osd::ObjectContext> const&, PullOp&, RecoveryBackend::pull_info_t&, hobject_t const&, eversion_t) in ceph-
</span></code></pre>
crimson - Bug #65113 (New): crimson: SnapTrimObjSubEvent num_bytes stats calculation
https://tracker.ceph.com/issues/65113
2024-03-25T10:18:06Z
Matan Breizman
<p>From: <a class="external" href="https://github.com/ceph/ceph/pull/56378">https://github.com/ceph/ceph/pull/56378</a></p>
<pre><code class="text syntaxhl"><span class="CodeRay">SnapTrimObjSubEvent calculation of num_bytes seems to be off.
I suspect this is caused due to wrong clone_overlap which are used in SnapSet::get_clone_bytes.
</span></code></pre>
<p>The unittest: LibRadosSnapshotsSelfManagedPP, SnapOverlapPP should be re-enabled as well and will help with identifying obvious issues.</p>
crimson - Feature #64862 (In Progress): Support PG split/merge
https://tracker.ceph.com/issues/64862
2024-03-12T12:14:37Z
Matan Breizman
RADOS - Bug #64646 (Pending Backport): ceph osd pool rmsnap clone object leak
https://tracker.ceph.com/issues/64646
2024-02-29T14:46:56Z
Matan Breizman
<p>There are 2 ways to remove pool snaps, rados tool or mon command (ceph osd pool rmsnap).<br />It seems that the monitor command is not reporting the actual removal via new_removed_snaps which is later processed in OSDMap::apply_incremental.<br />This will result in a clone object leakage since the snap id won't be marked as purged (and won't be trimmed).</p>
<p>First step (<a class="external" href="https://github.com/ceph/ceph/pull/55841">https://github.com/ceph/ceph/pull/55841</a>) would be to fix the command.</p>
<p>Second step is to handle already leaked snapids which were impacted by the faulty command <a class="external" href="https://github.com/ceph/ceph/pull/53545">https://github.com/ceph/ceph/pull/53545</a>.</p>
RADOS - Bug #64519 (In Progress): OSD/MON: No snapshot metadata keys trimming
https://tracker.ceph.com/issues/64519
2024-02-21T10:22:49Z
Matan Breizman
<p>The Monitor's keys of purged_snap_ / purged_epoch_ and OSD's PSN_ (SnapMapper::PURGED_SNAP_PREFIX) keys are not trimmed and will continue to accumulate.</p>
<p>Relevant threads:<br /><a class="external" href="https://lists.ceph.io/hyperkitty/list/dev@ceph.io/thread/UOJG46YXTIPOXJUSELIN42ATAD5FPMDY/">https://lists.ceph.io/hyperkitty/list/dev@ceph.io/thread/UOJG46YXTIPOXJUSELIN42ATAD5FPMDY/</a><br /><a class="external" href="https://lists.ceph.io/hyperkitty/list/dev@ceph.io/thread/B72HSXIGX6IJFLTZU2SPXCQQWFTOXS5A/">https://lists.ceph.io/hyperkitty/list/dev@ceph.io/thread/B72HSXIGX6IJFLTZU2SPXCQQWFTOXS5A/</a></p>
Dashboard - Bug #64377 (New): tasks/e2e: Modular dependency problems
https://tracker.ceph.com/issues/64377
2024-02-11T09:47:36Z
Matan Breizman
<pre><code class="text syntaxhl"><span class="CodeRay">2024-02-09T10:23:51.910 INFO:tasks.workunit.client.0.smithi096.stderr:Modular dependency problems:
2024-02-09T10:23:51.911 INFO:tasks.workunit.client.0.smithi096.stderr:
2024-02-09T10:23:51.911 INFO:tasks.workunit.client.0.smithi096.stderr: Problem 1: conflicting requests
2024-02-09T10:23:51.911 INFO:tasks.workunit.client.0.smithi096.stderr: - nothing provides module(platform:el8) needed by module container-tools:rhel8:820240206092359:20125149.x86_64 from CentOS-AppStream
2024-02-09T10:23:51.911 INFO:tasks.workunit.client.0.smithi096.stderr: Problem 2: conflicting requests
2024-02-09T10:23:51.911 INFO:tasks.workunit.client.0.smithi096.stderr: - nothing provides module(platform:el8) needed by module eclipse:rhel8:820201023100746:b230ed4e.x86_64 from CentOS-AppStream
</span></code></pre>
<p><a class="external" href="https://pulpito.ceph.com/yuriw-2024-02-09_00:15:46-rados-wip-yuri2-testing-2024-02-08-0727-distro-default-smithi/7553319">https://pulpito.ceph.com/yuriw-2024-02-09_00:15:46-rados-wip-yuri2-testing-2024-02-08-0727-distro-default-smithi/7553319</a></p>
<p><a class="external" href="https://pulpito.ceph.com/yuriw-2024-02-09_00:15:46-rados-wip-yuri2-testing-2024-02-08-0727-distro-default-smithi/7553483">https://pulpito.ceph.com/yuriw-2024-02-09_00:15:46-rados-wip-yuri2-testing-2024-02-08-0727-distro-default-smithi/7553483</a></p>
crimson - Bug #64332 (New): seastar submodule: Enable SEASTAR_GATE_HOLDER_DEBUG
https://tracker.ceph.com/issues/64332
2024-02-06T15:26:41Z
Matan Breizman
<p>See: <a class="external" href="https://github.com/scylladb/seastar/pull/1329">https://github.com/scylladb/seastar/pull/1329</a><br />Seastar added a debug macro that is used to count the number of gate holders in debug mode and assert it's zero when the gate is moved or destroyed.</p>
<p>This exposed a bug in the current way gates are handled in Crimson and causes a crash.<br />The macro is disabled in our seastar submodule to allow upgrading.<br />The macro should be re-enabled once the crash is fixed.</p>
<p>Can be reproduced locally using `ceph_test_cls_rbd`.<br /><pre><code class="text syntaxhl"><span class="CodeRay">(gdb) bt
#0 0x00000000098ac0c0 in boost::intrusive::list_node_traits<void*>::set_next (next=0x60e00010acf8, n=0x0)
at ceph/build/boost/include/boost/intrusive/detail/list_node.hpp:66
#1 boost::intrusive::circular_list_algorithms<boost::intrusive::list_node_traits<void*> >::unlink (
this_node=0x7fc7710e02a8)
at ceph/build/boost/include/boost/intrusive/circular_list_algorithms.hpp:154
#2 boost::intrusive::generic_hook<(boost::intrusive::algo_types)0, boost::intrusive::list_node_traits<void*>, boost::intrusive::member_tag, (boost::intrusive::link_mode_type)2, (boost::intrusive::base_hook_type)0>::unlink (
this=0x7fc7710e02a8) at ceph/build/boost/include/boost/intrusive/detail/generic_hook.hpp:214
#3 boost::intrusive::detail::destructor_impl<boost::intrusive::generic_hook<(boost::intrusive::algo_types)0, boost::intrusive::list_node_traits<void*>, boost::intrusive::member_tag, (boost::intrusive::link_mode_type)2, (boost::intrusive::base_hook_type)0> > (hook=...)
at ceph/build/boost/include/boost/intrusive/detail/generic_hook.hpp:53
#4 boost::intrusive::generic_hook<(boost::intrusive::algo_types)0, boost::intrusive::list_node_traits<void*>, boost::intrusive::member_tag, (boost::intrusive::link_mode_type)2, (boost::intrusive::base_hook_type)0>::~generic_hook (this=0x7fc7710e02a8, __in_chrg=<optimized out>)
at ceph/build/boost/include/boost/intrusive/detail/generic_hook.hpp:193
#5 boost::intrusive::list_member_hook<boost::intrusive::link_mode<(boost::intrusive::link_mode_type)2> >::~list_member_hook (this=0x7fc7710e02a8, __in_chrg=<optimized out>)
at ceph/build/boost/include/boost/intrusive/list_hook.hpp:207
#6 seastar::gate::holder::~holder (this=0x7fc7710e02a0, __in_chrg=<optimized out>)
at ceph/src/seastar/include/seastar/core/gate.hh:236
#7 0x000000000a9a7775 in seastar::with_gate<crimson::os::AlienStore::do_with_op_gate<std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, ceph::buffer::v15_2_0::list, std::less<void>, std::allocator<std::pair<const std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, ceph::buffer::v15_2_0::list> > >, crimson::os::AlienStore::omap_get_values(crimson::os::FuturizedStore::Shard::CollectionRef, const ghobject_t&, const std::optional<std::__cxx11::basic_string<char> >&)::<lambda(auto:128&)> >(std::map<std::__cxx11::basic_string<char>, ceph::buffer::v15_2_0::list, std::less<void> >&&, crimson::os::AlienStore::omap_get_values(crimson::os::FuturizedStore::Shard::CollectionRef, const ghobject_t&, const std::optional<std::__cxx11::basic_string<char> >&)::<lambda(auto:128&)>&&) const::<lambda()> > (func=..., g=...)
at ceph/src/seastar/include/seastar/core/gate.hh:327
#8 crimson::os::AlienStore::do_with_op_gate<std::map<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, ceph::buffer::v15_2_0::list, std::less<void>, std::allocator<std::pair<const std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, ceph::buffer::v15_2_0::list> > >, crimson::os::AlienStore::omap_get_values(crimson::os::FuturizedStore::Shard::CollectionRef, const ghobject_t&, const std::optional<std::__cxx11::basic_string<char> >&)::<lambda(auto:128&)> >(void) const (this=this@entry=0x628000010100)
at ceph/src/crimson/os/alienstore/alien_store.h:117
#9 0x000000000a9bf439 in crimson::os::AlienStore::omap_get_values (this=0x628000010100, ch=..., oid=...,
start=std::optional<std::string> = {...}) at ceph/src/crimson/os/alienstore/alien_store.cc:442
#10 0x000000000666a017 in OSDriver::get_next (this=0x625000ed0158,
key="SNA_2_", '0' <repeats 15 times>, "2_", '0' <repeats 15 times>, "2.2B", next=<optimized out>)
at ceph/src/osd/SnapMapper.cc:119
#11 0x00000000066b1a79 in MapCacher::MapCacher<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, ceph::buffer::v15_2_0::list>::get_next (this=this@entry=0x625000ed0210,
key="SNA_2_", '0' <repeats 15 times>, "2_", '0' <repeats 15 times>, "2.2B", next=next@entry=0x7fc7710e1ae0)
at ceph/src/common/map_cacher.hpp:99
#12 0x000000000661abed in SnapMapper::get_objects_by_prefixes (this=this@entry=0x625000ed0200, snap=...,
max=max@entry=2, out=out@entry=0x7fc7710e2be0) at ceph/src/osd/SnapMapper.cc:591
#13 0x00000000066227bc in SnapMapper::get_next_objects_to_trim (this=this@entry=0x625000ed0200, snap=...,
max=max@entry=2, out=out@entry=0x7fc7710e2be0) at ceph/src/osd/SnapMapper.cc:652
</span></code></pre></p>
crimson - Bug #64206 (New): obc->is_loaded_and_valid() assertion
https://tracker.ceph.com/issues/64206
2024-01-29T08:05:12Z
Matan Breizman
<p>From osd.1:<br /><pre><code class="text syntaxhl"><span class="CodeRay">ERROR 2024-01-28 13:11:27,149 [shard 0] none - /home/jenkins-build/build/workspace/ceph-dev-new-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/19.0.0-933-g00c0d54d/rpm/el9/BUILD/ceph-19.0.0-933-g00c0d54d/src/crimson/osd/object_context_loader.cc:179 : In function 'crimson::interruptible::interruptible_errorator<crimson::osd::IOInterruptCondition, crimson::errorator<crimson::unthrowable_wrapper<const std::error_code&, ((const std::error_code&)(& crimson::ec<2>))>, crimson::unthrowable_wrapper<const std::error_code&, ((const std::error_code&)(& crimson::ec<84>))> > >::future<boost::intrusive_ptr<crimson::osd::ObjectContext> > crimson::osd::ObjectContextLoader::get_or_load_obc(crimson::osd::ObjectContextRef, bool) [with RWState::State State = RWState::RWREAD; crimson::interruptible::interruptible_errorator<crimson::osd::IOInterruptCondition, crimson::errorator<crimson::unthrowable_wrapper<const std::error_code&, ((const std::error_code&)(& crimson::ec<2>))>, crimson::unthrowable_wrapper<const std::error_code&, ((const std::error_code&)(& crimson::ec<84>))> > >::future<boost::intrusive_ptr<crimson::osd::ObjectContext> > = crimson::interruptible::interruptible_future_detail<crimson::osd::IOInterruptCondition, crimson::errorator<crimson::unthrowable_wrapper<const std::error_code&, ((const std::error_code&)(& crimson::ec<2>))>, crimson::unthrowable_wrapper<const std::error_code&, ((const std::error_code&)(& crimson::ec<84>))> >::_future<crimson::errorated_future_marker<boost::intrusive_ptr<crimson::osd::ObjectContext> > > >; crimson::osd::ObjectContextRef = boost::intrusive_ptr<crimson::osd::ObjectContext>]', ceph_assert(%s)
obc->is_loaded_and_valid()
Aborting on shard 0.
Backtrace:
</span></code></pre></p>
<p><a class="external" href="https://pulpito.ceph.com/matan-2024-01-28_12:38:29-crimson-rados-wip-matanb-crimson-pg-map-logs-distro-crimson-smithi/7535805">https://pulpito.ceph.com/matan-2024-01-28_12:38:29-crimson-rados-wip-matanb-crimson-pg-map-logs-distro-crimson-smithi/7535805</a></p>
crimson - Feature #64086 (Resolved): Enable multicore messenger
https://tracker.ceph.com/issues/64086
2024-01-18T15:14:28Z
Matan Breizman
<p>Set SocketMessenger::dispatch_only_on_sid to be false on "cluster" and "client" messengers.</p>
<ul>
<li>Not supported yet:<br /> - A crash can be reproduced with a 3 OSD cluster with smp 3 by running `ceph_test_cls_rbd`.<br /> - <a class="external" href="https://pulpito.ceph.com/matan-2024-01-11_09:46:59-crimson-rados-wip-matanb-crimson-multicore-msgr-distro-crimson-smithi/">https://pulpito.ceph.com/matan-2024-01-11_09:46:59-crimson-rados-wip-matanb-crimson-multicore-msgr-distro-crimson-smithi/</a></li>
</ul>
<p>See (Socket.cc):</p>
<pre><code class="text syntaxhl"><span class="CodeRay">seastar::future<ShardedServerSocket*>
ShardedServerSocket::create(bool dispatch_only_on_this_shard)
{
auto primary_sid = seastar::this_shard_id();
// start the sharded service: we should only construct/stop shards on #0
return seastar::smp::submit_to(0, [primary_sid, dispatch_only_on_this_shard] {
auto service = std::make_unique<sharded_service_t>();
return service->start(
primary_sid, dispatch_only_on_this_shard, construct_tag{} <-----
).then([service = std::move(service)]() mutable {
auto p_shard = service.get();
p_shard->local().service = std::move(service);
return p_shard;
});
}).then([](auto p_shard) {
return &p_shard->local();
});
}
</span></code></pre>
<pre><code class="text syntaxhl"><span class="CodeRay"> using sharded_service_t = seastar::sharded<ShardedServerSocket>;
std::unique_ptr<sharded_service_t> service;
</span></code></pre>
<p>This will set ShardedServerSocket::dispatch_only_on_primary_sid to be true when starting and creating the sharded service.<br />ShardedServerSocket::dispatch_only_on_primary_sid will be used in 2 cases</p>
<p><strong>1) SocketMessenger::start:<br /></strong><pre><code class="text syntaxhl"><span class="CodeRay"> bool is_fixed_shard_dispatching() const {
return dispatch_only_on_primary_sid;
}
</span></code></pre></p>
<pre><code class="text syntaxhl"><span class="CodeRay">seastar::future<> SocketMessenger::start(
const dispatchers_t& _dispatchers) {
assert(seastar::this_shard_id() == sid);
dispatchers.assign(_dispatchers);
if (listener) {
return listener->accept([this](SocketRef _socket, entity_addr_t peer_addr) {
..
if (listener->is_fixed_shard_dispatching()) { <-----
return accept(std::move(socket), peer_addr);
} else {
return seastar::smp::submit_to(sid,
[this, peer_addr, socket = std::move(socket)]() mutable {
return accept(std::move(socket), peer_addr);
});
}
});
}
return seastar::now();
}
</span></code></pre>
<p><strong>2) ShardedServerSocket::listen:</strong><br /><pre><code class="text syntaxhl"><span class="CodeRay">ShardedServerSocket::listen(entity_addr_t addr)
{
ceph_assert_always(seastar::this_shard_id() == primary_sid);
logger().debug("ShardedServerSocket({})::listen()...", addr);
..
seastar::listen_options lo;
if (ss.dispatch_only_on_primary_sid) { <-----
lo.set_fixed_cpu(ss.primary_sid);
}
ss.listener = seastar::listen(s_addr, lo)
</span></code></pre></p>
crimson - Bug #64040 (New): PGBackend unhandled throw exceptions
https://tracker.ceph.com/issues/64040
2024-01-16T10:56:25Z
Matan Breizman
<p>See `PGBackend::interruptible_future<> PGBackend::write_same()`:<br /><pre><code class="text syntaxhl"><span class="CodeRay"> if (op.writesame.data_length == 0 ||
len % op.writesame.data_length != 0 ||
op.writesame.data_length != osd_op.indata.length()) {
throw crimson::osd::invalid_argument();
}
</span></code></pre></p>
<p>Any catch-throw usage should probably be replaced by errorator instead, and handling the error should result in replying<br />with the correct error message. This is being tested for example in NeoRadosMisc.WriteSame and NeoRadosIo.Limits (which are now skipped):<br /><pre><code class="text syntaxhl"><span class="CodeRay">  // Write length must be a multiple of the pattern length
co_await expect_error_code(execute(oid, WriteOp{}
.writesame(0, samelen - 1, patbl)),
sys::errc::invalid_argument);
</span></code></pre></p>
RADOS - Feature #64002 (New): Allow clusters to recover from "past_interval start interval mismatch"
https://tracker.ceph.com/issues/64002
2024-01-11T13:58:43Z
Matan Breizman
<p>Clusters affected by: <a class="external" href="https://tracker.ceph.com/issues/49689">https://tracker.ceph.com/issues/49689</a> will need to upgrade to a release version that includes the fix (Backported to P and later).<br />After upgrading, check_past_interval_bounds must be skipped in order to allow the OSD to boot and come back up. Later on, the skip should be set back to false.</p>
crimson - Bug #64000 (Resolved): OpInfo::check_rmw - rmw_flags != 0
https://tracker.ceph.com/issues/64000
2024-01-11T09:05:39Z
Matan Breizman
<pre><code class="text syntaxhl"><span class="CodeRay">ERROR 2024-01-10 16:03:45,725 [shard 1] none - /home/jenkins-build/build/workspace/ceph-dev-new-build/ARCH/x86_64/AVAILABLE_ARCH/x86_64/AVAILABLE_DIST/centos9/DIST/centos9/MACHINE_SIZE/gigantic/release/19.0.0-596-ga59e708b/rpm/el9/BUILD/ceph-19.0.0-596-ga59e708b/src/osd/osd_op_util.cc:16 : In function 'bool OpInfo::check_rmw(int) const', ceph_assert(%s)
rmw_flags != 0
Aborting on shard 1.
Backtrace:
0# 0x00007F78852A154C in /lib64/libc.so.6
1# raise in /lib64/libc.so.6
2# abort in /lib64/libc.so.6
3# ceph::__ceph_assert_fail(char const*, char const*, int, char const*) in ceph-osd
4# OpInfo::check_rmw(int) const in ceph-osd
5# OpInfo::need_write_cap() const in ceph-osd
6# OpInfo::may_write() const in ceph-osd
7# OpInfo::rwordered() const in ceph-osd
8# crimson::osd::PG::get_lock_type(OpInfo const&) in ceph-osd
9# crimson::osd::PG::with_locked_obc(hobject_t const&, OpInfo const&, std::function<crimson::interruptible::interruptible_future_detail<crimson::osd::IOInterruptCondition, crimson::errorator<crimson::unthrowable_wrapper<std::error_code const&, crimson::ec<2> >, crimson::unthrowable_wrapper<std::error_code const&, crimson::ec<84> > >::_future<crimson::errorated_future_marker<void> > > (boost::intrusive_ptr<crimson::osd::ObjectContext>, boost::intrusive_ptr<crimson::osd::ObjectContext>)>&&) in ceph-osd
10# 0x0000563C7567388B in ceph-osd
11# 0x0000563C75673F5F in ceph-osd
</span></code></pre>
<p><a class="external" href="https://pulpito.ceph.com/matan-2024-01-10_15:38:24-crimson-rados-wip-matanb-crimson-snaptrim_event-cleanup-distro-crimson-smithi/7511867/">https://pulpito.ceph.com/matan-2024-01-10_15:38:24-crimson-rados-wip-matanb-crimson-snaptrim_event-cleanup-distro-crimson-smithi/7511867/</a></p>
RADOS - Bug #63881 (Fix Under Review): Inaccurate pg splits/merges and pool deletion/creation on ...
https://tracker.ceph.com/issues/63881
2023-12-21T11:57:22Z
Matan Breizman