Actions
Bug #60947
opencrash: crimson::dmclock::RequestTag::RequestTag(crimson::dmclock::RequestTag const&, crimson::dmclock::ClientInfo const&, unsigned int, unsigned int, double, unsigned int, double)
Status:
New
Priority:
Normal
Assignee:
-
Category:
-
Target version:
-
% Done:
0%
Source:
Telemetry
Tags:
Backport:
Regression:
No
Severity:
3 - minor
Reviewed:
Affected Versions:
ceph-qa-suite:
Component(RADOS):
Pull request ID:
Crash signature (v1):
a7b3721da55f15f764ea9db98cfa7f98e344c230b2d3280c0921889229de5b2a
Crash signature (v2):
Description
Sanitized backtrace:
crimson::dmclock::RequestTag::RequestTag(crimson::dmclock::RequestTag const&, crimson::dmclock::ClientInfo const&, unsigned int, unsigned int, double, unsigned int, double) crimson::dmclock::PriorityQueueBase<ceph::osd::scheduler::scheduler_id_t, ceph::osd::scheduler::OpSchedulerItem, true, true, 2u>::do_add_request(std::unique_ptr<ceph::osd::scheduler::OpSchedulerItem, std::default_delete<ceph::osd::scheduler::OpSchedulerItem> >&&, ceph::osd::scheduler::scheduler_id_t const&, crimson::dmclock::ReqParams const&, double, unsigned int) crimson::dmclock::PullPriorityQueue<ceph::osd::scheduler::scheduler_id_t, ceph::osd::scheduler::OpSchedulerItem, true, true, 2u>::add_request(ceph::osd::scheduler::OpSchedulerItem&&, ceph::osd::scheduler::scheduler_id_t const&, unsigned int) ceph::osd::scheduler::mClockScheduler::enqueue(ceph::osd::scheduler::OpSchedulerItem&&) OSD::ShardedOpWQ::_enqueue(ceph::osd::scheduler::OpSchedulerItem&&) void OSDService::queue_scrub_event_msg<ceph::osd::scheduler::PGScrubMapsCompared>(PG*, Scrub::scrub_prio_t) Scrub::WaitReplicas::react(Scrub::GotReplicas const&) boost::statechart::simple_state<Scrub::WaitReplicas, Scrub::ActiveScrubbing, boost::mpl::list<mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, (boost::statechart::history_mode)0>::react_impl(boost::statechart::event_base const&, void const*) boost::statechart::state_machine<Scrub::ScrubMachine, Scrub::NotActive, std::allocator<boost::statechart::none>, boost::statechart::null_exception_translator>::process_event(boost::statechart::event_base const&) PgScrubber::send_replica_maps_ready(unsigned int) ceph::osd::scheduler::PGScrubGotReplMaps::run(OSD*, OSDShard*, boost::intrusive_ptr<PG>&, ThreadPool::TPHandle&) 
OSD::ShardedOpWQ::_process(unsigned int, ceph::heartbeat_handle_d*) ShardedThreadPool::shardedthreadpool_worker(unsigned int) ShardedThreadPool::WorkThreadSharded::entry()
Crash dump sample:
{ "backtrace": [ "/lib64/libpthread.so.0(+0x12cf0) [0x7fe9d8685cf0]", "(crimson::dmclock::RequestTag::RequestTag(crimson::dmclock::RequestTag const&, crimson::dmclock::ClientInfo const&, unsigned int, unsigned int, double, unsigned int, double)+0x5e) [0x5616ac334c5e]", "(crimson::dmclock::PriorityQueueBase<ceph::osd::scheduler::scheduler_id_t, ceph::osd::scheduler::OpSchedulerItem, true, true, 2u>::do_add_request(std::unique_ptr<ceph::osd::scheduler::OpSchedulerItem, std::default_delete<ceph::osd::scheduler::OpSchedulerItem> >&&, ceph::osd::scheduler::scheduler_id_t const&, crimson::dmclock::ReqParams const&, double, unsigned int)+0x833) [0x5616ac33a1b3]", "(crimson::dmclock::PullPriorityQueue<ceph::osd::scheduler::scheduler_id_t, ceph::osd::scheduler::OpSchedulerItem, true, true, 2u>::add_request(ceph::osd::scheduler::OpSchedulerItem&&, ceph::osd::scheduler::scheduler_id_t const&, unsigned int)+0x102) [0x5616ac33a772]", "(ceph::osd::scheduler::mClockScheduler::enqueue(ceph::osd::scheduler::OpSchedulerItem&&)+0x8f) [0x5616ac33238f]", "(OSD::ShardedOpWQ::_enqueue(ceph::osd::scheduler::OpSchedulerItem&&)+0x125) [0x5616ac03bc05]", "(void OSDService::queue_scrub_event_msg<ceph::osd::scheduler::PGScrubMapsCompared>(PG*, Scrub::scrub_prio_t)+0x12c) [0x5616ac0c6b7c]", "(Scrub::WaitReplicas::react(Scrub::GotReplicas const&)+0x165) [0x5616ac2b1ec5]", "(boost::statechart::simple_state<Scrub::WaitReplicas, Scrub::ActiveScrubbing, boost::mpl::list<mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na, mpl_::na>, (boost::statechart::history_mode)0>::react_impl(boost::statechart::event_base const&, void const*)+0x111) [0x5616ac2baf01]", "(boost::statechart::state_machine<Scrub::ScrubMachine, Scrub::NotActive, 
std::allocator<boost::statechart::none>, boost::statechart::null_exception_translator>::process_event(boost::statechart::event_base const&)+0x7b) [0x5616ac2807fb]", "(PgScrubber::send_replica_maps_ready(unsigned int)+0xd6) [0x5616ac27a016]", "(ceph::osd::scheduler::PGScrubGotReplMaps::run(OSD*, OSDShard*, boost::intrusive_ptr<PG>&, ThreadPool::TPHandle&)+0x2f) [0x5616ac32cb1f]", "(OSD::ShardedOpWQ::_process(unsigned int, ceph::heartbeat_handle_d*)+0x115f) [0x5616ac041dbf]", "(ShardedThreadPool::shardedthreadpool_worker(unsigned int)+0x435) [0x5616ac79f8c5]", "(ShardedThreadPool::WorkThreadSharded::entry()+0x14) [0x5616ac7a1fe4]", "/lib64/libpthread.so.0(+0x81ca) [0x7fe9d867b1ca]", "clone()" ], "ceph_version": "17.2.5", "crash_id": "2023-04-13T10:15:23.747763Z_410b9b4e-9731-41bb-aca3-c950a3bfef41", "entity_name": "osd.b1ed3b57214817bfdf53fef4d9921b1314ef8bfa", "os_id": "centos", "os_name": "CentOS Stream", "os_version": "8", "os_version_id": "8", "process_name": "ceph-osd", "stack_sig": "a7b3721da55f15f764ea9db98cfa7f98e344c230b2d3280c0921889229de5b2a", "timestamp": "2023-04-13T10:15:23.747763Z", "utsname_machine": "x86_64", "utsname_release": "6.2.8-200.fc37.x86_64", "utsname_sysname": "Linux", "utsname_version": "#1 SMP PREEMPT_DYNAMIC Wed Mar 22 19:11:02 UTC 2023" }
Actions