From 3a1a3dd0db4256eef6382706c84c7b908b48bccd Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 27 Jan 2022 12:17:14 -0800
Subject: [PATCH] hle: kernel: KScheduler: Fix deadlock with core waiting for a thread lock that has migrated.

- Previously, it was possible for a thread migration to occur from core A to core B.
- Next, core B waits on a guest lock that must be released by a thread queued for core A.
- Meanwhile, core A is still waiting on core B's current thread lock, resulting in a deadlock.
- Fix this by try-locking the thread lock.
- Fixes softlocks in FF8 and Pokemon Legends Arceus.
---
 src/core/hle/kernel/k_priority_queue.h |  2 +-
 src/core/hle/kernel/k_scheduler.cpp    | 45 +++++++++++++-------------
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 0b894c8cfe..bd779739d1 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -258,7 +258,7 @@ private:
 
 private:
     constexpr void ClearAffinityBit(u64& affinity, s32 core) {
-        affinity &= ~(u64(1) << core);
+        affinity &= ~(UINT64_C(1) << core);
     }
 
     constexpr s32 GetNextCore(u64& affinity) {
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index b32d4f2857..c965208287 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -710,23 +710,19 @@ void KScheduler::Unload(KThread* thread) {
 }
 
 void KScheduler::Reload(KThread* thread) {
-    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
 
-    if (thread) {
-        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
-
-        Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-        cpu_core.LoadContext(thread->GetContext32());
-        cpu_core.LoadContext(thread->GetContext64());
-        cpu_core.SetTlsAddress(thread->GetTLSAddress());
-        cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
-        cpu_core.ClearExclusiveState();
-    }
+    Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+    cpu_core.LoadContext(thread->GetContext32());
+    cpu_core.LoadContext(thread->GetContext64());
+    cpu_core.SetTlsAddress(thread->GetTLSAddress());
+    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
 }
 
 void KScheduler::SwitchContextStep2() {
     // Load context of new thread
-    Reload(current_thread.load());
+    Reload(GetCurrentThread());
 
     RescheduleCurrentCore();
 }
@@ -735,13 +731,17 @@ void KScheduler::ScheduleImpl() {
     KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
-    state.needs_scheduling = false;
+    state.needs_scheduling.store(false);
 
     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
         next_thread = idle_thread;
    }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     // We never want to schedule a dummy thread, as these are only used by host threads for locking.
     if (next_thread->GetThreadType() == ThreadType::Dummy) {
         ASSERT_MSG(false, "Dummy threads should never be scheduled!");
@@ -755,14 +755,8 @@
         return;
     }
 
-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
-    current_thread.store(next_thread);
-
+    // Update the CPU time tracking variables.
     KProcess* const previous_process = system.Kernel().CurrentProcess();
-
     UpdateLastContextSwitchTime(previous_thread, previous_process);
 
     // Save context for previous thread
@@ -770,6 +764,10 @@
 
     std::shared_ptr<Common::Fiber>* old_context;
     old_context = &previous_thread->GetHostContext();
+
+    // Set the new thread.
+    current_thread.store(next_thread);
+
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
@@ -797,8 +795,8 @@ void KScheduler::SwitchToCurrent() {
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.Lock();
-                if (next_thread->GetRawState() != ThreadState::Runnable) {
+                const auto locked = next_thread->context_guard.TryLock();
+                if (state.needs_scheduling.load()) {
                     next_thread->context_guard.Unlock();
                     break;
                 }
@@ -806,6 +804,9 @@
                     next_thread->context_guard.Unlock();
                     break;
                 }
+                if (!locked) {
+                    continue;
+                }
             }
             auto thread = next_thread ? next_thread : idle_thread;
             Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
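
For readers unfamiliar with the pattern, below is a minimal standalone sketch of the try-lock retry loop the commit message describes. It is not yuzu code: ThreadContext, needs_scheduling, and the SwitchTo* functions are hypothetical stand-ins for KThread::context_guard, state.needs_scheduling, and KScheduler::SwitchToCurrent(). The point it illustrates is that a try-lock lets the core notice a pending reschedule (or a lock whose owner has migrated away and is waiting on this core) and loop back, where a blocking lock would wait forever.

// Minimal sketch (not yuzu code) of the try-lock pattern used by the patch.
#include <atomic>
#include <mutex>

struct ThreadContext {
    std::mutex context_guard; // stand-in for KThread::context_guard
};

std::atomic<bool> needs_scheduling{false}; // stand-in for state.needs_scheduling

// Old behaviour: block until the guard is free. If the owner migrated to
// another core and is itself waiting on this core, neither side ever wakes up.
void SwitchToBlocking(ThreadContext& next) {
    next.context_guard.lock();
    // ... switch to the thread ...
    next.context_guard.unlock();
}

// Patched behaviour: try-lock, re-check scheduler state, and retry rather
// than sleeping on a lock that may never be released from this core.
void SwitchToTryLock(ThreadContext& next) {
    while (true) {
        const bool locked = next.context_guard.try_lock();
        if (needs_scheduling.load()) {
            if (locked) {
                next.context_guard.unlock();
            }
            return; // let the scheduler pick a thread again
        }
        if (!locked) {
            continue; // guard still held elsewhere; retry instead of blocking
        }
        // ... switch to the thread ...
        next.context_guard.unlock();
        return;
    }
}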