Merge pull request #7737 from bunnei/fix-dummy-thread-leak

Various fixes to HLE service thread management
This commit is contained in:
bunnei 2022-01-21 22:34:47 -08:00 committed by GitHub
commit 68c8a1b170
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 120 additions and 40 deletions

View file

@@ -341,10 +341,6 @@ public:
return *thread; return *thread;
} }
bool IsThreadWaiting() const {
return is_thread_waiting;
}
private: private:
friend class IPC::ResponseBuilder; friend class IPC::ResponseBuilder;
@@ -379,7 +375,6 @@ private:
u32 domain_offset{}; u32 domain_offset{};
std::shared_ptr<SessionRequestManager> manager; std::shared_ptr<SessionRequestManager> manager;
bool is_thread_waiting{};
KernelCore& kernel; KernelCore& kernel;
Core::Memory::Memory& memory; Core::Memory::Memory& memory;

View file

@@ -45,6 +45,7 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
{ t.GetActiveCore() } -> Common::ConvertibleTo<s32>; { t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
{ t.GetPriority() } -> Common::ConvertibleTo<s32>; { t.GetPriority() } -> Common::ConvertibleTo<s32>;
{ t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
}; };
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority> template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
@@ -349,24 +350,49 @@ public:
// Mutators. // Mutators.
constexpr void PushBack(Member* member) { constexpr void PushBack(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
this->PushBack(member->GetPriority(), member); this->PushBack(member->GetPriority(), member);
} }
constexpr void Remove(Member* member) { constexpr void Remove(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
this->Remove(member->GetPriority(), member); this->Remove(member->GetPriority(), member);
} }
constexpr void MoveToScheduledFront(Member* member) { constexpr void MoveToScheduledFront(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
} }
constexpr KThread* MoveToScheduledBack(Member* member) { constexpr KThread* MoveToScheduledBack(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return {};
}
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
member); member);
} }
// First class fancy operations. // First class fancy operations.
constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) { constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
ASSERT(IsValidPriority(prev_priority)); ASSERT(IsValidPriority(prev_priority));
// Remove the member from the queues. // Remove the member from the queues.
@@ -383,6 +409,11 @@ public:
constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity, constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
Member* member) { Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
// Get the new information. // Get the new information.
const s32 priority = member->GetPriority(); const s32 priority = member->GetPriority();
const AffinityMaskType& new_affinity = member->GetAffinityMask(); const AffinityMaskType& new_affinity = member->GetAffinityMask();
@@ -412,6 +443,11 @@ public:
} }
constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) { constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
// Get the new information. // Get the new information.
const s32 new_core = member->GetActiveCore(); const s32 new_core = member->GetActiveCore();
const s32 priority = member->GetPriority(); const s32 priority = member->GetPriority();

View file

@@ -406,6 +406,9 @@ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduli
} else { } else {
RescheduleCores(kernel, cores_needing_scheduling); RescheduleCores(kernel, cores_needing_scheduling);
} }
// Special case to ensure dummy threads that are waiting block.
current_thread->IfDummyThreadTryWait();
} }
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -739,6 +742,12 @@ void KScheduler::ScheduleImpl() {
next_thread = idle_thread; next_thread = idle_thread;
} }
// We never want to schedule a dummy thread, as these are only used by host threads for locking.
if (next_thread->GetThreadType() == ThreadType::Dummy) {
ASSERT_MSG(false, "Dummy threads should never be scheduled!");
next_thread = idle_thread;
}
// If we're not actually switching thread, there's nothing to do. // If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) { if (next_thread == current_thread.load()) {
previous_thread->EnableDispatch(); previous_thread->EnableDispatch();

View file

@@ -8,7 +8,6 @@
#include "common/assert.h" #include "common/assert.h"
#include "common/common_types.h" #include "common/common_types.h"
#include "common/logging/log.h" #include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/core_timing.h" #include "core/core_timing.h"
#include "core/hle/ipc_helpers.h" #include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/hle_ipc.h"
@@ -123,20 +122,10 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
// In the event that something fails here, stub a result to prevent the game from crashing.
// This is a work-around in the event that somehow we process a service request after the
// session has been closed by the game. This has been observed to happen rarely in Pokemon
// Sword/Shield and is likely a result of us using host threads/scheduling for services.
// TODO(bunnei): Find a better solution here.
auto error_guard = SCOPE_GUARD({ CompleteSyncRequest(*context); });
// Ensure we have a session request handler // Ensure we have a session request handler
if (manager->HasSessionRequestHandler(*context)) { if (manager->HasSessionRequestHandler(*context)) {
if (auto strong_ptr = manager->GetServiceThread().lock()) { if (auto strong_ptr = manager->GetServiceThread().lock()) {
strong_ptr->QueueSyncRequest(*parent, std::move(context)); strong_ptr->QueueSyncRequest(*parent, std::move(context));
// We succeeded.
error_guard.Cancel();
} else { } else {
ASSERT_MSG(false, "strong_ptr is nullptr!"); ASSERT_MSG(false, "strong_ptr is nullptr!");
} }
@@ -171,13 +160,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
convert_to_domain = false; convert_to_domain = false;
} }
// Some service requests require the thread to block // The calling thread is waiting for this request to complete, so wake it up.
{
KScopedSchedulerLock lock(kernel);
if (!context.IsThreadWaiting()) {
context.GetThread().EndWait(result); context.GetThread().EndWait(result);
}
}
return result; return result;
} }

View file

@@ -106,7 +106,7 @@ KThread::~KThread() = default;
ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 virt_core, KProcess* owner, ThreadType type) { s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid. // Assert parameters are valid.
ASSERT((type == ThreadType::Main) || ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority)); (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
ASSERT((owner != nullptr) || (type != ThreadType::User)); ASSERT((owner != nullptr) || (type != ThreadType::User));
ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>())); ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
@@ -140,7 +140,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type)); UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break; break;
} }
thread_type_for_debugging = type; thread_type = type;
// Set the ideal core ID and affinity mask. // Set the ideal core ID and affinity mask.
virtual_ideal_core_id = virt_core; virtual_ideal_core_id = virt_core;
@@ -262,7 +262,7 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
} }
ResultCode KThread::InitializeDummyThread(KThread* thread) { ResultCode KThread::InitializeDummyThread(KThread* thread) {
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy); return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
} }
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -1075,12 +1075,46 @@ ResultCode KThread::Sleep(s64 timeout) {
return ResultSuccess; return ResultSuccess;
} }
void KThread::IfDummyThreadTryWait() {
if (!IsDummyThread()) {
return;
}
if (GetState() != ThreadState::Waiting) {
return;
}
// Block until we can grab the lock.
KScopedSpinLock lk{dummy_wait_lock};
}
void KThread::IfDummyThreadBeginWait() {
if (!IsDummyThread()) {
return;
}
// Ensure the thread will block when IfDummyThreadTryWait is called.
dummy_wait_lock.Lock();
}
void KThread::IfDummyThreadEndWait() {
if (!IsDummyThread()) {
return;
}
// Ensure the thread will no longer block.
dummy_wait_lock.Unlock();
}
void KThread::BeginWait(KThreadQueue* queue) { void KThread::BeginWait(KThreadQueue* queue) {
// Set our state as waiting. // Set our state as waiting.
SetState(ThreadState::Waiting); SetState(ThreadState::Waiting);
// Set our wait queue. // Set our wait queue.
wait_queue = queue; wait_queue = queue;
// Special case for dummy threads to ensure they block.
IfDummyThreadBeginWait();
} }
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) { void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1099,7 +1133,16 @@ void KThread::EndWait(ResultCode wait_result_) {
// If we're waiting, notify our queue that we're available. // If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) { if (GetState() == ThreadState::Waiting) {
if (wait_queue == nullptr) {
// This should never happen, but avoid a hard crash below to get this logged.
ASSERT_MSG(false, "wait_queue is nullptr!");
return;
}
wait_queue->EndWait(this, wait_result_); wait_queue->EndWait(this, wait_result_);
// Special case for dummy threads to wakeup if necessary.
IfDummyThreadEndWait();
} }
} }

View file

@@ -112,6 +112,7 @@ private:
public: public:
static constexpr s32 DefaultThreadPriority = 44; static constexpr s32 DefaultThreadPriority = 44;
static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1; static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
explicit KThread(KernelCore& kernel_); explicit KThread(KernelCore& kernel_);
~KThread() override; ~KThread() override;
@@ -553,8 +554,12 @@ public:
return wait_reason_for_debugging; return wait_reason_for_debugging;
} }
[[nodiscard]] ThreadType GetThreadTypeForDebugging() const { [[nodiscard]] ThreadType GetThreadType() const {
return thread_type_for_debugging; return thread_type;
}
[[nodiscard]] bool IsDummyThread() const {
return GetThreadType() == ThreadType::Dummy;
} }
void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) { void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
@@ -631,6 +636,14 @@ public:
return condvar_key; return condvar_key;
} }
// Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and
// therefore will not block on guest kernel synchronization primitives. These methods handle
// blocking as needed.
void IfDummyThreadTryWait();
void IfDummyThreadBeginWait();
void IfDummyThreadEndWait();
private: private:
static constexpr size_t PriorityInheritanceCountMax = 10; static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer { union SyncObjectBuffer {
@@ -749,16 +762,17 @@ private:
bool resource_limit_release_hint{}; bool resource_limit_release_hint{};
StackParameters stack_parameters{}; StackParameters stack_parameters{};
KSpinLock context_guard{}; KSpinLock context_guard{};
KSpinLock dummy_wait_lock{};
// For emulation // For emulation
std::shared_ptr<Common::Fiber> host_context{}; std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{}; bool is_single_core{};
ThreadType thread_type{};
// For debugging // For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging; std::vector<KSynchronizationObject*> wait_objects_for_debugging;
VAddr mutex_wait_address_for_debugging{}; VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{}; ThreadWaitReasonForDebugging wait_reason_for_debugging{};
ThreadType thread_type_for_debugging{};
public: public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree; using ConditionVariableThreadTreeType = ConditionVariableThreadTree;

View file

@@ -301,12 +301,10 @@ struct KernelCore::Impl {
// Gets the dummy KThread for the caller, allocating a new one if this is the first time // Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread() { KThread* GetHostDummyThread() {
auto make_thread = [this]() { auto make_thread = [this]() {
std::lock_guard lk(dummy_thread_lock); KThread* thread = KThread::Create(system.Kernel());
auto& thread = dummy_threads.emplace_back(std::make_unique<KThread>(system.Kernel())); ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
KAutoObject::Create(thread.get());
ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return thread.get(); return thread;
}; };
thread_local KThread* saved_thread = make_thread(); thread_local KThread* saved_thread = make_thread();
@@ -731,7 +729,6 @@ struct KernelCore::Impl {
std::mutex server_sessions_lock; std::mutex server_sessions_lock;
std::mutex registered_objects_lock; std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock; std::mutex registered_in_use_objects_lock;
std::mutex dummy_thread_lock;
std::atomic<u32> next_object_id{0}; std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin}; std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
@@ -788,9 +785,6 @@ struct KernelCore::Impl {
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
// Specifically tracked to be automatically destroyed with kernel
std::vector<std::unique_ptr<KThread>> dummy_threads;
bool is_multicore{}; bool is_multicore{};
std::atomic_bool is_shutting_down{}; std::atomic_bool is_shutting_down{};
bool is_phantom_mode_for_singlecore{}; bool is_phantom_mode_for_singlecore{};

View file

@@ -12,6 +12,7 @@
#include "common/scope_exit.h" #include "common/scope_exit.h"
#include "common/thread.h" #include "common/thread.h"
#include "core/hle/kernel/k_session.h" #include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/service_thread.h"
@@ -50,6 +51,10 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
kernel.RegisterHostThread(); kernel.RegisterHostThread();
// Ensure the dummy thread allocated for this host thread is closed on exit.
auto* dummy_thread = kernel.GetCurrentEmuThread();
SCOPE_EXIT({ dummy_thread->Close(); });
while (true) { while (true) {
std::function<void()> task; std::function<void()> task;

View file

@@ -95,7 +95,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList(
std::size_t row = 0; std::size_t row = 0;
auto add_threads = [&](const std::vector<Kernel::KThread*>& threads) { auto add_threads = [&](const std::vector<Kernel::KThread*>& threads) {
for (std::size_t i = 0; i < threads.size(); ++i) { for (std::size_t i = 0; i < threads.size(); ++i) {
if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) { if (threads[i]->GetThreadType() == Kernel::ThreadType::User) {
item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i], system)); item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i], system));
item_list.back()->row = row; item_list.back()->row = row;
} }
@@ -153,7 +153,7 @@ QString WaitTreeCallstack::GetText() const {
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const { std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list; std::vector<std::unique_ptr<WaitTreeItem>> list;
if (thread.GetThreadTypeForDebugging() != Kernel::ThreadType::User) { if (thread.GetThreadType() != Kernel::ThreadType::User) {
return list; return list;
} }