// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <atomic>
#include <cinttypes>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <vector>

#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace {

constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;

static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32 stack_top,
                                 u32 entry_point, u32 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.cpu_registers[15] = entry_point;
    context.cpu_registers[13] = stack_top;
    context.fpscr = 0;
}

static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, u64 stack_top,
                                 u64 entry_point, u64 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
    context.pc = entry_point;
    context.sp = stack_top;
    context.fpcr = 0;
    context.fpsr = 0;
}

} // namespace
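
// A short reference for the register assignments above: on AArch32, r0 carries the entry
// argument, r13 is the stack pointer, and r15 is the program counter, while the AArch64 context
// uses the named pc/sp fields instead. The value written to x18 is simply a random 64-bit value
// with the low bit forced to 1, which presumably serves as a per-thread cookie for the guest.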

namespace Kernel {

namespace {

struct ThreadLocalRegion {
    static constexpr std::size_t MessageBufferSize = 0x100;
    std::array<u32, MessageBufferSize / sizeof(u32)> message_buffer;
    std::atomic_uint16_t disable_count;
    std::atomic_uint16_t interrupt_flag;
};
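
// This struct mirrors the layout of the guest's thread-local region: a 0x100-byte IPC message
// buffer followed by the 16-bit disable_count and interrupt_flag fields. The accessors further
// down (GetUserDisableCount, SetInterruptFlag, ClearInterruptFlag) read and write those two
// fields directly in guest memory via offsetof, so the field order here must match the guest's.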

class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
public:
    explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel)
        : KThreadQueueWithoutEndWait(kernel) {}
};

class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
public:
    explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl)
        : KThreadQueue(kernel), m_wait_list(wl) {}

    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Remove the thread from the wait list.
        m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

        // Invoke the base cancel wait handler.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }

private:
    KThread::WaiterList* m_wait_list{};
};

} // namespace

KThread::KThread(KernelCore& kernel)
    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}

KThread::~KThread() = default;

Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top,
                           s32 prio, s32 virt_core, KProcess* owner, ThreadType type) {
    // Assert parameters are valid.
    ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
           (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
    ASSERT((owner != nullptr) || (type != ThreadType::User));
    ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));

    // Convert the virtual core to a physical core.
    const s32 phys_core = Core::Hardware::VirtualToPhysicalCoreMap[virt_core];
    ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));

    // First, clear the TLS address.
    m_tls_address = {};

    // Next, assert things based on the type.
    switch (type) {
    case ThreadType::Main:
        ASSERT(arg == 0);
        [[fallthrough]];
    case ThreadType::User:
        ASSERT(((owner == nullptr) ||
                (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
        ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
                (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
        break;
    case ThreadType::HighPriority:
    case ThreadType::Dummy:
        break;
    case ThreadType::Kernel:
        UNIMPLEMENTED();
        break;
    default:
        ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
        break;
    }
    m_thread_type = type;

    // Set the ideal core ID and affinity mask.
    m_virtual_ideal_core_id = virt_core;
    m_physical_ideal_core_id = phys_core;
    m_virtual_affinity_mask = 1ULL << virt_core;
    m_physical_affinity_mask.SetAffinity(phys_core, true);

    // Set the thread state.
    m_thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
                         ? ThreadState::Runnable
                         : ThreadState::Initialized;

    // Set TLS address.
    m_tls_address = 0;

    // Set parent and condvar tree.
    m_parent = nullptr;
    m_condvar_tree = nullptr;

    // Set sync booleans.
    m_signaled = false;
    m_termination_requested = false;
    m_wait_cancelled = false;
    m_cancellable = false;

    // Set core ID and wait result.
    m_core_id = phys_core;
    m_wait_result = ResultNoSynchronizationObject;

    // Set priorities.
    m_priority = prio;
    m_base_priority = prio;

    // Initialize sleeping queue.
    m_wait_queue = nullptr;

    // Set suspend flags.
    m_suspend_request_flags = 0;
    m_suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);

    // We're neither debug attached, nor are we nesting our priority inheritance.
    m_debug_attached = false;
    m_priority_inheritance_count = 0;

    // We haven't been scheduled, and we have done no light IPC.
    m_schedule_count = -1;
    m_last_scheduled_tick = 0;
    m_light_ipc_data = nullptr;

    // We're not waiting for a lock, and we haven't disabled migration.
    m_waiting_lock_info = nullptr;
    m_num_core_migration_disables = 0;

    // We have no waiters, but we do have an entrypoint.
    m_num_kernel_waiters = 0;

    // Set our current core id.
    m_current_core_id = phys_core;

    // We haven't released our resource limit hint, and we've spent no time on the cpu.
    m_resource_limit_release_hint = false;
    m_cpu_time = 0;

    // Set debug context.
    m_stack_top = user_stack_top;
    m_argument = arg;

    // Clear our stack parameters.
    std::memset(static_cast<void*>(std::addressof(this->GetStackParameters())), 0,
                sizeof(StackParameters));

    // Set parent, if relevant.
    if (owner != nullptr) {
        // Setup the TLS, if needed.
        if (type == ThreadType::User) {
            R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
        }

        m_parent = owner;
        m_parent->Open();
    }

    // Initialize thread context.
    ResetThreadContext64(m_thread_context_64, GetInteger(user_stack_top), GetInteger(func), arg);
    ResetThreadContext32(m_thread_context_32, static_cast<u32>(GetInteger(user_stack_top)),
                         static_cast<u32>(GetInteger(func)), static_cast<u32>(arg));

    // Setup the stack parameters.
    StackParameters& sp = this->GetStackParameters();
    sp.cur_thread = this;
    sp.disable_count = 1;
    this->SetInExceptionHandler();

    // Set thread ID.
    m_thread_id = m_kernel.CreateNewThreadID();

    // We initialized!
    m_initialized = true;

    // Register ourselves with our parent process.
    if (m_parent != nullptr) {
        m_parent->RegisterThread(this);
        if (m_parent->IsSuspended()) {
            RequestSuspend(SuspendType::Process);
        }
    }

    R_SUCCEED();
}

Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
                                 KProcessAddress user_stack_top, s32 prio, s32 core,
                                 KProcess* owner, ThreadType type,
                                 std::function<void()>&& init_func) {
    // Initialize the thread.
    R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));

    // Initialize emulation parameters.
    thread->m_host_context = std::make_shared<Common::Fiber>(std::move(init_func));

    R_SUCCEED();
}

Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {
    // Initialize the thread.
    R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy));

    // Initialize emulation parameters.
    thread->m_stack_parameters.disable_count = 0;

    R_SUCCEED();
}

Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
                              ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
}

Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
                              ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
}

Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                             KThreadFunction func, uintptr_t arg, s32 virt_core) {
    R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
                              ThreadType::HighPriority,
                              system.GetCpuManager().GetShutdownThreadStartFunc()));
}

Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                     uintptr_t arg, KProcessAddress user_stack_top, s32 prio,
                                     s32 virt_core, KProcess* owner) {
    system.Kernel().GlobalSchedulerContext().AddThread(thread);
    R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
                              ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
}

Result KThread::InitializeServiceThread(Core::System& system, KThread* thread,
                                        std::function<void()>&& func, s32 prio, s32 virt_core,
                                        KProcess* owner) {
    system.Kernel().GlobalSchedulerContext().AddThread(thread);
    std::function<void()> func2{[&system, func_{std::move(func)}] {
        // Similar to UserModeThreadStarter.
        system.Kernel().CurrentScheduler()->OnThreadStart();

        // Run the guest function.
        func_();

        // Exit.
        Svc::ExitThread(system);
    }};

    R_RETURN(InitializeThread(thread, {}, {}, {}, prio, virt_core, owner, ThreadType::HighPriority,
                              std::move(func2)));
}

void KThread::PostDestroy(uintptr_t arg) {
    KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL);
    const bool resource_limit_release_hint = (arg & 1);
    const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
    if (owner != nullptr) {
        owner->GetResourceLimit()->Release(LimitableResource::ThreadCountMax, 1, hint_value);
        owner->Close();
    }
}

void KThread::Finalize() {
    // If the thread has an owner process, unregister it.
    if (m_parent != nullptr) {
        m_parent->UnregisterThread(this);
    }

    // If the thread has a local region, delete it.
    if (m_tls_address != 0) {
        ASSERT(m_parent->DeleteThreadLocalRegion(m_tls_address).IsSuccess());
    }

    // Release any waiters.
    {
        ASSERT(m_waiting_lock_info == nullptr);
        KScopedSchedulerLock sl{m_kernel};

        // Check that we have no kernel waiters.
        ASSERT(m_num_kernel_waiters == 0);

        auto it = m_held_lock_info_list.begin();
        while (it != m_held_lock_info_list.end()) {
            // Get the lock info.
            auto* const lock_info = std::addressof(*it);

            // The lock shouldn't have a kernel waiter.
            ASSERT(!lock_info->GetIsKernelAddressKey());

            // Remove all waiters.
            while (lock_info->GetWaiterCount() != 0) {
                // Get the front waiter.
                KThread* const waiter = lock_info->GetHighestPriorityWaiter();

                // Remove it from the lock.
                if (lock_info->RemoveWaiter(waiter)) {
                    ASSERT(lock_info->GetWaiterCount() == 0);
                }

                // Cancel the thread's wait.
                waiter->CancelWait(ResultInvalidState, true);
            }

            // Remove the held lock from our list.
            it = m_held_lock_info_list.erase(it);

            // Free the lock info.
            LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
        }
    }

    // Release host emulation members.
    m_host_context.reset();

    // Perform inherited finalization.
    KSynchronizationObject::Finalize();
}

bool KThread::IsSignaled() const {
    return m_signaled;
}

void KThread::OnTimer() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // If we're waiting, cancel the wait.
    if (this->GetState() == ThreadState::Waiting) {
        m_wait_queue->CancelWait(this, ResultTimedOut, false);
    }
}

void KThread::StartTermination() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Release user exception and unpin, if relevant.
    if (m_parent != nullptr) {
        m_parent->ReleaseUserException(this);
        if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
            m_parent->UnpinCurrentThread();
        }
    }

    // Set state to terminated.
    this->SetState(ThreadState::Terminated);

    // Clear the thread's status as running in parent.
    if (m_parent != nullptr) {
        m_parent->ClearRunningThread(this);
    }

    // Signal.
    m_signaled = true;
    KSynchronizationObject::NotifyAvailable();

    // Clear previous thread in KScheduler.
    KScheduler::ClearPreviousThread(m_kernel, this);

    // Register terminated dpc flag.
    this->RegisterDpc(DpcFlag::Terminated);
}

void KThread::FinishTermination() {
    // Ensure that the thread is not executing on any core.
    if (m_parent != nullptr) {
        for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
            KThread* core_thread{};
            do {
                core_thread = m_kernel.Scheduler(i).GetSchedulerCurrentThread();
            } while (core_thread == this);
        }
    }

    // Close the thread.
    this->Close();
}

void KThread::DoWorkerTaskImpl() {
    // Finish the termination that was begun by Exit().
    this->FinishTermination();
}

void KThread::Pin(s32 current_core) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Set ourselves as pinned.
    GetStackParameters().is_pinned = true;

    // Disable core migration.
    ASSERT(m_num_core_migration_disables == 0);
    {
        ++m_num_core_migration_disables;

        // Save our ideal state to restore when we're unpinned.
        m_original_physical_ideal_core_id = m_physical_ideal_core_id;
        m_original_physical_affinity_mask = m_physical_affinity_mask;

        // Bind ourselves to this core.
        const s32 active_core = this->GetActiveCore();

        this->SetActiveCore(current_core);
        m_physical_ideal_core_id = current_core;
        m_physical_affinity_mask.SetAffinityMask(1ULL << current_core);

        if (active_core != current_core ||
            m_physical_affinity_mask.GetAffinityMask() !=
                m_original_physical_affinity_mask.GetAffinityMask()) {
            KScheduler::OnThreadAffinityMaskChanged(m_kernel, this,
                                                    m_original_physical_affinity_mask, active_core);
        }
    }

    // Disallow performing thread suspension.
    {
        // Update our allow flags.
        m_suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
                                           static_cast<u32>(ThreadState::SuspendShift)));

        // Update our state.
        this->UpdateState();
    }

    // TODO(bunnei): Update our SVC access permissions.
    ASSERT(m_parent != nullptr);
}

void KThread::Unpin() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Set ourselves as unpinned.
    this->GetStackParameters().is_pinned = false;

    // Enable core migration.
    ASSERT(m_num_core_migration_disables == 1);
    {
        m_num_core_migration_disables--;

        // Restore our original state.
        const KAffinityMask old_mask = m_physical_affinity_mask;

        m_physical_ideal_core_id = m_original_physical_ideal_core_id;
        m_physical_affinity_mask = m_original_physical_affinity_mask;

        if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
            const s32 active_core = this->GetActiveCore();

            if (!m_physical_affinity_mask.GetAffinity(active_core)) {
                if (m_physical_ideal_core_id >= 0) {
                    this->SetActiveCore(m_physical_ideal_core_id);
                } else {
                    this->SetActiveCore(static_cast<s32>(
                        Common::BitSize<u64>() - 1 -
                        std::countl_zero(m_physical_affinity_mask.GetAffinityMask())));
                }
            }
            KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
        }
    }

    // Allow performing thread suspension (if termination hasn't been requested).
    if (!this->IsTerminationRequested()) {
        // Update our allow flags.
        m_suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
                                          static_cast<u32>(ThreadState::SuspendShift)));

        // Update our state.
        this->UpdateState();
    }

    // TODO(bunnei): Update our SVC access permissions.
    ASSERT(m_parent != nullptr);

    // Resume any threads that began waiting on us while we were pinned.
    for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) {
        it->EndWait(ResultSuccess);
    }
}
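
// Pin/Unpin work in tandem: pinning bumps m_num_core_migration_disables, saves the current
// physical ideal core and affinity mask, and restricts the thread to the pinning core; unpinning
// restores the saved values and wakes anything that queued itself on m_pinned_waiter_list while
// the thread was pinned (see SetCoreMask and SetActivity below).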

u16 KThread::GetUserDisableCount() const {
    if (!this->IsUserThread()) {
        // We only emulate TLS for user threads
        return {};
    }

    auto& memory = this->GetOwnerProcess()->GetMemory();
    return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count));
}

void KThread::SetInterruptFlag() {
    if (!this->IsUserThread()) {
        // We only emulate TLS for user threads
        return;
    }

    auto& memory = this->GetOwnerProcess()->GetMemory();
    memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
}

void KThread::ClearInterruptFlag() {
    if (!this->IsUserThread()) {
        // We only emulate TLS for user threads
        return;
    }

    auto& memory = this->GetOwnerProcess()->GetMemory();
    memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
}

Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
    KScopedSchedulerLock sl{m_kernel};

    // Get the virtual mask.
    *out_ideal_core = m_virtual_ideal_core_id;
    *out_affinity_mask = m_virtual_affinity_mask;

    R_SUCCEED();
}

Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
    KScopedSchedulerLock sl{m_kernel};
    ASSERT(m_num_core_migration_disables >= 0);

    // Select between core mask and original core mask.
    if (m_num_core_migration_disables == 0) {
        *out_ideal_core = m_physical_ideal_core_id;
        *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask();
    } else {
        *out_ideal_core = m_original_physical_ideal_core_id;
        *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask();
    }

    R_SUCCEED();
}

Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
    ASSERT(m_parent != nullptr);
    ASSERT(v_affinity_mask != 0);
    KScopedLightLock lk(m_activity_pause_lock);

    // Set the core mask.
    u64 p_affinity_mask = 0;
    {
        KScopedSchedulerLock sl(m_kernel);
        ASSERT(m_num_core_migration_disables >= 0);

        // If we're updating, set our ideal virtual core.
        if (core_id != Svc::IdealCoreNoUpdate) {
            m_virtual_ideal_core_id = core_id;
        } else {
            // Preserve our ideal core id.
            core_id = m_virtual_ideal_core_id;
            R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
        }

        // Set our affinity mask.
        m_virtual_affinity_mask = v_affinity_mask;

        // Translate the virtual core to a physical core.
        if (core_id >= 0) {
            core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
        }

        // Translate the virtual affinity mask to a physical one.
        while (v_affinity_mask != 0) {
            const u64 next = std::countr_zero(v_affinity_mask);
            v_affinity_mask &= ~(1ULL << next);
            p_affinity_mask |= (1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next]);
        }

        // If we haven't disabled migration, perform an affinity change.
        if (m_num_core_migration_disables == 0) {
            const KAffinityMask old_mask = m_physical_affinity_mask;

            // Set our new ideals.
            m_physical_ideal_core_id = core_id;
            m_physical_affinity_mask.SetAffinityMask(p_affinity_mask);

            if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                const s32 active_core = GetActiveCore();

                if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) {
                    const s32 new_core = static_cast<s32>(
                        m_physical_ideal_core_id >= 0
                            ? m_physical_ideal_core_id
                            : Common::BitSize<u64>() - 1 -
                                  std::countl_zero(m_physical_affinity_mask.GetAffinityMask()));
                    SetActiveCore(new_core);
                }
                KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
            }
        } else {
            // Otherwise, we edit the original affinity for restoration later.
            m_original_physical_ideal_core_id = core_id;
            m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
        }
    }

    // Update the pinned waiter list.
    ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, std::addressof(m_pinned_waiter_list));
    {
        bool retry_update{};
        do {
            // Lock the scheduler.
            KScopedSchedulerLock sl(m_kernel);

            // Don't do any further management if our termination has been requested.
            R_SUCCEED_IF(this->IsTerminationRequested());

            // By default, we won't need to retry.
            retry_update = false;

            // Check if the thread is currently running.
            bool thread_is_current{};
            s32 thread_core;
            for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
                 ++thread_core) {
                if (m_kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
                    thread_is_current = true;
                    break;
                }
            }

            // If the thread is currently running, check whether it's no longer allowed under the
            // new mask.
            if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
                // If the thread is pinned, we want to wait until it's not pinned.
                if (this->GetStackParameters().is_pinned) {
                    // Verify that the current thread isn't terminating.
                    R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
                             ResultTerminationRequested);

                    // Wait until the thread isn't pinned any more.
                    m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
                    GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
                } else {
                    // If the thread isn't pinned, release the scheduler lock and retry until it's
                    // not current.
                    retry_update = true;
                }
            }
        } while (retry_update);
    }

    R_SUCCEED();
}
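
// SetCoreMask runs in two phases: first the virtual ideal core and affinity mask are translated
// to physical values through Core::Hardware::VirtualToPhysicalCoreMap and applied under the
// scheduler lock (or stashed in the m_original_* members while migration is disabled), then the
// caller loops until this thread is no longer running on a now-disallowed core, waiting on the
// pinned waiter list if the thread is currently pinned.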

void KThread::SetBasePriority(s32 value) {
    ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);

    KScopedSchedulerLock sl{m_kernel};

    // Change our base priority.
    m_base_priority = value;

    // Perform a priority restoration.
    RestorePriority(m_kernel, this);
}

KThread* KThread::GetLockOwner() const {
    return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr;
}

void KThread::IncreaseBasePriority(s32 priority) {
    ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority);
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
    ASSERT(!this->GetStackParameters().is_pinned);

    // Set our base priority.
    if (m_base_priority > priority) {
        m_base_priority = priority;

        // Perform a priority restoration.
        RestorePriority(m_kernel, this);
    }
}

void KThread::RequestSuspend(SuspendType type) {
    KScopedSchedulerLock sl{m_kernel};

    // Note the request in our flags.
    m_suspend_request_flags |=
        (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));

    // Try to perform the suspend.
    this->TrySuspend();
}

void KThread::Resume(SuspendType type) {
    KScopedSchedulerLock sl{m_kernel};

    // Clear the request in our flags.
    m_suspend_request_flags &=
        ~(1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));

    // Update our state.
    this->UpdateState();
}

void KThread::WaitCancel() {
    KScopedSchedulerLock sl{m_kernel};

    // Check if we're waiting and cancellable.
    if (this->GetState() == ThreadState::Waiting && m_cancellable) {
        m_wait_cancelled = false;
        m_wait_queue->CancelWait(this, ResultCancelled, true);
    } else {
        // Otherwise, note that we cancelled a wait.
        m_wait_cancelled = true;
    }
}

void KThread::TrySuspend() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
    ASSERT(this->IsSuspendRequested());

    // Ensure that we have no waiters.
    if (this->GetNumKernelWaiters() > 0) {
        return;
    }
    ASSERT(this->GetNumKernelWaiters() == 0);

    // Perform the suspend.
    this->UpdateState();
}

void KThread::UpdateState() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Set our suspend flags in state.
    const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
    const auto new_state =
        static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
    m_thread_state.store(new_state, std::memory_order_relaxed);

    // Note the state change in scheduler.
    if (new_state != old_state) {
        KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
    }
}

void KThread::Continue() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Clear our suspend flags in state.
    const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
    m_thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);

    // Note the state change in scheduler.
    KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
}
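
// m_thread_state packs two things into one atomic value: the base state in the bits covered by
// ThreadState::Mask, and the per-type suspend flags shifted up by ThreadState::SuspendShift.
// UpdateState() recombines them whenever a suspend request changes, while Continue() simply drops
// the suspend bits and keeps the base state.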

void KThread::CloneFpuStatus() {
    // We shouldn't reach here when starting kernel threads.
    ASSERT(this->GetOwnerProcess() != nullptr);
    ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));

    if (this->GetOwnerProcess()->Is64Bit()) {
        // Clone FPSR and FPCR.
        ThreadContext64 cur_ctx{};
        m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);

        this->GetContext64().fpcr = cur_ctx.fpcr;
        this->GetContext64().fpsr = cur_ctx.fpsr;
    } else {
        // Clone FPSCR.
        ThreadContext32 cur_ctx{};
        m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);

        this->GetContext32().fpscr = cur_ctx.fpscr;
    }
}

Result KThread::SetActivity(Svc::ThreadActivity activity) {
    // Lock ourselves.
    KScopedLightLock lk(m_activity_pause_lock);

    // Set the activity.
    {
        // Lock the scheduler.
        KScopedSchedulerLock sl(m_kernel);

        // Verify our state.
        const auto cur_state = this->GetState();
        R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
                 ResultInvalidState);

        // Either pause or resume.
        if (activity == Svc::ThreadActivity::Paused) {
            // Verify that we're not suspended.
            R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

            // Suspend.
            this->RequestSuspend(SuspendType::Thread);
        } else {
            ASSERT(activity == Svc::ThreadActivity::Runnable);

            // Verify that we're suspended.
            R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

            // Resume.
            this->Resume(SuspendType::Thread);
        }
    }

    // If the thread is now paused, update the pinned waiter list.
    if (activity == Svc::ThreadActivity::Paused) {
        ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel,
                                                        std::addressof(m_pinned_waiter_list));

        bool thread_is_current{};
        do {
            // Lock the scheduler.
            KScopedSchedulerLock sl(m_kernel);

            // Don't do any further management if our termination has been requested.
            R_SUCCEED_IF(this->IsTerminationRequested());

            // By default, treat the thread as not current.
            thread_is_current = false;

            // Check whether the thread is pinned.
            if (this->GetStackParameters().is_pinned) {
                // Verify that the current thread isn't terminating.
                R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
                         ResultTerminationRequested);

                // Wait until the thread isn't pinned any more.
                m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
                GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
            } else {
                // Check if the thread is currently running.
                // If it is, we'll need to retry.
                for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
                    if (m_kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
                        thread_is_current = true;
                        break;
                    }
                }
            }
        } while (thread_is_current);
    }

    R_SUCCEED();
}

Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {
    // Lock ourselves.
    KScopedLightLock lk{m_activity_pause_lock};

    // Get the context.
    {
        // Lock the scheduler.
        KScopedSchedulerLock sl{m_kernel};

        // Verify that we're suspended.
        R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

        // If we're not terminating, get the thread's user context.
        if (!this->IsTerminationRequested()) {
            if (m_parent->Is64Bit()) {
                // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
                auto context = GetContext64();
                context.pstate &= 0xFF0FFE20;
                out.resize_destructive(sizeof(context));
                std::memcpy(out.data(), std::addressof(context), sizeof(context));
            } else {
                // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
                auto context = GetContext32();
                context.cpsr &= 0xFF0FFE20;
                out.resize_destructive(sizeof(context));
                std::memcpy(out.data(), std::addressof(context), sizeof(context));
            }
        }
    }

    R_SUCCEED();
}

void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Set ourselves as the lock's owner.
    lock_info->SetOwner(this);

    // Add the lock to our held list.
    m_held_lock_info_list.push_front(*lock_info);
}

KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(KProcessAddress address_key,
                                                                bool is_kernel_address_key) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Try to find an existing held lock.
    for (auto& held_lock : m_held_lock_info_list) {
        if (held_lock.GetAddressKey() == address_key &&
            held_lock.GetIsKernelAddressKey() == is_kernel_address_key) {
            return std::addressof(held_lock);
        }
    }

    return nullptr;
}

void KThread::AddWaiterImpl(KThread* thread) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
    ASSERT(thread->GetConditionVariableTree() == nullptr);

    // Get the thread's address key.
    const auto address_key = thread->GetAddressKey();
    const auto is_kernel_address_key = thread->GetIsKernelAddressKey();

    // Keep track of how many kernel waiters we have.
    if (is_kernel_address_key) {
        ASSERT((m_num_kernel_waiters++) >= 0);
        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
    }

    // Get the relevant lock info.
    auto* lock_info = this->FindHeldLock(address_key, is_kernel_address_key);
    if (lock_info == nullptr) {
        // Create a new lock for the address key.
        lock_info =
            LockWithPriorityInheritanceInfo::Create(m_kernel, address_key, is_kernel_address_key);

        // Add the new lock to our list.
        this->AddHeldLock(lock_info);
    }

    // Add the thread as waiter to the lock info.
    lock_info->AddWaiter(thread);
}

void KThread::RemoveWaiterImpl(KThread* thread) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Keep track of how many kernel waiters we have.
    if (thread->GetIsKernelAddressKey()) {
        ASSERT((m_num_kernel_waiters--) > 0);
        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
    }

    // Get the info for the lock the thread is waiting on.
    auto* lock_info = thread->GetWaitingLockInfo();
    ASSERT(lock_info->GetOwner() == this);

    // Remove the waiter.
    if (lock_info->RemoveWaiter(thread)) {
        m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
        LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
    }
}

void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));

    while (thread != nullptr) {
        // We want to inherit priority where possible.
        s32 new_priority = thread->GetBasePriority();
        for (const auto& held_lock : thread->m_held_lock_info_list) {
            new_priority =
                std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
        }

        // If the priority we would inherit is not different from ours, don't do anything.
        if (new_priority == thread->GetPriority()) {
            return;
        }

        // Get the owner of whatever lock this thread is waiting on.
        KThread* const lock_owner = thread->GetLockOwner();

        // If the thread is waiting on some lock, remove it as a waiter to prevent violating red
        // black tree invariants.
        if (lock_owner != nullptr) {
            lock_owner->RemoveWaiterImpl(thread);
        }

        // Ensure we don't violate condition variable red black tree invariants.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            BeforeUpdatePriority(kernel, cv_tree, thread);
        }

        // Change the priority.
        const s32 old_priority = thread->GetPriority();
        thread->SetPriority(new_priority);

        // Restore the condition variable, if relevant.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            AfterUpdatePriority(kernel, cv_tree, thread);
        }

        // If we removed the thread from some lock's waiting list, add it back.
        if (lock_owner != nullptr) {
            lock_owner->AddWaiterImpl(thread);
        }

        // Update the scheduler.
        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);

        // Continue inheriting priority.
        thread = lock_owner;
    }
}

void KThread::AddWaiter(KThread* thread) {
    this->AddWaiterImpl(thread);

    // If the thread has a higher priority than us, we should inherit.
    if (thread->GetPriority() < this->GetPriority()) {
        RestorePriority(m_kernel, this);
    }
}

void KThread::RemoveWaiter(KThread* thread) {
    this->RemoveWaiterImpl(thread);

    // If our priority is the same as the thread's (and we've inherited), we may need to restore to
    // lower priority.
    if (this->GetPriority() == thread->GetPriority() &&
        this->GetPriority() < this->GetBasePriority()) {
        RestorePriority(m_kernel, this);
    }
}
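
// Releases this thread's ownership of the lock identified by the given address key, choosing the
// highest-priority waiter as the next owner. Reports through out_has_waiters whether any waiters
// remain on the lock, and returns the next owner (or nullptr if nothing was being waited on).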
KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
                                    bool is_kernel_address_key_) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Get the relevant lock info.
    auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_);
    if (lock_info == nullptr) {
        *out_has_waiters = false;
        return nullptr;
    }

    // Remove the lock info from our held list.
    m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));

    // Keep track of how many kernel waiters we have.
    if (lock_info->GetIsKernelAddressKey()) {
        m_num_kernel_waiters -= lock_info->GetWaiterCount();
        ASSERT(m_num_kernel_waiters >= 0);
        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
    }

    ASSERT(lock_info->GetWaiterCount() > 0);

    // Remove the highest priority waiter from the lock to be the next owner.
    KThread* next_lock_owner = lock_info->GetHighestPriorityWaiter();
    if (lock_info->RemoveWaiter(next_lock_owner)) {
        // The new owner was the only waiter.
        *out_has_waiters = false;

        // Free the lock info, since it has no waiters.
        LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
    } else {
        // There are additional waiters on the lock.
        *out_has_waiters = true;

        // Add the lock to the new owner's held list.
        next_lock_owner->AddHeldLock(lock_info);

        // Keep track of any kernel waiters for the new owner.
        if (lock_info->GetIsKernelAddressKey()) {
            next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount();
            ASSERT(next_lock_owner->m_num_kernel_waiters > 0);

            // NOTE: No need to set scheduler update needed, because we will have already done so
            // when removing earlier.
        }
    }

    // If our priority is the same as the next owner's (and we've inherited), we may need to
    // restore to lower priority.
    if (this->GetPriority() == next_lock_owner->GetPriority() &&
        this->GetPriority() < this->GetBasePriority()) {
        RestorePriority(m_kernel, this);
        // NOTE: No need to restore priority on the next lock owner, because it was already the
        // highest priority waiter on the lock.
    }

    // Return the next lock owner.
    return next_lock_owner;
}

Result KThread::Run() {
    while (true) {
        KScopedSchedulerLock lk{m_kernel};

        // If either this thread or the current thread is requesting termination, note it.
        R_UNLESS(!this->IsTerminationRequested(), ResultTerminationRequested);
        R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);

        // Ensure our thread state is correct.
        R_UNLESS(this->GetState() == ThreadState::Initialized, ResultInvalidState);

        // If the current thread has been asked to suspend, suspend it and retry.
        if (GetCurrentThread(m_kernel).IsSuspended()) {
            GetCurrentThread(m_kernel).UpdateState();
            continue;
        }

        // If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
        if (KProcess* owner = this->GetOwnerProcess(); owner != nullptr) {
            if (this->IsUserThread() && this->IsSuspended()) {
                this->UpdateState();
            }
            owner->IncrementRunningThreadCount();
        }

        // Open a reference, now that we're running.
        this->Open();

        // Set our state and finish.
        this->SetState(ThreadState::Runnable);

        R_SUCCEED();
    }
}

void KThread::Exit() {
    ASSERT(this == GetCurrentThreadPointer(m_kernel));

    // Release the thread resource hint and running thread count from the parent process.
    if (m_parent != nullptr) {
        m_parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1);
        m_resource_limit_release_hint = true;
        m_parent->DecrementRunningThreadCount();
    }

    // Perform termination.
    {
        KScopedSchedulerLock sl{m_kernel};

        // Disallow all suspension.
        m_suspend_allowed_flags = 0;
        this->UpdateState();

        // Disallow all suspension.
        m_suspend_allowed_flags = 0;

        // Start termination.
        this->StartTermination();

        // Register the thread as a work task.
        KWorkerTaskManager::AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
    }

    UNREACHABLE_MSG("KThread::Exit() would return");
}

Result KThread::Terminate() {
    ASSERT(this != GetCurrentThreadPointer(m_kernel));

    // Request the thread terminate if it hasn't already.
    if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
        // If the thread isn't terminated, wait for it to terminate.
        s32 index;
        KSynchronizationObject* objects[] = {this};
        R_TRY(KSynchronizationObject::Wait(m_kernel, std::addressof(index), objects, 1,
                                           Svc::WaitInfinite));
    }

    R_SUCCEED();
}

ThreadState KThread::RequestTerminate() {
    ASSERT(this != GetCurrentThreadPointer(m_kernel));

    KScopedSchedulerLock sl{m_kernel};

    // Determine if this is the first termination request.
    const bool first_request = [&]() -> bool {
        // Perform an atomic compare-and-swap from false to true.
        bool expected = false;
        return m_termination_requested.compare_exchange_strong(expected, true);
    }();

    // If this is the first request, start termination procedure.
    if (first_request) {
        // If the thread is in initialized state, just change state to terminated.
        if (this->GetState() == ThreadState::Initialized) {
            m_thread_state = ThreadState::Terminated;
            return ThreadState::Terminated;
        }

        // Register the terminating dpc.
        this->RegisterDpc(DpcFlag::Terminating);

        // If the thread is pinned, unpin it.
        if (this->GetStackParameters().is_pinned) {
            this->GetOwnerProcess()->UnpinThread(this);
        }

        // If the thread is suspended, continue it.
        if (this->IsSuspended()) {
            m_suspend_allowed_flags = 0;
            this->UpdateState();
        }

        // Change the thread's priority to be higher than any system thread's.
        this->IncreaseBasePriority(TerminatingThreadPriority);

        // If the thread is runnable, send a termination interrupt to other cores.
        if (this->GetState() == ThreadState::Runnable) {
            if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() &
                                      ~(1ULL << GetCurrentCoreId(m_kernel));
                core_mask != 0) {
                Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask);
            }
        }

        // Wake up the thread.
        if (this->GetState() == ThreadState::Waiting) {
            m_wait_queue->CancelWait(this, ResultTerminationRequested, true);
        }
    }

    return this->GetState();
}

Result KThread::Sleep(s64 timeout) {
    ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
    ASSERT(this == GetCurrentThreadPointer(m_kernel));
    ASSERT(timeout > 0);

    ThreadQueueImplForKThreadSleep wait_queue(m_kernel);
    KHardwareTimer* timer{};
    {
        // Setup the scheduling lock and sleep.
        KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), this, timeout);

        // Check if the thread should terminate.
        if (this->IsTerminationRequested()) {
            slp.CancelSleep();
            R_THROW(ResultTerminationRequested);
        }

        // Wait for the sleep to end.
        wait_queue.SetHardwareTimer(timer);
        this->BeginWait(std::addressof(wait_queue));
        this->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
    }

    R_SUCCEED();
}
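
// NOTE: Dummy threads appear to be host-only threads registered with the kernel so they can
// participate in kernel waits without ever running guest code. Rather than being scheduled, they
// park on a host mutex/condition-variable pair: RequestDummyThreadWait() marks the thread as not
// runnable while the scheduler lock is held, DummyThreadBeginWait() blocks once the lock has been
// released, and DummyThreadEndWait() wakes it again.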
void KThread::RequestDummyThreadWait() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
    ASSERT(this->IsDummyThread());

    // We will block when the scheduler lock is released.
    std::scoped_lock lock{m_dummy_thread_mutex};
    m_dummy_thread_runnable = false;
}

void KThread::DummyThreadBeginWait() {
    if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) {
        // Occurs in single core mode.
        return;
    }

    // Block until runnable is no longer false.
    std::unique_lock lock{m_dummy_thread_mutex};
    m_dummy_thread_cv.wait(lock, [this] { return m_dummy_thread_runnable; });
}

void KThread::DummyThreadEndWait() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
    ASSERT(this->IsDummyThread());

    // Wake up the waiting thread.
    {
        std::scoped_lock lock{m_dummy_thread_mutex};
        m_dummy_thread_runnable = true;
    }
    m_dummy_thread_cv.notify_one();
}

void KThread::BeginWait(KThreadQueue* queue) {
    // Set our state as waiting.
    this->SetState(ThreadState::Waiting);

    // Set our wait queue.
    m_wait_queue = queue;
}
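
// A wait started with BeginWait() completes through one of the following paths: the wait queue is
// notified that an object being waited on was signaled (NotifyAvailable), the wait is ended with a
// final result (EndWait), or the wait is cancelled, e.g. due to timeout or termination
// (CancelWait). All three only act if the thread is still in the Waiting state.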
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) {
    // Lock the scheduler.
    KScopedSchedulerLock sl(m_kernel);

    // If we're waiting, notify our queue that we're available.
    if (this->GetState() == ThreadState::Waiting) {
        m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
    }
}

void KThread::EndWait(Result wait_result) {
    // Lock the scheduler.
    KScopedSchedulerLock sl(m_kernel);

    // If we're waiting, have our queue end the wait.
    if (this->GetState() == ThreadState::Waiting) {
        if (m_wait_queue == nullptr) {
            // This should never happen, but avoid a hard crash below to get this logged.
            ASSERT_MSG(false, "wait_queue is nullptr!");
            return;
        }

        m_wait_queue->EndWait(this, wait_result);
    }
}

void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
    // Lock the scheduler.
    KScopedSchedulerLock sl(m_kernel);

    // If we're waiting, have our queue cancel the wait.
    if (this->GetState() == ThreadState::Waiting) {
        m_wait_queue->CancelWait(this, wait_result, cancel_timer_task);
    }
}

void KThread::SetState(ThreadState state) {
    KScopedSchedulerLock sl{m_kernel};

    // Clear debugging state
    this->SetWaitReasonForDebugging({});

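    // Only the bits covered by ThreadState::Mask are replaced below; the remaining high bits
    // (which hold the suspend flags) are preserved across the state change.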
    const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
    m_thread_state.store(
        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
        std::memory_order_relaxed);
    if (m_thread_state.load(std::memory_order_relaxed) != old_state) {
        KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
    }
}

std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
    return m_host_context;
}

void SetCurrentThread(KernelCore& kernel, KThread* thread) {
    kernel.SetCurrentEmuThread(thread);
}

KThread* GetCurrentThreadPointer(KernelCore& kernel) {
    return kernel.GetCurrentEmuThread();
}

KThread& GetCurrentThread(KernelCore& kernel) {
    return *GetCurrentThreadPointer(kernel);
}

KProcess* GetCurrentProcessPointer(KernelCore& kernel) {
    return GetCurrentThread(kernel).GetOwnerProcess();
}

KProcess& GetCurrentProcess(KernelCore& kernel) {
    return *GetCurrentProcessPointer(kernel);
}

s32 GetCurrentCoreId(KernelCore& kernel) {
    return GetCurrentThread(kernel).GetCurrentCore();
}

Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel) {
    // TODO: per-process memory
    return kernel.System().ApplicationMemory();
}

KScopedDisableDispatch::~KScopedDisableDispatch() {
    // If we are shutting down the kernel, none of this is relevant anymore.
    if (m_kernel.IsShuttingDown()) {
        return;
    }

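    // If this is the last disable-dispatch reference on the current thread, reschedule; otherwise
    // just re-enable dispatch (decrementing the count). The HLE reschedule path is taken when
    // there is no active scheduler or we are in phantom mode for single-core.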
    if (GetCurrentThread(m_kernel).GetDisableDispatchCount() <= 1) {
        auto* scheduler = m_kernel.CurrentScheduler();

        if (scheduler && !m_kernel.IsPhantomModeForSingleCore()) {
            scheduler->RescheduleCurrentCore();
        } else {
            KScheduler::RescheduleCurrentHLEThread(m_kernel);
        }
    } else {
        GetCurrentThread(m_kernel).EnableDispatch();
    }
}

} // namespace Kernel