// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <cstring>
#include <ctime>
#include <memory>
#include <random>
#include <span>

#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_worker_task_manager.h"

#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#ifdef HAS_NCE
#include "core/arm/nce/arm_nce.h"
#endif

namespace Kernel {

namespace {

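// Ask every thread of a process (optionally sparing one) to terminate, then
// wait for each of them to finish terminating.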
Result TerminateChildren(KernelCore& kernel, KProcess* process,
                         const KThread* thread_to_not_terminate) {
    // Request that all children threads terminate.
    {
        KScopedLightLock proc_lk(process->GetListLock());
        KScopedSchedulerLock sl(kernel);

        if (thread_to_not_terminate != nullptr &&
            process->GetPinnedThread(GetCurrentCoreId(kernel)) == thread_to_not_terminate) {
            // NOTE: Here Nintendo unpins the current thread instead of the thread_to_not_terminate.
            // This is valid because the only caller which uses non-nullptr as argument uses
            // GetCurrentThreadPointer(), but it's still notable because it seems incorrect at
            // first glance.
            process->UnpinCurrentThread();
        }

        auto& thread_list = process->GetThreadList();
        for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
            if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
                if (thread->GetState() != ThreadState::Terminated) {
                    thread->RequestTerminate();
                }
            }
        }
    }

    // Wait for all children threads to terminate.
    while (true) {
        // Get the next child.
        KThread* cur_child = nullptr;
        {
            KScopedLightLock proc_lk(process->GetListLock());

            auto& thread_list = process->GetThreadList();
            for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
                if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
                    if (thread->GetState() != ThreadState::Terminated) {
                        if (thread->Open()) {
                            cur_child = thread;
                            break;
                        }
                    }
                }
            }
        }

        // If we didn't find any non-terminated children, we're done.
        if (cur_child == nullptr) {
            break;
        }

        // Terminate and close the thread.
        SCOPE_EXIT({ cur_child->Close(); });

        if (const Result terminate_result = cur_child->Terminate();
            ResultTerminationRequested == terminate_result) {
            R_THROW(terminate_result);
        }
    }

    R_SUCCEED();
}

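// Thread queue used by EnterUserException to wait for the exception thread
// slot; EndWait records which thread ended up claiming the slot.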
class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
private:
    KThread** m_exception_thread;

public:
    explicit ThreadQueueImplForKProcessEnterUserException(KernelCore& kernel, KThread** t)
        : KThreadQueue(kernel), m_exception_thread(t) {}

    virtual void EndWait(KThread* waiting_thread, Result wait_result) override {
        // Set the exception thread.
        *m_exception_thread = waiting_thread;

        // Invoke the base end wait handler.
        KThreadQueue::EndWait(waiting_thread, wait_result);
    }

    virtual void CancelWait(KThread* waiting_thread, Result wait_result,
                            bool cancel_timer_task) override {
        // Remove the thread as a waiter on its mutex owner.
        waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

        // Invoke the base cancel wait handler.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }
};

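// Fill the output span with pseudo-random values, honoring the user-configured
// RNG seed override when one is enabled.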
void GenerateRandom(std::span<u64> out_random) {
    std::mt19937 rng(Settings::values.rng_seed_enabled ? Settings::values.rng_seed.GetValue()
                                                       : static_cast<u32>(std::time(nullptr)));
    std::uniform_int_distribution<u64> distribution;
    std::generate(out_random.begin(), out_random.end(), [&] { return distribution(rng); });
}

} // namespace

void KProcess::Finalize() {
    // Delete the process local region.
    this->DeleteThreadLocalRegion(m_plr_address);

    // Get the used memory size.
    const size_t used_memory_size = this->GetUsedNonSystemUserPhysicalMemorySize();

    // Finalize the page table.
    m_page_table.Finalize();

    // Finish using our system resource.
    if (m_system_resource) {
        if (m_system_resource->IsSecureResource()) {
            // Finalize optimized memory. If memory wasn't optimized, this is a no-op.
            m_kernel.MemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
        }

        m_system_resource->Close();
        m_system_resource = nullptr;
    }

    // Free all shared memory infos.
    {
        auto it = m_shared_memory_list.begin();
        while (it != m_shared_memory_list.end()) {
            KSharedMemoryInfo* info = std::addressof(*it);
            KSharedMemory* shmem = info->GetSharedMemory();

            while (!info->Close()) {
                shmem->Close();
            }
            shmem->Close();

            it = m_shared_memory_list.erase(it);
            KSharedMemoryInfo::Free(m_kernel, info);
        }
    }

    // Our thread local page list must be empty at this point.
    ASSERT(m_partially_used_tlp_tree.empty());
    ASSERT(m_fully_used_tlp_tree.empty());

    // Release memory to the resource limit.
    if (m_resource_limit != nullptr) {
        ASSERT(used_memory_size >= m_memory_release_hint);
        m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, used_memory_size,
                                  used_memory_size - m_memory_release_hint);
        m_resource_limit->Close();
    }

    // Perform inherited finalization.
    KSynchronizationObject::Finalize();
}

Result KProcess::Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
                            bool is_real) {
    // TODO: remove this special case
    if (is_real) {
        // Create and clear the process local region.
        R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
        this->GetMemory().ZeroBlock(m_plr_address, Svc::ThreadLocalRegionSize);
    }

    // Copy in the name from parameters.
    static_assert(sizeof(params.name) < sizeof(m_name));
    std::memcpy(m_name.data(), params.name.data(), sizeof(params.name));
    m_name[sizeof(params.name)] = 0;

    // Set misc fields.
    m_state = State::Created;
    m_main_thread_stack_size = 0;
    m_used_kernel_memory_size = 0;
    m_ideal_core_id = 0;
    m_flags = params.flags;
    m_version = params.version;
    m_program_id = params.program_id;
    m_code_address = params.code_address;
    m_code_size = params.code_num_pages * PageSize;
    m_is_application = True(params.flags & Svc::CreateProcessFlag::IsApplication);

    // Set thread fields.
    for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
        m_running_threads[i] = nullptr;
        m_pinned_threads[i] = nullptr;
        m_running_thread_idle_counts[i] = 0;
        m_running_thread_switch_counts[i] = 0;
    }

    // Set max memory based on address space type.
    switch ((params.flags & Svc::CreateProcessFlag::AddressSpaceMask)) {
    case Svc::CreateProcessFlag::AddressSpace32Bit:
    case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
    case Svc::CreateProcessFlag::AddressSpace64Bit:
        m_max_process_memory = m_page_table.GetHeapRegionSize();
        break;
    case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
        m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
        break;
    default:
        UNREACHABLE();
    }

    // Generate random entropy.
    GenerateRandom(m_entropy);

    // Clear remaining fields.
    m_num_running_threads = 0;
    m_num_process_switches = 0;
    m_num_thread_switches = 0;
    m_num_fpu_switches = 0;
    m_num_supervisor_calls = 0;
    m_num_ipc_messages = 0;

    m_is_signaled = false;
    m_exception_thread = nullptr;
    m_is_suspended = false;
    m_memory_release_hint = 0;
    m_schedule_count = 0;
    m_is_handle_table_initialized = false;

    // Open a reference to our resource limit.
    m_resource_limit = res_limit;
    m_resource_limit->Open();

    // We're initialized!
    m_is_initialized = true;

    R_SUCCEED();
}

Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
                            std::span<const u32> caps, KResourceLimit* res_limit,
                            KMemoryManager::Pool pool, bool immortal) {
    ASSERT(res_limit != nullptr);
    ASSERT((params.code_num_pages * PageSize) / PageSize ==
           static_cast<size_t>(params.code_num_pages));

    // Set members.
    m_memory_pool = pool;
    m_is_default_application_system_resource = false;
    m_is_immortal = immortal;

    // Setup our system resource.
    if (const size_t system_resource_num_pages = params.system_resource_num_pages;
        system_resource_num_pages != 0) {
        // Create a secure system resource.
        KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
        R_UNLESS(secure_resource != nullptr, ResultOutOfResource);

        ON_RESULT_FAILURE {
            secure_resource->Close();
        };

        // Initialize the secure resource.
        R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, res_limit,
                                          m_memory_pool));

        // Set our system resource.
        m_system_resource = secure_resource;
    } else {
        // Use the system-wide system resource.
        const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
        m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
                                                  : m_kernel.GetSystemSystemResource());

        m_is_default_application_system_resource = is_app;

        // Open reference to the system resource.
        m_system_resource->Open();
    }

    // Ensure we clean up our secure resource, if we fail.
    ON_RESULT_FAILURE {
        m_system_resource->Close();
        m_system_resource = nullptr;
    };

    // Setup page table.
    {
        const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
        const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
        const bool enable_das_merge =
            False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
        R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
                                      params.code_address, params.code_num_pages * PageSize,
                                      m_system_resource, res_limit, this->GetMemory(), 0));
    }
    ON_RESULT_FAILURE_2 {
        m_page_table.Finalize();
    };

    // Ensure we can insert the code region.
    R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
                                     KMemoryState::Code),
             ResultInvalidMemoryRegion);

    // Map the code region.
    R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState::Code,
                                    KMemoryPermission::KernelRead));

    // Initialize capabilities.
    R_TRY(m_capabilities.InitializeForKip(caps, std::addressof(m_page_table)));

    // Initialize the process id.
    m_process_id = m_kernel.CreateNewUserProcessID();
    ASSERT(InitialProcessIdMin <= m_process_id);
    ASSERT(m_process_id <= InitialProcessIdMax);

    // Initialize the rest of the process.
    R_TRY(this->Initialize(params, res_limit, true));

    // We succeeded!
    R_SUCCEED();
}

Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
                            std::span<const u32> user_caps, KResourceLimit* res_limit,
                            KMemoryManager::Pool pool, KProcessAddress aslr_space_start) {
    ASSERT(res_limit != nullptr);

    // Set members.
    m_memory_pool = pool;
    m_is_default_application_system_resource = false;
    m_is_immortal = false;

    // Get the memory sizes.
    const size_t code_num_pages = params.code_num_pages;
    const size_t system_resource_num_pages = params.system_resource_num_pages;
    const size_t code_size = code_num_pages * PageSize;
    const size_t system_resource_size = system_resource_num_pages * PageSize;

    // Reserve memory for our code resource.
    KScopedResourceReservation memory_reservation(
        res_limit, Svc::LimitableResource::PhysicalMemoryMax, code_size);
    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

    // Setup our system resource.
    if (system_resource_num_pages != 0) {
        // Create a secure system resource.
        KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
        R_UNLESS(secure_resource != nullptr, ResultOutOfResource);

        ON_RESULT_FAILURE {
            secure_resource->Close();
        };

        // Initialize the secure resource.
        R_TRY(secure_resource->Initialize(system_resource_size, res_limit, m_memory_pool));

        // Set our system resource.
        m_system_resource = secure_resource;

    } else {
        // Use the system-wide system resource.
        const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
        m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
                                                  : m_kernel.GetSystemSystemResource());

        m_is_default_application_system_resource = is_app;

        // Open reference to the system resource.
        m_system_resource->Open();
    }

    // Ensure we clean up our secure resource, if we fail.
    ON_RESULT_FAILURE {
        m_system_resource->Close();
        m_system_resource = nullptr;
    };

    // Setup page table.
    {
        const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
        const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
        const bool enable_das_merge =
            False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
        R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
                                      params.code_address, code_size, m_system_resource, res_limit,
                                      this->GetMemory(), aslr_space_start));
    }
    ON_RESULT_FAILURE_2 {
        m_page_table.Finalize();
    };

    // Ensure we can insert the code region.
    R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
             ResultInvalidMemoryRegion);

    // Map the code region.
    R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState::Code,
                                KMemoryPermission::KernelRead | KMemoryPermission::NotMapped));

    // Initialize capabilities.
    R_TRY(m_capabilities.InitializeForUser(user_caps, std::addressof(m_page_table)));

    // Initialize the process id.
    m_process_id = m_kernel.CreateNewUserProcessID();
    ASSERT(ProcessIdMin <= m_process_id);
    ASSERT(m_process_id <= ProcessIdMax);

    // If we should optimize memory allocations, do so.
    if (m_system_resource->IsSecureResource() &&
        True(params.flags & Svc::CreateProcessFlag::OptimizeMemoryAllocation)) {
        R_TRY(m_kernel.MemoryManager().InitializeOptimizedMemory(m_process_id, pool));
    }

    // Initialize the rest of the process.
    R_TRY(this->Initialize(params, res_limit, true));

    // We succeeded, so commit our memory reservation.
    memory_reservation.Commit();
    R_SUCCEED();
}

void KProcess::DoWorkerTaskImpl() {
    // Terminate child threads.
    TerminateChildren(m_kernel, this, nullptr);

    // Finalize the handle table, if we're not immortal.
    if (!m_is_immortal && m_is_handle_table_initialized) {
        this->FinalizeHandleTable();
    }

    // Finish termination.
    this->FinishTermination();
}

Result KProcess::StartTermination() {
    // Finalize the handle table when we're done, if the process isn't immortal.
    SCOPE_EXIT({
        if (!m_is_immortal) {
            this->FinalizeHandleTable();
        }
    });

    // Terminate child threads other than the current one.
    R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));
}

void KProcess::FinishTermination() {
    // Only allow termination to occur if the process isn't immortal.
    if (!m_is_immortal) {
        // Release resource limit hint.
        if (m_resource_limit != nullptr) {
            m_memory_release_hint = this->GetUsedNonSystemUserPhysicalMemorySize();
            m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, 0,
                                      m_memory_release_hint);
        }

        // Change state.
        {
            KScopedSchedulerLock sl(m_kernel);
            this->ChangeState(State::Terminated);
        }

        // Close.
        this->Close();
    }
}

void KProcess::Exit() {
    // Determine whether we need to start terminating.
    bool needs_terminate = false;
    {
        KScopedLightLock lk(m_state_lock);
        KScopedSchedulerLock sl(m_kernel);

        ASSERT(m_state != State::Created);
        ASSERT(m_state != State::CreatedAttached);
        ASSERT(m_state != State::Crashed);
        ASSERT(m_state != State::Terminated);
        if (m_state == State::Running || m_state == State::RunningAttached ||
            m_state == State::DebugBreak) {
            this->ChangeState(State::Terminating);
            needs_terminate = true;
        }
    }

    // If we need to start termination, do so.
    if (needs_terminate) {
        this->StartTermination();

        // Register the process as a work task.
        m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
    }

    // Exit the current thread.
    GetCurrentThread(m_kernel).Exit();
}

Result KProcess::Terminate() {
    // Determine whether we need to start terminating.
    bool needs_terminate = false;
    {
        KScopedLightLock lk(m_state_lock);

        // Check whether we're allowed to terminate.
        R_UNLESS(m_state != State::Created, ResultInvalidState);
        R_UNLESS(m_state != State::CreatedAttached, ResultInvalidState);

        KScopedSchedulerLock sl(m_kernel);

        if (m_state == State::Running || m_state == State::RunningAttached ||
            m_state == State::Crashed || m_state == State::DebugBreak) {
            this->ChangeState(State::Terminating);
            needs_terminate = true;
        }
    }

    // If we need to terminate, do so.
    if (needs_terminate) {
        // Start termination.
        if (R_SUCCEEDED(this->StartTermination())) {
            // Finish termination.
            this->FinishTermination();
        } else {
            // Register the process as a work task.
            m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit,
                                                 this);
        }
    }

    R_SUCCEED();
}

Result KProcess::AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
    // Lock ourselves, to prevent concurrent access.
    KScopedLightLock lk(m_state_lock);

    // Try to find an existing info for the memory.
    KSharedMemoryInfo* info = nullptr;
    for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) {
        if (it->GetSharedMemory() == shmem) {
            info = std::addressof(*it);
            break;
        }
    }

    // If we didn't find an info, create one.
    if (info == nullptr) {
        // Allocate a new info.
        info = KSharedMemoryInfo::Allocate(m_kernel);
        R_UNLESS(info != nullptr, ResultOutOfResource);

        // Initialize the info and add it to our list.
        info->Initialize(shmem);
        m_shared_memory_list.push_back(*info);
    }

    // Open a reference to the shared memory and its info.
    shmem->Open();
    info->Open();

    R_SUCCEED();
}

void KProcess::RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
    // Lock ourselves, to prevent concurrent access.
    KScopedLightLock lk(m_state_lock);

    // Find an existing info for the memory.
    KSharedMemoryInfo* info = nullptr;
    auto it = m_shared_memory_list.begin();
    for (; it != m_shared_memory_list.end(); ++it) {
        if (it->GetSharedMemory() == shmem) {
            info = std::addressof(*it);
            break;
        }
    }
    ASSERT(info != nullptr);

    // Close a reference to the info and its memory.
    if (info->Close()) {
        m_shared_memory_list.erase(it);
        KSharedMemoryInfo::Free(m_kernel, info);
    }

    shmem->Close();
}

Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
    KThreadLocalPage* tlp = nullptr;
    KProcessAddress tlr = 0;

    // See if we can get a region from a partially used TLP.
    {
        KScopedSchedulerLock sl(m_kernel);

        if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
            tlr = it->Reserve();
            ASSERT(tlr != 0);

            if (it->IsAllUsed()) {
                tlp = std::addressof(*it);
                m_partially_used_tlp_tree.erase(it);
                m_fully_used_tlp_tree.insert(*tlp);
            }

            *out = tlr;
            R_SUCCEED();
        }
    }

    // Allocate a new page.
    tlp = KThreadLocalPage::Allocate(m_kernel);
    R_UNLESS(tlp != nullptr, ResultOutOfMemory);
    ON_RESULT_FAILURE {
        KThreadLocalPage::Free(m_kernel, tlp);
    };

    // Initialize the new page.
    R_TRY(tlp->Initialize(m_kernel, this));

    // Reserve a TLR.
    tlr = tlp->Reserve();
    ASSERT(tlr != 0);

    // Insert into our tree.
    {
        KScopedSchedulerLock sl(m_kernel);
        if (tlp->IsAllUsed()) {
            m_fully_used_tlp_tree.insert(*tlp);
        } else {
            m_partially_used_tlp_tree.insert(*tlp);
        }
    }

    // We succeeded!
    *out = tlr;
    R_SUCCEED();
}

Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
    KThreadLocalPage* page_to_free = nullptr;

    // Release the region.
    {
        KScopedSchedulerLock sl(m_kernel);

        // Try to find the page in the partially used list.
        auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
        if (it == m_partially_used_tlp_tree.end()) {
            // If we don't find it, it has to be in the fully used list.
            it = m_fully_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
            R_UNLESS(it != m_fully_used_tlp_tree.end(), ResultInvalidAddress);

            // Release the region.
            it->Release(addr);

            // Move the page out of the fully used list.
            KThreadLocalPage* tlp = std::addressof(*it);
            m_fully_used_tlp_tree.erase(it);
            if (tlp->IsAllFree()) {
                page_to_free = tlp;
            } else {
                m_partially_used_tlp_tree.insert(*tlp);
            }
        } else {
            // Release the region.
            it->Release(addr);

            // Handle the all-free case.
            KThreadLocalPage* tlp = std::addressof(*it);
            if (tlp->IsAllFree()) {
                m_partially_used_tlp_tree.erase(it);
                page_to_free = tlp;
            }
        }
    }

    // If we should free the page it was in, do so.
    if (page_to_free != nullptr) {
        page_to_free->Finalize();

        KThreadLocalPage::Free(m_kernel, page_to_free);
    }

    R_SUCCEED();
}

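// Reserve a resource against our limit, if we have one. A process without a
// resource limit always succeeds.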
bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value) {
    if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
        return rl->Reserve(which, value);
    } else {
        return true;
    }
}

bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout) {
    if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
        return rl->Reserve(which, value, timeout);
    } else {
        return true;
    }
}

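// Release a previously reserved resource back to our limit, if we have one.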
void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value) {
    if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
        rl->Release(which, value);
    }
}

void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint) {
    if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
        rl->Release(which, value, hint);
    }
}

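// Track how many of our threads are running; when the last running thread
// exits, the process terminates itself.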
void KProcess::IncrementRunningThreadCount() {
    ASSERT(m_num_running_threads.load() >= 0);

    ++m_num_running_threads;
}

void KProcess::DecrementRunningThreadCount() {
    ASSERT(m_num_running_threads.load() > 0);

    if (const auto prev = m_num_running_threads--; prev == 1) {
        this->Terminate();
    }
}

bool KProcess::EnterUserException() {
    // Get the current thread.
    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
    ASSERT(this == cur_thread->GetOwnerProcess());

    // Check that we haven't already claimed the exception thread.
    if (m_exception_thread == cur_thread) {
        return false;
    }

    // Create the wait queue we'll be using.
    ThreadQueueImplForKProcessEnterUserException wait_queue(m_kernel,
                                                            std::addressof(m_exception_thread));

    // Claim the exception thread.
    {
        // Lock the scheduler.
        KScopedSchedulerLock sl(m_kernel);

        // Check that we're not terminating.
        if (cur_thread->IsTerminationRequested()) {
            return false;
        }

        // If we don't have an exception thread, we can just claim it directly.
        if (m_exception_thread == nullptr) {
            m_exception_thread = cur_thread;
            KScheduler::SetSchedulerUpdateNeeded(m_kernel);
            return true;
        }

        // Otherwise, we need to wait until we don't have an exception thread.

        // Add the current thread as a waiter on the current exception thread.
        cur_thread->SetKernelAddressKey(
            reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
        m_exception_thread->AddWaiter(cur_thread);

        // Wait to claim the exception thread.
        cur_thread->BeginWait(std::addressof(wait_queue));
    }

    // If our wait didn't end due to thread termination, we succeeded.
    return ResultTerminationRequested != cur_thread->GetWaitResult();
}

bool KProcess::LeaveUserException() {
    return this->ReleaseUserException(GetCurrentThreadPointer(m_kernel));
}

bool KProcess::ReleaseUserException(KThread* thread) {
    KScopedSchedulerLock sl(m_kernel);

    if (m_exception_thread == thread) {
        m_exception_thread = nullptr;

        // Remove waiter thread.
        bool has_waiters;
        if (KThread* next = thread->RemoveKernelWaiterByKey(
                std::addressof(has_waiters),
                reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
            next != nullptr) {
            next->EndWait(ResultSuccess);
        }

        KScheduler::SetSchedulerUpdateNeeded(m_kernel);

        return true;
    } else {
        return false;
    }
}

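// Threads register/unregister themselves with their owner under the list lock.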
void KProcess::RegisterThread(KThread* thread) {
    KScopedLightLock lk(m_list_lock);

    m_thread_list.push_back(*thread);
}

void KProcess::UnregisterThread(KThread* thread) {
    KScopedLightLock lk(m_list_lock);

    m_thread_list.erase(m_thread_list.iterator_to(*thread));
}

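// Used memory is normal (heap/mapped) memory plus code and main-thread stack,
// plus any non-default secure memory.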
size_t KProcess::GetUsedUserPhysicalMemorySize() const {
    const size_t norm_size = m_page_table.GetNormalMemorySize();
    const size_t other_size = m_code_size + m_main_thread_stack_size;
    const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();

    return norm_size + other_size + sec_size;
}

size_t KProcess::GetTotalUserPhysicalMemorySize() const {
    // Get the amount of free and used size.
    const size_t free_size =
        m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
    const size_t max_size = m_max_process_memory;

    // Determine used size.
    // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
    // GetUsedUserPhysicalMemorySize().
    const size_t norm_size = m_page_table.GetNormalMemorySize();
    const size_t other_size = m_code_size + m_main_thread_stack_size;
    const size_t sec_size = this->GetRequiredSecureMemorySize();
    const size_t used_size = norm_size + other_size + sec_size;

    // NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo
    // does it this way.
    if (used_size + free_size > max_size) {
        return max_size;
    } else {
        return free_size + this->GetUsedUserPhysicalMemorySize();
    }
}

size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
    const size_t norm_size = m_page_table.GetNormalMemorySize();
    const size_t other_size = m_code_size + m_main_thread_stack_size;

    return norm_size + other_size;
}

size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
    // Get the amount of free and used size.
    const size_t free_size =
        m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
    const size_t max_size = m_max_process_memory;

    // Determine used size.
    // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
    // GetUsedUserPhysicalMemorySize().
    const size_t norm_size = m_page_table.GetNormalMemorySize();
    const size_t other_size = m_code_size + m_main_thread_stack_size;
    const size_t sec_size = this->GetRequiredSecureMemorySize();
    const size_t used_size = norm_size + other_size + sec_size;

    // NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo
    // does it this way.
    if (used_size + free_size > max_size) {
        return max_size - this->GetRequiredSecureMemorySizeNonDefault();
    } else {
        return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
    }
}

Result KProcess::Run(s32 priority, size_t stack_size) {
    // Lock ourselves, to prevent concurrent access.
    KScopedLightLock lk(m_state_lock);

    // Validate that we're in a state where we can initialize.
    const auto state = m_state;
    R_UNLESS(state == State::Created || state == State::CreatedAttached, ResultInvalidState);

    // Place a tentative reservation of a thread for this process.
    KScopedResourceReservation thread_reservation(this, Svc::LimitableResource::ThreadCountMax);
    R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);

    // Ensure that we haven't already allocated stack.
    ASSERT(m_main_thread_stack_size == 0);

    // Ensure that we're allocating a valid stack.
    stack_size = Common::AlignUp(stack_size, PageSize);
    R_UNLESS(stack_size + m_code_size <= m_max_process_memory, ResultOutOfMemory);
    R_UNLESS(stack_size + m_code_size >= m_code_size, ResultOutOfMemory);

    // Place a tentative reservation of memory for our new stack.
    KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
                                               stack_size);
    R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);

    // Allocate and map our stack.
    KProcessAddress stack_top = 0;
    if (stack_size) {
        KProcessAddress stack_bottom;
        R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
                                    KMemoryState::Stack, KMemoryPermission::UserReadWrite));

        stack_top = stack_bottom + stack_size;
        m_main_thread_stack_size = stack_size;
    }

    // Ensure our stack is safe to clean up on exit.
    ON_RESULT_FAILURE {
        if (m_main_thread_stack_size) {
            ASSERT(R_SUCCEEDED(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size,
                                                       m_main_thread_stack_size / PageSize,
                                                       KMemoryState::Stack)));
            m_main_thread_stack_size = 0;
        }
    };

    // Set our maximum heap size.
    R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory -
                                      (m_main_thread_stack_size + m_code_size)));

    // Initialize our handle table.
    R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
    ON_RESULT_FAILURE_2 {
        this->FinalizeHandleTable();
    };

    // Create a new thread for the process.
    KThread* main_thread = KThread::Create(m_kernel);
    R_UNLESS(main_thread != nullptr, ResultOutOfResource);
    SCOPE_EXIT({ main_thread->Close(); });

    // Initialize the thread.
    R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,
                                        stack_top, priority, m_ideal_core_id, this));

    // Register the thread, and commit our reservation.
    KThread::Register(m_kernel, main_thread);
    thread_reservation.Commit();

    // Add the thread to our handle table.
    Handle thread_handle;
    R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));

    // Set the thread arguments.
    main_thread->GetContext().r[0] = 0;
    main_thread->GetContext().r[1] = thread_handle;

    // Update our state.
    this->ChangeState((state == State::Created) ? State::Running : State::RunningAttached);
    ON_RESULT_FAILURE_2 {
        this->ChangeState(state);
    };

    // Suspend for debug, if we should.
    if (m_kernel.System().DebuggerEnabled()) {
        main_thread->RequestSuspend(SuspendType::Debug);
    }

    // Run our thread.
    R_TRY(main_thread->Run());

    // Open a reference to represent that we're running.
    this->Open();

    // We succeeded! Commit our memory reservation.
    mem_reservation.Commit();

    R_SUCCEED();
}

Result KProcess::Reset() {
    // Lock the process and the scheduler.
    KScopedLightLock lk(m_state_lock);
    KScopedSchedulerLock sl(m_kernel);

    // Validate that we're in a state that we can reset.
    R_UNLESS(m_state != State::Terminated, ResultInvalidState);
    R_UNLESS(m_is_signaled, ResultInvalidState);

    // Clear signaled.
    m_is_signaled = false;
    R_SUCCEED();
}

Result KProcess::SetActivity(Svc::ProcessActivity activity) {
    // Lock ourselves and the scheduler.
    KScopedLightLock lk(m_state_lock);
    KScopedLightLock list_lk(m_list_lock);
    KScopedSchedulerLock sl(m_kernel);

    // Validate our state.
    R_UNLESS(m_state != State::Terminating, ResultInvalidState);
    R_UNLESS(m_state != State::Terminated, ResultInvalidState);

    // Either pause or resume.
    if (activity == Svc::ProcessActivity::Paused) {
        // Verify that we're not suspended.
        R_UNLESS(!m_is_suspended, ResultInvalidState);

        // Suspend all threads.
        auto end = this->GetThreadList().end();
        for (auto it = this->GetThreadList().begin(); it != end; ++it) {
            it->RequestSuspend(SuspendType::Process);
        }

        // Set ourselves as suspended.
        this->SetSuspended(true);
    } else {
        ASSERT(activity == Svc::ProcessActivity::Runnable);

        // Verify that we're suspended.
        R_UNLESS(m_is_suspended, ResultInvalidState);

        // Resume all threads.
        auto end = this->GetThreadList().end();
        for (auto it = this->GetThreadList().begin(); it != end; ++it) {
            it->Resume(SuspendType::Process);
        }

        // Set ourselves as resumed.
        this->SetSuspended(false);
    }

    R_SUCCEED();
}

void KProcess::PinCurrentThread() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Get the current thread.
    const s32 core_id = GetCurrentCoreId(m_kernel);
    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);

    // If the thread isn't terminated, pin it.
    if (!cur_thread->IsTerminationRequested()) {
        // Pin it.
        this->PinThread(core_id, cur_thread);
        cur_thread->Pin(core_id);

        // An update is needed.
        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
    }
}

void KProcess::UnpinCurrentThread() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Get the current thread.
    const s32 core_id = GetCurrentCoreId(m_kernel);
    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);

    // Unpin it.
    cur_thread->Unpin();
    this->UnpinThread(core_id, cur_thread);

    // An update is needed.
    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}

void KProcess::UnpinThread(KThread* thread) {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

    // Get the thread's core id.
    const auto core_id = thread->GetActiveCore();

    // Unpin it.
    this->UnpinThread(core_id, thread);
    thread->Unpin();

    // An update is needed.
    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
}

Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids,
                               s32 max_out_count) {
    // TODO: use current memory reference
    auto& memory = m_kernel.System().ApplicationMemory();

    // Lock the list.
    KScopedLightLock lk(m_list_lock);

    // Iterate over the list.
    s32 count = 0;
    auto end = this->GetThreadList().end();
    for (auto it = this->GetThreadList().begin(); it != end; ++it) {
        // If we're within array bounds, write the id.
        if (count < max_out_count) {
            // Get the thread id.
            KThread* thread = std::addressof(*it);
            const u64 id = thread->GetId();

            // Copy the id to userland.
            memory.Write64(out_thread_ids + count * sizeof(u64), id);
        }

        // Increment the count.
        ++count;
    }

    // We successfully iterated the list.
    *out_num_threads = count;
    R_SUCCEED();
}

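// Currently a no-op: no per-process bookkeeping is needed on context switch in
// this implementation.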
void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}

KProcess::KProcess(KernelCore& kernel)
    : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
      m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
      m_handle_table{kernel} {}

KProcess::~KProcess() = default;

Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
                                  KProcessAddress aslr_space_start, bool is_hbl) {
    // Create a resource limit for the process.
    const auto physical_memory_size =
        m_kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
    auto* res_limit =
        Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);

    // Ensure we maintain a clean state on exit.
    SCOPE_EXIT({ res_limit->Close(); });

    // Declare flags and code address.
    Svc::CreateProcessFlag flag{};
    u64 code_address{};

    // We are an application.
    flag |= Svc::CreateProcessFlag::IsApplication;

    // If we are 64-bit, create as such.
    if (metadata.Is64BitProgram()) {
        flag |= Svc::CreateProcessFlag::Is64Bit;
    }

    // Set the address space type and code address.
    switch (metadata.GetAddressSpaceType()) {
    case FileSys::ProgramAddressSpaceType::Is39Bit:
        flag |= Svc::CreateProcessFlag::AddressSpace64Bit;

        // For 39-bit processes, the ASLR region starts at 0x800'0000 and is ~512GiB large.
        // However, some (buggy) programs/libraries like skyline incorrectly depend on the
        // existence of ASLR pages before the entry point, so we will adjust the load address
        // to point to about 2GiB into the ASLR region.
        code_address = 0x8000'0000;
        break;
    case FileSys::ProgramAddressSpaceType::Is36Bit:
        flag |= Svc::CreateProcessFlag::AddressSpace64BitDeprecated;
        code_address = 0x800'0000;
        break;
    case FileSys::ProgramAddressSpaceType::Is32Bit:
        flag |= Svc::CreateProcessFlag::AddressSpace32Bit;
        code_address = 0x20'0000;
        break;
    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
        flag |= Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias;
        code_address = 0x20'0000;
        break;
    }

    Svc::CreateProcessParameter params{
        .name = {},
        .version = {},
        .program_id = metadata.GetTitleID(),
        .code_address = code_address + GetInteger(aslr_space_start),
        .code_num_pages = static_cast<s32>(code_size / PageSize),
        .flags = flag,
        .reslimit = Svc::InvalidHandle,
        .system_resource_num_pages = static_cast<s32>(metadata.GetSystemResourceSize() / PageSize),
    };

    // Set the process name.
    const auto& name = metadata.GetName();
    static_assert(sizeof(params.name) <= sizeof(name));
    std::memcpy(params.name.data(), name.data(), sizeof(params.name));

    // Initialize for application process.
    R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit,
                           KMemoryManager::Pool::Application, aslr_space_start));

    // Assign remaining properties.
    m_is_hbl = is_hbl;
    m_ideal_core_id = metadata.GetMainThreadCore();

    // Set up emulation context.
    this->InitializeInterfaces();

    // We succeeded.
    R_SUCCEED();
}

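// Copy a code set into the process address space and apply the final memory
// permissions to each of its segments.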
void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
    const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                      Svc::MemoryPermission permission) {
        m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
    };

    this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());

    ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
    ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
    ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);

#ifdef HAS_NCE
    if (Settings::IsNceEnabled()) {
        auto& buffer = m_kernel.System().DeviceMemory().buffer;
        const auto& code = code_set.CodeSegment();
        const auto& patch = code_set.PatchSegment();
        buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
        buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
        ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
    }
#endif
}

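// Create the per-core ARM interfaces for this process: NCE when built in and
// enabled, otherwise dynarmic (64-bit or 32-bit to match the program).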
void KProcess::InitializeInterfaces() {
    this->GetMemory().SetCurrentPageTable(*this);

#ifdef HAS_NCE
    if (this->Is64Bit() && Settings::IsNceEnabled()) {
        for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            m_arm_interfaces[i] = std::make_unique<Core::ArmNce>(m_kernel.System(), true, i);
        }
    } else
#endif
        if (this->Is64Bit()) {
        for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            m_arm_interfaces[i] = std::make_unique<Core::ArmDynarmic64>(
                m_kernel.System(), m_kernel.IsMulticore(), this,
                static_cast<Core::DynarmicExclusiveMonitor&>(m_kernel.GetExclusiveMonitor()), i);
        }
    } else {
        for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            m_arm_interfaces[i] = std::make_unique<Core::ArmDynarmic32>(
                m_kernel.System(), m_kernel.IsMulticore(), this,
                static_cast<Core::DynarmicExclusiveMonitor&>(m_kernel.GetExclusiveMonitor()), i);
        }
    }
}

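// Claim a free watchpoint slot and mark the covered pages as debug memory.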
bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
    const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
        return wp.type == DebugWatchpointType::None;
    })};

    if (watch == m_watchpoints.end()) {
        return false;
    }

    watch->start_address = addr;
    watch->end_address = addr + size;
    watch->type = type;

    for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
         page += PageSize) {
        m_debug_page_refcounts[page]++;
        this->GetMemory().MarkRegionDebug(page, PageSize, true);
    }

    return true;
}

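// Clear the matching watchpoint, unmarking pages whose debug refcount drops
// to zero.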
bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
    const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
        return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
    })};

    if (watch == m_watchpoints.end()) {
        return false;
    }

    watch->start_address = 0;
    watch->end_address = 0;
    watch->type = DebugWatchpointType::None;

    for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
         page += PageSize) {
        m_debug_page_refcounts[page]--;
        if (!m_debug_page_refcounts[page]) {
            this->GetMemory().MarkRegionDebug(page, PageSize, false);
        }
    }

    return true;
}

Core::Memory::Memory& KProcess::GetMemory() const {
    // TODO: per-process memory
    return m_kernel.System().ApplicationMemory();
}

} // namespace Kernel