Mirror of https://gitlab.com/suyu-emu/suyu.git (synced 2024-03-15 23:15:44 +00:00)
scope_exit: Make constexpr
Allows the use of the macro in constexpr contexts. It also avoids some potential problems when nesting braces inside it.
This commit is contained in:
parent 665fce871f
commit 310c1f50be
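
For reference, this is how call sites change (the before/after lines are taken from the diff below; the cleanup body is just one of the affected examples):

    // Before: the cleanup block is a macro argument, so commas or nested braces
    // inside it could confuse the preprocessor.
    SCOPE_EXIT({ MicroProfileShutdown(); });

    // After: SCOPE_EXIT declares an anonymous guard object, and the block that
    // follows is an ordinary lambda body terminated with ';'.
    SCOPE_EXIT {
        MicroProfileShutdown();
    };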
@@ -404,7 +404,9 @@ static Core::SystemResultStatus RunEmulation(const std::string& filepath,
                                              const size_t program_index,
                                              const bool frontend_initiated) {
     MicroProfileOnThreadCreate("EmuThread");
-    SCOPE_EXIT({ MicroProfileShutdown(); });
+    SCOPE_EXIT {
+        MicroProfileShutdown();
+    };
 
     LOG_INFO(Frontend, "starting");
 
@@ -413,7 +415,9 @@ static Core::SystemResultStatus RunEmulation(const std::string& filepath,
         return Core::SystemResultStatus::ErrorLoader;
     }
 
-    SCOPE_EXIT({ EmulationSession::GetInstance().ShutdownEmulation(); });
+    SCOPE_EXIT {
+        EmulationSession::GetInstance().ShutdownEmulation();
+    };
 
     jconst result = EmulationSession::GetInstance().InitializeEmulation(filepath, program_index,
                                                                         frontend_initiated);
@@ -357,7 +357,9 @@ bool IsCubebSuitable() {
         return false;
     }
 
-    SCOPE_EXIT({ cubeb_destroy(ctx); });
+    SCOPE_EXIT {
+        cubeb_destroy(ctx);
+    };
 
 #ifdef _WIN32
     if (SUCCEEDED(com_init_result)) {
@@ -20,10 +20,10 @@
 namespace AudioCore::Sink {
 
 void SinkStream::AppendBuffer(SinkBuffer& buffer, std::span<s16> samples) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         queue.enqueue(buffer);
         ++queued_buffers;
-    });
+    };
 
     if (type == StreamType::In) {
         return;
@@ -20,7 +20,9 @@ std::string DemangleSymbol(const std::string& mangled) {
     }
 
     char* demangled = nullptr;
-    SCOPE_EXIT({ std::free(demangled); });
+    SCOPE_EXIT {
+        std::free(demangled);
+    };
 
     if (is_itanium(mangled)) {
         demangled = llvm::itaniumDemangle(mangled.c_str());
@@ -430,11 +430,11 @@ public:
     explicit Impl(size_t backing_size_, size_t virtual_size_)
         : backing_size{backing_size_}, virtual_size{virtual_size_} {
         bool good = false;
-        SCOPE_EXIT({
+        SCOPE_EXIT {
            if (!good) {
                Release();
            }
-        });
+        };
 
        long page_size = sysconf(_SC_PAGESIZE);
        if (page_size != 0x1000) {
@@ -24,10 +24,10 @@ bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* c
     out_entry->block_size = page_size;
 
     // Regardless of whether the page was mapped, advance on exit.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         context->next_page += 1;
         context->next_offset += page_size;
-    });
+    };
 
     // Validate that we can read the actual entry.
     const auto page = context->next_page;
@@ -7,29 +7,61 @@
 #include "common/common_funcs.h"
 
 namespace detail {
-template <typename Func>
+template <class F>
-struct ScopeExitHelper {
+class ScopeGuard {
-    explicit ScopeExitHelper(Func&& func_) : func(std::move(func_)) {}
+    YUZU_NON_COPYABLE(ScopeGuard);
-    ~ScopeExitHelper() {
+private:
+    F f;
+    bool active;
+
+public:
+    constexpr ScopeGuard(F f_) : f(std::move(f_)), active(true) {}
+    constexpr ~ScopeGuard() {
         if (active) {
-            func();
+            f();
         }
     }
+    constexpr void Cancel() {
-    void Cancel() {
         active = false;
     }
 
-    Func func;
+    constexpr ScopeGuard(ScopeGuard&& rhs) : f(std::move(rhs.f)), active(rhs.active) {
-    bool active{true};
+        rhs.Cancel();
+    }
+
+    ScopeGuard& operator=(ScopeGuard&& rhs) = delete;
 };
 
-template <typename Func>
+template <class F>
-ScopeExitHelper<Func> ScopeExit(Func&& func) {
+constexpr ScopeGuard<F> MakeScopeGuard(F f) {
-    return ScopeExitHelper<Func>(std::forward<Func>(func));
+    return ScopeGuard<F>(std::move(f));
 }
 
+enum class ScopeGuardOnExit {};
+
+template <typename F>
+constexpr ScopeGuard<F> operator+(ScopeGuardOnExit, F&& f) {
+    return ScopeGuard<F>(std::forward<F>(f));
+}
+
 } // namespace detail
 
+#define CONCATENATE_IMPL(s1, s2) s1##s2
+#define CONCATENATE(s1, s2) CONCATENATE_IMPL(s1, s2)
+
+#ifdef __COUNTER__
+#define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __COUNTER__)
+#else
+#define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __LINE__)
+#endif
+
+/**
+ * This macro is similar to SCOPE_EXIT, except the object is caller managed. This is intended to be
+ * used when the caller might want to cancel the ScopeExit.
+ */
+#define SCOPE_GUARD detail::ScopeGuardOnExit() + [&]()
+
 /**
  * This macro allows you to conveniently specify a block of code that will run on scope exit. Handy
  * for doing ad-hoc clean-up tasks in a function with multiple returns.
@@ -38,7 +70,7 @@ ScopeExitHelper<Func> ScopeExit(Func&& func) {
  * \code
  * const int saved_val = g_foo;
  * g_foo = 55;
- * SCOPE_EXIT({ g_foo = saved_val; });
+ * SCOPE_EXIT{ g_foo = saved_val; };
  *
  * if (Bar()) {
  *     return 0;
@@ -47,10 +79,4 @@ ScopeExitHelper<Func> ScopeExit(Func&& func) {
  * }
  * \endcode
  */
-#define SCOPE_EXIT(body) auto CONCAT2(scope_exit_helper_, __LINE__) = detail::ScopeExit([&]() body)
+#define SCOPE_EXIT auto ANONYMOUS_VARIABLE(SCOPE_EXIT_STATE_) = SCOPE_GUARD
-
-/**
- * This macro is similar to SCOPE_EXIT, except the object is caller managed. This is intended to be
- * used when the caller might want to cancel the ScopeExit.
- */
-#define SCOPE_GUARD(body) detail::ScopeExit([&]() body)
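
Roughly, the rewritten macro works by pasting the braced block onto a guard factory. A sketch of the expansion, using only names defined in the new header above (the counter suffix is illustrative):

    // SCOPE_EXIT { MicroProfileShutdown(); };
    // expands (via SCOPE_GUARD and ANONYMOUS_VARIABLE) to approximately:
    auto SCOPE_EXIT_STATE_0 = detail::ScopeGuardOnExit() + [&]() {
        MicroProfileShutdown();
    };
    // detail::operator+(ScopeGuardOnExit, F&&) wraps the lambda in a ScopeGuard<F>,
    // whose constexpr destructor runs the lambda when the guard leaves scope.

    // Because the constructor and destructor are now constexpr, a guard can also
    // live inside a constant-evaluated function (illustrative, not from the diff):
    constexpr int Demo() {
        int value = 0;
        SCOPE_EXIT {
            value = 1; // runs after the return value has been produced
        };
        return value;
    }
    static_assert(Demo() == 0);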
@@ -199,10 +199,10 @@ void CpuManager::RunThread(std::stop_token token, std::size_t core) {
     data.host_context = Common::Fiber::ThreadToFiber();
 
     // Cleanup
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         data.host_context->Exit();
         MicroProfileOnThreadExit();
-    });
+    };
 
     // Running
     if (!gpu_barrier->Sync(token)) {
@@ -391,12 +391,12 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o
             std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
         const auto current_vaddr =
             static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset);
-        SCOPE_EXIT({
+        SCOPE_EXIT{
             page_index += next_pages;
             page_offset = 0;
             increment(copy_amount);
             remaining_size -= copy_amount;
-        });
+        };
 
         auto phys_addr = compressed_physical_ptr[page_index];
         if (phys_addr == 0) {
@@ -3,6 +3,10 @@
 
 #pragma once
 
+#include <string_view>
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+
 namespace FileSys {
 
 constexpr inline size_t EntryNameLengthMax = 0x300;
@@ -447,7 +447,7 @@ public:
         char* replacement_path = nullptr;
         size_t replacement_path_size = 0;
 
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (replacement_path != nullptr) {
                 if (std::is_constant_evaluated()) {
                     delete[] replacement_path;
@@ -455,7 +455,7 @@ public:
                     Deallocate(replacement_path, replacement_path_size);
                 }
             }
-        });
+        };
 
         // Perform path replacement, if necessary
         if (IsParentDirectoryPathReplacementNeeded(cur_path)) {
@@ -1102,8 +1102,8 @@ public:
         R_SUCCEED();
     }
 
-    static Result Normalize(char* dst, size_t dst_size, const char* path, size_t path_len,
+    static constexpr Result Normalize(char* dst, size_t dst_size, const char* path, size_t path_len,
                             const PathFlags& flags) {
         // Use StringTraits names for remainder of scope
         using namespace StringTraits;
 
@@ -1199,7 +1199,7 @@ public:
         const size_t replaced_src_len = path_len - (src - path);
 
         char* replaced_src = nullptr;
-        SCOPE_EXIT({
+        SCOPE_EXIT {
            if (replaced_src != nullptr) {
                if (std::is_constant_evaluated()) {
                    delete[] replaced_src;
@@ -1207,7 +1207,7 @@ public:
                    Deallocate(replaced_src, replaced_src_len);
                }
            }
-        });
+        };
 
        if (std::is_constant_evaluated()) {
            replaced_src = new char[replaced_src_len];
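
The FileSys path hunks above are the motivating case for the constexpr change: Normalize() becomes constexpr and still needs cleanup on every return path, while the allocation strategy switches on std::is_constant_evaluated(). The old ScopeExitHelper had a non-constexpr destructor, so it could not even be instantiated inside a constexpr function. A reduced sketch of the shape (hypothetical helper name, assuming the new common/scope_exit.h):

    #include "common/scope_exit.h"

    // Hypothetical reduction of the pattern in Normalize() above.
    constexpr void NormalizeSketch(bool need_replacement) {
        char* replaced_src = nullptr;
        SCOPE_EXIT {
            if (replaced_src != nullptr) {
                // During constant evaluation only plain new/delete is usable; at
                // runtime the real code calls its custom Deallocate() instead.
                delete[] replaced_src;
            }
        };
        if (need_replacement) {
            replaced_src = new char[16];
            // ... perform the replacement ...
        }
    }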
@@ -36,7 +36,9 @@ Result HierarchicalSha256Storage::Initialize(VirtualFile* base_storages, s32 lay
     // Get the base storage size.
     m_base_storage_size = base_storages[2]->GetSize();
     {
-        auto size_guard = SCOPE_GUARD({ m_base_storage_size = 0; });
+        auto size_guard = SCOPE_GUARD {
+            m_base_storage_size = 0;
+        };
         R_UNLESS(m_base_storage_size <= static_cast<s64>(HashSize)
                                             << m_log_size_ratio << m_log_size_ratio,
                  ResultHierarchicalSha256BaseStorageTooLarge);
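
Unlike SCOPE_EXIT, SCOPE_GUARD hands the guard object back to the caller, who can Cancel() it on the success path (ScopeGuard::Cancel() in the header hunk above). The Cancel() call itself sits outside the quoted hunk, so the success path below is only a hedged sketch with a placeholder bound:

    {
        auto size_guard = SCOPE_GUARD {
            m_base_storage_size = 0; // roll the size back if validation fails
        };
        R_UNLESS(m_base_storage_size <= some_limit, // placeholder for the shifted HashSize bound
                 ResultHierarchicalSha256BaseStorageTooLarge);
        // ... further validation ...
        size_guard.Cancel(); // success: keep m_base_storage_size (not shown in the hunk)
    }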
@@ -98,7 +98,9 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) {
 
 Loader::ResultStatus ProgramMetadata::Reload(VirtualFile file) {
     const u64 original_program_id = aci_header.title_id;
-    SCOPE_EXIT({ aci_header.title_id = original_program_id; });
+    SCOPE_EXIT {
+        aci_header.title_id = original_program_id;
+    };
 
     return this->Load(file);
 }
@@ -24,7 +24,9 @@ Result KClientSession::SendSyncRequest(uintptr_t address, size_t size) {
     // Create a session request.
     KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Initialize the request.
     request->Initialize(nullptr, address, size);
@@ -37,7 +39,9 @@ Result KClientSession::SendAsyncRequest(KEvent* event, uintptr_t address, size_t
     // Create a session request.
     KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Initialize the request.
     request->Initialize(event, address, size);
@@ -1305,11 +1305,11 @@ Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddr
 
     // Ensure that we maintain the instruction cache.
     bool reprotected_pages = false;
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (reprotected_pages && any_code_pages) {
             InvalidateInstructionCache(m_kernel, this, dst_address, size);
         }
-    });
+    };
 
     // Unmap.
     {
@@ -1397,7 +1397,9 @@ Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
     // Close the opened pages when we're done with them.
     // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
     // automatically.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg) {
@@ -1603,7 +1605,9 @@ Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProce
         m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
 
     // Ensure that the page group is closed when we're done working with it.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all pages.
     for (const auto& it : pg) {
@@ -2191,7 +2195,9 @@ Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
     // Close the opened pages when we're done with them.
     // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
     // automatically.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg) {
@@ -2592,7 +2598,9 @@ Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddre
         // Temporarily unlock ourselves, so that other operations can occur while we flush the
         // region.
         m_general_lock.Unlock();
-        SCOPE_EXIT({ m_general_lock.Lock(); });
+        SCOPE_EXIT {
+            m_general_lock.Lock();
+        };
 
         // Flush the region.
         R_ASSERT(FlushDataCache(dst_address, size));
@@ -3311,10 +3319,10 @@ Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddre
     // Ensure we unmap the io memory when we're done with it.
     const KPageProperties unmap_properties =
         KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
                                unmap_properties, OperationType::Unmap, true));
-    });
+    };
 
     // Read the memory.
     const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
@@ -3347,10 +3355,10 @@ Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAdd
     // Ensure we unmap the io memory when we're done with it.
     const KPageProperties unmap_properties =
         KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
                                unmap_properties, OperationType::Unmap, true));
-    });
+    };
 
     // Write the memory.
     const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
@@ -4491,14 +4499,14 @@ Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
 
     // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
     // free on scope exit.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (start_partial_page != 0) {
             m_kernel.MemoryManager().Close(start_partial_page, 1);
         }
         if (end_partial_page != 0) {
             m_kernel.MemoryManager().Close(end_partial_page, 1);
         }
-    });
+    };
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
@@ -5166,10 +5174,10 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                 GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
 
             // If we fail in the next bit (or retry), we need to cleanup the pages.
-            auto pg_guard = SCOPE_GUARD({
+            auto pg_guard = SCOPE_GUARD {
                 pg.OpenFirst();
                 pg.Close();
-            });
+            };
 
             // Map the memory.
             {
@@ -5694,7 +5702,9 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
 
         // Ensure that any pages we track are closed on exit.
         KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
-        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+        SCOPE_EXIT {
+            pages_to_close.CloseAndReset();
+        };
 
         // Make a page group representing the region to unmap.
         this->MakePageGroup(pages_to_close, virt_addr, num_pages);
@@ -77,7 +77,9 @@ Result TerminateChildren(KernelCore& kernel, KProcess* process,
         }
 
         // Terminate and close the thread.
-        SCOPE_EXIT({ cur_child->Close(); });
+        SCOPE_EXIT {
+            cur_child->Close();
+        };
 
         if (const Result terminate_result = cur_child->Terminate();
             ResultTerminationRequested == terminate_result) {
@@ -466,11 +468,11 @@ void KProcess::DoWorkerTaskImpl() {
 
 Result KProcess::StartTermination() {
     // Finalize the handle table when we're done, if the process isn't immortal.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (!m_is_immortal) {
             this->FinalizeHandleTable();
         }
-    });
+    };
 
     // Terminate child threads other than the current one.
     R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));
@@ -964,7 +966,9 @@ Result KProcess::Run(s32 priority, size_t stack_size) {
     // Create a new thread for the process.
     KThread* main_thread = KThread::Create(m_kernel);
     R_UNLESS(main_thread != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ main_thread->Close(); });
+    SCOPE_EXIT {
+        main_thread->Close();
+    };
 
     // Initialize the thread.
     R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,
@@ -1155,7 +1159,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
         Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
 
     // Ensure we maintain a clean state on exit.
-    SCOPE_EXIT({ res_limit->Close(); });
+    SCOPE_EXIT {
+        res_limit->Close();
+    };
 
     // Declare flags and code address.
     Svc::CreateProcessFlag flag{};
@@ -651,11 +651,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any special data.
     if (src_header.GetHasSpecialHeader()) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
            if (offset > dst_recv_list_idx) {
                recv_list_broken = true;
            }
-        });
+        };
 
         // Process special data.
         R_TRY(ProcessMessageSpecialData<false>(offset, dst_process, src_process, src_thread,
@@ -665,11 +665,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any pointer buffers.
     for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
            if (offset > dst_recv_list_idx) {
                recv_list_broken = true;
            }
-        });
+        };
 
         R_TRY(ProcessReceiveMessagePointerDescriptors(
             offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list,
@@ -680,11 +680,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any map alias buffers.
     for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
           if (offset > dst_recv_list_idx) {
               recv_list_broken = true;
           }
-        });
+        };
 
         // We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite.
         const KMemoryPermission perm = (i >= src_header.GetSendCount())
@@ -702,11 +702,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any raw data.
     if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
        // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
           if (offset + raw_count > dst_recv_list_idx) {
               recv_list_broken = true;
           }
-        });
+        };
 
        // Get the offset and size.
        const size_t offset_words = offset * sizeof(u32);
@@ -1124,7 +1124,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
         client_thread->Open();
     }
 
-    SCOPE_EXIT({ client_thread->Close(); });
+    SCOPE_EXIT {
+        client_thread->Close();
+    };
 
     // Set the request as our current.
     m_current_request = request;
@@ -1174,7 +1176,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
     // Reply to the client.
     {
         // After we reply, close our reference to the request.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // Get the event to check whether the request is async.
         if (KEvent* event = request->GetEvent(); event != nullptr) {
@@ -1236,7 +1240,9 @@ Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buff
     }
 
     // Close reference to the request once we're done processing it.
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Extract relevant information from the request.
     const uint64_t client_message = request->GetAddress();
@@ -1394,7 +1400,9 @@ void KServerSession::CleanupRequests() {
         }
 
         // Close a reference to the request once it's cleaned up.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // Extract relevant information from the request.
         const uint64_t client_message = request->GetAddress();
@@ -1491,7 +1499,9 @@ void KServerSession::OnClientClosed() {
         ASSERT(thread != nullptr);
 
         // Ensure that we close the request when done.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // If we're terminating, close a reference to the thread and event.
         if (terminate) {
@@ -21,7 +21,9 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
     // Allocate a new page.
     KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
    R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
-    auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });
+    auto page_buf_guard = SCOPE_GUARD {
+        KPageBuffer::Free(kernel, page_buf);
+    };
 
     // Map the address in.
     const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);
@@ -24,7 +24,9 @@ Result KTransferMemory::Initialize(KProcessAddress addr, std::size_t size,
 
     // Construct the page group, guarding to make sure our state is valid on exit.
     m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());
-    auto pg_guard = SCOPE_GUARD({ m_page_group.reset(); });
+    auto pg_guard = SCOPE_GUARD {
+        m_page_group.reset();
+    };
 
     // Lock the memory.
     R_TRY(page_table.LockForTransferMemory(std::addressof(*m_page_group), addr, size,
@@ -109,7 +109,9 @@ struct KernelCore::Impl {
 
     void Shutdown() {
         is_shutting_down.store(true, std::memory_order_relaxed);
-        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
+        SCOPE_EXIT {
+            is_shutting_down.store(false, std::memory_order_relaxed);
+        };
 
         CloseServices();
 
@@ -1080,7 +1082,9 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
         process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
-    SCOPE_EXIT({ process->Close(); });
+    SCOPE_EXIT {
+        process->Close();
+    };
 
     // Register the new process.
     KProcess::Register(*this, process);
@@ -1108,7 +1112,9 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
         process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
-    SCOPE_EXIT({ process->Close(); });
+    SCOPE_EXIT {
+        process->Close();
+    };
 
     // Register the new process.
     KProcess::Register(*this, process);
@@ -45,7 +45,9 @@ Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t
 
     KCodeMemory* code_mem = KCodeMemory::Create(kernel);
     R_UNLESS(code_mem != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ code_mem->Close(); });
+    SCOPE_EXIT {
+        code_mem->Close();
+    };
 
     // Verify that the region is in range.
     R_UNLESS(GetCurrentProcess(system.Kernel()).GetPageTable().Contains(address, size),
@@ -28,7 +28,9 @@ Result CreateDeviceAddressSpace(Core::System& system, Handle* out, uint64_t das_
     // Create the device address space.
     KDeviceAddressSpace* das = KDeviceAddressSpace::Create(system.Kernel());
     R_UNLESS(das != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ das->Close(); });
+    SCOPE_EXIT {
+        das->Close();
+    };
 
     // Initialize the device address space.
     R_TRY(das->Initialize(das_address, das_size));
@@ -72,10 +72,10 @@ Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
     event_reservation.Commit();
 
     // Ensure that we clean up the event (and its only references are handle table) on function end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         event->GetReadableEvent().Close();
         event->Close();
-    });
+    };
 
     // Register the event.
     KEvent::Register(kernel, event);
@@ -129,11 +129,11 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
     }
 
     // Ensure handles are closed when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         for (auto i = 0; i < num_handles; ++i) {
             objs[i]->Close();
         }
-    });
+    };
 
     R_RETURN(ReplyAndReceiveImpl(kernel, out_index, message, buffer_size, message_paddr, objs,
                                  num_handles, reply_target, timeout_ns));
@@ -208,10 +208,10 @@ Result SendAsyncRequestWithUserBuffer(Core::System& system, Handle* out_event_ha
     event_reservation.Commit();
 
     // At end of scope, kill the standing references to the sub events.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         event->GetReadableEvent().Close();
         event->Close();
-    });
+    };
 
     // Register the event.
     KEvent::Register(system.Kernel(), event);
@@ -68,10 +68,10 @@ Result CreatePort(Core::System& system, Handle* out_server, Handle* out_client,
     port->Initialize(max_sessions, is_light, name);
 
     // Ensure that we clean up the port (and its only references are handle table) on function end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         port->GetServerPort().Close();
         port->GetClientPort().Close();
-    });
+    };
 
     // Register the port.
     KPort::Register(kernel, port);
@@ -150,10 +150,10 @@ Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t
     KPort::Register(system.Kernel(), port);
 
     // Ensure that our only reference to the port is in the handle table when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         port->GetClientPort().Close();
         port->GetServerPort().Close();
-    });
+    };
 
     // Register the handle in the table.
     R_TRY(handle_table.Add(out_server_handle, std::addressof(port->GetServerPort())));
@@ -18,7 +18,9 @@ Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
     R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
 
     // Ensure we don't leak a reference to the limit.
-    SCOPE_EXIT({ resource_limit->Close(); });
+    SCOPE_EXIT {
+        resource_limit->Close();
+    };
 
     // Initialize the resource limit.
     resource_limit->Initialize();
@@ -69,10 +69,10 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
 
     // Ensure that we clean up the session (and its only references are handle table) on function
     // end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         session->GetClientSession().Close();
         session->GetServerSession().Close();
-    });
+    };
 
     // Register the session.
     T::Register(system.Kernel(), session);
@@ -78,11 +78,11 @@ Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_ha
     }
 
     // Ensure handles are closed when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         for (auto i = 0; i < num_handles; ++i) {
             objs[i]->Close();
         }
-    });
+    };
 
     // Convert the timeout from nanoseconds to ticks.
     s64 timeout;
@@ -51,7 +51,9 @@ Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u
     // Create the thread.
     KThread* thread = KThread::Create(kernel);
     R_UNLESS(thread != nullptr, ResultOutOfResource)
-    SCOPE_EXIT({ thread->Close(); });
+    SCOPE_EXIT {
+        thread->Close();
+    };
 
     // Initialize the thread.
     {
@@ -52,7 +52,9 @@ Result CreateTransferMemory(Core::System& system, Handle* out, u64 address, u64
     R_UNLESS(trmem != nullptr, ResultOutOfResource);
 
     // Ensure the only reference is in the handle table when we're done.
-    SCOPE_EXIT({ trmem->Close(); });
+    SCOPE_EXIT {
+        trmem->Close();
+    };
 
     // Ensure that the region is in range.
     R_UNLESS(process.GetPageTable().Contains(address, size), ResultInvalidCurrentMemory);
@@ -24,11 +24,11 @@ void AppletStorageChannel::Push(std::shared_ptr<IStorage> storage) {
 Result AppletStorageChannel::Pop(std::shared_ptr<IStorage>* out_storage) {
     std::scoped_lock lk{m_lock};
 
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (m_data.empty()) {
             m_event.Clear();
         }
-    });
+    };
 
     R_UNLESS(!m_data.empty(), AM::ResultNoDataInChannel);
 
@@ -68,7 +68,9 @@ bool Process::Initialize(u64 program_id, u8 minimum_key_generation, u8 maximum_k
     Kernel::KProcess::Register(m_system.Kernel(), process);
 
     // On exit, ensure we free the additional reference to the process.
-    SCOPE_EXIT({ process->Close(); });
+    SCOPE_EXIT {
+        process->Close();
+    };
 
     // Insert process modules into memory.
     const auto [load_result, load_parameters] = app_loader->Load(*process, m_system);
@@ -142,16 +142,18 @@ Result StaticService::SetStandardSteadyClockInternalOffset(s64 offset_ns) {
 }
 
 Result StaticService::GetStandardSteadyClockRtcValue(Out<s64> out_rtc_value) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_rtc_value={}", *out_rtc_value); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_rtc_value={}", *out_rtc_value);
+    };
 
     R_RETURN(m_standard_steady_clock_resource.GetRtcTimeInSeconds(*out_rtc_value));
 }
 
 Result StaticService::IsStandardUserSystemClockAutomaticCorrectionEnabled(
     Out<bool> out_automatic_correction) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. out_automatic_correction={}", *out_automatic_correction);
-    });
+    };
 
     R_RETURN(m_wrapped_service->IsStandardUserSystemClockAutomaticCorrectionEnabled(
         out_automatic_correction));
@@ -166,21 +168,27 @@ Result StaticService::SetStandardUserSystemClockAutomaticCorrectionEnabled(
 }
 
 Result StaticService::GetStandardUserSystemClockInitialYear(Out<s32> out_year) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_year={}", *out_year); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_year={}", *out_year);
+    };
 
     R_RETURN(m_set_sys->GetSettingsItemValueImpl<s32>(*out_year, "time",
                                                       "standard_user_clock_initial_year"));
 }
 
 Result StaticService::IsStandardNetworkSystemClockAccuracySufficient(Out<bool> out_is_sufficient) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_is_sufficient={}", *out_is_sufficient); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_is_sufficient={}", *out_is_sufficient);
+    };
 
     R_RETURN(m_wrapped_service->IsStandardNetworkSystemClockAccuracySufficient(out_is_sufficient));
 }
 
 Result StaticService::GetStandardUserSystemClockAutomaticCorrectionUpdatedTime(
     Out<Service::PSC::Time::SteadyClockTimePoint> out_time_point) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_time_point={}", *out_time_point); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_time_point={}", *out_time_point);
+    };
 
     R_RETURN(m_wrapped_service->GetStandardUserSystemClockAutomaticCorrectionUpdatedTime(
         out_time_point));
@@ -188,15 +196,18 @@ Result StaticService::GetStandardUserSystemClockAutomaticCorrectionUpdatedTime(
 
 Result StaticService::CalculateMonotonicSystemClockBaseTimePoint(
     Out<s64> out_time, const Service::PSC::Time::SystemClockContext& context) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. context={} out_time={}", context, *out_time); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. context={} out_time={}", context, *out_time);
+    };
 
     R_RETURN(m_wrapped_service->CalculateMonotonicSystemClockBaseTimePoint(out_time, context));
 }
 
 Result StaticService::GetClockSnapshot(OutClockSnapshot out_snapshot,
                                        Service::PSC::Time::TimeType type) {
-    SCOPE_EXIT(
-        { LOG_DEBUG(Service_Time, "called. type={} out_snapshot={}", type, *out_snapshot); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. type={} out_snapshot={}", type, *out_snapshot);
+    };
 
     R_RETURN(m_wrapped_service->GetClockSnapshot(out_snapshot, type));
 }
@@ -205,11 +216,11 @@ Result StaticService::GetClockSnapshotFromSystemClockContext(
     Service::PSC::Time::TimeType type, OutClockSnapshot out_snapshot,
     const Service::PSC::Time::SystemClockContext& user_context,
     const Service::PSC::Time::SystemClockContext& network_context) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time,
                   "called. type={} out_snapshot={} user_context={} network_context={}", type,
                   *out_snapshot, user_context, network_context);
-    });
+    };
 
     R_RETURN(m_wrapped_service->GetClockSnapshotFromSystemClockContext(
         type, out_snapshot, user_context, network_context));
@@ -218,14 +229,18 @@ Result StaticService::GetClockSnapshotFromSystemClockContext(
 Result StaticService::CalculateStandardUserSystemClockDifferenceByUser(Out<s64> out_time,
                                                                        InClockSnapshot a,
                                                                        InClockSnapshot b) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. a={} b={} out_time={}", *a, *b, *out_time); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. a={} b={} out_time={}", *a, *b, *out_time);
+    };
 
     R_RETURN(m_wrapped_service->CalculateStandardUserSystemClockDifferenceByUser(out_time, a, b));
 }
 
 Result StaticService::CalculateSpanBetween(Out<s64> out_time, InClockSnapshot a,
                                            InClockSnapshot b) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. a={} b={} out_time={}", *a, *b, *out_time); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. a={} b={} out_time={}", *a, *b, *out_time);
+    };
 
     R_RETURN(m_wrapped_service->CalculateSpanBetween(out_time, a, b));
 }
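
All of the time-service hunks above and below follow the same shape: the guard logs the out-parameter at scope exit, i.e. after the wrapped call has filled it in, and it also fires on any early error return. Taken directly from the first StaticService hunk:

    Result StaticService::GetStandardSteadyClockRtcValue(Out<s64> out_rtc_value) {
        SCOPE_EXIT {
            // Runs at scope exit, after GetRtcTimeInSeconds() has written *out_rtc_value.
            LOG_DEBUG(Service_Time, "called. out_rtc_value={}", *out_rtc_value);
        };
        R_RETURN(m_standard_steady_clock_resource.GetRtcTimeInSeconds(*out_rtc_value));
    }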
@@ -57,7 +57,9 @@ TimeZoneService::~TimeZoneService() = default;

 Result TimeZoneService::GetDeviceLocationName(
     Out<Service::PSC::Time::LocationName> out_location_name) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_location_name={}", *out_location_name); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_location_name={}", *out_location_name);
+    };

     R_RETURN(m_wrapped_service->GetDeviceLocationName(out_location_name));
 }

@@ -94,7 +96,9 @@ Result TimeZoneService::SetDeviceLocationName(
 }

 Result TimeZoneService::GetTotalLocationNameCount(Out<u32> out_count) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_count={}", *out_count); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_count={}", *out_count);
+    };

     R_RETURN(m_wrapped_service->GetTotalLocationNameCount(out_count));
 }

@@ -102,10 +106,10 @@ Result TimeZoneService::GetTotalLocationNameCount(Out<u32> out_count) {
 Result TimeZoneService::LoadLocationNameList(
     Out<u32> out_count,
     OutArray<Service::PSC::Time::LocationName, BufferAttr_HipcMapAlias> out_names, u32 index) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. index={} out_count={} out_names[0]={} out_names[1]={}",
             index, *out_count, out_names[0], out_names[1]);
-    });
+    };

     std::scoped_lock l{m_mutex};
     R_RETURN(GetTimeZoneLocationList(*out_count, out_names, out_names.size(), index));

@@ -124,7 +128,9 @@ Result TimeZoneService::LoadTimeZoneRule(OutRule out_rule,

 Result TimeZoneService::GetTimeZoneRuleVersion(
     Out<Service::PSC::Time::RuleVersion> out_rule_version) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_rule_version={}", *out_rule_version); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_rule_version={}", *out_rule_version);
+    };

     R_RETURN(m_wrapped_service->GetTimeZoneRuleVersion(out_rule_version));
 }

@@ -132,10 +138,10 @@ Result TimeZoneService::GetTimeZoneRuleVersion(
 Result TimeZoneService::GetDeviceLocationNameAndUpdatedTime(
     Out<Service::PSC::Time::LocationName> location_name,
     Out<Service::PSC::Time::SteadyClockTimePoint> out_time_point) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. location_name={} out_time_point={}", *location_name,
             *out_time_point);
-    });
+    };

     R_RETURN(m_wrapped_service->GetDeviceLocationNameAndUpdatedTime(location_name, out_time_point));
 }

@@ -178,10 +184,10 @@ Result TimeZoneService::GetDeviceLocationNameOperationEventReadableHandle(
 Result TimeZoneService::ToCalendarTime(
     Out<Service::PSC::Time::CalendarTime> out_calendar_time,
     Out<Service::PSC::Time::CalendarAdditionalInfo> out_additional_info, s64 time, InRule rule) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. time={} out_calendar_time={} out_additional_info={}", time,
             *out_calendar_time, *out_additional_info);
-    });
+    };

     R_RETURN(m_wrapped_service->ToCalendarTime(out_calendar_time, out_additional_info, time, rule));
 }

@@ -189,10 +195,10 @@ Result TimeZoneService::ToCalendarTime(
 Result TimeZoneService::ToCalendarTimeWithMyRule(
     Out<Service::PSC::Time::CalendarTime> out_calendar_time,
     Out<Service::PSC::Time::CalendarAdditionalInfo> out_additional_info, s64 time) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. time={} out_calendar_time={} out_additional_info={}", time,
             *out_calendar_time, *out_additional_info);
-    });
+    };

     R_RETURN(
         m_wrapped_service->ToCalendarTimeWithMyRule(out_calendar_time, out_additional_info, time));

@@ -202,11 +208,11 @@ Result TimeZoneService::ToPosixTime(Out<u32> out_count,
     OutArray<s64, BufferAttr_HipcPointer> out_times,
     const Service::PSC::Time::CalendarTime& calendar_time,
     InRule rule) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time,
             "called. calendar_time={} out_count={} out_times[0]={} out_times[1]={}",
             calendar_time, *out_count, out_times[0], out_times[1]);
-    });
+    };

     R_RETURN(m_wrapped_service->ToPosixTime(out_count, out_times, calendar_time, rule));
 }

@@ -214,11 +220,11 @@ Result TimeZoneService::ToPosixTime(Out<u32> out_count,
 Result TimeZoneService::ToPosixTimeWithMyRule(
     Out<u32> out_count, OutArray<s64, BufferAttr_HipcPointer> out_times,
     const Service::PSC::Time::CalendarTime& calendar_time) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time,
             "called. calendar_time={} out_count={} out_times[0]={} out_times[1]={}",
             calendar_time, *out_count, out_times[0], out_times[1]);
-    });
+    };

     R_RETURN(m_wrapped_service->ToPosixTimeWithMyRule(out_count, out_times, calendar_time));
 }
@@ -92,11 +92,11 @@ NvResult nvhost_ctrl::IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_a

     bool must_unmark_fail = !is_allocation;
     const u32 event_id = params.value.raw;
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (must_unmark_fail) {
             events[event_id].fails = 0;
         }
-    });
+    };

     const u32 fence_id = static_cast<u32>(params.fence.id);


@@ -154,10 +154,10 @@ void NVDRV::Close(HLERequestContext& ctx) {
 void NVDRV::Initialize(HLERequestContext& ctx) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");
     IPC::ResponseBuilder rb{ctx, 3};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         rb.Push(ResultSuccess);
         rb.PushEnum(NvResult::Success);
-    });
+    };

     if (is_initialized) {
         // No need to initialize again
@@ -144,7 +144,9 @@ Result StaticService::GetStandardSteadyClockRtcValue(Out<s64> out_rtc_value) {

 Result StaticService::IsStandardUserSystemClockAutomaticCorrectionEnabled(
     Out<bool> out_is_enabled) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_is_enabled={}", *out_is_enabled); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_is_enabled={}", *out_is_enabled);
+    };

     R_UNLESS(m_user_system_clock.IsInitialized(), ResultClockUninitialized);


@@ -180,7 +182,9 @@ Result StaticService::GetStandardUserSystemClockInitialYear(Out<s32> out_year) {
 }

 Result StaticService::IsStandardNetworkSystemClockAccuracySufficient(Out<bool> out_is_sufficient) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_is_sufficient={}", *out_is_sufficient); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_is_sufficient={}", *out_is_sufficient);
+    };

     *out_is_sufficient = m_network_system_clock.IsAccuracySufficient();


@@ -189,7 +193,9 @@ Result StaticService::IsStandardNetworkSystemClockAccuracySufficient(Out<bool> o

 Result StaticService::GetStandardUserSystemClockAutomaticCorrectionUpdatedTime(
     Out<SteadyClockTimePoint> out_time_point) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_time_point={}", *out_time_point); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_time_point={}", *out_time_point);
+    };

     R_UNLESS(m_user_system_clock.IsInitialized(), ResultClockUninitialized);


@@ -200,7 +206,9 @@ Result StaticService::GetStandardUserSystemClockAutomaticCorrectionUpdatedTime(

 Result StaticService::CalculateMonotonicSystemClockBaseTimePoint(
     Out<s64> out_time, const SystemClockContext& context) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. context={} out_time={}", context, *out_time); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. context={} out_time={}", context, *out_time);
+    };

     R_UNLESS(m_time->m_standard_steady_clock.IsInitialized(), ResultClockUninitialized);


@@ -219,8 +227,9 @@ Result StaticService::CalculateMonotonicSystemClockBaseTimePoint(
 }

 Result StaticService::GetClockSnapshot(OutClockSnapshot out_snapshot, TimeType type) {
-    SCOPE_EXIT(
-        { LOG_DEBUG(Service_Time, "called. type={} out_snapshot={}", type, *out_snapshot); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. type={} out_snapshot={}", type, *out_snapshot);
+    };

     SystemClockContext user_context{};
     R_TRY(m_user_system_clock.GetContext(user_context));

@@ -234,11 +243,11 @@ Result StaticService::GetClockSnapshot(OutClockSnapshot out_snapshot, TimeType t
 Result StaticService::GetClockSnapshotFromSystemClockContext(
     TimeType type, OutClockSnapshot out_snapshot, const SystemClockContext& user_context,
     const SystemClockContext& network_context) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time,
             "called. type={} user_context={} network_context={} out_snapshot={}", type,
             user_context, network_context, *out_snapshot);
-    });
+    };

     R_RETURN(GetClockSnapshotImpl(out_snapshot, user_context, network_context, type));
 }

@@ -246,9 +255,9 @@ Result StaticService::GetClockSnapshotFromSystemClockContext(
 Result StaticService::CalculateStandardUserSystemClockDifferenceByUser(Out<s64> out_difference,
     InClockSnapshot a,
     InClockSnapshot b) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. a={} b={} out_difference={}", *a, *b, *out_difference);
-    });
+    };

     auto diff_s =
         std::chrono::seconds(b->user_context.offset) - std::chrono::seconds(a->user_context.offset);

@@ -276,7 +285,9 @@ Result StaticService::CalculateStandardUserSystemClockDifferenceByUser(Out<s64>

 Result StaticService::CalculateSpanBetween(Out<s64> out_time, InClockSnapshot a,
     InClockSnapshot b) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. a={} b={} out_time={}", *a, *b, *out_time); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. a={} b={} out_time={}", *a, *b, *out_time);
+    };

     s64 time_s{};
     auto res =
@@ -29,7 +29,9 @@ SteadyClock::SteadyClock(Core::System& system_, std::shared_ptr<TimeManager> man
 }

 Result SteadyClock::GetCurrentTimePoint(Out<SteadyClockTimePoint> out_time_point) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_time_point={}", *out_time_point); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_time_point={}", *out_time_point);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);

@@ -38,7 +40,9 @@ Result SteadyClock::GetCurrentTimePoint(Out<SteadyClockTimePoint> out_time_point
 }

 Result SteadyClock::GetTestOffset(Out<s64> out_test_offset) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_test_offset={}", *out_test_offset); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_test_offset={}", *out_test_offset);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);

@@ -59,7 +63,9 @@ Result SteadyClock::SetTestOffset(s64 test_offset) {
 }

 Result SteadyClock::GetRtcValue(Out<s64> out_rtc_value) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_rtc_value={}", *out_rtc_value); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_rtc_value={}", *out_rtc_value);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);

@@ -68,7 +74,9 @@ Result SteadyClock::GetRtcValue(Out<s64> out_rtc_value) {
 }

 Result SteadyClock::IsRtcResetDetected(Out<bool> out_is_detected) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_is_detected={}", *out_is_detected); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_is_detected={}", *out_is_detected);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);

@@ -78,7 +86,9 @@ Result SteadyClock::IsRtcResetDetected(Out<bool> out_is_detected) {
 }

 Result SteadyClock::GetSetupResultValue(Out<Result> out_result) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_result=0x{:X}", out_result->raw); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_result=0x{:X}", out_result->raw);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);

@@ -88,8 +98,9 @@ Result SteadyClock::GetSetupResultValue(Out<Result> out_result) {
 }

 Result SteadyClock::GetInternalOffset(Out<s64> out_internal_offset) {
-    SCOPE_EXIT(
-        { LOG_DEBUG(Service_Time, "called. out_internal_offset={}", *out_internal_offset); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_internal_offset={}", *out_internal_offset);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);

@@ -26,7 +26,9 @@ SystemClock::SystemClock(Core::System& system_, SystemClockCore& clock_core, boo
 }

 Result SystemClock::GetCurrentTime(Out<s64> out_time) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_time={}", *out_time); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_time={}", *out_time);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);

@@ -45,7 +47,9 @@ Result SystemClock::SetCurrentTime(s64 time) {
 }

 Result SystemClock::GetSystemClockContext(Out<SystemClockContext> out_context) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_context={}", *out_context); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_context={}", *out_context);
+    };

     R_UNLESS(m_can_write_uninitialized_clock || m_clock_core.IsInitialized(),
         ResultClockUninitialized);
@@ -37,7 +37,9 @@ TimeZoneService::TimeZoneService(Core::System& system_, StandardSteadyClockCore&
 }

 Result TimeZoneService::GetDeviceLocationName(Out<LocationName> out_location_name) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_location_name={}", *out_location_name); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_location_name={}", *out_location_name);
+    };

     R_RETURN(m_time_zone.GetLocationName(*out_location_name));
 }

@@ -50,7 +52,9 @@ Result TimeZoneService::SetDeviceLocationName(const LocationName& location_name)
 }

 Result TimeZoneService::GetTotalLocationNameCount(Out<u32> out_count) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_count={}", *out_count); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_count={}", *out_count);
+    };

     R_RETURN(m_time_zone.GetTotalLocationCount(*out_count));
 }

@@ -69,17 +73,19 @@ Result TimeZoneService::LoadTimeZoneRule(OutRule out_rule, const LocationName& l
 }

 Result TimeZoneService::GetTimeZoneRuleVersion(Out<RuleVersion> out_rule_version) {
-    SCOPE_EXIT({ LOG_DEBUG(Service_Time, "called. out_rule_version={}", *out_rule_version); });
+    SCOPE_EXIT {
+        LOG_DEBUG(Service_Time, "called. out_rule_version={}", *out_rule_version);
+    };

     R_RETURN(m_time_zone.GetRuleVersion(*out_rule_version));
 }

 Result TimeZoneService::GetDeviceLocationNameAndUpdatedTime(
     Out<LocationName> out_location_name, Out<SteadyClockTimePoint> out_time_point) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. out_location_name={} out_time_point={}",
             *out_location_name, *out_time_point);
-    });
+    };

     R_TRY(m_time_zone.GetLocationName(*out_location_name));
     R_RETURN(m_time_zone.GetTimePoint(*out_time_point));

@@ -116,10 +122,10 @@ Result TimeZoneService::GetDeviceLocationNameOperationEventReadableHandle(
 Result TimeZoneService::ToCalendarTime(Out<CalendarTime> out_calendar_time,
     Out<CalendarAdditionalInfo> out_additional_info, s64 time,
     InRule rule) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. time={} out_calendar_time={} out_additional_info={}", time,
             *out_calendar_time, *out_additional_info);
-    });
+    };

     R_RETURN(
         m_time_zone.ToCalendarTime(*out_calendar_time, *out_additional_info, time, *rule.Get()));

@@ -128,10 +134,10 @@ Result TimeZoneService::ToCalendarTime(Out<CalendarTime> out_calendar_time,
 Result TimeZoneService::ToCalendarTimeWithMyRule(Out<CalendarTime> out_calendar_time,
     Out<CalendarAdditionalInfo> out_additional_info,
     s64 time) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time, "called. time={} out_calendar_time={} out_additional_info={}", time,
             *out_calendar_time, *out_additional_info);
-    });
+    };

     R_RETURN(m_time_zone.ToCalendarTimeWithMyRule(*out_calendar_time, *out_additional_info, time));
 }

@@ -139,11 +145,11 @@ Result TimeZoneService::ToCalendarTimeWithMyRule(Out<CalendarTime> out_calendar_
 Result TimeZoneService::ToPosixTime(Out<u32> out_count,
     OutArray<s64, BufferAttr_HipcPointer> out_times,
     const CalendarTime& calendar_time, InRule rule) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time,
             "called. calendar_time={} out_count={} out_times[0]={} out_times[1]={} ",
             calendar_time, *out_count, out_times[0], out_times[1]);
-    });
+    };

     R_RETURN(
         m_time_zone.ToPosixTime(*out_count, out_times, out_times.size(), calendar_time, *rule));

@@ -152,11 +158,11 @@ Result TimeZoneService::ToPosixTime(Out<u32> out_count,
 Result TimeZoneService::ToPosixTimeWithMyRule(Out<u32> out_count,
     OutArray<s64, BufferAttr_HipcPointer> out_times,
     const CalendarTime& calendar_time) {
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         LOG_DEBUG(Service_Time,
             "called. calendar_time={} out_count={} out_times[0]={} out_times[1]={} ",
             calendar_time, *out_count, out_times[0], out_times[1]);
-    });
+    };

     R_RETURN(
         m_time_zone.ToPosixTimeWithMyRule(*out_count, out_times, out_times.size(), calendar_time));
@@ -177,10 +177,10 @@ Result ServerManager::ManageNamedPort(const std::string& service_name,
     Kernel::KPort::Register(m_system.Kernel(), port);

     // Ensure that our reference to the port is closed if we fail to register it.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         port->GetClientPort().Close();
         port->GetServerPort().Close();
-    });
+    };

     // Register the object name with the kernel.
     R_TRY(Kernel::KObjectName::NewFromName(m_system.Kernel(), std::addressof(port->GetClientPort()),

@@ -237,7 +237,9 @@ void ServerManager::StartAdditionalHostThreads(const char* name, size_t num_thre
 }

 Result ServerManager::LoopProcess() {
-    SCOPE_EXIT({ m_stopped.Set(); });
+    SCOPE_EXIT {
+        m_stopped.Set();
+    };

     R_RETURN(this->LoopProcessImpl());
 }

@@ -118,7 +118,9 @@ ResultStatus AppLoader_NCA::VerifyIntegrity(std::function<bool(size_t, size_t)>
     mbedtls_sha256_starts_ret(&ctx, 0);

     // Ensure we maintain a clean state on exit.
-    SCOPE_EXIT({ mbedtls_sha256_free(&ctx); });
+    SCOPE_EXIT {
+        mbedtls_sha256_free(&ctx);
+    };

     // Declare counters.
     const size_t total_size = file->GetSize();
@@ -831,11 +831,11 @@ struct Memory::Impl {
         if (core == sys_core) [[unlikely]] {
             sys_core_guard.lock();
         }
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (core == sys_core) [[unlikely]] {
                 sys_core_guard.unlock();
             }
-        });
+        };
         gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
             auto& current_area = rasterizer_write_areas[core];
             PAddr subaddress = address >> YUZU_PAGEBITS;

@@ -866,11 +866,11 @@ struct Memory::Impl {
         if (core == sys_core) [[unlikely]] {
             sys_core_guard.lock();
         }
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (core == sys_core) [[unlikely]] {
                 sys_core_guard.unlock();
             }
-        });
+        };
         auto& gpu = system.GPU();
         gpu_device_memory->ApplyOpOnPointer(
             p, scratch_buffers[core], [&](DAddr address) { gpu.InvalidateRegion(address, size); });

@@ -224,12 +224,12 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
     // If we've ever seen a decode failure, return false.
     bool valid = decode_success;
     CheatVmOpcode opcode = {};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         decode_success &= valid;
         if (valid) {
             out = opcode;
         }
-    });
+    };

     // Helper function for getting instruction dwords.
     const auto GetNextDword = [&] {
@@ -933,8 +933,9 @@ void EmulatedController::SetStick(const Common::Input::CallbackStatus& callback,
     if (index >= controller.stick_values.size()) {
         return;
     }
-    auto trigger_guard =
-        SCOPE_GUARD({ TriggerOnChange(ControllerTriggerType::Stick, !is_configuring); });
+    auto trigger_guard = SCOPE_GUARD {
+        TriggerOnChange(ControllerTriggerType::Stick, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     const auto stick_value = TransformToStick(callback);


@@ -989,8 +990,9 @@ void EmulatedController::SetTrigger(const Common::Input::CallbackStatus& callbac
     if (index >= controller.trigger_values.size()) {
         return;
     }
-    auto trigger_guard =
-        SCOPE_GUARD({ TriggerOnChange(ControllerTriggerType::Trigger, !is_configuring); });
+    auto trigger_guard = SCOPE_GUARD {
+        TriggerOnChange(ControllerTriggerType::Trigger, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     const auto trigger_value = TransformToTrigger(callback);


@@ -1036,7 +1038,9 @@ void EmulatedController::SetMotion(const Common::Input::CallbackStatus& callback
     if (index >= controller.motion_values.size()) {
        return;
     }
-    SCOPE_EXIT({ TriggerOnChange(ControllerTriggerType::Motion, !is_configuring); });
+    SCOPE_EXIT {
+        TriggerOnChange(ControllerTriggerType::Motion, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     auto& raw_status = controller.motion_values[index].raw_status;
     auto& emulated = controller.motion_values[index].emulated;

@@ -1070,8 +1074,9 @@ void EmulatedController::SetColors(const Common::Input::CallbackStatus& callback
     if (index >= controller.color_values.size()) {
         return;
     }
-    auto trigger_guard =
-        SCOPE_GUARD({ TriggerOnChange(ControllerTriggerType::Color, !is_configuring); });
+    auto trigger_guard = SCOPE_GUARD {
+        TriggerOnChange(ControllerTriggerType::Color, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     controller.color_values[index] = TransformToColor(callback);


@@ -1120,7 +1125,9 @@ void EmulatedController::SetBattery(const Common::Input::CallbackStatus& callbac
     if (index >= controller.battery_values.size()) {
         return;
     }
-    SCOPE_EXIT({ TriggerOnChange(ControllerTriggerType::Battery, !is_configuring); });
+    SCOPE_EXIT {
+        TriggerOnChange(ControllerTriggerType::Battery, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     controller.battery_values[index] = TransformToBattery(callback);


@@ -1183,7 +1190,9 @@ void EmulatedController::SetBattery(const Common::Input::CallbackStatus& callbac
 }

 void EmulatedController::SetCamera(const Common::Input::CallbackStatus& callback) {
-    SCOPE_EXIT({ TriggerOnChange(ControllerTriggerType::IrSensor, !is_configuring); });
+    SCOPE_EXIT {
+        TriggerOnChange(ControllerTriggerType::IrSensor, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     controller.camera_values = TransformToCamera(callback);


@@ -1198,7 +1207,9 @@ void EmulatedController::SetCamera(const Common::Input::CallbackStatus& callback
 }

 void EmulatedController::SetRingAnalog(const Common::Input::CallbackStatus& callback) {
-    SCOPE_EXIT({ TriggerOnChange(ControllerTriggerType::RingController, !is_configuring); });
+    SCOPE_EXIT {
+        TriggerOnChange(ControllerTriggerType::RingController, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     const auto force_value = TransformToStick(callback);


@@ -1212,7 +1223,9 @@ void EmulatedController::SetRingAnalog(const Common::Input::CallbackStatus& call
 }

 void EmulatedController::SetNfc(const Common::Input::CallbackStatus& callback) {
-    SCOPE_EXIT({ TriggerOnChange(ControllerTriggerType::Nfc, !is_configuring); });
+    SCOPE_EXIT {
+        TriggerOnChange(ControllerTriggerType::Nfc, !is_configuring);
+    };
     std::scoped_lock lock{mutex};
     controller.nfc_values = TransformToNfc(callback);


@@ -1685,8 +1698,9 @@ void EmulatedController::Connect(bool use_temporary_value) {
         return;
     }

-    auto trigger_guard =
-        SCOPE_GUARD({ TriggerOnChange(ControllerTriggerType::Connected, !is_configuring); });
+    auto trigger_guard = SCOPE_GUARD {
+        TriggerOnChange(ControllerTriggerType::Connected, !is_configuring);
+    };
     std::scoped_lock lock{connect_mutex, mutex};
     if (is_configuring) {
         tmp_is_connected = true;

@@ -1701,8 +1715,9 @@ void EmulatedController::Connect(bool use_temporary_value) {
 }

 void EmulatedController::Disconnect() {
-    auto trigger_guard =
-        SCOPE_GUARD({ TriggerOnChange(ControllerTriggerType::Disconnected, !is_configuring); });
+    auto trigger_guard = SCOPE_GUARD {
+        TriggerOnChange(ControllerTriggerType::Disconnected, !is_configuring);
+    };
     std::scoped_lock lock{connect_mutex, mutex};
     if (is_configuring) {
         tmp_is_connected = false;

@@ -1738,8 +1753,9 @@ NpadStyleIndex EmulatedController::GetNpadStyleIndex(bool get_temporary_value) c
 }

 void EmulatedController::SetNpadStyleIndex(NpadStyleIndex npad_type_) {
-    auto trigger_guard =
-        SCOPE_GUARD({ TriggerOnChange(ControllerTriggerType::Type, !is_configuring); });
+    auto trigger_guard = SCOPE_GUARD {
+        TriggerOnChange(ControllerTriggerType::Type, !is_configuring);
+    };
     std::scoped_lock lock{mutex, npad_mutex};

     if (is_configuring) {
@@ -268,7 +268,9 @@ void JoyconDriver::OnNewData(std::span<u8> buffer) {
 }

 Common::Input::DriverResult JoyconDriver::SetPollingMode() {
-    SCOPE_EXIT({ disable_input_thread = false; });
+    SCOPE_EXIT {
+        disable_input_thread = false;
+    };
     disable_input_thread = true;

     rumble_protocol->EnableRumble(vibration_enabled && supported_features.vibration);

@@ -291,7 +291,9 @@ u32 Maxwell3D::ProcessShadowRam(u32 method, u32 argument) {
 }

 void Maxwell3D::ConsumeSinkImpl() {
-    SCOPE_EXIT({ method_sink.clear(); });
+    SCOPE_EXIT {
+        method_sink.clear();
+    };
     const auto control = shadow_state.shadow_ram_control;
     if (control == Regs::ShadowRamControl::Track ||
         control == Regs::ShadowRamControl::TrackWithFilter) {

@@ -197,7 +197,9 @@ private:
         MicroProfileOnThreadCreate(name.c_str());

         // Cleanup
-        SCOPE_EXIT({ MicroProfileOnThreadExit(); });
+        SCOPE_EXIT {
+            MicroProfileOnThreadExit();
+        };

         Common::SetCurrentThreadName(name.c_str());
         Common::SetCurrentThreadPriority(Common::ThreadPriority::High);

@@ -22,7 +22,9 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
     Tegra::Control::Scheduler& scheduler, SynchState& state) {
     std::string name = "GPU";
     MicroProfileOnThreadCreate(name.c_str());
-    SCOPE_EXIT({ MicroProfileOnThreadExit(); });
+    SCOPE_EXIT {
+        MicroProfileOnThreadExit();
+    };

     Common::SetCurrentThreadName(name.c_str());
     Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);

@@ -273,10 +273,10 @@ DeinterlaceFilter::DeinterlaceFilter(const Frame& frame) {
     const AVFilter* buffer_sink = avfilter_get_by_name("buffersink");
     AVFilterInOut* inputs = avfilter_inout_alloc();
     AVFilterInOut* outputs = avfilter_inout_alloc();
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         avfilter_inout_free(&inputs);
         avfilter_inout_free(&outputs);
-    });
+    };

     // Don't know how to get the accurate time_base but it doesn't matter for yadif filter
     // so just use 1/1 to make buffer filter happy

@@ -92,12 +92,12 @@ public:

 private:
     void Fallback(const std::vector<u32>& parameters) {
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (extended) {
                 maxwell3d.engine_state = Maxwell3D::EngineHint::None;
                 maxwell3d.replace_table.clear();
             }
-        });
+        };
         maxwell3d.RefreshParameters();
         const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);


@@ -281,12 +281,12 @@ public:

 private:
     void Fallback(const std::vector<u32>& parameters) {
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             // Clean everything.
             maxwell3d.regs.vertex_id_base = 0x0;
             maxwell3d.engine_state = Maxwell3D::EngineHint::None;
             maxwell3d.replace_table.clear();
-        });
+        };
         maxwell3d.RefreshParameters();
         const u32 start_indirect = parameters[0];
         const u32 end_indirect = parameters[1];
@@ -230,7 +230,9 @@ template <typename Func>
 void RasterizerOpenGL::PrepareDraw(bool is_indexed, Func&& draw_func) {
     MICROPROFILE_SCOPE(OpenGL_Drawing);

-    SCOPE_EXIT({ gpu.TickWork(); });
+    SCOPE_EXIT {
+        gpu.TickWork();
+    };
     gpu_memory->FlushCaching();

     GraphicsPipeline* const pipeline{shader_cache.CurrentGraphicsPipeline()};

@@ -355,7 +357,9 @@ void RasterizerOpenGL::DrawIndirect() {
 void RasterizerOpenGL::DrawTexture() {
     MICROPROFILE_SCOPE(OpenGL_Drawing);

-    SCOPE_EXIT({ gpu.TickWork(); });
+    SCOPE_EXIT {
+        gpu.TickWork();
+    };

     texture_cache.SynchronizeGraphicsDescriptors();
     texture_cache.UpdateRenderTargets(false);

@@ -82,7 +82,9 @@ void Layer::ConfigureDraw(PresentPushConstants* out_push_constants,
     // Finish any pending renderpass
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Wait(resource_ticks[image_index]);
-    SCOPE_EXIT({ resource_ticks[image_index] = scheduler.CurrentTick(); });
+    SCOPE_EXIT {
+        resource_ticks[image_index] = scheduler.CurrentTick();
+    };

     if (!use_accelerated) {
         UpdateRawImage(framebuffer, image_index);

@@ -144,7 +144,9 @@ void RendererVulkan::Composite(std::span<const Tegra::FramebufferConfig> framebu
         return;
     }

-    SCOPE_EXIT({ render_window.OnFrameDisplayed(); });
+    SCOPE_EXIT {
+        render_window.OnFrameDisplayed();
+    };

     RenderAppletCaptureLayer(framebuffers);


@@ -196,7 +196,9 @@ template <typename Func>
 void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
     MICROPROFILE_SCOPE(Vulkan_Drawing);

-    SCOPE_EXIT({ gpu.TickWork(); });
+    SCOPE_EXIT {
+        gpu.TickWork();
+    };
     FlushWork();
     gpu_memory->FlushCaching();


@@ -288,7 +290,9 @@ void RasterizerVulkan::DrawIndirect() {
 void RasterizerVulkan::DrawTexture() {
     MICROPROFILE_SCOPE(Vulkan_Drawing);

-    SCOPE_EXIT({ gpu.TickWork(); });
+    SCOPE_EXIT {
+        gpu.TickWork();
+    };
     FlushWork();

     query_cache.NotifySegment(true);

@@ -116,7 +116,9 @@ void NsightAftermathTracker::OnGpuCrashDumpCallback(const void* gpu_crash_dump,
         LOG_ERROR(Render_Vulkan, "Failed to create decoder");
         return;
     }
-    SCOPE_EXIT({ GFSDK_Aftermath_GpuCrashDump_DestroyDecoder(decoder); });
+    SCOPE_EXIT {
+        GFSDK_Aftermath_GpuCrashDump_DestroyDecoder(decoder);
+    };

     u32 json_size = 0;
     if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_GenerateJSON(
@@ -646,10 +646,10 @@ void GMainWindow::AmiiboSettingsShowDialog(const Core::Frontend::CabinetParamete
     std::shared_ptr<Service::NFC::NfcDevice> nfp_device) {
     cabinet_applet =
         new QtAmiiboSettingsDialog(this, parameters, input_subsystem.get(), nfp_device);
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         cabinet_applet->deleteLater();
         cabinet_applet = nullptr;
-    });
+    };

     cabinet_applet->setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint | Qt::WindowStaysOnTopHint |
         Qt::WindowTitleHint | Qt::WindowSystemMenuHint);

@@ -673,10 +673,10 @@ void GMainWindow::ControllerSelectorReconfigureControllers(
     const Core::Frontend::ControllerParameters& parameters) {
     controller_applet =
         new QtControllerSelectorDialog(this, parameters, input_subsystem.get(), *system);
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         controller_applet->deleteLater();
         controller_applet = nullptr;
-    });
+    };

     controller_applet->setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint |
         Qt::WindowStaysOnTopHint | Qt::WindowTitleHint |

@@ -703,10 +703,10 @@ void GMainWindow::ControllerSelectorRequestExit() {
 void GMainWindow::ProfileSelectorSelectProfile(
     const Core::Frontend::ProfileSelectParameters& parameters) {
     profile_select_applet = new QtProfileSelectionDialog(*system, this, parameters);
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         profile_select_applet->deleteLater();
         profile_select_applet = nullptr;
-    });
+    };

     profile_select_applet->setWindowFlags(Qt::Dialog | Qt::CustomizeWindowHint |
         Qt::WindowStaysOnTopHint | Qt::WindowTitleHint |

@@ -2885,17 +2885,19 @@ bool GMainWindow::CreateShortcutLink(const std::filesystem::path& shortcut_path,
         LOG_ERROR(Frontend, "CoInitialize failed");
         return false;
     }
-    SCOPE_EXIT({ CoUninitialize(); });
+    SCOPE_EXIT {
+        CoUninitialize();
+    };
     IShellLinkW* ps1 = nullptr;
     IPersistFile* persist_file = nullptr;
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (persist_file != nullptr) {
             persist_file->Release();
         }
         if (ps1 != nullptr) {
             ps1->Release();
         }
-    });
+    };
     HRESULT hres = CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_INPROC_SERVER, IID_IShellLinkW,
         reinterpret_cast<void**>(&ps1));
     if (FAILED(hres)) {

@@ -3520,10 +3522,10 @@ void GMainWindow::OnSaveConfig() {
 void GMainWindow::ErrorDisplayDisplayError(QString error_code, QString error_text) {
     error_applet = new OverlayDialog(render_window, *system, error_code, error_text, QString{},
         tr("OK"), Qt::AlignLeft | Qt::AlignVCenter);
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         error_applet->deleteLater();
         error_applet = nullptr;
-    });
+    };
     error_applet->exec();

     emit ErrorDisplayFinished();

@@ -5192,7 +5194,9 @@ int main(int argc, char* argv[]) {

     Common::DetachedTasks detached_tasks;
     MicroProfileOnThreadCreate("Frontend");
-    SCOPE_EXIT({ MicroProfileShutdown(); });
+    SCOPE_EXIT {
+        MicroProfileShutdown();
+    };

     Common::ConfigureNvidiaEnvironmentFlags();


@@ -327,7 +327,9 @@ int main(int argc, char** argv) {
 #endif

     MicroProfileOnThreadCreate("EmuThread");
-    SCOPE_EXIT({ MicroProfileShutdown(); });
+    SCOPE_EXIT {
+        MicroProfileShutdown();
+    };

     Common::ConfigureNvidiaEnvironmentFlags();

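Note on the pattern shown throughout this diff: every call site changes from `SCOPE_EXIT({ ... });` to `SCOPE_EXIT { ... };`, and `SCOPE_GUARD { ... };` now yields a guard object the caller can store, e.g. `auto trigger_guard = SCOPE_GUARD { ... };`. The header that defines these macros is not part of this excerpt, so the following is only a minimal sketch, assuming a C++20 tag-plus-lambda implementation with a constexpr destructor; all names here (`sketch::ScopeGuard`, `ScopeGuardTag`, the `SKETCH_*` helper macros) are illustrative, not the project's actual identifiers.

// scope_exit_sketch.h -- illustrative only, not the repository's actual scope_exit header.
#pragma once

#include <utility>

namespace sketch {

template <typename F>
class ScopeGuard {
public:
    constexpr explicit ScopeGuard(F&& func) : m_func{std::forward<F>(func)} {}

    // A constexpr destructor (a C++20 feature) is what allows the guard to be
    // used inside constant-evaluated code.
    constexpr ~ScopeGuard() {
        m_func();
    }

    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    F m_func;
};

struct ScopeGuardTag {};

// The tag-plus-lambda trick lets the macro end right before the lambda body,
// so the braces and the trailing semicolon are written by the caller.
template <typename F>
constexpr ScopeGuard<F> operator+(ScopeGuardTag, F&& func) {
    return ScopeGuard<F>{std::forward<F>(func)};
}

} // namespace sketch

// SCOPE_GUARD yields a guard object the caller stores explicitly,
// e.g. `auto trigger_guard = SCOPE_GUARD { ... };`.
#define SCOPE_GUARD sketch::ScopeGuardTag{} + [&]() -> void

// SCOPE_EXIT declares its own uniquely named guard, e.g. `SCOPE_EXIT { ... };`.
#define SKETCH_CONCAT_IMPL(x, y) x##y
#define SKETCH_CONCAT(x, y) SKETCH_CONCAT_IMPL(x, y)
#define SCOPE_EXIT auto SKETCH_CONCAT(scope_exit_guard_, __LINE__) = SCOPE_GUARD

With a shape like this, `SCOPE_EXIT { MicroProfileShutdown(); };` expands to the declaration of a guard whose destructor runs the body at scope exit, and because the body is an ordinary compound statement rather than a macro argument, the preprocessor never has to parse it.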