// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <iterator>
#include <utility>

#include "common/assert.h"
#include "common/logging/log.h"
#include "common/memory_hook.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
#include "core/memory_setup.h"

namespace Kernel {
namespace {
const char* GetMemoryStateName(MemoryState state) {
    static constexpr const char* names[] = {
        "Unmapped",       "Io",
        "Normal",         "Code",
        "CodeData",       "Heap",
        "Shared",         "Unknown1",
        "ModuleCode",     "ModuleCodeData",
        "IpcBuffer0",     "Stack",
        "ThreadLocal",    "TransferMemoryIsolated",
        "TransferMemory", "ProcessMemory",
        "Inaccessible",   "IpcBuffer1",
        "IpcBuffer3",     "KernelStack",
    };

    return names[ToSvcMemoryState(state)];
}

// Checks if a given address range lies within a larger address range.
constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
                                    VAddr address_range_end) {
    const VAddr end_address = address + size - 1;
    return address_range_begin <= address && end_address <= address_range_end - 1;
}
} // Anonymous namespace
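
// Worked example for IsInsideAddressRange(): with address = 0x1000, size = 0x1000,
// address_range_begin = 0x1000 and address_range_end = 0x2000,
//   end_address = 0x1000 + 0x1000 - 1 = 0x1FFF
//   0x1000 <= 0x1000 && 0x1FFF <= 0x2000 - 1   -> true
// A range that extends even one byte past address_range_end fails the second comparison.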

bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
    ASSERT(base + size == next.base);
    if (permissions != next.permissions || state != next.state || attribute != next.attribute ||
        type != next.type) {
        return false;
    }
    if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) {
        // TODO: Can device mapped memory be merged sanely?
        // Not merging it may cause inaccuracies versus hardware when memory layout is queried.
        return false;
    }
    if (type == VMAType::AllocatedMemoryBlock) {
        return true;
    }
    if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
        return false;
    }
    if (type == VMAType::MMIO && paddr + size != next.paddr) {
        return false;
    }
    return true;
}

VMManager::VMManager(Core::System& system) : system{system} {
    // Default to assuming a 39-bit address space. This way we have a sane
    // starting point with executables that don't provide metadata.
    Reset(FileSys::ProgramAddressSpaceType::Is39Bit);
}

VMManager::~VMManager() = default;

void VMManager::Reset(FileSys::ProgramAddressSpaceType type) {
    Clear();

    InitializeMemoryRegionRanges(type);

    page_table.Resize(address_space_width);

    // Initialize the map with a single free region covering the entire managed space.
    VirtualMemoryArea initial_vma;
    initial_vma.size = address_space_end;
    vma_map.emplace(initial_vma.base, initial_vma);

    UpdatePageTableForVMA(initial_vma);
}

VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
    if (target >= address_space_end) {
        return vma_map.end();
    } else {
        return std::prev(vma_map.upper_bound(target));
    }
}

bool VMManager::IsValidHandle(VMAHandle handle) const {
    return handle != vma_map.cend();
}
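
// Typical lookup pattern used throughout this file (see QueryMemory() and CheckRangeState()
// below): FindVMA() returns the VMA containing the address, which may be a larger region that
// merely encompasses it, and the handle must be checked with IsValidHandle() before use:
//   const auto vma = FindVMA(address);
//   if (IsValidHandle(vma)) {
//       const VAddr base = vma->second.base;
//   }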

ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
                                                          std::shared_ptr<std::vector<u8>> block,
                                                          std::size_t offset, u64 size,
                                                          MemoryState state, VMAPermission perm) {
    ASSERT(block != nullptr);
    ASSERT(offset + size <= block->size());

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    system.ArmInterface(0).MapBackingMemory(target, size, block->data() + offset,
                                            VMAPermission::ReadWriteExecute);
    system.ArmInterface(1).MapBackingMemory(target, size, block->data() + offset,
                                            VMAPermission::ReadWriteExecute);
    system.ArmInterface(2).MapBackingMemory(target, size, block->data() + offset,
                                            VMAPermission::ReadWriteExecute);
    system.ArmInterface(3).MapBackingMemory(target, size, block->data() + offset,
                                            VMAPermission::ReadWriteExecute);

    final_vma.type = VMAType::AllocatedMemoryBlock;
    final_vma.permissions = perm;
    final_vma.state = state;
    final_vma.backing_block = std::move(block);
    final_vma.offset = offset;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}
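
// Example call (the same form MapPhysicalMemory() uses below): map a freshly allocated,
// zero-filled block over an unmapped range and mark it as heap memory. 'addr' and 'len' are
// placeholder values; both must be page-aligned, since CarveVMA() asserts page alignment.
//   const auto result =
//       MapMemoryBlock(addr, std::make_shared<std::vector<u8>>(len, 0), 0, len,
//                      MemoryState::Heap, VMAPermission::ReadWrite);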

ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u64 size,
                                                            MemoryState state) {
    ASSERT(memory != nullptr);

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    system.ArmInterface(0).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
    system.ArmInterface(1).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
    system.ArmInterface(2).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
    system.ArmInterface(3).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);

    final_vma.type = VMAType::BackingMemory;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.state = state;
    final_vma.backing_memory = memory;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
    return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size);
}

ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const {
    ASSERT(begin < end);
    ASSERT(size <= end - begin);

    const VMAHandle vma_handle =
        std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) {
            if (vma.second.type != VMAType::Free) {
                return false;
            }

            const VAddr vma_base = vma.second.base;
            const VAddr vma_end = vma_base + vma.second.size;
            const VAddr assumed_base = (begin < vma_base) ? vma_base : begin;
            const VAddr used_range = assumed_base + size;

            return vma_base <= assumed_base && assumed_base < used_range && used_range < end &&
                   used_range <= vma_end;
        });

    if (vma_handle == vma_map.cend()) {
        // TODO(Subv): Find the correct error code here.
        return ResultCode(-1);
    }

    const VAddr target = std::max(begin, vma_handle->second.base);
    return MakeResult<VAddr>(target);
}
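
// Example: FindFreeRegion(0x10000) scans the VMA map for a free VMA inside the ASLR region
// with room for 0x10000 bytes and returns the lowest usable address in it; the three-argument
// overload performs the same search over a caller-provided begin/end window.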

ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
                                                   MemoryState state,
                                                   Common::MemoryHookPointer mmio_handler) {
    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::MMIO;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.state = state;
    final_vma.paddr = paddr;
    final_vma.mmio_handler = std::move(mmio_handler);
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
    VirtualMemoryArea& vma = vma_handle->second;
    vma.type = VMAType::Free;
    vma.permissions = VMAPermission::None;
    vma.state = MemoryState::Unmapped;
    vma.attribute = MemoryAttribute::None;

    vma.backing_block = nullptr;
    vma.offset = 0;
    vma.backing_memory = nullptr;
    vma.paddr = 0;

    UpdatePageTableForVMA(vma);

    return MergeAdjacent(vma_handle);
}

ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    const VAddr target_end = target + size;

    const VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(Unmap(vma));
    }

    ASSERT(FindVMA(target)->second.size >= size);

    system.ArmInterface(0).UnmapMemory(target, size);
    system.ArmInterface(1).UnmapMemory(target, size);
    system.ArmInterface(2).UnmapMemory(target, size);
    system.ArmInterface(3).UnmapMemory(target, size);

    return RESULT_SUCCESS;
}

VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
    VMAIter iter = StripIterConstness(vma_handle);

    VirtualMemoryArea& vma = iter->second;
    vma.permissions = new_perms;
    UpdatePageTableForVMA(vma);

    return MergeAdjacent(iter);
}

ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    const VAddr target_end = target + size;

    const VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
    }

    return RESULT_SUCCESS;
}

ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
    if (size > GetHeapRegionSize()) {
        return ERR_OUT_OF_MEMORY;
    }

    // No need to do any additional work if the heap is already the given size.
    if (size == GetCurrentHeapSize()) {
        return MakeResult(heap_region_base);
    }

    if (heap_memory == nullptr) {
        // Initialize heap
        heap_memory = std::make_shared<std::vector<u8>>(size);
        heap_end = heap_region_base + size;
    } else {
        UnmapRange(heap_region_base, GetCurrentHeapSize());
    }

    // If necessary, expand backing vector to cover new heap extents in
    // the case of allocating. Otherwise, shrink the backing memory,
    // if a smaller heap has been requested.
    const u64 old_heap_size = GetCurrentHeapSize();
    if (size > old_heap_size) {
        const u64 alloc_size = size - old_heap_size;

        heap_memory->insert(heap_memory->end(), alloc_size, 0);
        RefreshMemoryBlockMappings(heap_memory.get());
    } else if (size < old_heap_size) {
        heap_memory->resize(size);
        heap_memory->shrink_to_fit();

        RefreshMemoryBlockMappings(heap_memory.get());
    }

    heap_end = heap_region_base + size;
    ASSERT(GetCurrentHeapSize() == heap_memory->size());

    const auto mapping_result =
        MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
    if (mapping_result.Failed()) {
        return mapping_result.Code();
    }

    return MakeResult<VAddr>(heap_region_base);
}
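
// Usage note: the heap always starts at heap_region_base; growing or shrinking is done by
// calling SetHeapSize() again with the new total size (not a delta), and the returned address
// is the unchanged heap base on success. 0x2000000 below is an arbitrary example size;
// anything larger than GetHeapRegionSize() is rejected with ERR_OUT_OF_MEMORY.
//   if (const auto addr = SetHeapSize(0x2000000); addr.Succeeded()) {
//       const VAddr heap_base = *addr; // == GetHeapRegionBaseAddress()
//   }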

ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
    const auto end_addr = target + size;
    const auto last_addr = end_addr - 1;
    VAddr cur_addr = target;

    ResultCode result = RESULT_SUCCESS;

    // Check how much memory we've already mapped.
    const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
    if (mapped_size_result.Failed()) {
        return mapped_size_result.Code();
    }

    // If we've already mapped the desired amount, return early.
    const std::size_t mapped_size = *mapped_size_result;
    if (mapped_size == size) {
        return RESULT_SUCCESS;
    }

    // Check that we can map the memory we want.
    const auto res_limit = system.CurrentProcess()->GetResourceLimit();
    const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) -
                                  res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory);
    if (physmem_remaining < (size - mapped_size)) {
        return ERR_RESOURCE_LIMIT_EXCEEDED;
    }

    // Keep track of the memory regions we unmap.
    std::vector<std::pair<u64, u64>> mapped_regions;

    // Iterate, trying to map memory.
    {
        cur_addr = target;

        auto iter = FindVMA(target);
        ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");

        while (true) {
            const auto& vma = iter->second;
            const auto vma_start = vma.base;
            const auto vma_end = vma_start + vma.size;
            const auto vma_last = vma_end - 1;

            // Map the memory block
            const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
            if (vma.state == MemoryState::Unmapped) {
                const auto map_res =
                    MapMemoryBlock(cur_addr, std::make_shared<std::vector<u8>>(map_size, 0), 0,
                                   map_size, MemoryState::Heap, VMAPermission::ReadWrite);
                result = map_res.Code();
                if (result.IsError()) {
                    break;
                }

                mapped_regions.emplace_back(cur_addr, map_size);
            }

            // Break once we hit the end of the range.
            if (last_addr <= vma_last) {
                break;
            }

            // Advance to the next block.
            cur_addr = vma_end;
            iter = FindVMA(cur_addr);
            ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
        }
    }

    // If we failed, unmap memory.
    if (result.IsError()) {
        for (const auto [unmap_address, unmap_size] : mapped_regions) {
            ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
                       "MapPhysicalMemory un-map on error");
        }

        return result;
    }

    // Update amount of mapped physical memory.
    physical_memory_mapped += size - mapped_size;

    return RESULT_SUCCESS;
}

ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
    const auto end_addr = target + size;
    const auto last_addr = end_addr - 1;
    VAddr cur_addr = target;

    ResultCode result = RESULT_SUCCESS;

    // Check how much memory is currently mapped.
    const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
    if (mapped_size_result.Failed()) {
        return mapped_size_result.Code();
    }

    // If we've already unmapped all the memory, return early.
    const std::size_t mapped_size = *mapped_size_result;
    if (mapped_size == 0) {
        return RESULT_SUCCESS;
    }

    // Keep track of the memory regions we unmap.
    std::vector<std::pair<u64, u64>> unmapped_regions;

    // Try to unmap regions.
    {
        cur_addr = target;

        auto iter = FindVMA(target);
        ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");

        while (true) {
            const auto& vma = iter->second;
            const auto vma_start = vma.base;
            const auto vma_end = vma_start + vma.size;
            const auto vma_last = vma_end - 1;

            // Unmap the memory block
            const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
            if (vma.state == MemoryState::Heap) {
                result = UnmapRange(cur_addr, unmap_size);
                if (result.IsError()) {
                    break;
                }

                unmapped_regions.emplace_back(cur_addr, unmap_size);
            }

            // Break once we hit the end of the range.
            if (last_addr <= vma_last) {
                break;
            }

            // Advance to the next block.
            cur_addr = vma_end;
            iter = FindVMA(cur_addr);
            ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
        }
    }

    // If we failed, re-map regions.
    // TODO: Preserve memory contents?
    if (result.IsError()) {
        for (const auto [map_address, map_size] : unmapped_regions) {
            const auto remap_res =
                MapMemoryBlock(map_address, std::make_shared<std::vector<u8>>(map_size, 0), 0,
                               map_size, MemoryState::Heap, VMAPermission::None);
            ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
        }
    }

    // Update mapped amount
    physical_memory_mapped -= mapped_size;

    return RESULT_SUCCESS;
}

ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
    constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
    const auto src_check_result = CheckRangeState(
        src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::All,
        VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);

    if (src_check_result.Failed()) {
        return src_check_result.Code();
    }

    const auto mirror_result =
        MirrorMemory(dst_address, src_address, size, MemoryState::ModuleCode);
    if (mirror_result.IsError()) {
        return mirror_result;
    }

    // Ensure we lock the source memory region.
    const auto src_vma_result = CarveVMARange(src_address, size);
    if (src_vma_result.Failed()) {
        return src_vma_result.Code();
    }
    auto src_vma_iter = *src_vma_result;
    src_vma_iter->second.attribute = MemoryAttribute::Locked;
    Reprotect(src_vma_iter, VMAPermission::Read);

    // The destination memory region is fine as is, however we need to make it read-only.
    return ReprotectRange(dst_address, size, VMAPermission::Read);
}

ResultCode VMManager::UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
    constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
    const auto src_check_result = CheckRangeState(
        src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::None,
        VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, ignore_attribute);

    if (src_check_result.Failed()) {
        return src_check_result.Code();
    }

    // Yes, the kernel only checks the first page of the region.
    const auto dst_check_result =
        CheckRangeState(dst_address, Memory::PAGE_SIZE, MemoryState::FlagModule,
                        MemoryState::FlagModule, VMAPermission::None, VMAPermission::None,
                        MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);

    if (dst_check_result.Failed()) {
        return dst_check_result.Code();
    }

    const auto dst_memory_state = std::get<MemoryState>(*dst_check_result);
    const auto dst_contiguous_check_result = CheckRangeState(
        dst_address, size, MemoryState::All, dst_memory_state, VMAPermission::None,
        VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);

    if (dst_contiguous_check_result.Failed()) {
        return dst_contiguous_check_result.Code();
    }

    const auto unmap_result = UnmapRange(dst_address, size);
    if (unmap_result.IsError()) {
        return unmap_result;
    }

    // With the mirrored portion unmapped, restore the original region's traits.
    const auto src_vma_result = CarveVMARange(src_address, size);
    if (src_vma_result.Failed()) {
        return src_vma_result.Code();
    }
    auto src_vma_iter = *src_vma_result;
    src_vma_iter->second.state = MemoryState::Heap;
    src_vma_iter->second.attribute = MemoryAttribute::None;
    Reprotect(src_vma_iter, VMAPermission::ReadWrite);

    if (dst_memory_state == MemoryState::ModuleCode) {
        system.InvalidateCpuInstructionCaches();
    }

    return unmap_result;
}

MemoryInfo VMManager::QueryMemory(VAddr address) const {
    const auto vma = FindVMA(address);
    MemoryInfo memory_info{};

    if (IsValidHandle(vma)) {
        memory_info.base_address = vma->second.base;
        memory_info.attributes = ToSvcMemoryAttribute(vma->second.attribute);
        memory_info.permission = static_cast<u32>(vma->second.permissions);
        memory_info.size = vma->second.size;
        memory_info.state = ToSvcMemoryState(vma->second.state);
    } else {
        memory_info.base_address = address_space_end;
        memory_info.permission = static_cast<u32>(VMAPermission::None);
        memory_info.size = 0 - address_space_end;
        memory_info.state = static_cast<u32>(MemoryState::Inaccessible);
    }

    return memory_info;
}

ResultCode VMManager::SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
                                         MemoryAttribute attribute) {
    constexpr auto ignore_mask = MemoryAttribute::Uncached | MemoryAttribute::DeviceMapped;
    constexpr auto attribute_mask = ~ignore_mask;

    const auto result = CheckRangeState(
        address, size, MemoryState::FlagUncached, MemoryState::FlagUncached, VMAPermission::None,
        VMAPermission::None, attribute_mask, MemoryAttribute::None, ignore_mask);

    if (result.Failed()) {
        return result.Code();
    }

    const auto [prev_state, prev_permissions, prev_attributes] = *result;
    const auto new_attribute = (prev_attributes & ~mask) | (mask & attribute);

    const auto carve_result = CarveVMARange(address, size);
    if (carve_result.Failed()) {
        return carve_result.Code();
    }

    auto vma_iter = *carve_result;
    vma_iter->second.attribute = new_attribute;

    MergeAdjacent(vma_iter);
    return RESULT_SUCCESS;
}
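
// Worked example of the attribute update above: to mark a range as uncached, a caller passes
// mask = attribute = MemoryAttribute::Uncached. With prev_attributes == MemoryAttribute::None,
//   new_attribute = (None & ~Uncached) | (Uncached & Uncached) = Uncached
// Passing mask = Uncached with attribute = None clears the flag again; bits outside the mask
// are always left untouched.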

ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) {
    const auto vma = FindVMA(src_addr);

    ASSERT_MSG(vma != vma_map.end(), "Invalid memory address");
    ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");

    // The returned VMA might be a bigger one encompassing the desired address.
    const auto vma_offset = src_addr - vma->first;
    ASSERT_MSG(vma_offset + size <= vma->second.size,
               "Shared memory exceeds bounds of mapped block");

    const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
    const std::size_t backing_block_offset = vma->second.offset + vma_offset;

    CASCADE_RESULT(auto new_vma,
                   MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size, state));
    // Protect mirror with permissions from old region
    Reprotect(new_vma, vma->second.permissions);
    // Remove permissions from old region
    ReprotectRange(src_addr, size, VMAPermission::None);

    return RESULT_SUCCESS;
}

void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
    // If this ever proves to have a noticeable performance impact, allow users of the function to
    // specify a specific range of addresses to limit the scan to.
    for (const auto& p : vma_map) {
        const VirtualMemoryArea& vma = p.second;
        if (block == vma.backing_block.get()) {
            UpdatePageTableForVMA(vma);
        }
    }
}

void VMManager::LogLayout() const {
    for (const auto& p : vma_map) {
        const VirtualMemoryArea& vma = p.second;
        LOG_DEBUG(Kernel, "{:016X} - {:016X} size: {:016X} {}{}{} {}", vma.base,
                  vma.base + vma.size, vma.size,
                  (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
                  (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
                  (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-',
                  GetMemoryStateName(vma.state));
    }
}

VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
    // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
    // non-const access to its container.
    return vma_map.erase(iter, iter); // Erases an empty range of elements
}

ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", base);

    VMAIter vma_handle = StripIterConstness(FindVMA(base));
    if (vma_handle == vma_map.end()) {
        // Target address is outside the range managed by the kernel
        return ERR_INVALID_ADDRESS;
    }

    const VirtualMemoryArea& vma = vma_handle->second;
    if (vma.type != VMAType::Free) {
        // Region is already allocated
        return ERR_INVALID_ADDRESS_STATE;
    }

    const VAddr start_in_vma = base - vma.base;
    const VAddr end_in_vma = start_in_vma + size;

    if (end_in_vma > vma.size) {
        // Requested allocation doesn't fit inside VMA
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (end_in_vma != vma.size) {
        // Split VMA at the end of the allocated region
        SplitVMA(vma_handle, end_in_vma);
    }
    if (start_in_vma != 0) {
        // Split VMA at the start of the allocated region
        vma_handle = SplitVMA(vma_handle, start_in_vma);
    }

    return MakeResult<VMAIter>(vma_handle);
}

ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target);

    const VAddr target_end = target + size;
    ASSERT(target_end >= target);
    ASSERT(target_end <= address_space_end);
    ASSERT(size > 0);

    VMAIter begin_vma = StripIterConstness(FindVMA(target));
    const VMAIter i_end = vma_map.lower_bound(target_end);
    if (std::any_of(begin_vma, i_end,
                    [](const auto& entry) { return entry.second.type == VMAType::Free; })) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (target != begin_vma->second.base) {
        begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
    }

    VMAIter end_vma = StripIterConstness(FindVMA(target_end));
    if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
        end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
    }

    return MakeResult<VMAIter>(begin_vma);
}

VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
    VirtualMemoryArea& old_vma = vma_handle->second;
    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA

    // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
    // a bug. This restriction might be removed later.
    ASSERT(offset_in_vma < old_vma.size);
    ASSERT(offset_in_vma > 0);

    old_vma.size = offset_in_vma;
    new_vma.base += offset_in_vma;
    new_vma.size -= offset_in_vma;

    switch (new_vma.type) {
    case VMAType::Free:
        break;
    case VMAType::AllocatedMemoryBlock:
        new_vma.offset += offset_in_vma;
        break;
    case VMAType::BackingMemory:
        new_vma.backing_memory += offset_in_vma;
        break;
    case VMAType::MMIO:
        new_vma.paddr += offset_in_vma;
        break;
    }

    ASSERT(old_vma.CanBeMergedWith(new_vma));

    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}
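
// Worked example: splitting a 0x4000-byte VMA based at 0x8000000 with offset_in_vma = 0x1000
// shrinks the original entry to cover [0x8000000, 0x8001000) and inserts a new entry covering
// [0x8001000, 0x8004000). For non-free VMAs the backing offset, backing pointer or physical
// address is advanced by the same amount so both halves still refer to the correct storage.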

VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
    const VMAIter next_vma = std::next(iter);
    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
        MergeAdjacentVMA(iter->second, next_vma->second);
        vma_map.erase(next_vma);
    }

    if (iter != vma_map.begin()) {
        VMAIter prev_vma = std::prev(iter);
        if (prev_vma->second.CanBeMergedWith(iter->second)) {
            MergeAdjacentVMA(prev_vma->second, iter->second);
            vma_map.erase(iter);
            iter = prev_vma;
        }
    }

    return iter;
}

void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) {
    ASSERT(left.CanBeMergedWith(right));

    // Always merge allocated memory blocks, even when they don't share the same backing block.
    if (left.type == VMAType::AllocatedMemoryBlock &&
        (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
        // Check if we can save work.
        if (left.offset == 0 && left.size == left.backing_block->size()) {
            // Fast case: left is an entire backing block.
            left.backing_block->insert(left.backing_block->end(),
                                       right.backing_block->begin() + right.offset,
                                       right.backing_block->begin() + right.offset + right.size);
        } else {
            // Slow case: make a new memory block for left and right.
            auto new_memory = std::make_shared<std::vector<u8>>();
            new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset,
                               left.backing_block->begin() + left.offset + left.size);
            new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset,
                               right.backing_block->begin() + right.offset + right.size);
            left.backing_block = new_memory;
            left.offset = 0;
        }

        // Page table update is needed, because backing memory changed.
        left.size += right.size;
        UpdatePageTableForVMA(left);
    } else {
        // Just update the size.
        left.size += right.size;
    }
}

void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
    switch (vma.type) {
    case VMAType::Free:
        Memory::UnmapRegion(page_table, vma.base, vma.size);
        break;
    case VMAType::AllocatedMemoryBlock:
        Memory::MapMemoryRegion(page_table, vma.base, vma.size,
                                vma.backing_block->data() + vma.offset);
        break;
    case VMAType::BackingMemory:
        Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
        break;
    case VMAType::MMIO:
        Memory::MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
        break;
    }
}
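
// UpdatePageTableForVMA() is the single point where VMA bookkeeping is pushed into the page
// table that the rest of the emulator reads through the Memory:: helpers; whenever a VMA's
// type or backing storage changes above, a call to it follows so both views stay in sync.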

void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) {
    u64 map_region_size = 0;
    u64 heap_region_size = 0;
    u64 stack_region_size = 0;
    u64 tls_io_region_size = 0;

    u64 stack_and_tls_io_end = 0;

    switch (type) {
    case FileSys::ProgramAddressSpaceType::Is32Bit:
    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
        address_space_width = 32;
        code_region_base = 0x200000;
        code_region_end = code_region_base + 0x3FE00000;
        aslr_region_base = 0x200000;
        aslr_region_end = aslr_region_base + 0xFFE00000;
        if (type == FileSys::ProgramAddressSpaceType::Is32Bit) {
            map_region_size = 0x40000000;
            heap_region_size = 0x40000000;
        } else {
            map_region_size = 0;
            heap_region_size = 0x80000000;
        }
        stack_and_tls_io_end = 0x40000000;
        break;
    case FileSys::ProgramAddressSpaceType::Is36Bit:
        address_space_width = 36;
        code_region_base = 0x8000000;
        code_region_end = code_region_base + 0x78000000;
        aslr_region_base = 0x8000000;
        aslr_region_end = aslr_region_base + 0xFF8000000;
        map_region_size = 0x180000000;
        heap_region_size = 0x180000000;
        stack_and_tls_io_end = 0x80000000;
        break;
    case FileSys::ProgramAddressSpaceType::Is39Bit:
        address_space_width = 39;
        code_region_base = 0x8000000;
        code_region_end = code_region_base + 0x80000000;
        aslr_region_base = 0x8000000;
        aslr_region_end = aslr_region_base + 0x7FF8000000;
        map_region_size = 0x1000000000;
        heap_region_size = 0x180000000;
        stack_region_size = 0x80000000;
        tls_io_region_size = 0x1000000000;
        break;
    default:
        UNREACHABLE_MSG("Invalid address space type specified: {}", static_cast<u32>(type));
        return;
    }

    const u64 stack_and_tls_io_begin = aslr_region_base;

    address_space_base = 0;
    address_space_end = 1ULL << address_space_width;

    map_region_base = code_region_end;
    map_region_end = map_region_base + map_region_size;

    heap_region_base = map_region_end;
    heap_region_end = heap_region_base + heap_region_size;
    heap_end = heap_region_base;

    stack_region_base = heap_region_end;
    stack_region_end = stack_region_base + stack_region_size;

    tls_io_region_base = stack_region_end;
    tls_io_region_end = tls_io_region_base + tls_io_region_size;

    if (stack_region_size == 0) {
        stack_region_base = stack_and_tls_io_begin;
        stack_region_end = stack_and_tls_io_end;
    }

    if (tls_io_region_size == 0) {
        tls_io_region_base = stack_and_tls_io_begin;
        tls_io_region_end = stack_and_tls_io_end;
    }
}
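
// Resulting layout for the default 39-bit address space (derived from the constants above):
//   code:   [0x0008000000, 0x0088000000)
//   map:    [0x0088000000, 0x1088000000)
//   heap:   [0x1088000000, 0x1208000000)
//   stack:  [0x1208000000, 0x1288000000)
//   tls/io: [0x1288000000, 0x2288000000)
//   aslr:   [0x0008000000, 0x8000000000)
// with address_space_end = 1ULL << 39 = 0x8000000000.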

void VMManager::Clear() {
    ClearVMAMap();
    ClearPageTable();
}

void VMManager::ClearVMAMap() {
    vma_map.clear();
}

void VMManager::ClearPageTable() {
    std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
    page_table.special_regions.clear();
    std::fill(page_table.attributes.begin(), page_table.attributes.end(),
              Common::PageType::Unmapped);
}

VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
                                                   MemoryState state, VMAPermission permission_mask,
                                                   VMAPermission permissions,
                                                   MemoryAttribute attribute_mask,
                                                   MemoryAttribute attribute,
                                                   MemoryAttribute ignore_mask) const {
    auto iter = FindVMA(address);

    // If we don't have a valid VMA handle at this point, then it means this is
    // being called with an address outside of the address space, which is definitely
    // indicative of a bug, as this function only operates on mapped memory regions.
    DEBUG_ASSERT(IsValidHandle(iter));

    const VAddr end_address = address + size - 1;
    const MemoryAttribute initial_attributes = iter->second.attribute;
    const VMAPermission initial_permissions = iter->second.permissions;
    const MemoryState initial_state = iter->second.state;

    while (true) {
        // The iterator should be valid throughout the traversal. Hitting the end of
        // the mapped VMA regions is unquestionably indicative of a bug.
        DEBUG_ASSERT(IsValidHandle(iter));

        const auto& vma = iter->second;

        if (vma.state != initial_state) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.state & state_mask) != state) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if (vma.permissions != initial_permissions) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.permissions & permission_mask) != permissions) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.attribute | ignore_mask) != (initial_attributes | ignore_mask)) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.attribute & attribute_mask) != attribute) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if (end_address <= vma.EndAddress()) {
            break;
        }

        ++iter;
    }

    return MakeResult(
        std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask));
}
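
// On success the caller receives the (state, permissions, attributes) of the first VMA in the
// range, with the ignored attribute bits already stripped; see SetMemoryAttribute() above,
// which unpacks it with a structured binding, and UnmapCodeMemory(), which extracts only the
// state via std::get<MemoryState>(*dst_check_result).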

ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
                                                             std::size_t size) const {
    const VAddr end_addr = address + size;
    const VAddr last_addr = end_addr - 1;
    std::size_t mapped_size = 0;

    VAddr cur_addr = address;
    auto iter = FindVMA(cur_addr);
    ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");

    while (true) {
        const auto& vma = iter->second;
        const VAddr vma_start = vma.base;
        const VAddr vma_end = vma_start + vma.size;
        const VAddr vma_last = vma_end - 1;

        // Add size if relevant.
        if (vma.state != MemoryState::Unmapped) {
            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
        }

        // Break once we hit the end of the range.
        if (last_addr <= vma_last) {
            break;
        }

        // Advance to the next block.
        cur_addr = vma_end;
        iter = std::next(iter);
        ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
    }

    return MakeResult(mapped_size);
}

ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
                                                                        std::size_t size) const {
    const VAddr end_addr = address + size;
    const VAddr last_addr = end_addr - 1;
    std::size_t mapped_size = 0;

    VAddr cur_addr = address;
    auto iter = FindVMA(cur_addr);
    ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");

    while (true) {
        const auto& vma = iter->second;
        const auto vma_start = vma.base;
        const auto vma_end = vma_start + vma.size;
        const auto vma_last = vma_end - 1;
        const auto state = vma.state;
        const auto attr = vma.attribute;

        // Memory within region must be free or mapped heap.
        if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) ||
              (state == MemoryState::Unmapped))) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        // Add size if relevant.
        if (state != MemoryState::Unmapped) {
            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
        }

        // Break once we hit the end of the range.
        if (last_addr <= vma_last) {
            break;
        }

        // Advance to the next block.
        cur_addr = vma_end;
        iter = std::next(iter);
        ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
    }

    return MakeResult(mapped_size);
}

u64 VMManager::GetTotalPhysicalMemoryAvailable() const {
    LOG_WARNING(Kernel, "(STUBBED) called");
    return 0xF8000000;
}

VAddr VMManager::GetAddressSpaceBaseAddress() const {
    return address_space_base;
}

VAddr VMManager::GetAddressSpaceEndAddress() const {
    return address_space_end;
}

u64 VMManager::GetAddressSpaceSize() const {
    return address_space_end - address_space_base;
}

u64 VMManager::GetAddressSpaceWidth() const {
    return address_space_width;
}

bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(),
                                GetAddressSpaceEndAddress());
}

VAddr VMManager::GetASLRRegionBaseAddress() const {
    return aslr_region_base;
}

VAddr VMManager::GetASLRRegionEndAddress() const {
    return aslr_region_end;
}

u64 VMManager::GetASLRRegionSize() const {
    return aslr_region_end - aslr_region_base;
}

bool VMManager::IsWithinASLRRegion(VAddr begin, u64 size) const {
    const VAddr range_end = begin + size;
    const VAddr aslr_start = GetASLRRegionBaseAddress();
    const VAddr aslr_end = GetASLRRegionEndAddress();

    if (aslr_start > begin || begin > range_end || range_end - 1 > aslr_end - 1) {
        return false;
    }

    if (range_end > heap_region_base && heap_region_end > begin) {
        return false;
    }

    if (range_end > map_region_base && map_region_end > begin) {
        return false;
    }

    return true;
}

VAddr VMManager::GetCodeRegionBaseAddress() const {
    return code_region_base;
}

VAddr VMManager::GetCodeRegionEndAddress() const {
    return code_region_end;
}

u64 VMManager::GetCodeRegionSize() const {
    return code_region_end - code_region_base;
}

bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(),
                                GetCodeRegionEndAddress());
}

VAddr VMManager::GetHeapRegionBaseAddress() const {
    return heap_region_base;
}

VAddr VMManager::GetHeapRegionEndAddress() const {
    return heap_region_end;
}

u64 VMManager::GetHeapRegionSize() const {
    return heap_region_end - heap_region_base;
}

u64 VMManager::GetCurrentHeapSize() const {
    return heap_end - heap_region_base;
}

bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
                                GetHeapRegionEndAddress());
}

VAddr VMManager::GetMapRegionBaseAddress() const {
    return map_region_base;
}

VAddr VMManager::GetMapRegionEndAddress() const {
    return map_region_end;
}

u64 VMManager::GetMapRegionSize() const {
    return map_region_end - map_region_base;
}

bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
}

VAddr VMManager::GetStackRegionBaseAddress() const {
    return stack_region_base;
}

VAddr VMManager::GetStackRegionEndAddress() const {
    return stack_region_end;
}

u64 VMManager::GetStackRegionSize() const {
    return stack_region_end - stack_region_base;
}

bool VMManager::IsWithinStackRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetStackRegionBaseAddress(),
                                GetStackRegionEndAddress());
}

VAddr VMManager::GetTLSIORegionBaseAddress() const {
    return tls_io_region_base;
}

VAddr VMManager::GetTLSIORegionEndAddress() const {
    return tls_io_region_end;
}

u64 VMManager::GetTLSIORegionSize() const {
    return tls_io_region_end - tls_io_region_base;
}

bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(),
                                GetTLSIORegionEndAddress());
}

} // namespace Kernel