General: Fix compilation for GCC
commit afab6c143c (parent fd7afda1e8)
@@ -22,7 +22,8 @@ struct EmptyStruct {};
  */
 template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
           bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
-requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAddressSpaceMap {
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAddressSpaceMap {
 private:
     std::function<void(VaType, VaType)>
         unmapCallback{}; //!< Callback called when the mappings in an region have changed
@@ -40,8 +41,8 @@ protected:
 
         Block() = default;
 
-        Block(VaType virt, PaType phys, ExtraBlockInfo extraInfo)
-            : virt(virt), phys(phys), extraInfo(extraInfo) {}
+        Block(VaType virt_, PaType phys_, ExtraBlockInfo extraInfo_)
+            : virt(virt_), phys(phys_), extraInfo(extraInfo_) {}
 
         constexpr bool Valid() {
             return virt != UnmappedVa;
@@ -102,7 +103,8 @@ public:
  * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
  */
 template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
-requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAllocator
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAllocator
     : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
 private:
     using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;
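This hunk and the FlatAddressSpaceMap hunk above apply the same fix: the requires-clause is moved onto its own line ahead of the class-head, since the GCC versions this commit targets reject the clause and the class definition run together on one line, a form Clang accepts. A minimal sketch of the accepted shape, using a hypothetical AddressWidthValid concept in place of the real AddressSpaceValid:

    #include <concepts>
    #include <cstddef>

    // Hypothetical stand-in for the AddressSpaceValid concept used above.
    template <typename T, std::size_t Bits>
    concept AddressWidthValid = std::unsigned_integral<T> && Bits <= sizeof(T) * 8;

    template <typename VaType, std::size_t AddressSpaceBits>
    requires AddressWidthValid<VaType, AddressSpaceBits> // clause on its own line
    class FlatMap {};                                    // class-head starts fresh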
@@ -27,7 +27,7 @@ template <class ForwardIt, class T, class Compare = std::less<>>
 template <typename T, typename Func, typename... Args>
 T FoldRight(T initial_value, Func&& func, Args&&... args) {
     T value{initial_value};
-    const auto high_func = [&value, &func]<typename T>(T x) { value = func(value, x); };
+    const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
     (std::invoke(high_func, std::forward<Args>(args)), ...);
     return value;
 }
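The generic lambda's template parameter is renamed from T to U because the original declaration redeclared the T of the enclosing function template; GCC diagnoses that shadowing as an error, while Clang happens to accept it. The same pattern in isolation:

    // Renaming the lambda's parameter avoids redeclaring the enclosing T,
    // which GCC rejects as template-parameter shadowing.
    template <typename T>
    T Identity(T value) {
        const auto pass = []<typename U>(U x) { return x; };
        return pass(value);
    }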
@@ -127,14 +127,11 @@ public:
         }
     }
 
-    BitField(T val) {
-        Assign(val);
-    }
-
-    BitField& operator=(T val) {
-        Assign(val);
-        return *this;
-    }
+    // This constructor and assignment operator might be considered ambiguous:
+    // Would they initialize the storage or just the bitfield?
+    // Hence, delete them. Use the Assign method to set bitfield values!
+    BitField(T val) = delete;
+    BitField& operator=(T val) = delete;
 
     constexpr BitField() noexcept = default;
 
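With the converting constructor and operator= deleted, every bitfield write must now go through Assign; the nvmap hunks further down (flags.keep_uncached_after_free.Assign(0), params.flags.map_uncached.Assign(...)) are the knock-on fixes. A usage sketch against this codebase's BitField, with an illustrative flag layout rather than the real nvmap one:

    #include "common/bit_field.h"
    #include "common/common_types.h"

    // Illustrative positions only; the real layout lives in nvmap.h.
    union HandleFlags {
        u32 raw;
        BitField<0, 1, u32> map_uncached;
        BitField<2, 1, u32> keep_uncached_after_free;
    };

    void Example() {
        HandleFlags flags{};
        flags.raw = 0;
        // flags.map_uncached = 1;    // no longer compiles: operator=(T) is deleted
        flags.map_uncached.Assign(1); // the one sanctioned way to set a field
    }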
@@ -1,8 +1,6 @@
 #include "common/multi_level_page_table.inc"
 
 namespace Common {
-template class Common::MultiLevelPageTable<GPUVAddr>;
-template class Common::MultiLevelPageTable<VAddr>;
-template class Common::MultiLevelPageTable<PAddr>;
+template class Common::MultiLevelPageTable<u64>;
 template class Common::MultiLevelPageTable<u32>;
 } // namespace Common
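GPUVAddr, VAddr, and PAddr are all aliases of u64 in this codebase, so the three removed lines named the same specialization three times, and GCC rejects a repeated explicit instantiation; a single u64 instantiation covers them all. The shape of the problem, reduced:

    #include <cstdint>

    template <typename EntryType>
    class PageTable {};

    using VAddr = std::uint64_t; // aliases, not distinct types
    using PAddr = std::uint64_t;

    template class PageTable<std::uint64_t>; // covers VAddr and PAddr alike
    // template class PageTable<VAddr>;      // GCC: duplicate explicit instantiation
    template class PageTable<std::uint32_t>; // genuinely distinct type, fine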
@@ -30,7 +30,7 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
 #ifdef _WIN32
     void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
 #else
-    void* base{mmap(nullptr, alloc_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
+    void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
 
     if (base == MAP_FAILED) {
         base = nullptr;
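The Linux reservation switches from PROT_NONE to PROT_READ | PROT_WRITE, presumably so the table can be written without a follow-up mprotect; anonymous private pages are only backed by physical memory once first touched, so the up-front cost is still just address space. The reservation pattern as a minimal sketch:

    #include <cstddef>
    #include <sys/mman.h>

    // Anonymous pages are zero-filled and committed lazily on first touch.
    void* ReserveZeroed(std::size_t alloc_size) {
        void* base = mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return base == MAP_FAILED ? nullptr : base;
    }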
@@ -13,7 +13,8 @@
 using Core::Memory::YUZU_PAGESIZE;
 
 namespace Service::Nvidia::NvCore {
-NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) {
+NvMap::Handle::Handle(u64 size_, Id id_)
+    : size(size_), aligned_size(size), orig_size(size), id(id_) {
     flags.raw = 0;
 }
 
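The constructor parameters gain the project's trailing-underscore convention, likely because GCC's -Wshadow warns when a parameter reuses a member's name and this build promotes warnings to errors. The pattern reduced to essentials:

    #include <cstdint>

    class Handle {
    public:
        // size_/id_ avoid GCC's "declaration shadows a member" diagnostic.
        Handle(std::uint64_t size_, std::uint32_t id_) : size{size_}, id{id_} {}

    private:
        std::uint64_t size;
        std::uint32_t id;
    };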
@@ -21,19 +22,21 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
     std::scoped_lock lock(mutex);
 
     // Handles cannot be allocated twice
-    if (allocated)
+    if (allocated) {
         return NvResult::AccessDenied;
+    }
 
     flags = pFlags;
     kind = pKind;
     align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
 
     // This flag is only applicable for handles with an address passed
-    if (pAddress)
-        flags.keep_uncached_after_free = 0;
-    else
+    if (pAddress) {
+        flags.keep_uncached_after_free.Assign(0);
+    } else {
         LOG_CRITICAL(Service_NVDRV,
                      "Mapping nvmap handles without a CPU side address is unimplemented!");
+    }
 
     size = Common::AlignUp(size, YUZU_PAGESIZE);
     aligned_size = Common::AlignUp(size, align);
@@ -48,17 +51,19 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
 
 NvResult NvMap::Handle::Duplicate(bool internal_session) {
     // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
-    if (!allocated) [[unlikely]]
+    if (!allocated) [[unlikely]] {
         return NvResult::BadValue;
+    }
 
     std::scoped_lock lock(mutex);
 
     // If we internally use FromId the duplication tracking of handles won't work accurately due to
     // us not implementing per-process handle refs.
-    if (internal_session)
+    if (internal_session) {
         internal_dupes++;
-    else
+    } else {
         dupes++;
+    }
 
     return NvResult::Success;
 }
@@ -92,8 +97,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) {
         std::scoped_lock lock(handles_lock);
 
         auto it{handles.find(handle_description.id)};
-        if (it != handles.end())
+        if (it != handles.end()) {
             handles.erase(it);
+        }
 
         return true;
     } else {
@@ -102,8 +108,9 @@ bool NvMap::TryRemoveHandle(const Handle& handle_description) {
 }
 
 NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
-    if (!size) [[unlikely]]
+    if (!size) [[unlikely]] {
         return NvResult::BadValue;
+    }
 
     u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
     auto handle_description{std::make_shared<Handle>(size, id)};
@@ -133,8 +140,9 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
 
 u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
     auto handle_description{GetHandle(handle)};
-    if (!handle_description) [[unlikely]]
+    if (!handle_description) [[unlikely]] {
         return 0;
+    }
 
     std::scoped_lock lock(handle_description->mutex);
     if (!handle_description->pins) {
@@ -183,8 +191,9 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
 
 void NvMap::UnpinHandle(Handle::Id handle) {
     auto handle_description{GetHandle(handle)};
-    if (!handle_description)
+    if (!handle_description) {
         return;
+    }
 
     std::scoped_lock lock(handle_description->mutex);
     if (--handle_description->pins < 0) {
@@ -226,12 +235,13 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
 
         // Try to remove the shared ptr to the handle from the map, if nothing else is using the
         // handle then it will now be freed when `handle_description` goes out of scope
-        if (TryRemoveHandle(*handle_description))
+        if (TryRemoveHandle(*handle_description)) {
             LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
-        else
+        } else {
             LOG_DEBUG(Service_NVDRV,
                       "Tried to free nvmap handle: {} but didn't as it still has duplicates",
                       handle);
+        }
 
         freeInfo = {
             .address = handle_description->address,
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <atomic>
 #include <list>
 #include <memory>
 #include <mutex>
@@ -188,6 +188,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
 
     allocation_map[params.offset] = {
         .size = size,
+        .mappings{},
         .page_size = params.page_size,
         .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
         .big_pages = params.page_size != VM::YUZU_PAGESIZE,
@@ -474,11 +475,13 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
         VaRegion{
             .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS,
             .page_size = VM::YUZU_PAGESIZE,
+            ._pad0_{},
             .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart,
         },
         VaRegion{
             .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits,
             .page_size = vm.big_page_size,
+            ._pad0_{},
             .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart,
         },
     };
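Both hunks in this file add an explicit value-initializer for an aggregate member that was previously skipped (.mappings{} above, ._pad0_{} here). The language already value-initializes skipped members, but GCC flags the omission under -Wmissing-field-initializers, which this build likely treats as an error; designators must also stay in declaration order. In miniature:

    #include <cstdint>

    struct VaRegion {
        std::uint64_t offset;
        std::uint32_t page_size;
        std::uint32_t _pad0_;
        std::uint64_t pages;
    };

    // Spelling out ._pad0_{} keeps GCC's -Wmissing-field-initializers quiet.
    const VaRegion region{
        .offset = 0,
        .page_size = 0x1000,
        ._pad0_{},
        .pages = 64,
    };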
@@ -204,12 +204,12 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
 
         event.wait_handle =
             host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
-                auto& event = events[slot];
-                if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
+                auto& event_ = events[slot];
+                if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
                     EventState::Waiting) {
-                    event.kevent->GetWritableEvent().Signal();
+                    event_.kevent->GetWritableEvent().Signal();
                 }
-                event.status.store(EventState::Signalled, std::memory_order_release);
+                event_.status.store(EventState::Signalled, std::memory_order_release);
             });
         return NvResult::Timeout;
     }
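Inside the callback, the local reference is renamed to event_ because it shadowed the event declared in the enclosing scope, which GCC reports under -Wshadow. Reduced to the essentials:

    #include <array>
    #include <cstddef>

    void Signal(std::array<int, 4>& events, std::size_t slot) {
        int& event = events[0];
        auto mark = [&events, slot] {
            auto& event_ = events[slot]; // `event` here would shadow the outer binding
            event_ = 1;
        };
        mark();
        event = 0;
    }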
@@ -12,8 +12,8 @@ namespace Service::Nvidia::Devices {
 
 u32 nvhost_nvdec::next_id{};
 
-nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core)
-    : nvhost_nvdec_common{system_, core, NvCore::ChannelType::NvDec} {}
+nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
 nvhost_nvdec::~nvhost_nvdec() = default;
 
 NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -11,8 +11,8 @@ namespace Service::Nvidia::Devices {
 
 u32 nvhost_vic::next_id{};
 
-nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core)
-    : nvhost_nvdec_common{system_, core, NvCore::ChannelType::VIC} {}
+nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
 
 nvhost_vic::~nvhost_vic() = default;
@@ -269,7 +269,7 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
         params.address = freeInfo->address;
         params.size = static_cast<u32>(freeInfo->size);
         params.flags.raw = 0;
-        params.flags.map_uncached = freeInfo->was_uncached;
+        params.flags.map_uncached.Assign(freeInfo->was_uncached);
     } else {
         // This is possible when there's internel dups or other duplicates.
     }
@@ -6,6 +6,7 @@
 #pragma once
 
 #include <functional>
+#include <list>
 #include <memory>
 #include <string>
 #include <unordered_map>
@@ -58,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size");
 class NativeWindow final {
 public:
     constexpr explicit NativeWindow(u32 id_) : id{id_} {}
+    constexpr explicit NativeWindow(const NativeWindow& other) = default;
 
 private:
     const u32 magic = 2;
@@ -269,7 +269,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environme
         }
         std::optional lhs{Track(op1, env)};
         if (lhs) {
-            lhs->shift_left = std::countr_zero(op2.U32());
+            lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32()));
         }
         return lhs;
         break;
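std::countr_zero returns int, while shift_left is a u32, so the assignment is an implicit signed-to-unsigned conversion that GCC flags under the -Wconversion family when warnings are errors; the cast spells it out. In isolation:

    #include <bit>
    #include <cstdint>

    std::uint32_t ShiftFromMask(std::uint32_t mask) {
        // std::countr_zero yields int; the cast makes the sign change explicit.
        return static_cast<std::uint32_t>(std::countr_zero(mask));
    }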
@@ -1323,7 +1323,8 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
         return;
     }
     if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
-        address_size = gpu_memory->MaxContinousRange(gpu_addr_begin, address_size);
+        address_size =
+            static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
     }
     const u32 size = address_size; // TODO: Analyze stride and number of vertices
     vertex_buffers[index] = Binding{
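Same warnings-as-errors theme, narrowing width rather than signedness: MaxContinousRange evidently yields a value wider than the u32 address_size, so the statement is split and the truncation made explicit. The cast by itself:

    #include <cstdint>

    std::uint32_t ClampToU32(std::uint64_t max_range) {
        // u64 -> u32 truncation must be spelled out to survive -Wconversion builds.
        return static_cast<std::uint32_t>(max_range);
    }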