NVFlinger: Correct GCC compile error
This commit is contained in:
parent 0335a25d1f
commit b391e5f638
@@ -174,6 +174,7 @@ u32 nvhost_ctrl::IocCtrlEventSignal(const std::vector<u8>& input, std::vector<u8
        if (gpu.CancelSyncptInterrupt(events_interface.assigned_syncpt[event_id],
                                      events_interface.assigned_value[event_id])) {
            events_interface.LiberateEvent(event_id);
            events_interface.events[event_id].writable->Signal();
        }
    }
    return NvResult::Success;
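Note on the hunk above: LiberateEvent and the Signal call run only when CancelSyncptInterrupt reports that a pending syncpoint interrupt was actually removed; if the cancel fails, the interrupt has presumably already fired and the normal signalling path is left to complete the event on its own.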
@@ -122,9 +122,9 @@ u32 nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& ou
                params.unk3);

    auto& gpu = system.GPU();
-   params.fence_out.id = channels;
-   params.fence_out.value = gpu.GetSyncpointValue(channels);
-   channels++;
+   params.fence_out.id = assigned_syncpoints;
+   params.fence_out.value = gpu.GetSyncpointValue(assigned_syncpoints);
+   assigned_syncpoints++;
    std::memcpy(output.data(), &params, output.size());
    return 0;
}
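The rename above suggests the counter tracks how many host syncpoints have been handed out rather than a channel count: each allocation takes the next syncpoint id and returns a fence made of that id plus the syncpoint's current value. A minimal sketch of that idea, with the Fence type, the 192-entry table, and AllocateChannelFence all assumed for illustration rather than taken from the emulator:

    #include <array>
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the emulator's fence and syncpoint storage.
    struct Fence {
        std::uint32_t id;    // which syncpoint the fence refers to
        std::uint32_t value; // value the syncpoint must reach for the fence to pass
    };

    std::array<std::atomic<std::uint32_t>, 192> syncpoints{}; // table size chosen arbitrarily
    std::uint32_t assigned_syncpoints = 0;                    // next free syncpoint id

    Fence AllocateChannelFence() {
        Fence fence{};
        fence.id = assigned_syncpoints;
        fence.value = syncpoints[assigned_syncpoints].load();
        ++assigned_syncpoints;
        return fence;
    }

    int main() {
        const Fence f = AllocateChannelFence();
        std::printf("fence: syncpoint %u at value %u\n", f.id, f.value);
    }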
@@ -169,8 +169,6 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp
    }
    gpu.PushGPUEntries(std::move(entries));

-   // TODO(Blinkhawk): Figure how thoios fence is set
-   // params.fence_out.value = 0;
    std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
    return 0;
}
@@ -191,7 +191,7 @@ private:
    u32 ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);

    std::shared_ptr<nvmap> nvmap_dev;
-   u32 channels{};
+   u32 assigned_syncpoints{};
};

} // namespace Service::Nvidia::Devices
@@ -57,8 +57,8 @@ void NVDRV::Ioctl(Kernel::HLERequestContext& ctx) {
        ctrl.fresh_call = false;
        ctx.SleepClientThread(
            "NVServices::DelayedResponse", ctrl.timeout,
-           [this, ctrl = ctrl](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
-               Kernel::ThreadWakeupReason reason) {
+           [this, ctrl = ctrl](Kernel::SharedPtr<Kernel::Thread> thread,
+                               Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) {
                IPC::RequestParser rp{ctx};
                u32 fd = rp.Pop<u32>();
                u32 command = rp.Pop<u32>();
@@ -4,9 +4,9 @@

#pragma once

+#include <list>
#include <optional>
#include <vector>
-#include <list>

#include "common/common_funcs.h"
#include "common/math_util.h"
@@ -37,13 +37,13 @@ NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_t
    displays.emplace_back(4, "Null");

    // Schedule the screen composition events
-   //const auto ticks = Settings::values.force_30fps_mode ? frame_ticks_30fps : frame_ticks;
+   // const auto ticks = Settings::values.force_30fps_mode ? frame_ticks_30fps : frame_ticks;

-   composition_event = core_timing.RegisterEvent(
-       "ScreenComposition", [this](u64 userdata, s64 cycles_late) {
+   composition_event = core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata,
+                                                                             s64 cycles_late) {
        Compose();
        const auto ticks = Settings::values.force_30fps_mode ? frame_ticks_30fps : GetNextTicks();
-       this->core_timing.ScheduleEvent(std::max(0LL,ticks - cycles_late), composition_event);
+       this->core_timing.ScheduleEvent(std::max<s64>(0LL, ticks - cycles_late), composition_event);
    });

    core_timing.ScheduleEvent(frame_ticks, composition_event);
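The std::max<s64> line above is the GCC build fix named in the commit title. std::max deduces a single template parameter from both arguments; with GCC/libstdc++ on 64-bit Linux, s64 (assumed here to alias int64_t, which is `long` there) does not match the `long long` type of the literal 0LL, so deduction fails even though both are 64-bit, while MSVC accepts it because its int64_t is `long long`. Spelling the parameter out converts both arguments explicitly. A standalone reproduction under that assumption:

    #include <algorithm>
    #include <cstdint>

    using s64 = std::int64_t; // assumed to mirror the emulator's alias

    int main() {
        const s64 ticks = 100;
        const s64 cycles_late = 250;
        // With libstdc++ on 64-bit Linux, s64 is `long` and 0LL is `long long`,
        // so the deduced template arguments of std::max conflict:
        // const auto bad = std::max(0LL, ticks - cycles_late);    // fails to compile on GCC
        const auto good = std::max<s64>(0LL, ticks - cycles_late); // explicit type, builds everywhere
        return good < 0 ? 1 : 0;
    }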
@@ -69,7 +69,7 @@ const DmaPusher& GPU::DmaPusher() const {

void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
    syncpoints[syncpoint_id]++;
-   sync_mutex.lock();
+   std::lock_guard lock{sync_mutex};
    if (!syncpt_interrupts[syncpoint_id].empty()) {
        u32 value = syncpoints[syncpoint_id].load();
        auto it = syncpt_interrupts[syncpoint_id].begin();
@@ -82,7 +82,6 @@ void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
            it++;
        }
    }
-   sync_mutex.unlock();
}

u32 GPU::GetSyncpointValue(const u32 syncpoint_id) const {
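The switch from a bare lock() to std::lock_guard in GPU::IncrementSyncPoint above and GPU::CancelSyncptInterrupt below is a RAII cleanup: the guard releases sync_mutex on every path out of the function, so the trailing unlock() calls can simply be deleted. A minimal sketch of the pattern; the mutex, container, and function here are placeholders, not the emulator's own:

    #include <mutex>
    #include <vector>

    std::mutex sync_mutex;
    std::vector<int> pending_interrupts;

    bool CancelFirstMatching(int value) {
        std::lock_guard lock{sync_mutex}; // locked here, released when `lock` goes out of scope
        for (auto it = pending_interrupts.begin(); it != pending_interrupts.end(); ++it) {
            if (*it == value) {
                pending_interrupts.erase(it);
                return true; // early return still unlocks via the guard's destructor
            }
        }
        return false; // no manual sync_mutex.unlock() needed on this path either
    }

    int main() {
        pending_interrupts.push_back(42);
        return CancelFirstMatching(42) ? 0 : 1;
    }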
@@ -98,7 +97,7 @@ void GPU::RegisterSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
}

bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
-   sync_mutex.lock();
+   std::lock_guard lock{sync_mutex};
    auto it = syncpt_interrupts[syncpoint_id].begin();
    while (it != syncpt_interrupts[syncpoint_id].end()) {
        if (value == *it) {
@@ -108,7 +107,6 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
        it++;
    }
    return false;
-   sync_mutex.unlock();
}

u32 RenderTargetBytesPerPixel(RenderTargetFormat format) {
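Worth noting in this last GPU hunk: the removed sync_mutex.unlock() sat after `return false;`, so it appears to have been unreachable, and with the old explicit lock() the function could return false with the mutex still held. The std::lock_guard introduced in the previous hunk releases the lock on that path as well.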
@@ -21,7 +21,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
    MicroProfileOnThreadCreate("GpuThread");

    // Wait for first GPU command before acquiring the window context
-   while (state.queue.Empty());
+   while (state.queue.Empty())
+       ;

    // If emulation was stopped during disk shader loading, abort before trying to acquire context
    if (!state.is_running) {
@@ -103,7 +104,8 @@ u64 ThreadManager::PushCommand(CommandData&& command_data) {

MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
void SynchState::WaitForSynchronization(u64 fence) {
-   while (signaled_fence.load() < fence);
+   while (signaled_fence.load() < fence)
+       ;
}

} // namespace VideoCommon::GPUThread
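The two gpu_thread hunks only reformat the empty busy-wait loops: moving the `;` to its own line keeps clang-format stable and makes it obvious that the empty body is intentional (Clang's -Wempty-body, for instance, asks for exactly this spelling). A self-contained sketch of the same spin-wait, with the fence made a local stand-in for the emulator's SynchState member:

    #include <atomic>
    #include <cstdint>
    #include <thread>

    std::atomic<std::uint64_t> signaled_fence{0}; // stand-in for SynchState::signaled_fence

    void WaitForSynchronization(std::uint64_t fence) {
        // Spin until the GPU thread has signaled at least `fence`.
        while (signaled_fence.load() < fence)
            ; // intentionally empty body, kept on its own line
    }

    int main() {
        std::thread producer([] { signaled_fence.store(1); });
        WaitForSynchronization(1);
        producer.join();
    }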