Merge pull request #2308 from lioncash/deduction
kernel/scheduler: Minor tidying up
Commit ba3d95e550
kernel/scheduler.cpp:

@@ -29,7 +29,7 @@ Scheduler::~Scheduler() {
 }
 
 bool Scheduler::HaveReadyThreads() const {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
     return !ready_queue.empty();
 }
 
@@ -132,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
 }
 
 void Scheduler::Reschedule() {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     Thread* cur = GetCurrentThread();
     Thread* next = PopNextReadyThread();
@@ -148,35 +148,35 @@ void Scheduler::Reschedule() {
     SwitchContext(next);
 }
 
-void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+void Scheduler::AddThread(SharedPtr<Thread> thread) {
+    std::lock_guard lock{scheduler_mutex};
 
     thread_list.push_back(std::move(thread));
 }
 
 void Scheduler::RemoveThread(Thread* thread) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
 }
 
 void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.add(thread, priority);
 }
 
 void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.remove(thread, priority);
 }
 
 void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
     if (thread->GetPriority() == priority) {
         return;
     }
@@ -187,7 +187,7 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
 }
 
 Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
+    std::lock_guard lock{scheduler_mutex};
 
     const u32 mask = 1U << core;
     for (auto* thread : ready_queue) {
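The lock changes above are what the branch name "deduction" refers to: C++17 class template argument deduction lets std::lock_guard infer its template argument from the mutex, so std::lock_guard<std::mutex> lock(scheduler_mutex); can be written as std::lock_guard lock{scheduler_mutex}; with identical behaviour. A minimal standalone sketch of the two spellings (not part of the diff; the mutex here is only a stand-in named after the scheduler's):

#include <mutex>

namespace {
std::mutex scheduler_mutex;

void LockWithExplicitArgument() {
    // Pre-C++17 spelling: the template argument is written out.
    std::lock_guard<std::mutex> lock(scheduler_mutex);
}

void LockWithDeduction() {
    // C++17 class template argument deduction infers std::lock_guard<std::mutex>
    // from the type of scheduler_mutex; the generated code is the same.
    std::lock_guard lock{scheduler_mutex};
}
} // namespace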
kernel/scheduler.h:

@@ -38,7 +38,7 @@ public:
     u64 GetLastContextSwitchTicks() const;
 
     /// Adds a new thread to the scheduler
-    void AddThread(SharedPtr<Thread> thread, u32 priority);
+    void AddThread(SharedPtr<Thread> thread);
 
     /// Removes a thread from the scheduler
     void RemoveThread(Thread* thread);
kernel/thread.cpp:

@@ -199,7 +199,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
     thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
     thread->owner_process = &owner_process;
     thread->scheduler = &system.Scheduler(processor_id);
-    thread->scheduler->AddThread(thread, priority);
+    thread->scheduler->AddThread(thread);
     thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
 
     // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
@@ -352,7 +352,7 @@ void Thread::ChangeScheduler() {
     if (*new_processor_id != processor_id) {
         // Remove thread from previous core's scheduler
         scheduler->RemoveThread(this);
-        next_scheduler.AddThread(this, current_priority);
+        next_scheduler.AddThread(this);
     }
 
     processor_id = *new_processor_id;
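The call sites above illustrate why the priority argument could be dropped: AddThread only appends the thread to thread_list, while priority is consumed later, when the thread is actually queued through ScheduleThread. A simplified standalone sketch of that split, assuming stand-in types rather than the real kernel classes:

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

struct Thread {};

class Scheduler {
public:
    // After the change: AddThread only records ownership of the thread,
    // so it takes no priority.
    void AddThread(std::shared_ptr<Thread> thread) {
        thread_list.push_back(std::move(thread));
    }

    // Priority only matters once the thread becomes ready to run.
    void ScheduleThread(Thread* thread, std::uint32_t priority) {
        ready_queue.emplace_back(thread, priority);
    }

private:
    std::vector<std::shared_ptr<Thread>> thread_list;
    std::vector<std::pair<Thread*, std::uint32_t>> ready_queue;
};

Keeping the unused parameter would only have obscured which operations actually depend on priority.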