 src/core/hle/kernel/global_scheduler_context.cpp        |  33
 src/core/hle/kernel/global_scheduler_context.h          |  25
 src/core/hle/kernel/k_address_arbiter.cpp               | 111
 src/core/hle/kernel/k_address_arbiter.h                 |  45
 src/core/hle/kernel/k_scheduler.h                       |  12
 src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h |  30
 src/core/hle/kernel/k_thread_queue.cpp                  |  12
 src/core/hle/kernel/k_thread_queue.h                    |   4
 8 files changed, 130 insertions(+), 142 deletions(-)
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index fd911a3a5..7b090ccb5 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -12,20 +12,19 @@
 
 namespace Kernel {
 
-GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel_)
-    : kernel{kernel_}, scheduler_lock{kernel_} {}
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+    : m_kernel{kernel}, m_scheduler_lock{kernel} {}
 
 GlobalSchedulerContext::~GlobalSchedulerContext() = default;
 
 void GlobalSchedulerContext::AddThread(KThread* thread) {
-    std::scoped_lock lock{global_list_guard};
-    thread_list.push_back(thread);
+    std::scoped_lock lock{m_global_list_guard};
+    m_thread_list.push_back(thread);
 }
 
 void GlobalSchedulerContext::RemoveThread(KThread* thread) {
-    std::scoped_lock lock{global_list_guard};
-    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
-                      thread_list.end());
+    std::scoped_lock lock{m_global_list_guard};
+    std::erase(m_thread_list, thread);
 }
 
 void GlobalSchedulerContext::PreemptThreads() {
@@ -38,37 +37,37 @@ void GlobalSchedulerContext::PreemptThreads() {
         63,
     };
 
-    ASSERT(IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
-        KScheduler::RotateScheduledQueue(kernel, core_id, priority);
+        KScheduler::RotateScheduledQueue(m_kernel, core_id, priority);
     }
 }
 
 bool GlobalSchedulerContext::IsLocked() const {
-    return scheduler_lock.IsLockedByCurrentThread();
+    return m_scheduler_lock.IsLockedByCurrentThread();
 }
 
 void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
-    ASSERT(IsLocked());
+    ASSERT(this->IsLocked());
 
-    woken_dummy_threads.insert(thread);
+    m_woken_dummy_threads.insert(thread);
 }
 
 void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
-    ASSERT(IsLocked());
+    ASSERT(this->IsLocked());
 
-    woken_dummy_threads.erase(thread);
+    m_woken_dummy_threads.erase(thread);
 }
 
 void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
-    ASSERT(IsLocked());
+    ASSERT(this->IsLocked());
 
-    for (auto* thread : woken_dummy_threads) {
+    for (auto* thread : m_woken_dummy_threads) {
         thread->DummyThreadEndWait();
     }
 
-    woken_dummy_threads.clear();
+    m_woken_dummy_threads.clear();
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 220ed6192..b7fb8caec 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -33,7 +33,7 @@ class GlobalSchedulerContext final {
 public:
     using LockType = KAbstractSchedulerLock<KScheduler>;
 
-    explicit GlobalSchedulerContext(KernelCore& kernel_);
+    explicit GlobalSchedulerContext(KernelCore& kernel);
     ~GlobalSchedulerContext();
 
     /// Adds a new thread to the scheduler
@@ -43,8 +43,9 @@ public:
     void RemoveThread(KThread* thread);
 
     /// Returns a list of all threads managed by the scheduler
+    /// This is only safe to iterate while holding the scheduler lock
     [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
-        return thread_list;
+        return m_thread_list;
     }
 
     /**
@@ -64,29 +65,25 @@ public:
     void WakeupWaitingDummyThreads();
 
     [[nodiscard]] LockType& SchedulerLock() {
-        return scheduler_lock;
-    }
-
-    [[nodiscard]] const LockType& SchedulerLock() const {
-        return scheduler_lock;
+        return m_scheduler_lock;
     }
 
 private:
     friend class KScopedSchedulerLock;
     friend class KScopedSchedulerLockAndSleep;
 
-    KernelCore& kernel;
+    KernelCore& m_kernel;
 
-    std::atomic_bool scheduler_update_needed{};
-    KSchedulerPriorityQueue priority_queue;
-    LockType scheduler_lock;
+    std::atomic_bool m_scheduler_update_needed{};
+    KSchedulerPriorityQueue m_priority_queue;
+    LockType m_scheduler_lock;
 
     /// Lists dummy threads pending wakeup on lock release
-    std::set<KThread*> woken_dummy_threads;
+    std::set<KThread*> m_woken_dummy_threads;
 
     /// Lists all thread ids that aren't deleted/etc.
-    std::vector<KThread*> thread_list;
-    std::mutex global_list_guard;
+    std::vector<KThread*> m_thread_list;
+    std::mutex m_global_list_guard;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index a4c16eca9..47637a729 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -14,8 +14,8 @@
 namespace Kernel {
 
-KAddressArbiter::KAddressArbiter(Core::System& system_)
-    : system{system_}, kernel{system.Kernel()} {}
+KAddressArbiter::KAddressArbiter(Core::System& system)
+    : m_system{system}, m_kernel{system.Kernel()} {}
 
 KAddressArbiter::~KAddressArbiter() = default;
 
 namespace {
@@ -90,8 +90,8 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
 
 class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
 public:
-    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
-        : KThreadQueue(kernel_), m_tree(t) {}
+    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel, KAddressArbiter::ThreadTree* t)
+        : KThreadQueue(kernel), m_tree(t) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // If the thread is waiting on an address arbiter, remove it from the tree.
@@ -105,7 +105,7 @@ public:
     }
 
 private:
-    KAddressArbiter::ThreadTree* m_tree;
+    KAddressArbiter::ThreadTree* m_tree{};
 };
 
 } // namespace
@@ -114,10 +114,10 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
-        auto it = thread_tree.nfind_key({addr, -1});
-        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+        auto it = m_tree.nfind_key({addr, -1});
+        while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
@@ -126,31 +126,27 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
             ASSERT(target_thread->IsWaitingForAddressArbiter());
             target_thread->ClearAddressArbiter();
 
-            it = thread_tree.erase(it);
+            it = m_tree.erase(it);
             ++num_waiters;
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
         // Check the userspace value.
         s32 user_value{};
-        if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
-            LOG_ERROR(Kernel, "Invalid current memory!");
-            return ResultInvalidCurrentMemory;
-        }
-        if (user_value != value) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(UpdateIfEqual(m_system, &user_value, addr, value, value + 1),
+                 ResultInvalidCurrentMemory);
+        R_UNLESS(user_value == value, ResultInvalidState);
 
-        auto it = thread_tree.nfind_key({addr, -1});
-        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+        auto it = m_tree.nfind_key({addr, -1});
+        while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
@@ -159,33 +155,33 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
             ASSERT(target_thread->IsWaitingForAddressArbiter());
             target_thread->ClearAddressArbiter();
 
-            it = thread_tree.erase(it);
+            it = m_tree.erase(it);
             ++num_waiters;
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
-        [[maybe_unused]] const KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
-        auto it = thread_tree.nfind_key({addr, -1});
+        auto it = m_tree.nfind_key({addr, -1});
 
         // Determine the updated value.
         s32 new_value{};
         if (count <= 0) {
-            if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+            if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
                 new_value = value - 2;
             } else {
                 new_value = value + 1;
             }
         } else {
-            if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+            if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
                 auto tmp_it = it;
                 s32 tmp_num_waiters{};
-                while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
+                while (++tmp_it != m_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
                     if (tmp_num_waiters++ >= count) {
                         break;
                     }
@@ -205,20 +201,15 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
         s32 user_value{};
         bool succeeded{};
         if (value != new_value) {
-            succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
+            succeeded = UpdateIfEqual(m_system, &user_value, addr, value, new_value);
         } else {
-            succeeded = ReadFromUser(system, &user_value, addr);
+            succeeded = ReadFromUser(m_system, &user_value, addr);
         }
 
-        if (!succeeded) {
-            LOG_ERROR(Kernel, "Invalid current memory!");
-            return ResultInvalidCurrentMemory;
-        }
-        if (user_value != value) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(succeeded, ResultInvalidCurrentMemory);
+        R_UNLESS(user_value == value, ResultInvalidState);
 
-        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+        while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
@@ -227,57 +218,57 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
             ASSERT(target_thread->IsWaitingForAddressArbiter());
             target_thread->ClearAddressArbiter();
 
-            it = thread_tree.erase(it);
+            it = m_tree.erase(it);
             ++num_waiters;
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
-    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
+    ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
 
     {
-        KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout};
+        KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }
 
         // Read the value from userspace.
         s32 user_value{};
         bool succeeded{};
         if (decrement) {
-            succeeded = DecrementIfLessThan(system, &user_value, addr, value);
+            succeeded = DecrementIfLessThan(m_system, &user_value, addr, value);
         } else {
-            succeeded = ReadFromUser(system, &user_value, addr);
+            succeeded = ReadFromUser(m_system, &user_value, addr);
        }
 
         if (!succeeded) {
             slp.CancelSleep();
-            return ResultInvalidCurrentMemory;
+            R_THROW(ResultInvalidCurrentMemory);
         }
 
         // Check that the value is less than the specified one.
         if (user_value >= value) {
             slp.CancelSleep();
-            return ResultInvalidState;
+            R_THROW(ResultInvalidState);
         }
 
         // Check that the timeout is non-zero.
         if (timeout == 0) {
             slp.CancelSleep();
-            return ResultTimedOut;
+            R_THROW(ResultTimedOut);
         }
 
         // Set the arbiter.
-        cur_thread->SetAddressArbiter(&thread_tree, addr);
-        thread_tree.insert(*cur_thread);
+        cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
+        m_tree.insert(*cur_thread);
 
         // Wait for the thread to finish.
         wait_queue.SetHardwareTimer(timer);
@@ -291,41 +282,41 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
 
 Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
-    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
+    ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
 
     {
-        KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout};
+        KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }
 
         // Read the value from userspace.
         s32 user_value{};
-        if (!ReadFromUser(system, &user_value, addr)) {
+        if (!ReadFromUser(m_system, &user_value, addr)) {
             slp.CancelSleep();
-            return ResultInvalidCurrentMemory;
+            R_THROW(ResultInvalidCurrentMemory);
         }
 
         // Check that the value is equal.
         if (value != user_value) {
             slp.CancelSleep();
-            return ResultInvalidState;
+            R_THROW(ResultInvalidState);
         }
 
         // Check that the timeout is non-zero.
         if (timeout == 0) {
             slp.CancelSleep();
-            return ResultTimedOut;
+            R_THROW(ResultTimedOut);
         }
 
         // Set the arbiter.
-        cur_thread->SetAddressArbiter(&thread_tree, addr);
-        thread_tree.insert(*cur_thread);
+        cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
+        m_tree.insert(*cur_thread);
 
         // Wait for the thread to finish.
         wait_queue.SetHardwareTimer(timer);
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index e4085ae22..9a8c1ae94 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -22,47 +22,46 @@ class KAddressArbiter {
 public:
     using ThreadTree = KConditionVariable::ThreadTree;
 
-    explicit KAddressArbiter(Core::System& system_);
+    explicit KAddressArbiter(Core::System& system);
     ~KAddressArbiter();
 
-    [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
+    Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
         switch (type) {
         case Svc::SignalType::Signal:
-            return Signal(addr, count);
+            R_RETURN(this->Signal(addr, count));
         case Svc::SignalType::SignalAndIncrementIfEqual:
-            return SignalAndIncrementIfEqual(addr, value, count);
+            R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count));
         case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
-            return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
+            R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count));
+        default:
+            UNREACHABLE();
         }
-        ASSERT(false);
-        return ResultUnknown;
     }
 
-    [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
-                                        s64 timeout) {
+    Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
         switch (type) {
         case Svc::ArbitrationType::WaitIfLessThan:
-            return WaitIfLessThan(addr, value, false, timeout);
+            R_RETURN(WaitIfLessThan(addr, value, false, timeout));
        case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
-            return WaitIfLessThan(addr, value, true, timeout);
+            R_RETURN(WaitIfLessThan(addr, value, true, timeout));
         case Svc::ArbitrationType::WaitIfEqual:
-            return WaitIfEqual(addr, value, timeout);
+            R_RETURN(WaitIfEqual(addr, value, timeout));
+        default:
+            UNREACHABLE();
         }
-        ASSERT(false);
-        return ResultUnknown;
     }
 
 private:
-    [[nodiscard]] Result Signal(VAddr addr, s32 count);
-    [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
-    [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
-    [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
-    [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+    Result Signal(VAddr addr, s32 count);
+    Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
+    Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
+    Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
+    Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
 
-    ThreadTree thread_tree;
-
-    Core::System& system;
-    KernelCore& kernel;
+private:
+    ThreadTree m_tree;
+    Core::System& m_system;
+    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 534321d8d..3f13b8193 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -80,17 +80,17 @@ public:
         return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
     }
 
     static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
-        return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
+        return kernel.GlobalSchedulerContext().m_scheduler_lock.IsLockedByCurrentThread();
     }
     static bool IsSchedulerUpdateNeeded(KernelCore& kernel) {
-        return kernel.GlobalSchedulerContext().scheduler_update_needed;
+        return kernel.GlobalSchedulerContext().m_scheduler_update_needed;
     }
     static void SetSchedulerUpdateNeeded(KernelCore& kernel) {
-        kernel.GlobalSchedulerContext().scheduler_update_needed = true;
+        kernel.GlobalSchedulerContext().m_scheduler_update_needed = true;
     }
     static void ClearSchedulerUpdateNeeded(KernelCore& kernel) {
-        kernel.GlobalSchedulerContext().scheduler_update_needed = false;
+        kernel.GlobalSchedulerContext().m_scheduler_update_needed = false;
     }
 
     static void DisableScheduling(KernelCore& kernel);
@@ -115,7 +115,7 @@ public:
 private:
     // Static private API.
     static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) {
-        return kernel.GlobalSchedulerContext().priority_queue;
+        return kernel.GlobalSchedulerContext().m_priority_queue;
     }
 
     static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
@@ -166,7 +166,7 @@ private:
 
 class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
 public:
     explicit KScopedSchedulerLock(KernelCore& kernel)
-        : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {}
+        : KScopedLock(kernel.GlobalSchedulerContext().m_scheduler_lock) {}
     ~KScopedSchedulerLock() = default;
 };
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 14b83a819..c485022f5 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -11,39 +11,39 @@
 
 namespace Kernel {
 
-class [[nodiscard]] KScopedSchedulerLockAndSleep {
+class KScopedSchedulerLockAndSleep {
 public:
-    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel_, KHardwareTimer** out_timer,
-                                          KThread* t, s64 timeout)
-        : kernel(kernel_), timeout_tick(timeout), thread(t), timer() {
+    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KHardwareTimer** out_timer,
+                                          KThread* thread, s64 timeout_tick)
+        : m_kernel(kernel), m_timeout_tick(timeout_tick), m_thread(thread), m_timer() {
         // Lock the scheduler.
-        kernel.GlobalSchedulerContext().scheduler_lock.Lock();
+        kernel.GlobalSchedulerContext().m_scheduler_lock.Lock();
 
         // Set our timer only if the time is positive.
-        timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr;
+        m_timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr;
 
-        *out_timer = timer;
+        *out_timer = m_timer;
     }
 
     ~KScopedSchedulerLockAndSleep() {
         // Register the sleep.
-        if (timeout_tick > 0) {
-            timer->RegisterTask(thread, timeout_tick);
+        if (m_timeout_tick > 0) {
+            m_timer->RegisterTask(m_thread, m_timeout_tick);
         }
 
         // Unlock the scheduler.
-        kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
+        m_kernel.GlobalSchedulerContext().m_scheduler_lock.Unlock();
     }
 
     void CancelSleep() {
-        timeout_tick = 0;
+        m_timeout_tick = 0;
     }
 
 private:
-    KernelCore& kernel;
-    s64 timeout_tick{};
-    KThread* thread{};
-    KHardwareTimer* timer{};
+    KernelCore& m_kernel;
+    s64 m_timeout_tick{};
+    KThread* m_thread{};
+    KHardwareTimer* m_timer{};
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
index fe648447b..61488f4ce 100644
--- a/src/core/hle/kernel/k_thread_queue.cpp
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -7,9 +7,10 @@
 
 namespace Kernel {
 
-void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
-                                   [[maybe_unused]] KSynchronizationObject* signaled_object,
-                                   [[maybe_unused]] Result wait_result) {}
+void KThreadQueue::NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
+                                   Result wait_result) {
+    UNREACHABLE();
+}
 
 void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
     // Set the thread's wait result.
@@ -43,7 +44,8 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool
     }
 }
 
-void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
-                                         [[maybe_unused]] Result wait_result) {}
+void KThreadQueueWithoutEndWait::EndWait(KThread* waiting_thread, Result wait_result) {
+    UNREACHABLE();
+}
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 01e330e2e..8ec2f900b 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -12,7 +12,7 @@ class KHardwareTimer;
 
 class KThreadQueue {
 public:
-    explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_}, m_hardware_timer{} {}
+    explicit KThreadQueue(KernelCore& kernel) : m_kernel{kernel}, m_hardware_timer{} {}
     virtual ~KThreadQueue() = default;
 
     void SetHardwareTimer(KHardwareTimer* timer) {
@@ -25,7 +25,7 @@ public:
     virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);
 
 private:
-    KernelCore& kernel;
+    KernelCore& m_kernel;
     KHardwareTimer* m_hardware_timer{};
 };

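Note: besides the m_ member-prefix rename, the converted code above replaces bare "return ResultX;" statements with the kernel's result macros (R_SUCCEED, R_THROW, R_RETURN, R_UNLESS). The sketch below is a simplified, self-contained stand-in for how those macros behave at a call site; it is not yuzu's actual implementation (the real macros are presumed to live in core/hle/result.h and carry additional machinery not shown here), and the numeric result codes are placeholders.

// Simplified stand-ins for the result macros used in the diff above.
// NOT the project's real definitions; they only mirror the control flow.
#include <cstdint>

struct Result {
    std::uint32_t raw{};
};

inline constexpr Result ResultSuccess{0};
inline constexpr Result ResultInvalidState{2};         // placeholder code
inline constexpr Result ResultInvalidCurrentMemory{3}; // placeholder code

// Return success from the current function.
#define R_SUCCEED() return ResultSuccess
// Return a specific failure result.
#define R_THROW(res) return (res)
// Propagate the Result of a nested call.
#define R_RETURN(expr) return (expr)
// Bail out with `res` unless `cond` holds.
#define R_UNLESS(cond, res)                                                    \
    do {                                                                       \
        if (!(cond)) {                                                         \
            return (res);                                                      \
        }                                                                      \
    } while (0)

// Example call site shaped like KAddressArbiter::SignalAndIncrementIfEqual:
// fail fast on a mismatched userspace value, otherwise report success.
Result SignalIfEqual(std::int32_t user_value, std::int32_t expected) {
    R_UNLESS(user_value == expected, ResultInvalidState);
    // ... wake any waiting threads here ...
    R_SUCCEED();
}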