diff options
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp      | 9
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h   | 3
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index b1cabbca0..d6676904b 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -384,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) {
 
 void KScheduler::ScheduleImpl() {
     // First, clear the needs scheduling bool.
-    m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+    m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+    std::atomic_thread_fence(std::memory_order_seq_cst);
 
     // Load the appropriate thread pointers for scheduling.
     KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
@@ -400,7 +401,8 @@ void KScheduler::ScheduleImpl() {
     // If there aren't, we want to check if the highest priority thread is the same as the current
     // thread.
     if (highest_priority_thread == cur_thread) {
-        // If they're the same, then we can just return.
+        // If they're the same, then we can just issue a memory barrier and return.
+        std::atomic_thread_fence(std::memory_order_seq_cst);
         return;
     }
@@ -476,7 +478,8 @@ void KScheduler::ScheduleImplFiber() {
         // We failed to successfully do the context switch, and need to retry.
 
         // Clear needs_scheduling.
-        m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+        m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+        std::atomic_thread_fence(std::memory_order_seq_cst);
 
         // Refresh the highest priority thread.
         highest_priority_thread = m_state.highest_priority_thread;
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 73314b45e..129d60472 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -60,6 +60,9 @@ public:
         // Release an instance of the lock.
         if ((--lock_count) == 0) {
+            // Perform a memory barrier here.
+            std::atomic_thread_fence(std::memory_order_seq_cst);
+
             // We're no longer going to hold the lock. Take note of what cores need scheduling.
             const u64 cores_needing_scheduling =
                 SchedulerType::UpdateHighestPriorityThreads(kernel);
