| author | Lioncash <mathew1800@gmail.com> | 2020-10-13 08:10:50 -0400 |
|---|---|---|
| committer | Lioncash <mathew1800@gmail.com> | 2020-10-13 13:16:49 -0400 |
| commit | 39c8d18feba8eafcd43fbb55e73ae150a1947aad | |
| tree | 9565ff464bbb9e5a0aa66e6e310098314e88d019 /src/core/hle/kernel | |
| parent | d291fc1a517d0db07e4b32f5b4ad294c5e93e984 | |
core/CMakeLists: Make some warnings errors
Makes our error coverage a little more consistent across the board by
applying it to the Linux side of things as well. This also brings it in
line with the warning settings used in other libraries in the project.
This also updates httplib to 0.7.9, which contains several warning
cleanups that allow us to enable additional warnings as errors.
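
For context, below is a minimal, self-contained sketch of the two implicit-conversion patterns the diff further down fixes. The `u16`/`u32` aliases and function shapes are stand-ins, and the exact flags promoted to errors (e.g. `-Wconversion` under `-Werror` on GCC/Clang) are an assumption, since the CMakeLists change itself lies outside this diffstat:

```cpp
#include <cstdint>

using u16 = std::uint16_t;
using u32 = std::uint32_t;

// Before: both statements implicitly narrow, so they stop building once the
// relevant conversion warnings are treated as errors.
void before(u16* generations, u16 i, u32& idle_cores, u32 core) {
    generations[i] = i + 1;      // i + 1 is int; storing it into a u16 may change the value
    idle_cores |= (1ul << core); // 1ul << core is unsigned long; OR-ing into a u32 narrows on LP64 (Linux)
}

// After: the narrowing is spelled out and the shift stays in a 32-bit type,
// mirroring the casts applied in handle_table.cpp and scheduler.cpp below.
void after(u16* generations, u16 i, u32& idle_cores, u32 core) {
    generations[i] = static_cast<u16>(i + 1);
    idle_cores |= (1U << core);
}
```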
Diffstat (limited to 'src/core/hle/kernel')
| -rw-r--r-- | src/core/hle/kernel/handle_table.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 10 |
2 files changed, 6 insertions, 6 deletions
```diff
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index fb30b6f8b..3e745c18b 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -118,7 +118,7 @@ std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
 void HandleTable::Clear() {
     for (u16 i = 0; i < table_size; ++i) {
-        generations[i] = i + 1;
+        generations[i] = static_cast<u16>(i + 1);
         objects[i] = nullptr;
     }
     next_free_slot = 0;
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5cbd3b912..6b7db5372 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -72,7 +72,7 @@ u32 GlobalScheduler::SelectThreads() {
         if (top_thread != nullptr) {
             // TODO(Blinkhawk): Implement Thread Pinning
         } else {
-            idle_cores |= (1ul << core);
+            idle_cores |= (1U << core);
         }
         top_threads[core] = top_thread;
     }
@@ -126,7 +126,7 @@ u32 GlobalScheduler::SelectThreads() {
             top_threads[core_id] = suggested;
         }
-        idle_cores &= ~(1ul << core_id);
+        idle_cores &= ~(1U << core_id);
     }
     u32 cores_needing_context_switch{};
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
@@ -134,7 +134,7 @@ u32 GlobalScheduler::SelectThreads() {
         ASSERT(top_threads[core] == nullptr ||
                static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
         if (update_thread(top_threads[core], sched)) {
-            cores_needing_context_switch |= (1ul << core);
+            cores_needing_context_switch |= (1U << core);
         }
     }
     return cores_needing_context_switch;
@@ -364,7 +364,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         } else {
             must_context_switch = true;
         }
-        cores_pending_reschedule &= ~(1ul << core);
+        cores_pending_reschedule &= ~(1U << core);
     }
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
@@ -767,7 +767,7 @@ void Scheduler::SwitchToCurrent() {
                     current_thread->context_guard.unlock();
                     break;
                 }
-                if (current_thread->GetProcessorID() != core_id) {
+                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
                     current_thread->context_guard.unlock();
                     break;
                 }
```
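
The final hunk in scheduler.cpp addresses a different warning class: the cast suggests GetProcessorID() returns a signed value, so comparing it directly against the unsigned core_id trips the signed/unsigned comparison warning. A hedged sketch of that pattern, using a hypothetical stand-in for the real member function (its actual declaration lives in thread.h, which is not part of this diff):

```cpp
#include <cstdint>

using u32 = std::uint32_t;
using s32 = std::int32_t;

// Hypothetical stand-in for Thread::GetProcessorID(); assumed here to return a
// signed type, which is what the cast in the diff implies.
s32 GetProcessorID() {
    return 2;
}

bool RunsOnCore(u32 core_id) {
    // if (GetProcessorID() != core_id)  // -Wsign-compare: the s32 is implicitly converted to u32
    return static_cast<u32>(GetProcessorID()) == core_id;  // cast makes the conversion explicit
}
```

The explicit cast silences the warning without changing behaviour for non-negative processor IDs; whether negative IDs can reach this comparison is not visible from the diff alone.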
