Diffstat (limited to 'src')
88 files changed, 1343 insertions, 433 deletions
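Editorial note (not part of the recorded patch): the headline addition in this changeset is src/common/multi_level_queue.h, a fixed-depth priority queue that replaces Common::ThreadQueueList as the scheduler's ready_queue. Each priority level is backed by a std::list, and a 64-bit bitmap of occupied levels lets highest_priority_set()/lowest_priority_set() find the next non-empty level in O(1) via the CountTrailingZeroes64/CountLeadingZeroes64 helpers also added to common/bit_util.h in this diff. The sketch below is an illustrative usage example only; the int element type, the chosen priority values, and the main() wrapper are hypothetical, while the member functions (add, remove, adjust, front, highest_priority_set, size, empty) come from the header introduced further down. The scheduler itself instantiates the container as Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1>.

    // Minimal usage sketch of the new container (assumed standalone test, not in the patch).
    #include "common/common_types.h"
    #include "common/multi_level_queue.h"

    int main() {
        // 64 discrete priority levels, mirroring the scheduler's ready_queue depth.
        Common::MultiLevelQueue<int, 64> queue;

        queue.add(42, 10);        // enqueue at priority level 10 (lower index = higher priority)
        queue.add(7, 10, false);  // send_back = false pushes to the front of level 10
        queue.add(99, 30);

        const u32 top = queue.highest_priority_set();  // lowest set bit of the bitmap -> 10
        const int next = queue.front();                // front of the highest occupied level -> 7

        queue.adjust(42, 10, 5);  // remove from level 10, re-add at level 5
        queue.remove(7, 10);      // level 10 becomes empty; its bitmap bit is cleared

        return (top == 10 && next == 7 && queue.size() == 2) ? 0 : 1;
    }

This mirrors how scheduler.cpp below swaps push_back/pop_first_better/move for add/front/adjust: the bitmap keeps priority lookups constant-time, while removal within a level still walks that level's list (ListIterateTo), which matches the previous ThreadQueueList behaviour.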
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 43ae8a9e7..850ce8006 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -98,6 +98,7 @@ add_library(common STATIC microprofile.h microprofileui.h misc.cpp + multi_level_queue.h page_table.cpp page_table.h param_package.cpp diff --git a/src/common/bit_util.h b/src/common/bit_util.h index 1eea17ba1..a4f9ed4aa 100644 --- a/src/common/bit_util.h +++ b/src/common/bit_util.h @@ -58,4 +58,43 @@ inline u64 CountLeadingZeroes64(u64 value) { return __builtin_clzll(value); } #endif + +#ifdef _MSC_VER +inline u32 CountTrailingZeroes32(u32 value) { + unsigned long trailing_zero = 0; + + if (_BitScanForward(&trailing_zero, value) != 0) { + return trailing_zero; + } + + return 32; +} + +inline u64 CountTrailingZeroes64(u64 value) { + unsigned long trailing_zero = 0; + + if (_BitScanForward64(&trailing_zero, value) != 0) { + return trailing_zero; + } + + return 64; +} +#else +inline u32 CountTrailingZeroes32(u32 value) { + if (value == 0) { + return 32; + } + + return __builtin_ctz(value); +} + +inline u64 CountTrailingZeroes64(u64 value) { + if (value == 0) { + return 64; + } + + return __builtin_ctzll(value); +} +#endif + } // namespace Common diff --git a/src/common/detached_tasks.cpp b/src/common/detached_tasks.cpp index a347d9e02..f268d6021 100644 --- a/src/common/detached_tasks.cpp +++ b/src/common/detached_tasks.cpp @@ -16,22 +16,22 @@ DetachedTasks::DetachedTasks() { } void DetachedTasks::WaitForAllTasks() { - std::unique_lock<std::mutex> lock(mutex); + std::unique_lock lock{mutex}; cv.wait(lock, [this]() { return count == 0; }); } DetachedTasks::~DetachedTasks() { - std::unique_lock<std::mutex> lock(mutex); + std::unique_lock lock{mutex}; ASSERT(count == 0); instance = nullptr; } void DetachedTasks::AddTask(std::function<void()> task) { - std::unique_lock<std::mutex> lock(instance->mutex); + std::unique_lock lock{instance->mutex}; ++instance->count; std::thread([task{std::move(task)}]() { task(); - std::unique_lock<std::mutex> lock(instance->mutex); + std::unique_lock lock{instance->mutex}; --instance->count; std::notify_all_at_thread_exit(instance->cv, std::move(lock)); }) diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp index 4462ff3fb..a03179520 100644 --- a/src/common/logging/backend.cpp +++ b/src/common/logging/backend.cpp @@ -46,12 +46,12 @@ public: } void AddBackend(std::unique_ptr<Backend> backend) { - std::lock_guard<std::mutex> lock(writing_mutex); + std::lock_guard lock{writing_mutex}; backends.push_back(std::move(backend)); } void RemoveBackend(std::string_view backend_name) { - std::lock_guard<std::mutex> lock(writing_mutex); + std::lock_guard lock{writing_mutex}; const auto it = std::remove_if(backends.begin(), backends.end(), [&backend_name](const auto& i) { return backend_name == i->GetName(); }); @@ -80,7 +80,7 @@ private: backend_thread = std::thread([&] { Entry entry; auto write_logs = [&](Entry& e) { - std::lock_guard<std::mutex> lock(writing_mutex); + std::lock_guard lock{writing_mutex}; for (const auto& backend : backends) { backend->Write(e); } diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h new file mode 100644 index 000000000..2b61b91e0 --- /dev/null +++ b/src/common/multi_level_queue.h @@ -0,0 +1,337 @@ +// Copyright 2019 TuxSH +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <array> +#include <iterator> +#include <list> +#include <utility> + +#include "common/bit_util.h" +#include "common/common_types.h" + +namespace Common { + +/** + * A MultiLevelQueue is a type of priority queue which has the following characteristics: + * - iteratable through each of its elements. + * - back can be obtained. + * - O(1) add, lookup (both front and back) + * - discrete priorities and a max of 64 priorities (limited domain) + * This type of priority queue is normaly used for managing threads within an scheduler + */ +template <typename T, std::size_t Depth> +class MultiLevelQueue { +public: + using value_type = T; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = value_type*; + using const_pointer = const value_type*; + + using difference_type = typename std::pointer_traits<pointer>::difference_type; + using size_type = std::size_t; + + template <bool is_constant> + class iterator_impl { + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = T; + using pointer = std::conditional_t<is_constant, T*, const T*>; + using reference = std::conditional_t<is_constant, const T&, T&>; + using difference_type = typename std::pointer_traits<pointer>::difference_type; + + friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) { + if (lhs.IsEnd() && rhs.IsEnd()) + return true; + return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it); + } + + friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) { + return !operator==(lhs, rhs); + } + + reference operator*() const { + return *it; + } + + pointer operator->() const { + return it.operator->(); + } + + iterator_impl& operator++() { + if (IsEnd()) { + return *this; + } + + ++it; + + if (it == GetEndItForPrio()) { + u64 prios = mlq.used_priorities; + prios &= ~((1ULL << (current_priority + 1)) - 1); + if (prios == 0) { + current_priority = mlq.depth(); + } else { + current_priority = CountTrailingZeroes64(prios); + it = GetBeginItForPrio(); + } + } + return *this; + } + + iterator_impl& operator--() { + if (IsEnd()) { + if (mlq.used_priorities != 0) { + current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities); + it = GetEndItForPrio(); + --it; + } + } else if (it == GetBeginItForPrio()) { + u64 prios = mlq.used_priorities; + prios &= (1ULL << current_priority) - 1; + if (prios != 0) { + current_priority = CountTrailingZeroes64(prios); + it = GetEndItForPrio(); + --it; + } + } else { + --it; + } + return *this; + } + + iterator_impl operator++(int) { + const iterator_impl v{*this}; + ++(*this); + return v; + } + + iterator_impl operator--(int) { + const iterator_impl v{*this}; + --(*this); + return v; + } + + // allow implicit const->non-const + iterator_impl(const iterator_impl<false>& other) + : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {} + + iterator_impl(const iterator_impl<true>& other) + : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {} + + iterator_impl& operator=(const iterator_impl<false>& other) { + mlq = other.mlq; + it = other.it; + current_priority = other.current_priority; + return *this; + } + + friend class iterator_impl<true>; + iterator_impl() = default; + + private: + friend class MultiLevelQueue; + using container_ref = + std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>; + using list_iterator = std::conditional_t<is_constant, typename 
std::list<T>::const_iterator, + typename std::list<T>::iterator>; + + explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority) + : mlq(mlq), it(it), current_priority(current_priority) {} + explicit iterator_impl(container_ref mlq, u32 current_priority) + : mlq(mlq), it(), current_priority(current_priority) {} + + bool IsEnd() const { + return current_priority == mlq.depth(); + } + + list_iterator GetBeginItForPrio() const { + return mlq.levels[current_priority].begin(); + } + + list_iterator GetEndItForPrio() const { + return mlq.levels[current_priority].end(); + } + + container_ref mlq; + list_iterator it; + u32 current_priority; + }; + + using iterator = iterator_impl<false>; + using const_iterator = iterator_impl<true>; + + void add(const T& element, u32 priority, bool send_back = true) { + if (send_back) + levels[priority].push_back(element); + else + levels[priority].push_front(element); + used_priorities |= 1ULL << priority; + } + + void remove(const T& element, u32 priority) { + auto it = ListIterateTo(levels[priority], element); + if (it == levels[priority].end()) + return; + levels[priority].erase(it); + if (levels[priority].empty()) { + used_priorities &= ~(1ULL << priority); + } + } + + void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) { + remove(element, old_priority); + add(element, new_priority, !adjust_front); + } + void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) { + adjust(*it, old_priority, new_priority, adjust_front); + } + + void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) { + ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority], + ListIterateTo(levels[priority], element)); + + other.used_priorities |= 1ULL << priority; + + if (levels[priority].empty()) { + used_priorities &= ~(1ULL << priority); + } + } + + void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) { + transfer_to_front(*it, priority, other); + } + + void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) { + ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority], + ListIterateTo(levels[priority], element)); + + other.used_priorities |= 1ULL << priority; + + if (levels[priority].empty()) { + used_priorities &= ~(1ULL << priority); + } + } + + void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) { + transfer_to_back(*it, priority, other); + } + + void yield(u32 priority, std::size_t n = 1) { + ListShiftForward(levels[priority], n); + } + + std::size_t depth() const { + return Depth; + } + + std::size_t size(u32 priority) const { + return levels[priority].size(); + } + + std::size_t size() const { + u64 priorities = used_priorities; + std::size_t size = 0; + while (priorities != 0) { + const u64 current_priority = CountTrailingZeroes64(priorities); + size += levels[current_priority].size(); + priorities &= ~(1ULL << current_priority); + } + return size; + } + + bool empty() const { + return used_priorities == 0; + } + + bool empty(u32 priority) const { + return (used_priorities & (1ULL << priority)) == 0; + } + + u32 highest_priority_set(u32 max_priority = 0) const { + const u64 priorities = + max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1)); + return priorities == 0 ? 
Depth : static_cast<u32>(CountTrailingZeroes64(priorities)); + } + + u32 lowest_priority_set(u32 min_priority = Depth - 1) const { + const u64 priorities = min_priority >= Depth - 1 + ? used_priorities + : (used_priorities & ((1ULL << (min_priority + 1)) - 1)); + return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities); + } + + const_iterator cbegin(u32 max_prio = 0) const { + const u32 priority = highest_priority_set(max_prio); + return priority == Depth ? cend() + : const_iterator{*this, levels[priority].cbegin(), priority}; + } + const_iterator begin(u32 max_prio = 0) const { + return cbegin(max_prio); + } + iterator begin(u32 max_prio = 0) { + const u32 priority = highest_priority_set(max_prio); + return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority}; + } + + const_iterator cend(u32 min_prio = Depth - 1) const { + return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1); + } + const_iterator end(u32 min_prio = Depth - 1) const { + return cend(min_prio); + } + iterator end(u32 min_prio = Depth - 1) { + return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1); + } + + T& front(u32 max_priority = 0) { + const u32 priority = highest_priority_set(max_priority); + return levels[priority == Depth ? 0 : priority].front(); + } + const T& front(u32 max_priority = 0) const { + const u32 priority = highest_priority_set(max_priority); + return levels[priority == Depth ? 0 : priority].front(); + } + + T back(u32 min_priority = Depth - 1) { + const u32 priority = lowest_priority_set(min_priority); // intended + return levels[priority == Depth ? 63 : priority].back(); + } + const T& back(u32 min_priority = Depth - 1) const { + const u32 priority = lowest_priority_set(min_priority); // intended + return levels[priority == Depth ? 63 : priority].back(); + } + +private: + using const_list_iterator = typename std::list<T>::const_iterator; + + static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) { + if (shift >= list.size()) { + return; + } + + const auto begin_range = list.begin(); + const auto end_range = std::next(begin_range, shift); + list.splice(list.end(), list, begin_range, end_range); + } + + static void ListSplice(std::list<T>& in_list, const_list_iterator position, + std::list<T>& out_list, const_list_iterator element) { + in_list.splice(position, out_list, element); + } + + static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) { + auto it = list.cbegin(); + while (it != list.cend() && *it != element) { + ++it; + } + return it; + } + + std::array<std::list<T>, Depth> levels; + u64 used_priorities = 0; +}; + +} // namespace Common diff --git a/src/common/thread.cpp b/src/common/thread.cpp index 5144c0d9f..fe7a420cc 100644 --- a/src/common/thread.cpp +++ b/src/common/thread.cpp @@ -27,18 +27,6 @@ namespace Common { #ifdef _MSC_VER -void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) { - SetThreadAffinityMask(thread, mask); -} - -void SetCurrentThreadAffinity(u32 mask) { - SetThreadAffinityMask(GetCurrentThread(), mask); -} - -void SwitchCurrentThread() { - SwitchToThread(); -} - // Sets the debugger-visible name of the current thread. // Uses undocumented (actually, it is now documented) trick. 
// http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp @@ -70,31 +58,6 @@ void SetCurrentThreadName(const char* name) { #else // !MSVC_VER, so must be POSIX threads -void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) { -#ifdef __APPLE__ - thread_policy_set(pthread_mach_thread_np(thread), THREAD_AFFINITY_POLICY, (integer_t*)&mask, 1); -#elif (defined __linux__ || defined __FreeBSD__) && !(defined ANDROID) - cpu_set_t cpu_set; - CPU_ZERO(&cpu_set); - - for (int i = 0; i != sizeof(mask) * 8; ++i) - if ((mask >> i) & 1) - CPU_SET(i, &cpu_set); - - pthread_setaffinity_np(thread, sizeof(cpu_set), &cpu_set); -#endif -} - -void SetCurrentThreadAffinity(u32 mask) { - SetThreadAffinity(pthread_self(), mask); -} - -#ifndef _WIN32 -void SwitchCurrentThread() { - usleep(1000 * 1); -} -#endif - // MinGW with the POSIX threading model does not support pthread_setname_np #if !defined(_WIN32) || defined(_MSC_VER) void SetCurrentThreadName(const char* name) { diff --git a/src/common/thread.h b/src/common/thread.h index 2cf74452d..0cfd98be6 100644 --- a/src/common/thread.h +++ b/src/common/thread.h @@ -9,14 +9,13 @@ #include <cstddef> #include <mutex> #include <thread> -#include "common/common_types.h" namespace Common { class Event { public: void Set() { - std::lock_guard<std::mutex> lk(mutex); + std::lock_guard lk{mutex}; if (!is_set) { is_set = true; condvar.notify_one(); @@ -24,14 +23,14 @@ public: } void Wait() { - std::unique_lock<std::mutex> lk(mutex); + std::unique_lock lk{mutex}; condvar.wait(lk, [&] { return is_set; }); is_set = false; } template <class Clock, class Duration> bool WaitUntil(const std::chrono::time_point<Clock, Duration>& time) { - std::unique_lock<std::mutex> lk(mutex); + std::unique_lock lk{mutex}; if (!condvar.wait_until(lk, time, [this] { return is_set; })) return false; is_set = false; @@ -39,7 +38,7 @@ public: } void Reset() { - std::unique_lock<std::mutex> lk(mutex); + std::unique_lock lk{mutex}; // no other action required, since wait loops on the predicate and any lingering signal will // get cleared on the first iteration is_set = false; @@ -57,7 +56,7 @@ public: /// Blocks until all "count" threads have called Sync() void Sync() { - std::unique_lock<std::mutex> lk(mutex); + std::unique_lock lk{mutex}; const std::size_t current_generation = generation; if (++waiting == count) { @@ -78,9 +77,6 @@ private: std::size_t generation = 0; // Incremented once each time the barrier is used }; -void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask); -void SetCurrentThreadAffinity(u32 mask); -void SwitchCurrentThread(); // On Linux, this is equal to sleep 1ms void SetCurrentThreadName(const char* name); } // namespace Common diff --git a/src/common/threadsafe_queue.h b/src/common/threadsafe_queue.h index 821e8536a..e714ba5b3 100644 --- a/src/common/threadsafe_queue.h +++ b/src/common/threadsafe_queue.h @@ -78,7 +78,7 @@ public: T PopWait() { if (Empty()) { - std::unique_lock<std::mutex> lock(cv_mutex); + std::unique_lock lock{cv_mutex}; cv.wait(lock, [this]() { return !Empty(); }); } T t; @@ -137,7 +137,7 @@ public: template <typename Arg> void Push(Arg&& t) { - std::lock_guard<std::mutex> lock(write_lock); + std::lock_guard lock{write_lock}; spsc_queue.Push(t); } diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index 1eefed6d0..e75741db0 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp @@ -22,7 +22,7 @@ namespace Core { void CpuBarrier::NotifyEnd() { - 
std::unique_lock<std::mutex> lock(mutex); + std::unique_lock lock{mutex}; end = true; condition.notify_all(); } @@ -34,7 +34,7 @@ bool CpuBarrier::Rendezvous() { } if (!end) { - std::unique_lock<std::mutex> lock(mutex); + std::unique_lock lock{mutex}; --cores_waiting; if (!cores_waiting) { @@ -131,7 +131,7 @@ void Cpu::Reschedule() { reschedule_pending = false; // Lock the global kernel mutex when we manipulate the HLE state - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; scheduler->Reschedule(); } diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp index e29afd630..1320bbe77 100644 --- a/src/core/frontend/emu_window.cpp +++ b/src/core/frontend/emu_window.cpp @@ -30,7 +30,7 @@ private: explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {} std::tuple<float, float, bool> GetStatus() const override { if (auto state = touch_state.lock()) { - std::lock_guard<std::mutex> guard(state->mutex); + std::lock_guard guard{state->mutex}; return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed); } return std::make_tuple(0.0f, 0.0f, false); @@ -81,7 +81,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) { if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) return; - std::lock_guard<std::mutex> guard(touch_state->mutex); + std::lock_guard guard{touch_state->mutex}; touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) / (framebuffer_layout.screen.right - framebuffer_layout.screen.left); touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) / @@ -91,7 +91,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) { } void EmuWindow::TouchReleased() { - std::lock_guard<std::mutex> guard(touch_state->mutex); + std::lock_guard guard{touch_state->mutex}; touch_state->touch_pressed = false; touch_state->touch_x = 0; touch_state->touch_y = 0; diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 352190da8..c8842410b 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_ // them all. std::size_t last = waiting_threads.size(); if (num_to_wake > 0) { - last = num_to_wake; + last = std::min(last, static_cast<std::size_t>(num_to_wake)); } // Signal the waiting threads. @@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a // Determine the modified value depending on the waiting count. s32 updated_value; if (waiting_threads.empty()) { - updated_value = value - 1; - } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) { updated_value = value + 1; + } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) { + updated_value = value - 1; } else { updated_value = value; } diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h index 834fd23d2..879957dcb 100644 --- a/src/core/hle/kernel/code_set.h +++ b/src/core/hle/kernel/code_set.h @@ -5,7 +5,6 @@ #pragma once #include <cstddef> -#include <memory> #include <vector> #include "common/common_types.h" @@ -78,7 +77,7 @@ struct CodeSet final { } /// The overall data that backs this code set. 
- std::shared_ptr<std::vector<u8>> memory; + std::vector<u8> memory; /// The segments that comprise this code set. std::array<Segment, 3> segments; diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index a7e4ddc05..3f14bfa86 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -34,7 +34,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ const auto& system = Core::System::GetInstance(); // Lock the global kernel mutex when we enter the kernel HLE. - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; SharedPtr<Thread> thread = system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); @@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || thread->GetWaitHandle() != 0) { - ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); + ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex || + thread->GetStatus() == ThreadStatus::WaitCondVar); thread->SetMutexWaitAddress(0); thread->SetCondVarWaitAddress(0); thread->SetWaitHandle(0); @@ -114,7 +115,7 @@ struct KernelCore::Impl { // Creates the default system resource limit void InitializeSystemResourceLimit(KernelCore& kernel) { - system_resource_limit = ResourceLimit::Create(kernel, "System"); + system_resource_limit = ResourceLimit::Create(kernel); // If setting the default system values fails, then something seriously wrong has occurred. ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000) @@ -190,6 +191,10 @@ const Process* KernelCore::CurrentProcess() const { return impl->current_process; } +const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const { + return impl->process_list; +} + void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) { impl->named_ports.emplace(std::move(name), std::move(port)); } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 03ea5b659..6b8738599 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -72,6 +72,9 @@ public: /// Retrieves a const pointer to the current process. const Process* CurrentProcess() const; + /// Retrieves the list of processes. 
+ const std::vector<SharedPtr<Process>>& GetProcessList() const; + /// Adds a port to the named port table void AddNamedPort(std::string name, SharedPtr<ClientPort> port); diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp index 217144efc..10431e94c 100644 --- a/src/core/hle/kernel/object.cpp +++ b/src/core/hle/kernel/object.cpp @@ -24,7 +24,6 @@ bool Object::IsWaitable() const { case HandleType::WritableEvent: case HandleType::SharedMemory: case HandleType::TransferMemory: - case HandleType::AddressArbiter: case HandleType::ResourceLimit: case HandleType::ClientPort: case HandleType::ClientSession: diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index 3f6baa094..332876c27 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h @@ -25,7 +25,6 @@ enum class HandleType : u32 { TransferMemory, Thread, Process, - AddressArbiter, ResourceLimit, ClientPort, ServerPort, diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 0d782e4ba..041267318 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -5,6 +5,7 @@ #include <algorithm> #include <memory> #include <random> +#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" @@ -75,6 +76,18 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const { return resource_limit; } +u64 Process::GetTotalPhysicalMemoryUsed() const { + return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size; +} + +void Process::RegisterThread(const Thread* thread) { + thread_list.push_back(thread); +} + +void Process::UnregisterThread(const Thread* thread) { + thread_list.remove(thread); +} + ResultCode Process::ClearSignalState() { if (status == ProcessStatus::Exited) { LOG_ERROR(Kernel, "called on a terminated process instance."); @@ -107,14 +120,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { return handle_table.SetSize(capabilities.GetHandleTableSize()); } -void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { +void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) { + // The kernel always ensures that the given stack size is page aligned. + main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE); + // Allocate and map the main thread stack // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part // of the user address space. 
+ const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size; vm_manager - .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size, - std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, - MemoryState::Stack) + .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size), + 0, main_thread_stack_size, MemoryState::Stack) .Unwrap(); vm_manager.LogLayout(); @@ -210,11 +226,13 @@ void Process::FreeTLSSlot(VAddr tls_address) { } void Process::LoadModule(CodeSet module_, VAddr base_addr) { + const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory)); + const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) { const auto vma = vm_manager - .MapMemoryBlock(segment.addr + base_addr, module_.memory, - segment.offset, segment.size, memory_state) + .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset, + segment.size, memory_state) .Unwrap(); vm_manager.Reprotect(vma, permissions); }; @@ -224,6 +242,8 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) { MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); + code_memory_size += module_.memory.size(); + // Clear instruction cache in CPU JIT system.InvalidateCpuInstructionCaches(); } @@ -237,7 +257,7 @@ void Process::Acquire(Thread* thread) { ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); } -bool Process::ShouldWait(Thread* thread) const { +bool Process::ShouldWait(const Thread* thread) const { return !is_signaled; } diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index a0217d3d8..f060f2a3b 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -7,6 +7,7 @@ #include <array> #include <bitset> #include <cstddef> +#include <list> #include <string> #include <vector> #include <boost/container/static_vector.hpp> @@ -186,6 +187,22 @@ public: return random_entropy.at(index); } + /// Retrieves the total physical memory used by this process in bytes. + u64 GetTotalPhysicalMemoryUsed() const; + + /// Gets the list of all threads created with this process as their owner. + const std::list<const Thread*>& GetThreadList() const { + return thread_list; + } + + /// Registers a thread as being created under this process, + /// adding it to this process' thread list. + void RegisterThread(const Thread* thread); + + /// Unregisters a thread from this process, removing it + /// from this process' thread list. + void UnregisterThread(const Thread* thread); + /// Clears the signaled state of the process if and only if it's signaled. /// /// @pre The process must not be already terminated. If this is called on a @@ -210,7 +227,7 @@ public: /** * Applies address space changes and launches the process main thread. */ - void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size); + void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size); /** * Prepares a process for termination by stopping all of its threads @@ -234,7 +251,7 @@ private: ~Process() override; /// Checks if the specified thread should wait until this process is available. - bool ShouldWait(Thread* thread) const override; + bool ShouldWait(const Thread* thread) const override; /// Acquires/locks this process for the specified thread if it's available. 
void Acquire(Thread* thread) override; @@ -247,6 +264,12 @@ private: /// Memory manager for this process. Kernel::VMManager vm_manager; + /// Size of the main thread's stack in bytes. + u64 main_thread_stack_size = 0; + + /// Size of the loaded code memory in bytes. + u64 code_memory_size = 0; + /// Current status of the process ProcessStatus status; @@ -299,6 +322,9 @@ private: /// Random values for svcGetInfo RandomEntropy std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; + /// List of threads that are running with this process as their owner. + std::list<const Thread*> thread_list; + /// System context Core::System& system; diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index 0e5083f70..c2b798a4e 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp @@ -14,7 +14,7 @@ namespace Kernel { ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {} ReadableEvent::~ReadableEvent() = default; -bool ReadableEvent::ShouldWait(Thread* thread) const { +bool ReadableEvent::ShouldWait(const Thread* thread) const { return !signaled; } diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h index 77a9c362c..2eb9dcbb7 100644 --- a/src/core/hle/kernel/readable_event.h +++ b/src/core/hle/kernel/readable_event.h @@ -36,7 +36,7 @@ public: return HANDLE_TYPE; } - bool ShouldWait(Thread* thread) const override; + bool ShouldWait(const Thread* thread) const override; void Acquire(Thread* thread) override; /// Unconditionally clears the readable event's state. diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp index 2f9695005..173f69915 100644 --- a/src/core/hle/kernel/resource_limit.cpp +++ b/src/core/hle/kernel/resource_limit.cpp @@ -16,11 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) { ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {} ResourceLimit::~ResourceLimit() = default; -SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel, std::string name) { - SharedPtr<ResourceLimit> resource_limit(new ResourceLimit(kernel)); - - resource_limit->name = std::move(name); - return resource_limit; +SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) { + return new ResourceLimit(kernel); } s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const { diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h index 59dc11c22..70e09858a 100644 --- a/src/core/hle/kernel/resource_limit.h +++ b/src/core/hle/kernel/resource_limit.h @@ -31,16 +31,14 @@ constexpr bool IsValidResourceType(ResourceType type) { class ResourceLimit final : public Object { public: - /** - * Creates a resource limit object. - */ - static SharedPtr<ResourceLimit> Create(KernelCore& kernel, std::string name = "Unknown"); + /// Creates a resource limit object. + static SharedPtr<ResourceLimit> Create(KernelCore& kernel); std::string GetTypeName() const override { return "ResourceLimit"; } std::string GetName() const override { - return name; + return GetTypeName(); } static const HandleType HANDLE_TYPE = HandleType::ResourceLimit; @@ -95,9 +93,6 @@ private: ResourceArray limits{}; /// Current resource limit values. ResourceArray values{}; - - /// Name of resource limit object. 
- std::string name; }; } // namespace Kernel diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index cc189cc64..ac501bf7f 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -29,8 +29,8 @@ Scheduler::~Scheduler() { } bool Scheduler::HaveReadyThreads() const { - std::lock_guard<std::mutex> lock(scheduler_mutex); - return ready_queue.get_first() != nullptr; + std::lock_guard lock{scheduler_mutex}; + return !ready_queue.empty(); } Thread* Scheduler::GetCurrentThread() const { @@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() { Thread* thread = GetCurrentThread(); if (thread && thread->GetStatus() == ThreadStatus::Running) { + if (ready_queue.empty()) { + return thread; + } // We have to do better than the current thread. // This call returns null when that's not possible. - next = ready_queue.pop_first_better(thread->GetPriority()); - if (!next) { - // Otherwise just keep going with the current thread + next = ready_queue.front(); + if (next == nullptr || next->GetPriority() >= thread->GetPriority()) { next = thread; } } else { - next = ready_queue.pop_first(); + if (ready_queue.empty()) { + return nullptr; + } + next = ready_queue.front(); } return next; } void Scheduler::SwitchContext(Thread* new_thread) { - Thread* const previous_thread = GetCurrentThread(); + Thread* previous_thread = GetCurrentThread(); Process* const previous_process = system.Kernel().CurrentProcess(); UpdateLastContextSwitchTime(previous_thread, previous_process); @@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { if (previous_thread->GetStatus() == ThreadStatus::Running) { // This is only the case when a reschedule is triggered without the current thread // yielding execution (i.e. 
an event triggered, system core time-sliced, etc) - ready_queue.push_front(previous_thread->GetPriority(), previous_thread); + ready_queue.add(previous_thread, previous_thread->GetPriority(), false); previous_thread->SetStatus(ThreadStatus::Ready); } } @@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { current_thread = new_thread; - ready_queue.remove(new_thread->GetPriority(), new_thread); + ready_queue.remove(new_thread, new_thread->GetPriority()); new_thread->SetStatus(ThreadStatus::Running); auto* const thread_owner_process = current_thread->GetOwnerProcess(); @@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { } void Scheduler::Reschedule() { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; Thread* cur = GetCurrentThread(); Thread* next = PopNextReadyThread(); @@ -143,51 +148,54 @@ void Scheduler::Reschedule() { SwitchContext(next); } -void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); +void Scheduler::AddThread(SharedPtr<Thread> thread) { + std::lock_guard lock{scheduler_mutex}; thread_list.push_back(std::move(thread)); - ready_queue.prepare(priority); } void Scheduler::RemoveThread(Thread* thread) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), thread_list.end()); } void Scheduler::ScheduleThread(Thread* thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.push_back(priority, thread); + ready_queue.add(thread, priority); } void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.remove(priority, thread); + ready_queue.remove(thread, priority); } void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; + if (thread->GetPriority() == priority) { + return; + } // If thread was ready, adjust queues if (thread->GetStatus() == ThreadStatus::Ready) - ready_queue.move(thread, thread->GetPriority(), priority); - else - ready_queue.prepare(priority); + ready_queue.adjust(thread, thread->GetPriority(), priority); } Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; const u32 mask = 1U << core; - return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { - return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; - }); + for (auto* thread : ready_queue) { + if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) { + return thread; + } + } + return nullptr; } void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 1c5bf57d9..b29bf7be8 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -7,7 +7,7 @@ #include <mutex> #include <vector> #include "common/common_types.h" -#include "common/thread_queue_list.h" +#include "common/multi_level_queue.h" #include 
"core/hle/kernel/object.h" #include "core/hle/kernel/thread.h" @@ -38,7 +38,7 @@ public: u64 GetLastContextSwitchTicks() const; /// Adds a new thread to the scheduler - void AddThread(SharedPtr<Thread> thread, u32 priority); + void AddThread(SharedPtr<Thread> thread); /// Removes a thread from the scheduler void RemoveThread(Thread* thread); @@ -156,7 +156,7 @@ private: std::vector<SharedPtr<Thread>> thread_list; /// Lists only ready thread ids. - Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; + Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue; SharedPtr<Thread> current_thread = nullptr; diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp index 0e1515c89..708fdf9e1 100644 --- a/src/core/hle/kernel/server_port.cpp +++ b/src/core/hle/kernel/server_port.cpp @@ -30,7 +30,7 @@ void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session) pending_sessions.push_back(std::move(pending_session)); } -bool ServerPort::ShouldWait(Thread* thread) const { +bool ServerPort::ShouldWait(const Thread* thread) const { // If there are no pending sessions, we wait until a new one is added. return pending_sessions.empty(); } diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h index 9bc667cf2..76293cb8b 100644 --- a/src/core/hle/kernel/server_port.h +++ b/src/core/hle/kernel/server_port.h @@ -75,7 +75,7 @@ public: /// waiting to be accepted by this port. void AppendPendingSession(SharedPtr<ServerSession> pending_session); - bool ShouldWait(Thread* thread) const override; + bool ShouldWait(const Thread* thread) const override; void Acquire(Thread* thread) override; private: diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 4d8a337a7..40cec143e 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -46,7 +46,7 @@ ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, st return MakeResult(std::move(server_session)); } -bool ServerSession::ShouldWait(Thread* thread) const { +bool ServerSession::ShouldWait(const Thread* thread) const { // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. if (parent->client == nullptr) return false; diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index aea4ccfeb..79b84bade 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -82,7 +82,7 @@ public: */ ResultCode HandleSyncRequest(SharedPtr<Thread> thread); - bool ShouldWait(Thread* thread) const override; + bool ShouldWait(const Thread* thread) const override; void Acquire(Thread* thread) override; diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index 62861da36..f15c5ee36 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp @@ -9,7 +9,6 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/shared_memory.h" -#include "core/memory.h" namespace Kernel { @@ -119,7 +118,15 @@ ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermi ConvertPermissions(permissions)); } -ResultCode SharedMemory::Unmap(Process& target_process, VAddr address) { +ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) { + if (unmap_size != size) { + LOG_ERROR(Kernel, + "Invalid size passed to Unmap. 
Size must be equal to the size of the " + "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}", + size, unmap_size); + return ERR_INVALID_SIZE; + } + // TODO(Subv): Verify what happens if the application tries to unmap an address that is not // mapped to a SharedMemory. return target_process.VMManager().UnmapRange(address, size); diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index dab2a6bea..37e18c443 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -104,11 +104,17 @@ public: /** * Unmaps a shared memory block from the specified address in system memory + * * @param target_process Process from which to unmap the memory block. - * @param address Address in system memory where the shared memory block is mapped + * @param address Address in system memory where the shared memory block is mapped. + * @param unmap_size The amount of bytes to unmap from this shared memory instance. + * * @return Result code of the unmap operation + * + * @pre The given size to unmap must be the same size as the amount of memory managed by + * the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned. */ - ResultCode Unmap(Process& target_process, VAddr address); + ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size); /** * Gets a pointer to the shared memory block diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index e5e7f99e1..ab10db3df 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -175,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) { return ERR_INVALID_SIZE; } - auto& vm_manager = Core::CurrentProcess()->VMManager(); - const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); - const auto alloc_result = - vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite); - + auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager(); + const auto alloc_result = vm_manager.SetHeapSize(heap_size); if (alloc_result.Failed()) { return alloc_result.Code(); } @@ -712,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) HeapRegionBaseAddr = 4, HeapRegionSize = 5, TotalMemoryUsage = 6, - TotalHeapUsage = 7, + TotalPhysicalMemoryUsed = 7, IsCurrentProcessBeingDebugged = 8, RegisterResourceLimit = 9, IdleTickCount = 10, @@ -748,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) case GetInfoType::NewMapRegionBaseAddr: case GetInfoType::NewMapRegionSize: case GetInfoType::TotalMemoryUsage: - case GetInfoType::TotalHeapUsage: + case GetInfoType::TotalPhysicalMemoryUsed: case GetInfoType::IsVirtualAddressMemoryEnabled: case GetInfoType::PersonalMmHeapUsage: case GetInfoType::TitleId: @@ -808,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) *result = process->VMManager().GetTotalMemoryUsage(); return RESULT_SUCCESS; - case GetInfoType::TotalHeapUsage: - *result = process->VMManager().GetTotalHeapUsage(); + case GetInfoType::TotalPhysicalMemoryUsed: + *result = process->GetTotalPhysicalMemoryUsed(); return RESULT_SUCCESS; case GetInfoType::IsVirtualAddressMemoryEnabled: @@ -1143,7 +1140,7 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 return ERR_INVALID_MEMORY_RANGE; } - return shared_memory->Unmap(*current_process, addr); + return shared_memory->Unmap(*current_process, addr, size); } static ResultCode 
QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address, @@ -1356,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var current_thread->SetCondVarWaitAddress(condition_variable_addr); current_thread->SetMutexWaitAddress(mutex_addr); current_thread->SetWaitHandle(thread_handle); - current_thread->SetStatus(ThreadStatus::WaitMutex); + current_thread->SetStatus(ThreadStatus::WaitCondVar); current_thread->InvalidateWakeupCallback(); current_thread->WakeAfterDelay(nano_seconds); @@ -1400,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target // them all. std::size_t last = waiting_threads.size(); if (target != -1) - last = target; + last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); // If there are no threads waiting on this condition variable, just exit - if (last > waiting_threads.size()) + if (last == 0) return RESULT_SUCCESS; for (std::size_t index = 0; index < last; ++index) { @@ -1411,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); + // liberate Cond Var Thread. + thread->SetCondVarWaitAddress(0); + std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); auto& monitor = Core::System::GetInstance().Monitor(); @@ -1429,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target } } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), thread->GetWaitHandle())); - if (mutex_val == 0) { // We were able to acquire the mutex, resume this thread. - ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); + ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); thread->ResumeFromWait(); auto* const lock_owner = thread->GetLockOwner(); @@ -1442,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target thread->SetLockOwner(nullptr); thread->SetMutexWaitAddress(0); - thread->SetCondVarWaitAddress(0); thread->SetWaitHandle(0); + Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); } else { // Atomically signal that the mutex now has a waiting thread. do { @@ -1462,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); auto owner = handle_table.Get<Thread>(owner_handle); ASSERT(owner); - ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); + ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); thread->InvalidateWakeupCallback(); + thread->SetStatus(ThreadStatus::WaitMutex); owner->AddMutexWaiter(thread); - - Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); } } @@ -1985,6 +1983,83 @@ static ResultCode SetResourceLimitLimitValue(Handle resource_limit, u32 resource return RESULT_SUCCESS; } +static ResultCode GetProcessList(u32* out_num_processes, VAddr out_process_ids, + u32 out_process_ids_size) { + LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}", + out_process_ids, out_process_ids_size); + + // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail. + if ((out_process_ids_size & 0xF0000000) != 0) { + LOG_ERROR(Kernel_SVC, + "Supplied size outside [0, 0x0FFFFFFF] range. 
out_process_ids_size={}", + out_process_ids_size); + return ERR_OUT_OF_RANGE; + } + + const auto& kernel = Core::System::GetInstance().Kernel(); + const auto& vm_manager = kernel.CurrentProcess()->VMManager(); + const auto total_copy_size = out_process_ids_size * sizeof(u64); + + if (out_process_ids_size > 0 && + !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) { + LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", + out_process_ids, out_process_ids + total_copy_size); + return ERR_INVALID_ADDRESS_STATE; + } + + const auto& process_list = kernel.GetProcessList(); + const auto num_processes = process_list.size(); + const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes); + + for (std::size_t i = 0; i < copy_amount; ++i) { + Memory::Write64(out_process_ids, process_list[i]->GetProcessID()); + out_process_ids += sizeof(u64); + } + + *out_num_processes = static_cast<u32>(num_processes); + return RESULT_SUCCESS; +} + +ResultCode GetThreadList(u32* out_num_threads, VAddr out_thread_ids, u32 out_thread_ids_size, + Handle debug_handle) { + // TODO: Handle this case when debug events are supported. + UNIMPLEMENTED_IF(debug_handle != InvalidHandle); + + LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}", + out_thread_ids, out_thread_ids_size); + + // If the size is negative or larger than INT32_MAX / sizeof(u64) + if ((out_thread_ids_size & 0xF0000000) != 0) { + LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}", + out_thread_ids_size); + return ERR_OUT_OF_RANGE; + } + + const auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess(); + const auto& vm_manager = current_process->VMManager(); + const auto total_copy_size = out_thread_ids_size * sizeof(u64); + + if (out_thread_ids_size > 0 && + !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) { + LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", + out_thread_ids, out_thread_ids + total_copy_size); + return ERR_INVALID_ADDRESS_STATE; + } + + const auto& thread_list = current_process->GetThreadList(); + const auto num_threads = thread_list.size(); + const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads); + + auto list_iter = thread_list.cbegin(); + for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) { + Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID()); + out_thread_ids += sizeof(u64); + } + + *out_num_threads = static_cast<u32>(num_threads); + return RESULT_SUCCESS; +} + namespace { struct FunctionDef { using Func = void(); @@ -2097,8 +2172,8 @@ static const FunctionDef SVC_Table[] = { {0x62, nullptr, "TerminateDebugProcess"}, {0x63, nullptr, "GetDebugEvent"}, {0x64, nullptr, "ContinueDebugEvent"}, - {0x65, nullptr, "GetProcessList"}, - {0x66, nullptr, "GetThreadList"}, + {0x65, SvcWrap<GetProcessList>, "GetProcessList"}, + {0x66, SvcWrap<GetThreadList>, "GetThreadList"}, {0x67, nullptr, "GetDebugThreadContext"}, {0x68, nullptr, "SetDebugThreadContext"}, {0x69, nullptr, "QueryDebugProcessMemory"}, @@ -2140,7 +2215,7 @@ void CallSVC(u32 immediate) { MICROPROFILE_SCOPE(Kernel_SVC); // Lock the global kernel mutex when we enter the kernel HLE. 
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; const FunctionDef* info = GetSVCInfo(immediate); if (info) { diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 2a2c2c5ea..b3733680f 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -78,6 +78,14 @@ void SvcWrap() { FuncReturn(retval); } +template <ResultCode func(u32*, u64, u32)> +void SvcWrap() { + u32 param_1 = 0; + const u32 retval = func(¶m_1, Param(1), static_cast<u32>(Param(2))).raw; + Core::CurrentArmInterface().SetReg(1, param_1); + FuncReturn(retval); +} + template <ResultCode func(u64*, u32)> void SvcWrap() { u64 param_1 = 0; diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 3b22e8e0d..1b891f632 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -28,7 +28,7 @@ namespace Kernel { -bool Thread::ShouldWait(Thread* thread) const { +bool Thread::ShouldWait(const Thread* thread) const { return status != ThreadStatus::Dead; } @@ -62,6 +62,8 @@ void Thread::Stop() { } wait_objects.clear(); + owner_process->UnregisterThread(this); + // Mark the TLS slot in the thread's page as free. owner_process->FreeTLSSlot(tls_address); } @@ -105,6 +107,7 @@ void Thread::ResumeFromWait() { case ThreadStatus::WaitSleep: case ThreadStatus::WaitIPC: case ThreadStatus::WaitMutex: + case ThreadStatus::WaitCondVar: case ThreadStatus::WaitArb: break; @@ -198,9 +201,11 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); thread->owner_process = &owner_process; thread->scheduler = &system.Scheduler(processor_id); - thread->scheduler->AddThread(thread, priority); + thread->scheduler->AddThread(thread); thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); + thread->owner_process->RegisterThread(thread.get()); + // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used // to initialize the context ResetThreadContext(thread->context, stack_top, entry_point, arg); @@ -228,16 +233,16 @@ void Thread::SetWaitSynchronizationOutput(s32 output) { context.cpu_registers[1] = output; } -s32 Thread::GetWaitObjectIndex(WaitObject* object) const { +s32 Thread::GetWaitObjectIndex(const WaitObject* object) const { ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything"); - auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); + const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1); } VAddr Thread::GetCommandBufferAddress() const { // Offset from the start of TLS at which the IPC command buffer begins. 
- static constexpr int CommandHeaderOffset = 0x80; - return GetTLSAddress() + CommandHeaderOffset; + constexpr u64 command_header_offset = 0x80; + return GetTLSAddress() + command_header_offset; } void Thread::SetStatus(ThreadStatus new_status) { @@ -351,7 +356,7 @@ void Thread::ChangeScheduler() { if (*new_processor_id != processor_id) { // Remove thread from previous core's scheduler scheduler->RemoveThread(this); - next_scheduler.AddThread(this, current_priority); + next_scheduler.AddThread(this); } processor_id = *new_processor_id; @@ -366,7 +371,7 @@ void Thread::ChangeScheduler() { system.CpuCore(processor_id).PrepareReschedule(); } -bool Thread::AllWaitObjectsReady() { +bool Thread::AllWaitObjectsReady() const { return std::none_of( wait_objects.begin(), wait_objects.end(), [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); }); diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index faad5f391..73e5d1bb4 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -51,7 +51,8 @@ enum class ThreadStatus { WaitIPC, ///< Waiting for the reply from an IPC request WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true - WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc + WaitMutex, ///< Waiting due to an ArbitrateLock svc + WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc Dormant, ///< Created but not yet made ready Dead ///< Run to completion, or forcefully terminated @@ -110,7 +111,7 @@ public: return HANDLE_TYPE; } - bool ShouldWait(Thread* thread) const override; + bool ShouldWait(const Thread* thread) const override; void Acquire(Thread* thread) override; /** @@ -204,7 +205,7 @@ public: * object in the list. * @param object Object to query the index of. */ - s32 GetWaitObjectIndex(WaitObject* object) const; + s32 GetWaitObjectIndex(const WaitObject* object) const; /** * Stops a thread, invalidating it from further use @@ -298,7 +299,7 @@ public: } /// Determines whether all the objects this thread is waiting on are ready. - bool AllWaitObjectsReady(); + bool AllWaitObjectsReady() const; const MutexWaitingThreads& GetMutexWaitingThreads() const { return wait_mutex_threads; diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 22bf55ce7..ec0a480ce 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p return RESULT_SUCCESS; } -ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { - if (!IsWithinHeapRegion(target, size)) { - return ERR_INVALID_ADDRESS; +ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { + if (size > GetHeapRegionSize()) { + return ERR_OUT_OF_MEMORY; + } + + // No need to do any additional work if the heap is already the given size. + if (size == GetCurrentHeapSize()) { + return MakeResult(heap_region_base); } if (heap_memory == nullptr) { // Initialize heap - heap_memory = std::make_shared<std::vector<u8>>(); - heap_start = heap_end = target; + heap_memory = std::make_shared<std::vector<u8>>(size); + heap_end = heap_region_base + size; } else { - UnmapRange(heap_start, heap_end - heap_start); - } - - // If necessary, expand backing vector to cover new heap extents. 
- if (target < heap_start) { - heap_memory->insert(begin(*heap_memory), heap_start - target, 0); - heap_start = target; - RefreshMemoryBlockMappings(heap_memory.get()); - } - if (target + size > heap_end) { - heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0); - heap_end = target + size; - RefreshMemoryBlockMappings(heap_memory.get()); + UnmapRange(heap_region_base, GetCurrentHeapSize()); } - ASSERT(heap_end - heap_start == heap_memory->size()); - CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size, - MemoryState::Heap)); - Reprotect(vma, perms); + // If necessary, expand backing vector to cover new heap extents in + // the case of allocating. Otherwise, shrink the backing memory, + // if a smaller heap has been requested. + const u64 old_heap_size = GetCurrentHeapSize(); + if (size > old_heap_size) { + const u64 alloc_size = size - old_heap_size; - heap_used = size; - - return MakeResult<VAddr>(heap_end - size); -} + heap_memory->insert(heap_memory->end(), alloc_size, 0); + RefreshMemoryBlockMappings(heap_memory.get()); + } else if (size < old_heap_size) { + heap_memory->resize(size); + heap_memory->shrink_to_fit(); -ResultCode VMManager::HeapFree(VAddr target, u64 size) { - if (!IsWithinHeapRegion(target, size)) { - return ERR_INVALID_ADDRESS; + RefreshMemoryBlockMappings(heap_memory.get()); } - if (size == 0) { - return RESULT_SUCCESS; - } + heap_end = heap_region_base + size; + ASSERT(GetCurrentHeapSize() == heap_memory->size()); - const ResultCode result = UnmapRange(target, size); - if (result.IsError()) { - return result; + const auto mapping_result = + MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap); + if (mapping_result.Failed()) { + return mapping_result.Code(); } - heap_used -= size; - return RESULT_SUCCESS; + return MakeResult<VAddr>(heap_region_base); } MemoryInfo VMManager::QueryMemory(VAddr address) const { @@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty heap_region_base = map_region_end; heap_region_end = heap_region_base + heap_region_size; + heap_end = heap_region_base; new_map_region_base = heap_region_end; new_map_region_end = new_map_region_base + new_map_region_size; @@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const { return 0xF8000000; } -u64 VMManager::GetTotalHeapUsage() const { - return heap_used; -} - VAddr VMManager::GetAddressSpaceBaseAddress() const { return address_space_base; } @@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const { return heap_region_end - heap_region_base; } +u64 VMManager::GetCurrentHeapSize() const { + return heap_end - heap_region_base; +} + bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), GetHeapRegionEndAddress()); diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 7cdff6094..6f484b7bf 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -380,11 +380,41 @@ public: /// Changes the permissions of a range of addresses, splitting VMAs as necessary. ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); - ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms); - ResultCode HeapFree(VAddr target, u64 size); - ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); + /// Attempts to allocate a heap with the given size. 
+ /// + /// @param size The size of the heap to allocate in bytes. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size that is equal to the size of the current heap, + /// then this function will do nothing and return the current + /// heap's starting address, as there's no need to perform + /// any additional heap allocation work. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size less than the current heap's size, then + /// this function will attempt to shrink the heap. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size larger than the current heap's size, then + /// this function will attempt to extend the size of the heap. + /// + /// @returns A result indicating either success or failure. + /// <p> + /// If successful, this function will return a result + /// containing the starting address to the allocated heap. + /// <p> + /// If unsuccessful, this function will return a result + /// containing an error code. + /// + /// @pre The given size must lie within the allowable heap + /// memory region managed by this VMManager instance. + /// Failure to abide by this will result in ERR_OUT_OF_MEMORY + /// being returned as the result. + /// + ResultVal<VAddr> SetHeapSize(u64 size); + /// Queries the memory manager for information about the given address. /// /// @param address The address to query the memory manager about for information. @@ -418,9 +448,6 @@ public: /// Gets the total memory usage, used by svcGetInfo u64 GetTotalMemoryUsage() const; - /// Gets the total heap usage, used by svcGetInfo - u64 GetTotalHeapUsage() const; - /// Gets the address space base address VAddr GetAddressSpaceBaseAddress() const; @@ -469,6 +496,13 @@ public: /// Gets the total size of the heap region in bytes. u64 GetHeapRegionSize() const; + /// Gets the total size of the current heap in bytes. + /// + /// @note This is the current allocated heap size, not the size + /// of the region it's allowed to exist within. + /// + u64 GetCurrentHeapSize() const; + /// Determines whether or not the specified range is within the heap region. bool IsWithinHeapRegion(VAddr address, u64 size) const; @@ -617,9 +651,6 @@ private: VAddr new_map_region_base = 0; VAddr new_map_region_end = 0; - VAddr main_code_region_base = 0; - VAddr main_code_region_end = 0; - VAddr tls_io_region_base = 0; VAddr tls_io_region_end = 0; @@ -628,9 +659,9 @@ private: // This makes deallocation and reallocation of holes fast and keeps process memory contiguous // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. std::shared_ptr<std::vector<u8>> heap_memory; - // The left/right bounds of the address space covered by heap_memory. - VAddr heap_start = 0; + + // The end of the currently allocated heap. This is not an inclusive + // end of the range. This is essentially 'base_address + current_size'. VAddr heap_end = 0; - u64 heap_used = 0; }; } // namespace Kernel diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h index 5987fb971..04464a51a 100644 --- a/src/core/hle/kernel/wait_object.h +++ b/src/core/hle/kernel/wait_object.h @@ -24,7 +24,7 @@ public: * @param thread The thread about which we're deciding. 
* @return True if the current thread should wait due to this object being unavailable */ - virtual bool ShouldWait(Thread* thread) const = 0; + virtual bool ShouldWait(const Thread* thread) const = 0; /// Acquire/lock the object for the specified thread if it is available virtual void Acquire(Thread* thread) = 0; diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index 9c44e27c6..d31ab7970 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -239,8 +239,8 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger {0, nullptr, "Exit"}, {1, &ISelfController::LockExit, "LockExit"}, {2, &ISelfController::UnlockExit, "UnlockExit"}, - {3, nullptr, "EnterFatalSection"}, - {4, nullptr, "LeaveFatalSection"}, + {3, &ISelfController::EnterFatalSection, "EnterFatalSection"}, + {4, &ISelfController::LeaveFatalSection, "LeaveFatalSection"}, {9, &ISelfController::GetLibraryAppletLaunchableEvent, "GetLibraryAppletLaunchableEvent"}, {10, &ISelfController::SetScreenShotPermission, "SetScreenShotPermission"}, {11, &ISelfController::SetOperationModeChangedNotification, "SetOperationModeChangedNotification"}, @@ -285,41 +285,54 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger ISelfController::~ISelfController() = default; -void ISelfController::SetFocusHandlingMode(Kernel::HLERequestContext& ctx) { - // Takes 3 input u8s with each field located immediately after the previous - // u8, these are bool flags. No output. +void ISelfController::LockExit(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_AM, "(STUBBED) called"); - IPC::RequestParser rp{ctx}; + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} - struct FocusHandlingModeParams { - u8 unknown0; - u8 unknown1; - u8 unknown2; - }; - auto flags = rp.PopRaw<FocusHandlingModeParams>(); +void ISelfController::UnlockExit(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_AM, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); } -void ISelfController::SetRestartMessageEnabled(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_AM, "(STUBBED) called"); +void ISelfController::EnterFatalSection(Kernel::HLERequestContext& ctx) { + ++num_fatal_sections_entered; + LOG_DEBUG(Service_AM, "called. Num fatal sections entered: {}", num_fatal_sections_entered); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); } -void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; +void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_AM, "called."); - bool flag = rp.Pop<bool>(); - LOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag); + // Entry and exit of fatal sections must be balanced. 
+ if (num_fatal_sections_entered == 0) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultCode{ErrorModule::AM, 512}); + return; + } + + --num_fatal_sections_entered; IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); } +void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_AM, "(STUBBED) called"); + + launchable_event.writable->Signal(); + + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(RESULT_SUCCESS); + rb.PushCopyObjects(launchable_event.readable); +} + void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_AM, "(STUBBED) called"); @@ -337,40 +350,51 @@ void ISelfController::SetOperationModeChangedNotification(Kernel::HLERequestCont rb.Push(RESULT_SUCCESS); } -void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx) { - // Takes 3 input u8s with each field located immediately after the previous - // u8, these are bool flags. No output. +void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; - bool enabled = rp.Pop<bool>(); - LOG_WARNING(Service_AM, "(STUBBED) called enabled={}", enabled); + bool flag = rp.Pop<bool>(); + LOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); } -void ISelfController::LockExit(Kernel::HLERequestContext& ctx) { +void ISelfController::SetFocusHandlingMode(Kernel::HLERequestContext& ctx) { + // Takes 3 input u8s with each field located immediately after the previous + // u8, these are bool flags. No output. LOG_WARNING(Service_AM, "(STUBBED) called"); + IPC::RequestParser rp{ctx}; + + struct FocusHandlingModeParams { + u8 unknown0; + u8 unknown1; + u8 unknown2; + }; + auto flags = rp.PopRaw<FocusHandlingModeParams>(); + IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); } -void ISelfController::UnlockExit(Kernel::HLERequestContext& ctx) { +void ISelfController::SetRestartMessageEnabled(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_AM, "(STUBBED) called"); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); } -void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_AM, "(STUBBED) called"); +void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx) { + // Takes 3 input u8s with each field located immediately after the previous + // u8, these are bool flags. No output. 
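// [Editor's note - illustrative sketch, not part of the patch] The new
// EnterFatalSection/LeaveFatalSection handlers above implement a simple
// balanced counter: Leave without a matching Enter is rejected with an AM
// error, otherwise the nesting depth is decremented. A minimal model of that
// invariant (hypothetical type name):
#include <cassert>
#include <cstdint>

struct FatalSectionCounter {
    std::uint64_t depth = 0;

    void Enter() { ++depth; }

    // Returns false on an unbalanced Leave; the real handler pushes
    // ResultCode{ErrorModule::AM, 512} in that case.
    bool Leave() {
        if (depth == 0) {
            return false;
        }
        --depth;
        return true;
    }
};

int main() {
    FatalSectionCounter sections;
    assert(!sections.Leave());   // unbalanced: rejected
    sections.Enter();
    assert(sections.Leave());    // balanced: accepted
}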
+ IPC::RequestParser rp{ctx}; - launchable_event.writable->Signal(); + bool enabled = rp.Pop<bool>(); + LOG_WARNING(Service_AM, "(STUBBED) called enabled={}", enabled); - IPC::ResponseBuilder rb{ctx, 2, 1}; + IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); - rb.PushCopyObjects(launchable_event.readable); } void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h index 565dd8e9e..991b7d47c 100644 --- a/src/core/hle/service/am/am.h +++ b/src/core/hle/service/am/am.h @@ -117,17 +117,19 @@ public: ~ISelfController() override; private: - void SetFocusHandlingMode(Kernel::HLERequestContext& ctx); - void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx); - void SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx); - void SetOperationModeChangedNotification(Kernel::HLERequestContext& ctx); - void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx); void LockExit(Kernel::HLERequestContext& ctx); void UnlockExit(Kernel::HLERequestContext& ctx); + void EnterFatalSection(Kernel::HLERequestContext& ctx); + void LeaveFatalSection(Kernel::HLERequestContext& ctx); void GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx); + void SetScreenShotPermission(Kernel::HLERequestContext& ctx); + void SetOperationModeChangedNotification(Kernel::HLERequestContext& ctx); + void SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx); + void SetFocusHandlingMode(Kernel::HLERequestContext& ctx); + void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx); + void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx); void SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx); void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx); - void SetScreenShotPermission(Kernel::HLERequestContext& ctx); void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx); void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); @@ -135,6 +137,7 @@ private: std::shared_ptr<NVFlinger::NVFlinger> nvflinger; Kernel::EventPair launchable_event; u32 idle_time_detection_extension = 0; + u64 num_fatal_sections_entered = 0; }; class ICommonStateGetter final : public ServiceFramework<ICommonStateGetter> { diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp index 770590d0b..2c229bcad 100644 --- a/src/core/hle/service/fatal/fatal.cpp +++ b/src/core/hle/service/fatal/fatal.cpp @@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name) Module::Interface::~Interface() = default; struct FatalInfo { - std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or - // not(find a game which has non zero valeus) - u64_le unk0{}; - u64_le unk1{}; - u64_le unk2{}; - u64_le unk3{}; - u64_le unk4{}; - u64_le unk5{}; - u64_le unk6{}; + enum class Architecture : s32 { + AArch64, + AArch32, + }; + + const char* ArchAsString() const { + return arch == Architecture::AArch64 ? "AArch64" : "AArch32"; + } + + std::array<u64_le, 31> registers{}; + u64_le sp{}; + u64_le pc{}; + u64_le pstate{}; + u64_le afsr0{}; + u64_le afsr1{}; + u64_le esr{}; + u64_le far{}; std::array<u64_le, 32> backtrace{}; - u64_le unk7{}; - u64_le unk8{}; + u64_le program_entry_point{}; + + // Bit flags that indicate which registers have been set with values + // for this context. 
The service itself uses these to determine which + // registers to specifically print out. + u64_le set_flags{}; + u32_le backtrace_size{}; - u32_le unk9{}; + Architecture arch{}; u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? }; static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); @@ -52,36 +65,36 @@ enum class FatalType : u32 { static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { const auto title_id = Core::CurrentProcess()->GetTitleID(); - std::string crash_report = - fmt::format("Yuzu {}-{} crash report\n" - "Title ID: {:016x}\n" - "Result: 0x{:X} ({:04}-{:04d})\n" - "\n", - Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, - 2000 + static_cast<u32>(error_code.module.Value()), - static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7); + std::string crash_report = fmt::format( + "Yuzu {}-{} crash report\n" + "Title ID: {:016x}\n" + "Result: 0x{:X} ({:04}-{:04d})\n" + "Set flags: 0x{:16X}\n" + "Program entry point: 0x{:16X}\n" + "\n", + Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, + 2000 + static_cast<u32>(error_code.module.Value()), + static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point); if (info.backtrace_size != 0x0) { crash_report += "Registers:\n"; - // TODO(ogniK): This is just a guess, find a game which actually has non zero values for (size_t i = 0; i < info.registers.size(); i++) { crash_report += fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); } - crash_report += fmt::format(" Unknown 0: {:016x}\n", info.unk0); - crash_report += fmt::format(" Unknown 1: {:016x}\n", info.unk1); - crash_report += fmt::format(" Unknown 2: {:016x}\n", info.unk2); - crash_report += fmt::format(" Unknown 3: {:016x}\n", info.unk3); - crash_report += fmt::format(" Unknown 4: {:016x}\n", info.unk4); - crash_report += fmt::format(" Unknown 5: {:016x}\n", info.unk5); - crash_report += fmt::format(" Unknown 6: {:016x}\n", info.unk6); + crash_report += fmt::format(" SP: {:016x}\n", info.sp); + crash_report += fmt::format(" PC: {:016x}\n", info.pc); + crash_report += fmt::format(" PSTATE: {:016x}\n", info.pstate); + crash_report += fmt::format(" AFSR0: {:016x}\n", info.afsr0); + crash_report += fmt::format(" AFSR1: {:016x}\n", info.afsr1); + crash_report += fmt::format(" ESR: {:016x}\n", info.esr); + crash_report += fmt::format(" FAR: {:016x}\n", info.far); crash_report += "\nBacktrace:\n"; for (size_t i = 0; i < info.backtrace_size; i++) { crash_report += fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); } - crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7); - crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8); - crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9); + + crash_report += fmt::format("Architecture: {}\n", info.ArchAsString()); crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); } @@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F case FatalType::ErrorReport: GenerateErrorReport(error_code, info); break; - }; + } } void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { LOG_ERROR(Service_Fatal, "called"); IPC::RequestParser rp{ctx}; - auto error_code = rp.Pop<ResultCode>(); + const auto error_code = rp.Pop<ResultCode>(); ThrowFatalError(error_code, FatalType::ErrorScreen, {}); IPC::ResponseBuilder rb{ctx, 2}; @@ -141,8 +154,8 @@ void 
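// [Editor's note - illustrative only, not part of the patch] The renamed
// FatalInfo fields above carry a set_flags bitmask which, per the comment,
// tells the service which registers were actually filled in. Below is a
// sketch of how such a mask can gate the register dump; the bit layout
// assumed here (bit i == X[i]) is an assumption for the example, not
// documented behaviour.
#include <array>
#include <cstdint>
#include <cstdio>

void PrintSetRegisters(const std::array<std::uint64_t, 31>& regs, std::uint64_t set_flags) {
    for (std::size_t i = 0; i < regs.size(); ++i) {
        if ((set_flags & (std::uint64_t{1} << i)) != 0) {
            std::printf("  X[%02zu]: %016llx\n", i,
                        static_cast<unsigned long long>(regs[i]));
        }
    }
}

int main() {
    std::array<std::uint64_t, 31> regs{};
    regs[0] = 0xdeadbeef;
    regs[30] = 0x1234;
    PrintSetRegisters(regs, (std::uint64_t{1} << 0) | (std::uint64_t{1} << 30));
}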
Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { LOG_ERROR(Service_Fatal, "called"); IPC::RequestParser rp(ctx); - auto error_code = rp.Pop<ResultCode>(); - auto fatal_type = rp.PopEnum<FatalType>(); + const auto error_code = rp.Pop<ResultCode>(); + const auto fatal_type = rp.PopEnum<FatalType>(); ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy IPC::ResponseBuilder rb{ctx, 2}; @@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { LOG_ERROR(Service_Fatal, "called"); IPC::RequestParser rp(ctx); - auto error_code = rp.Pop<ResultCode>(); - auto fatal_type = rp.PopEnum<FatalType>(); - auto fatal_info = ctx.ReadBuffer(); + const auto error_code = rp.Pop<ResultCode>(); + const auto fatal_type = rp.PopEnum<FatalType>(); + const auto fatal_info = ctx.ReadBuffer(); FatalInfo info{}; ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp index 5c62d42ba..ca88bf97f 100644 --- a/src/core/hle/service/nfc/nfc.cpp +++ b/src/core/hle/service/nfc/nfc.cpp @@ -150,7 +150,7 @@ private: IPC::ResponseBuilder rb{ctx, 3}; rb.Push(RESULT_SUCCESS); - rb.PushRaw<u8>(Settings::values.enable_nfc); + rb.PushRaw<u8>(true); } void GetStateOld(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp index 1c4482e47..c6babdd4d 100644 --- a/src/core/hle/service/nfp/nfp.cpp +++ b/src/core/hle/service/nfp/nfp.cpp @@ -335,7 +335,7 @@ void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) { } bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; if (buffer.size() < sizeof(AmiiboFile)) { return false; } diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp index 8b1920f22..46ac372f6 100644 --- a/src/core/loader/elf.cpp +++ b/src/core/loader/elf.cpp @@ -341,7 +341,7 @@ Kernel::CodeSet ElfReader::LoadInto(VAddr vaddr) { } codeset.entrypoint = base_addr + header->e_entry; - codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); + codeset.memory = std::move(program_image); LOG_DEBUG(Loader, "Done loading."); diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp index 5de02a94b..31e4a0c84 100644 --- a/src/core/loader/nro.cpp +++ b/src/core/loader/nro.cpp @@ -187,7 +187,7 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data, program_image.resize(static_cast<u32>(program_image.size()) + bss_size); // Load codeset for current process - codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); + codeset.memory = std::move(program_image); process.LoadModule(std::move(codeset), load_base); // Register module with GDBStub diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 714d85a59..babc7e646 100644 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp @@ -161,7 +161,7 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, } // Load codeset for current process - codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); + codeset.memory = std::move(program_image); process.LoadModule(std::move(codeset), load_base); // Register 
module with GDBStub diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp index c716a462b..4afd6c8a3 100644 --- a/src/core/perf_stats.cpp +++ b/src/core/perf_stats.cpp @@ -18,13 +18,13 @@ using std::chrono::microseconds; namespace Core { void PerfStats::BeginSystemFrame() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; frame_begin = Clock::now(); } void PerfStats::EndSystemFrame() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; auto frame_end = Clock::now(); accumulated_frametime += frame_end - frame_begin; @@ -35,13 +35,13 @@ void PerfStats::EndSystemFrame() { } void PerfStats::EndGameFrame() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; game_frames += 1; } PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; const auto now = Clock::now(); // Walltime elapsed since stats were reset @@ -67,7 +67,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us } double PerfStats::GetLastFrameTimeScale() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; constexpr double FRAME_LENGTH = 1.0 / 60; return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH; diff --git a/src/core/settings.cpp b/src/core/settings.cpp index 6dd3139cc..6d32ebea3 100644 --- a/src/core/settings.cpp +++ b/src/core/settings.cpp @@ -82,7 +82,6 @@ void LogSetting(const std::string& name, const T& value) { void LogSettings() { LOG_INFO(Config, "yuzu Configuration:"); LogSetting("System_UseDockedMode", Settings::values.use_docked_mode); - LogSetting("System_EnableNfc", Settings::values.enable_nfc); LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0)); LogSetting("System_CurrentUser", Settings::values.current_user); LogSetting("System_LanguageIndex", Settings::values.language_index); diff --git a/src/core/settings.h b/src/core/settings.h index cdfb2f742..d543eb32f 100644 --- a/src/core/settings.h +++ b/src/core/settings.h @@ -349,7 +349,6 @@ struct TouchscreenInput { struct Values { // System bool use_docked_mode; - bool enable_nfc; std::optional<u32> rng_seed; // Measured in seconds since epoch std::optional<std::chrono::seconds> custom_rtc; diff --git a/src/input_common/keyboard.cpp b/src/input_common/keyboard.cpp index 525fe6abc..078374be5 100644 --- a/src/input_common/keyboard.cpp +++ b/src/input_common/keyboard.cpp @@ -36,18 +36,18 @@ struct KeyButtonPair { class KeyButtonList { public: void AddKeyButton(int key_code, KeyButton* key_button) { - std::lock_guard<std::mutex> guard(mutex); + std::lock_guard guard{mutex}; list.push_back(KeyButtonPair{key_code, key_button}); } void RemoveKeyButton(const KeyButton* key_button) { - std::lock_guard<std::mutex> guard(mutex); + std::lock_guard guard{mutex}; list.remove_if( [key_button](const KeyButtonPair& pair) { return pair.key_button == key_button; }); } void ChangeKeyStatus(int key_code, bool pressed) { - std::lock_guard<std::mutex> guard(mutex); + std::lock_guard guard{mutex}; for (const KeyButtonPair& pair : list) { if (pair.key_code == key_code) pair.key_button->status.store(pressed); @@ -55,7 +55,7 @@ public: } void ChangeAllKeyStatus(bool pressed) { - std::lock_guard<std::mutex> guard(mutex); + std::lock_guard guard{mutex}; for (const KeyButtonPair& pair : list) { pair.key_button->status.store(pressed); } diff --git 
a/src/input_common/motion_emu.cpp b/src/input_common/motion_emu.cpp index 6d96d4019..868251628 100644 --- a/src/input_common/motion_emu.cpp +++ b/src/input_common/motion_emu.cpp @@ -39,7 +39,7 @@ public: void Tilt(int x, int y) { auto mouse_move = Common::MakeVec(x, y) - mouse_origin; if (is_tilting) { - std::lock_guard<std::mutex> guard(tilt_mutex); + std::lock_guard guard{tilt_mutex}; if (mouse_move.x == 0 && mouse_move.y == 0) { tilt_angle = 0; } else { @@ -51,13 +51,13 @@ public: } void EndTilt() { - std::lock_guard<std::mutex> guard(tilt_mutex); + std::lock_guard guard{tilt_mutex}; tilt_angle = 0; is_tilting = false; } std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() { - std::lock_guard<std::mutex> guard(status_mutex); + std::lock_guard guard{status_mutex}; return status; } @@ -93,7 +93,7 @@ private: old_q = q; { - std::lock_guard<std::mutex> guard(tilt_mutex); + std::lock_guard guard{tilt_mutex}; // Find the quaternion describing current 3DS tilting q = Common::MakeQuaternion( @@ -115,7 +115,7 @@ private: // Update the sensor state { - std::lock_guard<std::mutex> guard(status_mutex); + std::lock_guard guard{status_mutex}; status = std::make_tuple(gravity, angular_rate); } } diff --git a/src/input_common/sdl/sdl_impl.cpp b/src/input_common/sdl/sdl_impl.cpp index b132d77f5..5949ecbae 100644 --- a/src/input_common/sdl/sdl_impl.cpp +++ b/src/input_common/sdl/sdl_impl.cpp @@ -55,22 +55,22 @@ public: : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {} void SetButton(int button, bool value) { - std::lock_guard<std::mutex> lock(mutex); + std::lock_guard lock{mutex}; state.buttons[button] = value; } bool GetButton(int button) const { - std::lock_guard<std::mutex> lock(mutex); + std::lock_guard lock{mutex}; return state.buttons.at(button); } void SetAxis(int axis, Sint16 value) { - std::lock_guard<std::mutex> lock(mutex); + std::lock_guard lock{mutex}; state.axes[axis] = value; } float GetAxis(int axis) const { - std::lock_guard<std::mutex> lock(mutex); + std::lock_guard lock{mutex}; return state.axes.at(axis) / 32767.0f; } @@ -92,12 +92,12 @@ public: } void SetHat(int hat, Uint8 direction) { - std::lock_guard<std::mutex> lock(mutex); + std::lock_guard lock{mutex}; state.hats[hat] = direction; } bool GetHatDirection(int hat, Uint8 direction) const { - std::lock_guard<std::mutex> lock(mutex); + std::lock_guard lock{mutex}; return (state.hats.at(hat) & direction) != 0; } /** @@ -140,7 +140,7 @@ private: * Get the nth joystick with the corresponding GUID */ std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& guid, int port) { - std::lock_guard<std::mutex> lock(joystick_map_mutex); + std::lock_guard lock{joystick_map_mutex}; const auto it = joystick_map.find(guid); if (it != joystick_map.end()) { while (it->second.size() <= port) { @@ -161,7 +161,8 @@ std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& g std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) { auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id); const std::string guid = GetGUID(sdl_joystick); - std::lock_guard<std::mutex> lock(joystick_map_mutex); + + std::lock_guard lock{joystick_map_mutex}; auto map_it = joystick_map.find(guid); if (map_it != joystick_map.end()) { auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(), @@ -198,8 +199,9 @@ void SDLState::InitJoystick(int joystick_index) { LOG_ERROR(Input, "failed to open joystick {}", joystick_index); return; } - std::string guid = 
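// [Editor's note - aside, not part of the patch] The recurring change in
// this diff from std::lock_guard<std::mutex> lock(m) to std::lock_guard
// lock{m} relies on C++17 class template argument deduction: the mutex type
// is deduced from the argument, so both forms name the same type.
#include <mutex>

std::mutex m;

void Before() {
    std::lock_guard<std::mutex> lock(m);   // explicit template argument
}

void After() {
    std::lock_guard lock{m};               // deduced as std::lock_guard<std::mutex>
}

int main() {
    Before();
    After();
}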
GetGUID(sdl_joystick); - std::lock_guard<std::mutex> lock(joystick_map_mutex); + const std::string guid = GetGUID(sdl_joystick); + + std::lock_guard lock{joystick_map_mutex}; if (joystick_map.find(guid) == joystick_map.end()) { auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); joystick_map[guid].emplace_back(std::move(joystick)); @@ -221,7 +223,7 @@ void SDLState::CloseJoystick(SDL_Joystick* sdl_joystick) { std::string guid = GetGUID(sdl_joystick); std::shared_ptr<SDLJoystick> joystick; { - std::lock_guard<std::mutex> lock(joystick_map_mutex); + std::lock_guard lock{joystick_map_mutex}; // This call to guid is safe since the joystick is guaranteed to be in the map auto& joystick_guid_list = joystick_map[guid]; const auto joystick_it = @@ -274,7 +276,7 @@ void SDLState::HandleGameControllerEvent(const SDL_Event& event) { } void SDLState::CloseJoysticks() { - std::lock_guard<std::mutex> lock(joystick_map_mutex); + std::lock_guard lock{joystick_map_mutex}; joystick_map.clear(); } diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index d0284bdf4..c7038b217 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -1,5 +1,7 @@ add_executable(tests common/bit_field.cpp + common/bit_utils.cpp + common/multi_level_queue.cpp common/param_package.cpp common/ring_buffer.cpp core/arm/arm_test_common.cpp diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp new file mode 100644 index 000000000..479b5995a --- /dev/null +++ b/src/tests/common/bit_utils.cpp @@ -0,0 +1,23 @@ +// Copyright 2017 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <catch2/catch.hpp> +#include <math.h> +#include "common/bit_util.h" + +namespace Common { + +TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") { + REQUIRE(Common::CountTrailingZeroes32(0) == 32); + REQUIRE(Common::CountTrailingZeroes64(0) == 64); + REQUIRE(Common::CountTrailingZeroes32(9) == 0); + REQUIRE(Common::CountTrailingZeroes32(8) == 3); + REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12); + REQUIRE(Common::CountTrailingZeroes64(9) == 0); + REQUIRE(Common::CountTrailingZeroes64(8) == 3); + REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12); + REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36); +} + +} // namespace Common diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp new file mode 100644 index 000000000..cca7ec7da --- /dev/null +++ b/src/tests/common/multi_level_queue.cpp @@ -0,0 +1,55 @@ +// Copyright 2019 Yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <catch2/catch.hpp> +#include <math.h> +#include "common/common_types.h" +#include "common/multi_level_queue.h" + +namespace Common { + +TEST_CASE("MultiLevelQueue", "[common]") { + std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0}; + Common::MultiLevelQueue<f32, 64> mlq; + REQUIRE(mlq.empty()); + mlq.add(values[2], 2); + mlq.add(values[7], 7); + mlq.add(values[3], 3); + mlq.add(values[4], 4); + mlq.add(values[0], 0); + mlq.add(values[5], 5); + mlq.add(values[6], 6); + mlq.add(values[1], 1); + u32 index = 0; + bool all_set = true; + for (auto& f : mlq) { + all_set &= (f == values[index]); + index++; + } + REQUIRE(all_set); + REQUIRE(!mlq.empty()); + f32 v = 8.0; + mlq.add(v, 2); + v = -7.0; + mlq.add(v, 2, false); + REQUIRE(mlq.front(2) == -7.0); + mlq.yield(2); + REQUIRE(mlq.front(2) == values[2]); + REQUIRE(mlq.back(2) == -7.0); + REQUIRE(mlq.empty(8)); + v = 10.0; + mlq.add(v, 8); + mlq.adjust(v, 8, 9); + REQUIRE(mlq.front(9) == v); + REQUIRE(mlq.empty(8)); + REQUIRE(!mlq.empty(9)); + mlq.adjust(values[0], 0, 9); + REQUIRE(mlq.highest_priority_set() == 1); + REQUIRE(mlq.lowest_priority_set() == 9); + mlq.remove(values[1], 1); + REQUIRE(mlq.highest_priority_set() == 2); + REQUIRE(mlq.empty(1)); +} + +} // namespace Common diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 14b76680f..44c761d3e 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -128,7 +128,9 @@ if (ENABLE_VULKAN) renderer_vulkan/vk_scheduler.cpp renderer_vulkan/vk_scheduler.h renderer_vulkan/vk_stream_buffer.cpp - renderer_vulkan/vk_stream_buffer.h) + renderer_vulkan/vk_stream_buffer.h + renderer_vulkan/vk_swapchain.cpp + renderer_vulkan/vk_swapchain.h) target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include) target_compile_definitions(video_core PRIVATE HAS_VULKAN) diff --git a/src/video_core/debug_utils/debug_utils.cpp b/src/video_core/debug_utils/debug_utils.cpp index 5ffb492ea..f0ef67535 100644 --- a/src/video_core/debug_utils/debug_utils.cpp +++ b/src/video_core/debug_utils/debug_utils.cpp @@ -10,7 +10,7 @@ namespace Tegra { void DebugContext::DoOnEvent(Event event, void* data) { { - std::unique_lock<std::mutex> lock(breakpoint_mutex); + std::unique_lock lock{breakpoint_mutex}; // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will // show on debug widgets @@ -32,7 +32,7 @@ void DebugContext::DoOnEvent(Event event, void* data) { void DebugContext::Resume() { { - std::lock_guard<std::mutex> lock(breakpoint_mutex); + std::lock_guard lock{breakpoint_mutex}; // Tell all observers that we are about to resume for (auto& breakpoint_observer : breakpoint_observers) { diff --git a/src/video_core/debug_utils/debug_utils.h b/src/video_core/debug_utils/debug_utils.h index c235faf46..ac3a2eb01 100644 --- a/src/video_core/debug_utils/debug_utils.h +++ b/src/video_core/debug_utils/debug_utils.h @@ -40,7 +40,7 @@ public: /// Constructs the object such that it observes events of the given DebugContext. 
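// [Editor's note - illustrative sketch, not part of the patch] The new
// CountTrailingZeroes tests and the MultiLevelQueue tests above go together:
// with one bit per non-empty priority level, the highest priority in use
// (lowest index) is the index of the lowest set bit. A portable,
// dependency-free model of that lookup; the real code uses the
// intrinsics-backed helpers instead of a loop.
#include <cassert>
#include <cstdint>

std::uint32_t HighestPrioritySet(std::uint64_t used_priorities, std::uint32_t depth = 64) {
    if (used_priorities == 0) {
        return depth;                                  // no priority level in use
    }
    std::uint32_t index = 0;
    while ((used_priorities & 1) == 0) {               // count trailing zeroes
        used_priorities >>= 1;
        ++index;
    }
    return index;
}

int main() {
    assert(HighestPrioritySet(0) == 64);
    assert(HighestPrioritySet(0b1000) == 3);           // only priority 3 occupied
    assert(HighestPrioritySet(0b1010) == 1);           // priorities 1 and 3: 1 wins
}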
explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context) : context_weak(debug_context) { - std::unique_lock<std::mutex> lock(debug_context->breakpoint_mutex); + std::unique_lock lock{debug_context->breakpoint_mutex}; debug_context->breakpoint_observers.push_back(this); } @@ -48,7 +48,7 @@ public: auto context = context_weak.lock(); if (context) { { - std::unique_lock<std::mutex> lock(context->breakpoint_mutex); + std::unique_lock lock{context->breakpoint_mutex}; context->breakpoint_observers.remove(this); } diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 267a03f2d..30b29e14d 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -286,9 +286,10 @@ void GPU::ProcessSemaphoreTriggerMethod() { // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of // CoreTiming block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); - memory_manager->WriteBlock(regs.smaphore_address.SmaphoreAddress(), &block, sizeof(block)); + memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, + sizeof(block)); } else { - const u32 word{memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress())}; + const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())}; if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || (op == GpuSemaphoreOperation::AcquireGequal && static_cast<s32>(word - regs.semaphore_sequence) > 0) || @@ -315,11 +316,11 @@ void GPU::ProcessSemaphoreTriggerMethod() { } void GPU::ProcessSemaphoreRelease() { - memory_manager->Write<u32>(regs.smaphore_address.SmaphoreAddress(), regs.semaphore_release); + memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release); } void GPU::ProcessSemaphoreAcquire() { - const u32 word = memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress()); + const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress()); const auto value = regs.semaphore_acquire; if (word != value) { regs.acquire_active = true; diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index c1830ac8d..de30ea354 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -177,11 +177,11 @@ public: u32 address_high; u32 address_low; - GPUVAddr SmaphoreAddress() const { + GPUVAddr SemaphoreAddress() const { return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | address_low); } - } smaphore_address; + } semaphore_address; u32 semaphore_sequence; u32 semaphore_trigger; @@ -263,7 +263,7 @@ private: static_assert(offsetof(GPU::Regs, field_name) == position * 4, \ "Field " #field_name " has invalid position") -ASSERT_REG_POSITION(smaphore_address, 0x4); +ASSERT_REG_POSITION(semaphore_address, 0x4); ASSERT_REG_POSITION(semaphore_sequence, 0x6); ASSERT_REG_POSITION(semaphore_trigger, 0x7); ASSERT_REG_POSITION(reference_count, 0x14); diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 086b2f625..c5dc199c5 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -52,8 +52,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p } ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) - : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer), - std::ref(dma_pusher), std::ref(state)} {} + : renderer{renderer}, thread{RunThread, std::ref(renderer), std::ref(dma_pusher), + std::ref(state)} {} 
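// [Editor's note - aside, not part of the patch] The ThreadManager change
// above drops the stored DmaPusher reference but keeps handing it to the GPU
// thread via std::ref. std::thread decay-copies its arguments, so a plain
// lvalue cannot bind to a reference parameter of the thread function; the
// std::ref wrapper is what forwards an actual reference. Minimal example:
#include <cassert>
#include <functional>
#include <thread>

static void Worker(int& counter) {
    ++counter;
}

int main() {
    int counter = 0;
    std::thread worker{Worker, std::ref(counter)};   // without std::ref this does not compile
    worker.join();
    assert(counter == 1);
}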
ThreadManager::~ThreadManager() { // Notify GPU thread that a shutdown is pending diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 8cd7db1c6..70acb2e79 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h @@ -4,10 +4,8 @@ #pragma once -#include <array> #include <atomic> #include <condition_variable> -#include <memory> #include <mutex> #include <optional> #include <thread> @@ -97,13 +95,13 @@ struct SynchState final { std::condition_variable frames_condition; void IncrementFramesCounter() { - std::lock_guard<std::mutex> lock{frames_mutex}; + std::lock_guard lock{frames_mutex}; ++queued_frame_count; } void DecrementFramesCounter() { { - std::lock_guard<std::mutex> lock{frames_mutex}; + std::lock_guard lock{frames_mutex}; --queued_frame_count; if (queued_frame_count) { @@ -115,7 +113,7 @@ struct SynchState final { void WaitForFrames() { { - std::lock_guard<std::mutex> lock{frames_mutex}; + std::lock_guard lock{frames_mutex}; if (!queued_frame_count) { return; } @@ -123,14 +121,14 @@ struct SynchState final { // Wait for the GPU to be idle (all commands to be executed) { - std::unique_lock<std::mutex> lock{frames_mutex}; + std::unique_lock lock{frames_mutex}; frames_condition.wait(lock, [this] { return !queued_frame_count; }); } } void SignalCommands() { { - std::unique_lock<std::mutex> lock{commands_mutex}; + std::unique_lock lock{commands_mutex}; if (queue.Empty()) { return; } @@ -140,7 +138,7 @@ struct SynchState final { } void WaitForCommands() { - std::unique_lock<std::mutex> lock{commands_mutex}; + std::unique_lock lock{commands_mutex}; commands_condition.wait(lock, [this] { return !queue.Empty(); }); } @@ -177,7 +175,6 @@ private: private: SynchState state; VideoCore::RendererBase& renderer; - Tegra::DmaPusher& dma_pusher; std::thread thread; std::thread::id thread_id; }; diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index 9fc9f3056..291772186 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h @@ -71,8 +71,8 @@ private: bool is_registered{}; ///< Whether the object is currently registered with the cache bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing - CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space const u8* host_ptr{}; ///< Pointer to the memory backing this cached region + CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space }; template <class T> @@ -84,7 +84,7 @@ public: /// Write any cached resources overlapping the specified region back to memory void FlushRegion(CacheAddr addr, std::size_t size) { - std::lock_guard<std::recursive_mutex> lock{mutex}; + std::lock_guard lock{mutex}; const auto& objects{GetSortedObjectsFromRegion(addr, size)}; for (auto& object : objects) { @@ -94,7 +94,7 @@ public: /// Mark the specified region as being invalidated void InvalidateRegion(CacheAddr addr, u64 size) { - std::lock_guard<std::recursive_mutex> lock{mutex}; + std::lock_guard lock{mutex}; const auto& objects{GetSortedObjectsFromRegion(addr, size)}; for (auto& object : objects) { @@ -108,7 +108,7 @@ public: /// Invalidates everything in the cache void InvalidateAll() { - std::lock_guard<std::recursive_mutex> lock{mutex}; + std::lock_guard lock{mutex}; while (interval_cache.begin() != interval_cache.end()) { Unregister(*interval_cache.begin()->second.begin()); 
@@ -133,7 +133,7 @@ protected: /// Register an object into the cache virtual void Register(const T& object) { - std::lock_guard<std::recursive_mutex> lock{mutex}; + std::lock_guard lock{mutex}; object->SetIsRegistered(true); interval_cache.add({GetInterval(object), ObjectSet{object}}); @@ -143,7 +143,7 @@ protected: /// Unregisters an object from the cache virtual void Unregister(const T& object) { - std::lock_guard<std::recursive_mutex> lock{mutex}; + std::lock_guard lock{mutex}; object->SetIsRegistered(false); rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1); @@ -153,14 +153,14 @@ protected: /// Returns a ticks counter used for tracking when cached objects were last modified u64 GetModifiedTicks() { - std::lock_guard<std::recursive_mutex> lock{mutex}; + std::lock_guard lock{mutex}; return ++modified_ticks; } /// Flushes the specified object, updating appropriate cache state as needed void FlushObject(const T& object) { - std::lock_guard<std::recursive_mutex> lock{mutex}; + std::lock_guard lock{mutex}; if (!object->IsDirty()) { return; diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index f75c65825..fd091c84c 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -15,8 +15,8 @@ namespace OpenGL { CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, std::size_t alignment, u8* host_ptr) - : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ - host_ptr} {} + : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset}, + alignment{alignment} {} OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) : RasterizerCache{rasterizer}, stream_buffer(size, true) {} diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp index 0fbfbad55..da9326253 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.cpp +++ b/src/video_core/renderer_opengl/gl_global_cache.cpp @@ -15,7 +15,7 @@ namespace OpenGL { CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr) - : cpu_addr{cpu_addr}, size{size}, RasterizerCacheObject{host_ptr} { + : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size} { buffer.Create(); // Bind and unbind the buffer so it gets allocated by the driver glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 7c23056eb..8f012db62 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -100,11 +100,9 @@ struct FramebufferCacheKey { } }; -RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, - ScreenInfo& info) - : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, - emu_window{window}, system{system}, screen_info{info}, - buffer_cache(*this, STREAM_BUFFER_SIZE) { +RasterizerOpenGL::RasterizerOpenGL(Core::System& system, ScreenInfo& info) + : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, system{system}, + screen_info{info}, buffer_cache(*this, STREAM_BUFFER_SIZE) { // Create sampler objects for (std::size_t i = 0; i < texture_samplers.size(); ++i) { texture_samplers[i].Create(); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h 
b/src/video_core/renderer_opengl/gl_rasterizer.h index 30f3e8acb..4de565321 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -50,8 +50,7 @@ struct FramebufferCacheKey; class RasterizerOpenGL : public VideoCore::RasterizerInterface { public: - explicit RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, - ScreenInfo& info); + explicit RasterizerOpenGL(Core::System& system, ScreenInfo& info); ~RasterizerOpenGL() override; void DrawArrays() override; @@ -214,7 +213,6 @@ private: ShaderCacheOpenGL shader_cache; GlobalRegionCacheOpenGL global_cache; - Core::Frontend::EmuWindow& emu_window; Core::System& system; ScreenInfo& screen_info; diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 0235317c0..aba6ce731 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -562,8 +562,8 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac } CachedSurface::CachedSurface(const SurfaceParams& params) - : params{params}, gl_target{SurfaceTargetToGL(params.target)}, - cached_size_in_bytes{params.size_in_bytes}, RasterizerCacheObject{params.host_ptr} { + : RasterizerCacheObject{params.host_ptr}, params{params}, + gl_target{SurfaceTargetToGL(params.target)}, cached_size_in_bytes{params.size_in_bytes} { const auto optional_cpu_addr{ Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)}; diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index c644271d0..e8073579f 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -538,12 +538,12 @@ private: return nullptr; } - void Register(const Surface& object) { + void Register(const Surface& object) override { RasterizerCache<Surface>::Register(object); } /// Unregisters an object from the cache - void Unregister(const Surface& object) { + void Unregister(const Surface& object) override { if (object->IsReinterpreted()) { auto interval = GetReinterpretInterval(object); reinterpreted_surfaces.erase(interval); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index f5d6ac1d5..7030db365 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -213,9 +213,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, const PrecompiledPrograms& precompiled_programs, ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr) - : host_ptr{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, - program_type{program_type}, disk_cache{disk_cache}, - precompiled_programs{precompiled_programs}, RasterizerCacheObject{host_ptr} { + : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, cpu_addr{cpu_addr}, + unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache}, + precompiled_programs{precompiled_programs} { const std::size_t code_size = CalculateProgramSize(program_code); const std::size_t code_size_b = @@ -243,9 +243,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier, Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, const 
PrecompiledPrograms& precompiled_programs, GLShader::ProgramResult result, u8* host_ptr) - : cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, program_type{program_type}, - disk_cache{disk_cache}, precompiled_programs{precompiled_programs}, RasterizerCacheObject{ - host_ptr} { + : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, + program_type{program_type}, disk_cache{disk_cache}, precompiled_programs{ + precompiled_programs} { code = std::move(result.first); entries = result.second; diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 5e3d862c6..a01efeb05 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -266,7 +266,7 @@ void RendererOpenGL::CreateRasterizer() { } // Initialize sRGB Usage OpenGLState::ClearsRGBUsed(); - rasterizer = std::make_unique<RasterizerOpenGL>(render_window, system, screen_info); + rasterizer = std::make_unique<RasterizerOpenGL>(system, screen_info); } void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index eac51ecb3..388b5ffd5 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -19,8 +19,8 @@ namespace Vulkan { CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, std::size_t alignment, u8* host_ptr) - : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ - host_ptr} {} + : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset}, + alignment{alignment} {} VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp index a1e117443..13c46e5b8 100644 --- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp @@ -21,7 +21,7 @@ public: CommandBufferPool(const VKDevice& device) : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {} - void Allocate(std::size_t begin, std::size_t end) { + void Allocate(std::size_t begin, std::size_t end) override { const auto dev = device.GetLogical(); const auto& dld = device.GetDispatchLoader(); const u32 graphics_family = device.GetGraphicsFamily(); diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h index 5bfe4cead..08ee86fa6 100644 --- a/src/video_core/renderer_vulkan/vk_resource_manager.h +++ b/src/video_core/renderer_vulkan/vk_resource_manager.h @@ -97,7 +97,7 @@ private: class VKFenceWatch final : public VKResource { public: explicit VKFenceWatch(); - ~VKFenceWatch(); + ~VKFenceWatch() override; /// Waits for the fence to be released. void Wait(); diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp new file mode 100644 index 000000000..08279e562 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp @@ -0,0 +1,210 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
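// [Editor's note - aside, not part of the patch] Several constructors above
// (CachedBufferEntry, CachedGlobalRegion, CachedSurface, CachedShader) were
// reordered so the base class is initialized first and members follow their
// declaration order. Initialization always happens in declaration order no
// matter how the initializer list is written, so matching the two keeps the
// written order honest and silences -Wreorder. Small illustration:
struct Base {
    explicit Base(int v) : value{v} {}
    int value;
};

struct Derived : Base {
    // Base first, then size, then offset: exactly the order the compiler uses.
    Derived(int v, int s) : Base{v}, size{s}, offset{size + 1} {}
    int size;
    int offset;
};

int main() {
    Derived d{1, 2};
    return d.offset == 3 ? 0 : 1;   // offset sees the already-initialized size
}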
+ +#include <algorithm> +#include <array> +#include <limits> +#include <vector> + +#include "common/assert.h" +#include "common/logging/log.h" +#include "core/core.h" +#include "core/frontend/framebuffer_layout.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_resource_manager.h" +#include "video_core/renderer_vulkan/vk_swapchain.h" + +namespace Vulkan { + +namespace { +vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats) { + if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) { + return {vk::Format::eB8G8R8A8Unorm, vk::ColorSpaceKHR::eSrgbNonlinear}; + } + const auto& found = std::find_if(formats.begin(), formats.end(), [](const auto& format) { + return format.format == vk::Format::eB8G8R8A8Unorm && + format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear; + }); + return found != formats.end() ? *found : formats[0]; +} + +vk::PresentModeKHR ChooseSwapPresentMode(const std::vector<vk::PresentModeKHR>& modes) { + // Mailbox doesn't lock the application like fifo (vsync), prefer it + const auto& found = std::find_if(modes.begin(), modes.end(), [](const auto& mode) { + return mode == vk::PresentModeKHR::eMailbox; + }); + return found != modes.end() ? *found : vk::PresentModeKHR::eFifo; +} + +vk::Extent2D ChooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, + u32 height) { + constexpr auto undefined_size{std::numeric_limits<u32>::max()}; + if (capabilities.currentExtent.width != undefined_size) { + return capabilities.currentExtent; + } + vk::Extent2D extent = {width, height}; + extent.width = std::max(capabilities.minImageExtent.width, + std::min(capabilities.maxImageExtent.width, extent.width)); + extent.height = std::max(capabilities.minImageExtent.height, + std::min(capabilities.maxImageExtent.height, extent.height)); + return extent; +} +} // namespace + +VKSwapchain::VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device) + : surface{surface}, device{device} {} + +VKSwapchain::~VKSwapchain() = default; + +void VKSwapchain::Create(u32 width, u32 height) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + const auto physical_device = device.GetPhysical(); + + const vk::SurfaceCapabilitiesKHR capabilities{ + physical_device.getSurfaceCapabilitiesKHR(surface, dld)}; + if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) { + return; + } + + dev.waitIdle(dld); + Destroy(); + + CreateSwapchain(capabilities, width, height); + CreateSemaphores(); + CreateImageViews(); + + fences.resize(image_count, nullptr); +} + +void VKSwapchain::AcquireNextImage() { + const auto dev{device.GetLogical()}; + const auto& dld{device.GetDispatchLoader()}; + dev.acquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(), + *present_semaphores[frame_index], {}, &image_index, dld); + + if (auto& fence = fences[image_index]; fence) { + fence->Wait(); + fence->Release(); + fence = nullptr; + } +} + +bool VKSwapchain::Present(vk::Semaphore render_semaphore, VKFence& fence) { + const vk::Semaphore present_semaphore{*present_semaphores[frame_index]}; + const std::array<vk::Semaphore, 2> semaphores{present_semaphore, render_semaphore}; + const u32 wait_semaphore_count{render_semaphore ? 
2U : 1U}; + const auto& dld{device.GetDispatchLoader()}; + const auto present_queue{device.GetPresentQueue()}; + bool recreated = false; + + const vk::PresentInfoKHR present_info(wait_semaphore_count, semaphores.data(), 1, + &swapchain.get(), &image_index, {}); + switch (const auto result = present_queue.presentKHR(&present_info, dld); result) { + case vk::Result::eSuccess: + break; + case vk::Result::eErrorOutOfDateKHR: + if (current_width > 0 && current_height > 0) { + Create(current_width, current_height); + recreated = true; + } + break; + default: + LOG_CRITICAL(Render_Vulkan, "Vulkan failed to present swapchain due to {}!", + vk::to_string(result)); + UNREACHABLE(); + } + + ASSERT(fences[image_index] == nullptr); + fences[image_index] = &fence; + frame_index = (frame_index + 1) % image_count; + return recreated; +} + +bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const { + // TODO(Rodrigo): Handle framebuffer pixel format changes + return framebuffer.width != current_width || framebuffer.height != current_height; +} + +void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, + u32 height) { + const auto dev{device.GetLogical()}; + const auto& dld{device.GetDispatchLoader()}; + const auto physical_device{device.GetPhysical()}; + + const std::vector<vk::SurfaceFormatKHR> formats{ + physical_device.getSurfaceFormatsKHR(surface, dld)}; + + const std::vector<vk::PresentModeKHR> present_modes{ + physical_device.getSurfacePresentModesKHR(surface, dld)}; + + const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats)}; + const vk::PresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)}; + extent = ChooseSwapExtent(capabilities, width, height); + + current_width = extent.width; + current_height = extent.height; + + u32 requested_image_count{capabilities.minImageCount + 1}; + if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { + requested_image_count = capabilities.maxImageCount; + } + + vk::SwapchainCreateInfoKHR swapchain_ci( + {}, surface, requested_image_count, surface_format.format, surface_format.colorSpace, + extent, 1, vk::ImageUsageFlagBits::eColorAttachment, {}, {}, {}, + capabilities.currentTransform, vk::CompositeAlphaFlagBitsKHR::eOpaque, present_mode, false, + {}); + + const u32 graphics_family{device.GetGraphicsFamily()}; + const u32 present_family{device.GetPresentFamily()}; + const std::array<u32, 2> queue_indices{graphics_family, present_family}; + if (graphics_family != present_family) { + swapchain_ci.imageSharingMode = vk::SharingMode::eConcurrent; + swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size()); + swapchain_ci.pQueueFamilyIndices = queue_indices.data(); + } else { + swapchain_ci.imageSharingMode = vk::SharingMode::eExclusive; + } + + swapchain = dev.createSwapchainKHRUnique(swapchain_ci, nullptr, dld); + + images = dev.getSwapchainImagesKHR(*swapchain, dld); + image_count = static_cast<u32>(images.size()); + image_format = surface_format.format; +} + +void VKSwapchain::CreateSemaphores() { + const auto dev{device.GetLogical()}; + const auto& dld{device.GetDispatchLoader()}; + + present_semaphores.resize(image_count); + for (std::size_t i = 0; i < image_count; i++) { + present_semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld); + } +} + +void VKSwapchain::CreateImageViews() { + const auto dev{device.GetLogical()}; + const auto& dld{device.GetDispatchLoader()}; + + image_views.resize(image_count); 
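// [Editor's note - aside, not part of the patch] ChooseSwapExtent above is
// the standard clamp of the requested framebuffer size against the surface
// capabilities (honouring a fixed currentExtent when the driver reports one).
// With C++17 the min/max pair can be written as std::clamp; plain sketch
// using a stand-in Extent2D type:
#include <algorithm>
#include <cassert>
#include <cstdint>

struct Extent2D {
    std::uint32_t width;
    std::uint32_t height;
};

Extent2D ClampExtent(Extent2D wanted, Extent2D min, Extent2D max) {
    return {std::clamp(wanted.width, min.width, max.width),
            std::clamp(wanted.height, min.height, max.height)};
}

int main() {
    const Extent2D result = ClampExtent({4096, 10}, {640, 480}, {1920, 1080});
    assert(result.width == 1920 && result.height == 480);
}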
+ for (std::size_t i = 0; i < image_count; i++) { + const vk::ImageViewCreateInfo image_view_ci({}, images[i], vk::ImageViewType::e2D, + image_format, {}, + {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}); + image_views[i] = dev.createImageViewUnique(image_view_ci, nullptr, dld); + } +} + +void VKSwapchain::Destroy() { + frame_index = 0; + present_semaphores.clear(); + framebuffers.clear(); + image_views.clear(); + swapchain.reset(); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h new file mode 100644 index 000000000..2ad84f185 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_swapchain.h @@ -0,0 +1,92 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <vector> + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" + +namespace Layout { +struct FramebufferLayout; +} + +namespace Vulkan { + +class VKDevice; +class VKFence; + +class VKSwapchain { +public: + explicit VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device); + ~VKSwapchain(); + + /// Creates (or recreates) the swapchain with a given size. + void Create(u32 width, u32 height); + + /// Acquires the next image in the swapchain, waiting as needed. + void AcquireNextImage(); + + /// Presents the rendered image to the swapchain. Returns true when the swapchain had to be + /// recreated. Takes responsibility for the ownership of the fence. + bool Present(vk::Semaphore render_semaphore, VKFence& fence); + + /// Returns true when the framebuffer layout has changed. + bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const; + + const vk::Extent2D& GetSize() const { + return extent; + } + + u32 GetImageCount() const { + return image_count; + } + + u32 GetImageIndex() const { + return image_index; + } + + vk::Image GetImageIndex(u32 index) const { + return images[index]; + } + + vk::ImageView GetImageViewIndex(u32 index) const { + return *image_views[index]; + } + + vk::Format GetImageFormat() const { + return image_format; + } + +private: + void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height); + void CreateSemaphores(); + void CreateImageViews(); + + void Destroy(); + + const vk::SurfaceKHR surface; + const VKDevice& device; + + UniqueSwapchainKHR swapchain; + + u32 image_count{}; + std::vector<vk::Image> images; + std::vector<UniqueImageView> image_views; + std::vector<UniqueFramebuffer> framebuffers; + std::vector<VKFence*> fences; + std::vector<UniqueSemaphore> present_semaphores; + + u32 image_index{}; + u32 frame_index{}; + + vk::Format image_format{}; + vk::Extent2D extent{}; + + u32 current_width{}; + u32 current_height{}; +}; + +} // namespace Vulkan diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp index 40da1a4e2..dc149d2ed 100644 --- a/src/web_service/web_backend.cpp +++ b/src/web_service/web_backend.cpp @@ -24,7 +24,7 @@ constexpr u32 TIMEOUT_SECONDS = 30; struct Client::Impl { Impl(std::string host, std::string username, std::string token) : host{std::move(host)}, username{std::move(username)}, token{std::move(token)} { - std::lock_guard<std::mutex> lock(jwt_cache.mutex); + std::lock_guard lock{jwt_cache.mutex}; if (this->username == jwt_cache.username && this->token == jwt_cache.token) { jwt = jwt_cache.jwt; } @@ -151,7 +151,7 @@ struct Client::Impl { if (result.result_code !=
Common::WebResult::Code::Success) { LOG_ERROR(WebService, "UpdateJWT failed"); } else { - std::lock_guard<std::mutex> lock(jwt_cache.mutex); + std::lock_guard lock{jwt_cache.mutex}; jwt_cache.username = username; jwt_cache.token = token; jwt_cache.jwt = jwt = result.returned_data; diff --git a/src/yuzu/applets/profile_select.cpp b/src/yuzu/applets/profile_select.cpp index 5c1b65a2c..f95f7fe3c 100644 --- a/src/yuzu/applets/profile_select.cpp +++ b/src/yuzu/applets/profile_select.cpp @@ -58,10 +58,7 @@ QtProfileSelectionDialog::QtProfileSelectionDialog(QWidget* parent) scroll_area = new QScrollArea; - buttons = new QDialogButtonBox; - buttons->addButton(tr("Cancel"), QDialogButtonBox::RejectRole); - buttons->addButton(tr("OK"), QDialogButtonBox::AcceptRole); - + buttons = new QDialogButtonBox(QDialogButtonBox::Cancel | QDialogButtonBox::Ok); connect(buttons, &QDialogButtonBox::accepted, this, &QtProfileSelectionDialog::accept); connect(buttons, &QDialogButtonBox::rejected, this, &QtProfileSelectionDialog::reject); @@ -163,6 +160,6 @@ void QtProfileSelector::SelectProfile( void QtProfileSelector::MainWindowFinishedSelection(std::optional<Service::Account::UUID> uuid) { // Acquire the HLE mutex - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; callback(uuid); } diff --git a/src/yuzu/applets/software_keyboard.cpp b/src/yuzu/applets/software_keyboard.cpp index 8a26fdff1..f3eb29b25 100644 --- a/src/yuzu/applets/software_keyboard.cpp +++ b/src/yuzu/applets/software_keyboard.cpp @@ -75,13 +75,13 @@ QtSoftwareKeyboardDialog::QtSoftwareKeyboardDialog( length_label->setText(QStringLiteral("%1/%2").arg(text.size()).arg(parameters.max_length)); }); - buttons = new QDialogButtonBox; - buttons->addButton(tr("Cancel"), QDialogButtonBox::RejectRole); - buttons->addButton(parameters.submit_text.empty() - ? 
tr("OK") - : QString::fromStdU16String(parameters.submit_text), - QDialogButtonBox::AcceptRole); - + buttons = new QDialogButtonBox(QDialogButtonBox::Cancel); + if (parameters.submit_text.empty()) { + buttons->addButton(QDialogButtonBox::Ok); + } else { + buttons->addButton(QString::fromStdU16String(parameters.submit_text), + QDialogButtonBox::AcceptRole); + } connect(buttons, &QDialogButtonBox::accepted, this, &QtSoftwareKeyboardDialog::accept); connect(buttons, &QDialogButtonBox::rejected, this, &QtSoftwareKeyboardDialog::reject); layout->addWidget(header_label); @@ -141,12 +141,12 @@ void QtSoftwareKeyboard::SendTextCheckDialog(std::u16string error_message, void QtSoftwareKeyboard::MainWindowFinishedText(std::optional<std::u16string> text) { // Acquire the HLE mutex - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; text_output(text); } void QtSoftwareKeyboard::MainWindowFinishedCheckDialog() { // Acquire the HLE mutex - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; finished_check(); } diff --git a/src/yuzu/applets/web_browser.cpp b/src/yuzu/applets/web_browser.cpp index 979b9ec14..ac80b2fa2 100644 --- a/src/yuzu/applets/web_browser.cpp +++ b/src/yuzu/applets/web_browser.cpp @@ -104,12 +104,12 @@ void QtWebBrowser::OpenPage(std::string_view url, std::function<void()> unpack_r void QtWebBrowser::MainWindowUnpackRomFS() { // Acquire the HLE mutex - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; unpack_romfs_callback(); } void QtWebBrowser::MainWindowFinishedBrowsing() { // Acquire the HLE mutex - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; finished_callback(); } diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 05ad19e1d..7438fbc0a 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp @@ -67,7 +67,7 @@ void EmuThread::run() { was_active = false; } else { - std::unique_lock<std::mutex> lock(running_mutex); + std::unique_lock lock{running_mutex}; running_cv.wait(lock, [this] { return IsRunning() || exec_step || stop_run; }); } } diff --git a/src/yuzu/bootmanager.h b/src/yuzu/bootmanager.h index 7226e690e..3183621bc 100644 --- a/src/yuzu/bootmanager.h +++ b/src/yuzu/bootmanager.h @@ -53,7 +53,7 @@ public: * @note This function is thread-safe */ void SetRunning(bool running) { - std::unique_lock<std::mutex> lock(running_mutex); + std::unique_lock lock{running_mutex}; this->running = running; lock.unlock(); running_cv.notify_all(); diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp index 4650f96a3..dead9f807 100644 --- a/src/yuzu/configuration/config.cpp +++ b/src/yuzu/configuration/config.cpp @@ -413,7 +413,6 @@ void Config::ReadValues() { qt_config->beginGroup("System"); Settings::values.use_docked_mode = ReadSetting("use_docked_mode", false).toBool(); - Settings::values.enable_nfc = ReadSetting("enable_nfc", true).toBool(); Settings::values.current_user = std::clamp<int>(ReadSetting("current_user", 0).toInt(), 0, Service::Account::MAX_USERS - 1); @@ -675,7 +674,6 @@ void Config::SaveValues() { qt_config->beginGroup("System"); WriteSetting("use_docked_mode", Settings::values.use_docked_mode, false); - WriteSetting("enable_nfc", Settings::values.enable_nfc, true); WriteSetting("current_user", Settings::values.current_user, 0); WriteSetting("language_index", Settings::values.language_index, 1); diff --git 
a/src/yuzu/configuration/configure_general.cpp b/src/yuzu/configuration/configure_general.cpp index 4116b6cd7..389fcf667 100644 --- a/src/yuzu/configuration/configure_general.cpp +++ b/src/yuzu/configuration/configure_general.cpp @@ -33,7 +33,6 @@ void ConfigureGeneral::setConfiguration() { ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot); ui->theme_combobox->setCurrentIndex(ui->theme_combobox->findData(UISettings::values.theme)); ui->use_cpu_jit->setChecked(Settings::values.use_cpu_jit); - ui->enable_nfc->setChecked(Settings::values.enable_nfc); } void ConfigureGeneral::PopulateHotkeyList(const HotkeyRegistry& registry) { @@ -48,5 +47,4 @@ void ConfigureGeneral::applyConfiguration() { ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString(); Settings::values.use_cpu_jit = ui->use_cpu_jit->isChecked(); - Settings::values.enable_nfc = ui->enable_nfc->isChecked(); } diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui index dff0ad5d0..01d1c0b8e 100644 --- a/src/yuzu/configuration/configure_general.ui +++ b/src/yuzu/configuration/configure_general.ui @@ -71,26 +71,6 @@ </widget> </item> <item> - <widget class="QGroupBox" name="EmulationGroupBox"> - <property name="title"> - <string>Emulation</string> - </property> - <layout class="QHBoxLayout" name="EmulationHorizontalLayout"> - <item> - <layout class="QVBoxLayout" name="EmulationVerticalLayout"> - <item> - <widget class="QCheckBox" name="enable_nfc"> - <property name="text"> - <string>Enable NFC</string> - </property> - </widget> - </item> - </layout> - </item> - </layout> - </widget> - </item> - <item> <widget class="QGroupBox" name="theme_group_box"> <property name="title"> <string>Theme</string> diff --git a/src/yuzu/debugger/profiler.cpp b/src/yuzu/debugger/profiler.cpp index 8b30e0a85..86e03e46d 100644 --- a/src/yuzu/debugger/profiler.cpp +++ b/src/yuzu/debugger/profiler.cpp @@ -7,6 +7,7 @@ #include <QMouseEvent> #include <QPainter> #include <QString> +#include <QTimer> #include "common/common_types.h" #include "common/microprofile.h" #include "yuzu/debugger/profiler.h" diff --git a/src/yuzu/debugger/profiler.h b/src/yuzu/debugger/profiler.h index eae1e9e3c..8e69fdb06 100644 --- a/src/yuzu/debugger/profiler.h +++ b/src/yuzu/debugger/profiler.h @@ -4,10 +4,11 @@ #pragma once -#include <QAbstractItemModel> -#include <QDockWidget> -#include <QTimer> -#include "common/microprofile.h" +#include <QWidget> + +class QAction; +class QHideEvent; +class QShowEvent; class MicroProfileDialog : public QWidget { Q_OBJECT diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index 06ad74ffe..593bb681f 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -234,6 +234,9 @@ QString WaitTreeThread::GetText() const { case Kernel::ThreadStatus::WaitMutex: status = tr("waiting for mutex"); break; + case Kernel::ThreadStatus::WaitCondVar: + status = tr("waiting for condition variable"); + break; case Kernel::ThreadStatus::WaitArb: status = tr("waiting for address arbiter"); break; @@ -269,6 +272,7 @@ QColor WaitTreeThread::GetColor() const { case Kernel::ThreadStatus::WaitSynchAll: case Kernel::ThreadStatus::WaitSynchAny: case Kernel::ThreadStatus::WaitMutex: + case Kernel::ThreadStatus::WaitCondVar: case Kernel::ThreadStatus::WaitArb: return QColor(Qt::GlobalColor::red); case Kernel::ThreadStatus::Dormant: diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp index 32e78049c..f24cc77fe 100644 --- 
a/src/yuzu_cmd/config.cpp +++ b/src/yuzu_cmd/config.cpp @@ -319,7 +319,6 @@ void Config::ReadValues() { // System Settings::values.use_docked_mode = sdl2_config->GetBoolean("System", "use_docked_mode", false); - Settings::values.enable_nfc = sdl2_config->GetBoolean("System", "enable_nfc", true); const auto size = sdl2_config->GetInteger("System", "users_size", 0); Settings::values.current_user = std::clamp<int>(
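For orientation, the new VKSwapchain interface added in this diff is meant to be driven in a create/acquire/present loop by the Vulkan renderer. The sketch below is illustrative only and is not part of this change: the free function, its parameters, and the framebuffer-layout include path are assumptions, and only methods declared in vk_swapchain.h are used.

// Illustrative sketch, not project code. Assumes the caller owns a VKSwapchain,
// a VKFence from the renderer's resource manager, and a semaphore signalled by
// the frame's last command buffer submission.
#include "core/frontend/framebuffer_layout.h" // assumed include path for Layout::FramebufferLayout
#include "video_core/renderer_vulkan/vk_swapchain.h"

namespace Vulkan {

void PresentFrame(VKSwapchain& swapchain, const Layout::FramebufferLayout& layout,
                  vk::Semaphore render_semaphore, VKFence& fence) {
    // Rebuild the swapchain when the window size no longer matches its images.
    if (swapchain.HasFramebufferChanged(layout)) {
        swapchain.Create(layout.width, layout.height);
    }

    swapchain.AcquireNextImage();
    const u32 image_index = swapchain.GetImageIndex();
    // ... record and submit work targeting swapchain.GetImageViewIndex(image_index),
    //     signalling render_semaphore when it finishes ...

    // Present() takes ownership of the fence and returns true when it had to recreate
    // the swapchain (e.g. on vk::Result::eErrorOutOfDateKHR); in that case anything
    // derived from the old images must be rebuilt.
    if (swapchain.Present(render_semaphore, fence)) {
        // Recreate framebuffers and other per-image resources here.
    }
}

} // namespace Vulkan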
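Several hunks in this section (web_backend.cpp, the applet frontends, bootmanager) replace explicitly specialized std::lock_guard and std::unique_lock declarations with C++17 class template argument deduction. The two spellings are equivalent; a minimal standalone comparison, unrelated to the project code:

#include <mutex>

std::mutex m;

void before() {
    std::lock_guard<std::mutex> lock(m); // template argument written out
}

void after() {
    std::lock_guard lock{m}; // CTAD deduces std::lock_guard<std::mutex> (C++17)
}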
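The profile-selection and software-keyboard dialogs now build their QDialogButtonBox from standard-button flags instead of adding role-based buttons one by one, which gives platform-standard labels and ordering; only the keyboard's custom submit text still goes through addButton() with an explicit role. A small standalone sketch of the same pattern (the helper function is hypothetical, not code from this change):

#include <QDialogButtonBox>
#include <QString>

// Hypothetical helper mirroring the button-box pattern used by the dialogs above.
QDialogButtonBox* MakeButtons(QWidget* parent, const QString& submit_text) {
    auto* buttons = new QDialogButtonBox(QDialogButtonBox::Cancel, parent);
    if (submit_text.isEmpty()) {
        buttons->addButton(QDialogButtonBox::Ok); // standard OK button
    } else {
        // Custom accept text still needs an explicit button role.
        buttons->addButton(submit_text, QDialogButtonBox::AcceptRole);
    }
    return buttons;
}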