Diffstat (limited to 'src'): 25 files changed, 300 insertions, 285 deletions
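Editor's note: the bulk of this patch migrates `Common::Fiber` (and its users in `CpuManager`, `KScheduler`, `KThread`, and the fiber tests) from `void*` start/rewind parameters plus static trampoline functions to capturing lambdas. A minimal sketch of the new call pattern, based on the post-patch signatures in `common/fiber.h`; the `Worker` type here is hypothetical and only for illustration:

```cpp
#include <memory>
#include "common/fiber.h"

// Hypothetical worker; only Common::Fiber's signatures come from this patch.
struct Worker {
    void Run() { /* guest work */ }
    void Rewind() { /* restart from a known-good point */ }
};

void Example(Worker& worker) {
    // Before: Fiber(std::function<void(void*)>&& entry, void* param) + SetStartParameter().
    // After: the callable captures whatever state it needs.
    auto fiber = std::make_shared<Common::Fiber>([&worker] { worker.Run(); });
    fiber->SetRewindPoint([&worker] { worker.Rewind(); });
}
```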
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 39d038493..39ae573b2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -36,7 +36,6 @@ if (MSVC) # /GT - Supports fiber safety for data allocated using static thread-local storage add_compile_options( /MP - /Zi /Zm200 /Zo /permissive- @@ -79,6 +78,13 @@ if (MSVC) /we5245 # 'function': unreferenced function with internal linkage has been removed ) + if (USE_CCACHE) + # when caching, we need to use /Z7 to downgrade debug info to use an older but more cachable format + add_compile_options(/Z7) + else() + add_compile_options(/Zi) + endif() + if (ARCHITECTURE_x86_64) add_compile_options(/QIntel-jcc-erratum) endif() diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp index f9aeb692a..bc92b360b 100644 --- a/src/common/fiber.cpp +++ b/src/common/fiber.cpp @@ -20,10 +20,8 @@ struct Fiber::FiberImpl { VirtualBuffer<u8> rewind_stack; std::mutex guard; - std::function<void(void*)> entry_point; - std::function<void(void*)> rewind_point; - void* rewind_parameter{}; - void* start_parameter{}; + std::function<void()> entry_point; + std::function<void()> rewind_point; std::shared_ptr<Fiber> previous_fiber; bool is_thread_fiber{}; bool released{}; @@ -34,13 +32,8 @@ struct Fiber::FiberImpl { boost::context::detail::fcontext_t rewind_context{}; }; -void Fiber::SetStartParameter(void* new_parameter) { - impl->start_parameter = new_parameter; -} - -void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param) { +void Fiber::SetRewindPoint(std::function<void()>&& rewind_func) { impl->rewind_point = std::move(rewind_func); - impl->rewind_parameter = rewind_param; } void Fiber::Start(boost::context::detail::transfer_t& transfer) { @@ -48,7 +41,7 @@ void Fiber::Start(boost::context::detail::transfer_t& transfer) { impl->previous_fiber->impl->context = transfer.fctx; impl->previous_fiber->impl->guard.unlock(); impl->previous_fiber.reset(); - impl->entry_point(impl->start_parameter); + impl->entry_point(); UNREACHABLE(); } @@ -59,7 +52,7 @@ void Fiber::OnRewind([[maybe_unused]] boost::context::detail::transfer_t& transf u8* tmp = impl->stack_limit; impl->stack_limit = impl->rewind_stack_limit; impl->rewind_stack_limit = tmp; - impl->rewind_point(impl->rewind_parameter); + impl->rewind_point(); UNREACHABLE(); } @@ -73,10 +66,8 @@ void Fiber::RewindStartFunc(boost::context::detail::transfer_t transfer) { fiber->OnRewind(transfer); } -Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter) - : impl{std::make_unique<FiberImpl>()} { +Fiber::Fiber(std::function<void()>&& entry_point_func) : impl{std::make_unique<FiberImpl>()} { impl->entry_point = std::move(entry_point_func); - impl->start_parameter = start_parameter; impl->stack_limit = impl->stack.data(); impl->rewind_stack_limit = impl->rewind_stack.data(); u8* stack_base = impl->stack_limit + default_stack_size; diff --git a/src/common/fiber.h b/src/common/fiber.h index 873604bc6..f24d333a3 100644 --- a/src/common/fiber.h +++ b/src/common/fiber.h @@ -29,7 +29,7 @@ namespace Common { */ class Fiber { public: - Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter); + Fiber(std::function<void()>&& entry_point_func); ~Fiber(); Fiber(const Fiber&) = delete; @@ -43,16 +43,13 @@ public: static void YieldTo(std::weak_ptr<Fiber> weak_from, Fiber& to); [[nodiscard]] static std::shared_ptr<Fiber> ThreadToFiber(); - void SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param); + void 
SetRewindPoint(std::function<void()>&& rewind_func); void Rewind(); /// Only call from main thread's fiber void Exit(); - /// Changes the start parameter of the fiber. Has no effect if the fiber already started - void SetStartParameter(void* new_parameter); - private: Fiber(); diff --git a/src/common/thread.cpp b/src/common/thread.cpp index f932a7290..919e33af9 100644 --- a/src/common/thread.cpp +++ b/src/common/thread.cpp @@ -47,6 +47,9 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) { case ThreadPriority::VeryHigh: windows_priority = THREAD_PRIORITY_HIGHEST; break; + case ThreadPriority::Critical: + windows_priority = THREAD_PRIORITY_TIME_CRITICAL; + break; default: windows_priority = THREAD_PRIORITY_NORMAL; break; @@ -59,9 +62,10 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) { void SetCurrentThreadPriority(ThreadPriority new_priority) { pthread_t this_thread = pthread_self(); - s32 max_prio = sched_get_priority_max(SCHED_OTHER); - s32 min_prio = sched_get_priority_min(SCHED_OTHER); - u32 level = static_cast<u32>(new_priority) + 1; + const auto scheduling_type = SCHED_OTHER; + s32 max_prio = sched_get_priority_max(scheduling_type); + s32 min_prio = sched_get_priority_min(scheduling_type); + u32 level = std::max(static_cast<u32>(new_priority) + 1, 4U); struct sched_param params; if (max_prio > min_prio) { @@ -70,7 +74,7 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) { params.sched_priority = min_prio - ((min_prio - max_prio) * level) / 4; } - pthread_setschedparam(this_thread, SCHED_OTHER, ¶ms); + pthread_setschedparam(this_thread, scheduling_type, ¶ms); } #endif diff --git a/src/common/thread.h b/src/common/thread.h index a63122516..1552f58e0 100644 --- a/src/common/thread.h +++ b/src/common/thread.h @@ -92,6 +92,7 @@ enum class ThreadPriority : u32 { Normal = 1, High = 2, VeryHigh = 3, + Critical = 4, }; void SetCurrentThreadPriority(ThreadPriority new_priority); diff --git a/src/common/uint128.h b/src/common/uint128.h index f890ffec2..199d0f55e 100644 --- a/src/common/uint128.h +++ b/src/common/uint128.h @@ -31,12 +31,17 @@ namespace Common { return _udiv128(r[1], r[0], d, &remainder); #endif #else +#ifdef __SIZEOF_INT128__ + const auto product = static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b); + return static_cast<u64>(product / d); +#else const u64 diva = a / d; const u64 moda = a % d; const u64 divb = b / d; const u64 modb = b % d; return diva * b + moda * divb + moda * modb / d; #endif +#endif } // This function multiplies 2 u64 values and produces a u128 value; diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp index 1b7194503..6aaa8cdf9 100644 --- a/src/common/x64/native_clock.cpp +++ b/src/common/x64/native_clock.cpp @@ -75,8 +75,8 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen } u64 NativeClock::GetRTSC() { - TimePoint new_time_point{}; TimePoint current_time_point{}; + TimePoint new_time_point{}; current_time_point.pack = Common::AtomicLoad128(time_point.pack.data()); do { @@ -89,8 +89,7 @@ u64 NativeClock::GetRTSC() { new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff; } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack, current_time_point.pack, current_time_point.pack)); - /// The clock cannot be more precise than the guest timer, remove the lower bits - return new_time_point.inner.accumulated_ticks & inaccuracy_mask; + return new_time_point.inner.accumulated_ticks; } void 
NativeClock::Pause(bool is_paused) { diff --git a/src/common/x64/native_clock.h b/src/common/x64/native_clock.h index 30d2ba2e9..38ae7a462 100644 --- a/src/common/x64/native_clock.h +++ b/src/common/x64/native_clock.h @@ -37,12 +37,8 @@ private: } inner; }; - /// value used to reduce the native clocks accuracy as some apss rely on - /// undefined behavior where the level of accuracy in the clock shouldn't - /// be higher. - static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1); - TimePoint time_point; + // factors u64 clock_rtsc_factor{}; u64 cpu_rtsc_factor{}; diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index 8e095cdcd..0efc3732f 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp @@ -119,16 +119,23 @@ void ARM_Interface::Run() { } system.ExitDynarmicProfile(); - // Notify the debugger and go to sleep if a breakpoint was hit. - if (Has(hr, breakpoint)) { + // Notify the debugger and go to sleep if a breakpoint was hit, + // or if the thread is unable to continue for any reason. + if (Has(hr, breakpoint) || Has(hr, no_execute)) { RewindBreakpointInstruction(); - system.GetDebugger().NotifyThreadStopped(current_thread); - current_thread->RequestSuspend(SuspendType::Debug); + if (system.DebuggerEnabled()) { + system.GetDebugger().NotifyThreadStopped(current_thread); + } + current_thread->RequestSuspend(Kernel::SuspendType::Debug); break; } + + // Notify the debugger and go to sleep if a watchpoint was hit. if (Has(hr, watchpoint)) { RewindBreakpointInstruction(); - system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint()); + if (system.DebuggerEnabled()) { + system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint()); + } current_thread->RequestSuspend(SuspendType::Debug); break; } diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index 4e431e27a..8a066ed91 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h @@ -204,6 +204,7 @@ public: static constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3; static constexpr Dynarmic::HaltReason breakpoint = Dynarmic::HaltReason::UserDefined4; static constexpr Dynarmic::HaltReason watchpoint = Dynarmic::HaltReason::UserDefined5; + static constexpr Dynarmic::HaltReason no_execute = Dynarmic::HaltReason::UserDefined6; protected: /// System context that this ARM interface is running under. 
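Editor's note: the new `no_execute` halt reason introduced above is produced by the instruction-fetch callbacks and consumed by `ARM_Interface::Run()`. A condensed sketch of how the pieces fit together (not the full implementation; the real callbacks follow in the `arm_dynarmic_*` hunks below):

```cpp
// 1. The fetch callback returns std::nullopt for unmapped code addresses, so
//    Dynarmic raises a NoExecuteFault instead of reading unmapped memory.
//
//    std::optional<u32> MemoryReadCode(u64 vaddr) {
//        if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
//            return std::nullopt;
//        }
//        return MemoryRead32(vaddr);
//    }
//
// 2. ExceptionRaised() converts the fault into the new halt reason; ReturnException()
//    saves the context so the saved PC points at the faulting instruction.
//
//    case Dynarmic::A64::Exception::NoExecuteFault:
//        ReturnException(pc, ARM_Interface::no_execute);
//
// 3. Run() then suspends the thread, notifying the debugger only when one is attached:
//
//    if (Has(hr, breakpoint) || Has(hr, no_execute)) {
//        if (system.DebuggerEnabled()) { /* notify */ }
//        current_thread->RequestSuspend(Kernel::SuspendType::Debug);
//    }
```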
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp index 8c90c8be0..10cf72a45 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp @@ -48,6 +48,12 @@ public: CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read); return memory.Read64(vaddr); } + std::optional<u32> MemoryReadCode(u32 vaddr) override { + if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) { + return std::nullopt; + } + return MemoryRead32(vaddr); + } void MemoryWrite8(u32 vaddr, u8 value) override { if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) { @@ -89,21 +95,28 @@ public: void InterpreterFallback(u32 pc, std::size_t num_instructions) override { parent.LogBacktrace(); - UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc, - MemoryReadCode(pc)); + LOG_ERROR(Core_ARM, + "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc, + num_instructions, MemoryRead32(pc)); } void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override { - if (debugger_enabled) { - parent.SaveContext(parent.breakpoint_context); - parent.jit.load()->HaltExecution(ARM_Interface::breakpoint); + switch (exception) { + case Dynarmic::A32::Exception::NoExecuteFault: + LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#08x}", pc); + ReturnException(pc, ARM_Interface::no_execute); return; - } + default: + if (debugger_enabled) { + ReturnException(pc, ARM_Interface::breakpoint); + return; + } - parent.LogBacktrace(); - LOG_CRITICAL(Core_ARM, - "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})", - exception, pc, MemoryReadCode(pc), parent.IsInThumbMode()); + parent.LogBacktrace(); + LOG_CRITICAL(Core_ARM, + "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})", + exception, pc, MemoryRead32(pc), parent.IsInThumbMode()); + } } void CallSVC(u32 swi) override { @@ -141,15 +154,20 @@ public: const auto match{parent.MatchingWatchpoint(addr, size, type)}; if (match) { - parent.SaveContext(parent.breakpoint_context); - parent.jit.load()->HaltExecution(ARM_Interface::watchpoint); parent.halted_watchpoint = match; + ReturnException(parent.jit.load()->Regs()[15], ARM_Interface::watchpoint); return false; } return true; } + void ReturnException(u32 pc, Dynarmic::HaltReason hr) { + parent.SaveContext(parent.breakpoint_context); + parent.breakpoint_context.cpu_registers[15] = pc; + parent.jit.load()->HaltExecution(hr); + } + ARM_Dynarmic_32& parent; Core::Memory::Memory& memory; std::size_t num_interpreted_instructions{}; diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp index 4370ca294..92266aa9e 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp @@ -52,6 +52,12 @@ public: CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read); return {memory.Read64(vaddr), memory.Read64(vaddr + 8)}; } + std::optional<u32> MemoryReadCode(u64 vaddr) override { + if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) { + return std::nullopt; + } + return MemoryRead32(vaddr); + } void MemoryWrite8(u64 vaddr, u8 value) override { if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) { @@ -105,7 +111,7 @@ public: parent.LogBacktrace(); LOG_ERROR(Core_ARM, "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc, - num_instructions, MemoryReadCode(pc)); + num_instructions, 
MemoryRead32(pc)); } void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op, @@ -138,16 +144,19 @@ public: case Dynarmic::A64::Exception::SendEventLocal: case Dynarmic::A64::Exception::Yield: return; + case Dynarmic::A64::Exception::NoExecuteFault: + LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#016x}", pc); + ReturnException(pc, ARM_Interface::no_execute); + return; default: if (debugger_enabled) { - parent.SaveContext(parent.breakpoint_context); - parent.jit.load()->HaltExecution(ARM_Interface::breakpoint); + ReturnException(pc, ARM_Interface::breakpoint); return; } parent.LogBacktrace(); - ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})", - static_cast<std::size_t>(exception), pc, MemoryReadCode(pc)); + LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})", + static_cast<std::size_t>(exception), pc, MemoryRead32(pc)); } } @@ -188,15 +197,20 @@ public: const auto match{parent.MatchingWatchpoint(addr, size, type)}; if (match) { - parent.SaveContext(parent.breakpoint_context); - parent.jit.load()->HaltExecution(ARM_Interface::watchpoint); parent.halted_watchpoint = match; + ReturnException(parent.jit.load()->GetPC(), ARM_Interface::watchpoint); return false; } return true; } + void ReturnException(u64 pc, Dynarmic::HaltReason hr) { + parent.SaveContext(parent.breakpoint_context); + parent.breakpoint_context.pc = pc; + parent.jit.load()->HaltExecution(hr); + } + ARM_Dynarmic_64& parent; Core::Memory::Memory& memory; u64 tpidrro_el0 = 0; diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 29e7dba9b..140578069 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp @@ -6,7 +6,9 @@ #include <string> #include <tuple> +#include "common/logging/log.h" #include "common/microprofile.h" +#include "common/thread.h" #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/hardware_properties.h" @@ -41,11 +43,11 @@ CoreTiming::CoreTiming() CoreTiming::~CoreTiming() = default; -void CoreTiming::ThreadEntry(CoreTiming& instance) { - constexpr char name[] = "yuzu:HostTiming"; - MicroProfileOnThreadCreate(name); - Common::SetCurrentThreadName(name); - Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh); +void CoreTiming::ThreadEntry(CoreTiming& instance, size_t id) { + const std::string name = "yuzu:HostTiming_" + std::to_string(id); + MicroProfileOnThreadCreate(name.c_str()); + Common::SetCurrentThreadName(name.c_str()); + Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical); instance.on_thread_init(); instance.ThreadLoop(); MicroProfileOnThreadExit(); @@ -59,68 +61,97 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { const auto empty_timed_callback = [](std::uintptr_t, std::chrono::nanoseconds) {}; ev_lost = CreateEvent("_lost_event", empty_timed_callback); if (is_multicore) { - timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this)); + const auto hardware_concurrency = std::thread::hardware_concurrency(); + size_t id = 0; + worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++); + if (hardware_concurrency > 8) { + worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++); + } } } void CoreTiming::Shutdown() { - paused = true; + is_paused = true; shutting_down = true; - pause_event.Set(); - event.Set(); - if (timer_thread) { - timer_thread->join(); + std::atomic_thread_fence(std::memory_order_release); + + event_cv.notify_all(); + 
wait_pause_cv.notify_all(); + for (auto& thread : worker_threads) { + thread.join(); } + worker_threads.clear(); ClearPendingEvents(); - timer_thread.reset(); has_started = false; } -void CoreTiming::Pause(bool is_paused) { - paused = is_paused; - pause_event.Set(); +void CoreTiming::Pause(bool is_paused_) { + std::unique_lock main_lock(event_mutex); + if (is_paused_ == paused_state.load(std::memory_order_relaxed)) { + return; + } + if (is_multicore) { + is_paused = is_paused_; + event_cv.notify_all(); + if (!is_paused_) { + wait_pause_cv.notify_all(); + } + } + paused_state.store(is_paused_, std::memory_order_relaxed); } -void CoreTiming::SyncPause(bool is_paused) { - if (is_paused == paused && paused_set == paused) { +void CoreTiming::SyncPause(bool is_paused_) { + std::unique_lock main_lock(event_mutex); + if (is_paused_ == paused_state.load(std::memory_order_relaxed)) { return; } - Pause(is_paused); - if (timer_thread) { - if (!is_paused) { - pause_event.Set(); + + if (is_multicore) { + is_paused = is_paused_; + event_cv.notify_all(); + if (!is_paused_) { + wait_pause_cv.notify_all(); + } + } + paused_state.store(is_paused_, std::memory_order_relaxed); + if (is_multicore) { + if (is_paused_) { + wait_signal_cv.wait(main_lock, [this] { return pause_count == worker_threads.size(); }); + } else { + wait_signal_cv.wait(main_lock, [this] { return pause_count == 0; }); } - event.Set(); - while (paused_set != is_paused) - ; } } bool CoreTiming::IsRunning() const { - return !paused_set; + return !paused_state.load(std::memory_order_acquire); } bool CoreTiming::HasPendingEvents() const { - return !(wait_set && event_queue.empty()); + std::unique_lock main_lock(event_mutex); + return !event_queue.empty() || pending_events.load(std::memory_order_relaxed) != 0; } void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future, const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data) { - { - std::scoped_lock scope{basic_lock}; - const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count()); - event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type}); + std::unique_lock main_lock(event_mutex); + const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count()); + + event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type}); + pending_events.fetch_add(1, std::memory_order_relaxed); - std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); + std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); + + if (is_multicore) { + event_cv.notify_one(); } - event.Set(); } void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data) { - std::scoped_lock scope{basic_lock}; + std::unique_lock main_lock(event_mutex); const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { return e.type.lock().get() == event_type.get() && e.user_data == user_data; }); @@ -129,6 +160,7 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, if (itr != event_queue.end()) { event_queue.erase(itr, event_queue.end()); std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>()); + pending_events.fetch_sub(1, std::memory_order_relaxed); } } @@ -168,11 +200,12 @@ u64 CoreTiming::GetClockTicks() const { } void CoreTiming::ClearPendingEvents() { + std::unique_lock main_lock(event_mutex); event_queue.clear(); } void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) 
{ - std::scoped_lock lock{basic_lock}; + std::unique_lock main_lock(event_mutex); const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { return e.type.lock().get() == event_type.get(); @@ -186,21 +219,28 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { } std::optional<s64> CoreTiming::Advance() { - std::scoped_lock lock{advance_lock, basic_lock}; global_timer = GetGlobalTimeNs().count(); + std::unique_lock main_lock(event_mutex); while (!event_queue.empty() && event_queue.front().time <= global_timer) { Event evt = std::move(event_queue.front()); std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>()); event_queue.pop_back(); - basic_lock.unlock(); if (const auto event_type{evt.type.lock()}) { - event_type->callback( - evt.user_data, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)}); + sequence_mutex.lock(); + event_mutex.unlock(); + + event_type->guard.lock(); + sequence_mutex.unlock(); + const s64 delay = static_cast<s64>(GetGlobalTimeNs().count() - evt.time); + event_type->callback(evt.user_data, std::chrono::nanoseconds{delay}); + event_type->guard.unlock(); + + event_mutex.lock(); + pending_events.fetch_sub(1, std::memory_order_relaxed); } - basic_lock.lock(); global_timer = GetGlobalTimeNs().count(); } @@ -213,26 +253,34 @@ std::optional<s64> CoreTiming::Advance() { } void CoreTiming::ThreadLoop() { + const auto predicate = [this] { return !event_queue.empty() || is_paused; }; has_started = true; while (!shutting_down) { - while (!paused) { - paused_set = false; + while (!is_paused && !shutting_down) { const auto next_time = Advance(); if (next_time) { if (*next_time > 0) { std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time); - event.WaitFor(next_time_ns); + std::unique_lock main_lock(event_mutex); + event_cv.wait_for(main_lock, next_time_ns, predicate); } } else { - wait_set = true; - event.Wait(); + std::unique_lock main_lock(event_mutex); + event_cv.wait(main_lock, predicate); } - wait_set = false; } - paused_set = true; - clock->Pause(true); - pause_event.Wait(); - clock->Pause(false); + std::unique_lock main_lock(event_mutex); + pause_count++; + if (pause_count == worker_threads.size()) { + clock->Pause(true); + wait_signal_cv.notify_all(); + } + wait_pause_cv.wait(main_lock, [this] { return !is_paused || shutting_down; }); + pause_count--; + if (pause_count == 0) { + clock->Pause(false); + wait_signal_cv.notify_all(); + } } } diff --git a/src/core/core_timing.h b/src/core/core_timing.h index d27773009..a86553e08 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -5,6 +5,7 @@ #include <atomic> #include <chrono> +#include <condition_variable> #include <functional> #include <memory> #include <mutex> @@ -14,7 +15,6 @@ #include <vector> #include "common/common_types.h" -#include "common/thread.h" #include "common/wall_clock.h" namespace Core::Timing { @@ -32,6 +32,7 @@ struct EventType { TimedCallback callback; /// A pointer to the name of the event. const std::string name; + mutable std::mutex guard; }; /** @@ -131,7 +132,7 @@ private: /// Clear all pending events. This should ONLY be done on exit. void ClearPendingEvents(); - static void ThreadEntry(CoreTiming& instance); + static void ThreadEntry(CoreTiming& instance, size_t id); void ThreadLoop(); std::unique_ptr<Common::WallClock> clock; @@ -144,21 +145,25 @@ private: // accomodated by the standard adaptor class. 
std::vector<Event> event_queue; u64 event_fifo_id = 0; + std::atomic<size_t> pending_events{}; std::shared_ptr<EventType> ev_lost; - Common::Event event{}; - Common::Event pause_event{}; - std::mutex basic_lock; - std::mutex advance_lock; - std::unique_ptr<std::thread> timer_thread; - std::atomic<bool> paused{}; - std::atomic<bool> paused_set{}; - std::atomic<bool> wait_set{}; - std::atomic<bool> shutting_down{}; std::atomic<bool> has_started{}; std::function<void()> on_thread_init{}; + std::vector<std::thread> worker_threads; + + std::condition_variable event_cv; + std::condition_variable wait_pause_cv; + std::condition_variable wait_signal_cv; + mutable std::mutex event_mutex; + mutable std::mutex sequence_mutex; + + std::atomic<bool> paused_state{}; + bool is_paused{}; + bool shutting_down{}; bool is_multicore{}; + size_t pause_count{}; /// Cycle timing u64 ticks{}; diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp index fd6928105..37d3d83b9 100644 --- a/src/core/cpu_manager.cpp +++ b/src/core/cpu_manager.cpp @@ -41,51 +41,32 @@ void CpuManager::Shutdown() { } } -std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() { - return GuestThreadFunction; -} - -std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() { - return IdleThreadFunction; -} - -std::function<void(void*)> CpuManager::GetShutdownThreadStartFunc() { - return ShutdownThreadFunction; -} - -void CpuManager::GuestThreadFunction(void* cpu_manager_) { - CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_); - if (cpu_manager->is_multicore) { - cpu_manager->MultiCoreRunGuestThread(); +void CpuManager::GuestThreadFunction() { + if (is_multicore) { + MultiCoreRunGuestThread(); } else { - cpu_manager->SingleCoreRunGuestThread(); + SingleCoreRunGuestThread(); } } -void CpuManager::GuestRewindFunction(void* cpu_manager_) { - CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_); - if (cpu_manager->is_multicore) { - cpu_manager->MultiCoreRunGuestLoop(); +void CpuManager::GuestRewindFunction() { + if (is_multicore) { + MultiCoreRunGuestLoop(); } else { - cpu_manager->SingleCoreRunGuestLoop(); + SingleCoreRunGuestLoop(); } } -void CpuManager::IdleThreadFunction(void* cpu_manager_) { - CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_); - if (cpu_manager->is_multicore) { - cpu_manager->MultiCoreRunIdleThread(); +void CpuManager::IdleThreadFunction() { + if (is_multicore) { + MultiCoreRunIdleThread(); } else { - cpu_manager->SingleCoreRunIdleThread(); + SingleCoreRunIdleThread(); } } -void CpuManager::ShutdownThreadFunction(void* cpu_manager) { - static_cast<CpuManager*>(cpu_manager)->ShutdownThread(); -} - -void* CpuManager::GetStartFuncParameter() { - return this; +void CpuManager::ShutdownThreadFunction() { + ShutdownThread(); } /////////////////////////////////////////////////////////////////////////////// @@ -97,7 +78,7 @@ void CpuManager::MultiCoreRunGuestThread() { kernel.CurrentScheduler()->OnThreadStart(); auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); auto& host_context = thread->GetHostContext(); - host_context->SetRewindPoint(GuestRewindFunction, this); + host_context->SetRewindPoint([this] { GuestRewindFunction(); }); MultiCoreRunGuestLoop(); } @@ -134,7 +115,7 @@ void CpuManager::SingleCoreRunGuestThread() { kernel.CurrentScheduler()->OnThreadStart(); auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); auto& host_context = thread->GetHostContext(); - host_context->SetRewindPoint(GuestRewindFunction, this); + 
host_context->SetRewindPoint([this] { GuestRewindFunction(); }); SingleCoreRunGuestLoop(); } @@ -194,7 +175,9 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) { { auto& scheduler = system.Kernel().Scheduler(current_core); scheduler.Reload(scheduler.GetSchedulerCurrentThread()); - idle_count = 0; + if (!scheduler.IsIdle()) { + idle_count = 0; + } } } diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h index f0751fc58..76dc58ee1 100644 --- a/src/core/cpu_manager.h +++ b/src/core/cpu_manager.h @@ -50,10 +50,15 @@ public: void Initialize(); void Shutdown(); - static std::function<void(void*)> GetGuestThreadStartFunc(); - static std::function<void(void*)> GetIdleThreadStartFunc(); - static std::function<void(void*)> GetShutdownThreadStartFunc(); - void* GetStartFuncParameter(); + std::function<void()> GetGuestThreadStartFunc() { + return [this] { GuestThreadFunction(); }; + } + std::function<void()> GetIdleThreadStartFunc() { + return [this] { IdleThreadFunction(); }; + } + std::function<void()> GetShutdownThreadStartFunc() { + return [this] { ShutdownThreadFunction(); }; + } void PreemptSingleCore(bool from_running_enviroment = true); @@ -62,10 +67,10 @@ public: } private: - static void GuestThreadFunction(void* cpu_manager); - static void GuestRewindFunction(void* cpu_manager); - static void IdleThreadFunction(void* cpu_manager); - static void ShutdownThreadFunction(void* cpu_manager); + void GuestThreadFunction(); + void GuestRewindFunction(); + void IdleThreadFunction(); + void ShutdownThreadFunction(); void MultiCoreRunGuestThread(); void MultiCoreRunGuestLoop(); diff --git a/src/core/debugger/gdbstub_arch.cpp b/src/core/debugger/gdbstub_arch.cpp index 750c353b9..4bef09bd7 100644 --- a/src/core/debugger/gdbstub_arch.cpp +++ b/src/core/debugger/gdbstub_arch.cpp @@ -191,8 +191,10 @@ std::string GDBStubA64::RegRead(const Kernel::KThread* thread, size_t id) const const auto& gprs{context.cpu_registers}; const auto& fprs{context.vector_registers}; - if (id <= SP_REGISTER) { + if (id < SP_REGISTER) { return ValueToHex(gprs[id]); + } else if (id == SP_REGISTER) { + return ValueToHex(context.sp); } else if (id == PC_REGISTER) { return ValueToHex(context.pc); } else if (id == PSTATE_REGISTER) { @@ -215,8 +217,10 @@ void GDBStubA64::RegWrite(Kernel::KThread* thread, size_t id, std::string_view v auto& context{thread->GetContext64()}; - if (id <= SP_REGISTER) { + if (id < SP_REGISTER) { context.cpu_registers[id] = HexToValue<u64>(value); + } else if (id == SP_REGISTER) { + context.sp = HexToValue<u64>(value); } else if (id == PC_REGISTER) { context.pc = HexToValue<u64>(value); } else if (id == PSTATE_REGISTER) { diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index d586b3f5c..d599d2bcb 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -622,7 +622,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { } KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} { - switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this); + switch_fiber = std::make_shared<Common::Fiber>([this] { SwitchToCurrent(); }); state.needs_scheduling.store(true); state.interrupt_task_thread_runnable = false; state.should_count_idle = false; @@ -778,11 +778,6 @@ void KScheduler::ScheduleImpl() { next_scheduler.SwitchContextStep2(); } -void KScheduler::OnSwitch(void* this_scheduler) { - KScheduler* sched = static_cast<KScheduler*>(this_scheduler); - 
sched->SwitchToCurrent(); -} - void KScheduler::SwitchToCurrent() { while (true) { { diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 3f90656ee..6a4760eca 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -55,6 +55,11 @@ public: return idle_thread; } + /// Returns true if the scheduler is idle + [[nodiscard]] bool IsIdle() const { + return GetSchedulerCurrentThread() == idle_thread; + } + /// Gets the timestamp for the last context switch in ticks. [[nodiscard]] u64 GetLastContextSwitchTicks() const; @@ -165,7 +170,6 @@ private: */ void UpdateLastContextSwitchTime(KThread* thread, KProcess* process); - static void OnSwitch(void* this_scheduler); void SwitchToCurrent(); KThread* prev_thread{}; diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 8d7faa662..23bf7425a 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -246,14 +246,12 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, s32 core, KProcess* owner, - ThreadType type, std::function<void(void*)>&& init_func, - void* init_func_parameter) { + ThreadType type, std::function<void()>&& init_func) { // Initialize the thread. R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); // Initialize emulation parameters. - thread->host_context = - std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter); + thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); thread->is_single_core = !Settings::values.use_multi_core.GetValue(); return ResultSuccess; @@ -265,15 +263,13 @@ Result KThread::InitializeDummyThread(KThread* thread) { Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, - Core::CpuManager::GetIdleThreadStartFunc(), - system.GetCpuManager().GetStartFuncParameter()); + system.GetCpuManager().GetIdleThreadStartFunc()); } Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, KThreadFunction func, uintptr_t arg, s32 virt_core) { return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, - Core::CpuManager::GetShutdownThreadStartFunc(), - system.GetCpuManager().GetStartFuncParameter()); + system.GetCpuManager().GetShutdownThreadStartFunc()); } Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, @@ -281,8 +277,7 @@ Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThr KProcess* owner) { system.Kernel().GlobalSchedulerContext().AddThread(thread); return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, - ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(), - system.GetCpuManager().GetStartFuncParameter()); + ThreadType::User, system.GetCpuManager().GetGuestThreadStartFunc()); } void KThread::PostDestroy(uintptr_t arg) { diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 94c4cd1c8..28cd7ecb0 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -729,8 +729,7 @@ private: [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, s32 core, 
KProcess* owner, ThreadType type, - std::function<void(void*)>&& init_func, - void* init_func_parameter); + std::function<void()>&& init_func); static void RestorePriority(KernelCore& kernel_ctx, KThread* thread); diff --git a/src/tests/common/fibers.cpp b/src/tests/common/fibers.cpp index cfc84d423..4e29f9199 100644 --- a/src/tests/common/fibers.cpp +++ b/src/tests/common/fibers.cpp @@ -43,7 +43,15 @@ class TestControl1 { public: TestControl1() = default; - void DoWork(); + void DoWork() { + const u32 id = thread_ids.Get(); + u32 value = items[id]; + for (u32 i = 0; i < id; i++) { + value++; + } + results[id] = value; + Fiber::YieldTo(work_fibers[id], *thread_fibers[id]); + } void ExecuteThread(u32 id); @@ -54,35 +62,16 @@ public: std::vector<u32> results; }; -static void WorkControl1(void* control) { - auto* test_control = static_cast<TestControl1*>(control); - test_control->DoWork(); -} - -void TestControl1::DoWork() { - const u32 id = thread_ids.Get(); - u32 value = items[id]; - for (u32 i = 0; i < id; i++) { - value++; - } - results[id] = value; - Fiber::YieldTo(work_fibers[id], *thread_fibers[id]); -} - void TestControl1::ExecuteThread(u32 id) { thread_ids.Register(id); auto thread_fiber = Fiber::ThreadToFiber(); thread_fibers[id] = thread_fiber; - work_fibers[id] = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl1}, this); + work_fibers[id] = std::make_shared<Fiber>([this] { DoWork(); }); items[id] = rand() % 256; Fiber::YieldTo(thread_fibers[id], *work_fibers[id]); thread_fibers[id]->Exit(); } -static void ThreadStart1(u32 id, TestControl1& test_control) { - test_control.ExecuteThread(id); -} - /** This test checks for fiber setup configuration and validates that fibers are * doing all the work required. */ @@ -95,7 +84,7 @@ TEST_CASE("Fibers::Setup", "[common]") { test_control.results.resize(num_threads, 0); std::vector<std::thread> threads; for (u32 i = 0; i < num_threads; i++) { - threads.emplace_back(ThreadStart1, i, std::ref(test_control)); + threads.emplace_back([&test_control, i] { test_control.ExecuteThread(i); }); } for (u32 i = 0; i < num_threads; i++) { threads[i].join(); @@ -167,21 +156,6 @@ public: std::shared_ptr<Common::Fiber> fiber3; }; -static void WorkControl2_1(void* control) { - auto* test_control = static_cast<TestControl2*>(control); - test_control->DoWork1(); -} - -static void WorkControl2_2(void* control) { - auto* test_control = static_cast<TestControl2*>(control); - test_control->DoWork2(); -} - -static void WorkControl2_3(void* control) { - auto* test_control = static_cast<TestControl2*>(control); - test_control->DoWork3(); -} - void TestControl2::ExecuteThread(u32 id) { thread_ids.Register(id); auto thread_fiber = Fiber::ThreadToFiber(); @@ -193,18 +167,6 @@ void TestControl2::Exit() { thread_fibers[id]->Exit(); } -static void ThreadStart2_1(u32 id, TestControl2& test_control) { - test_control.ExecuteThread(id); - test_control.CallFiber1(); - test_control.Exit(); -} - -static void ThreadStart2_2(u32 id, TestControl2& test_control) { - test_control.ExecuteThread(id); - test_control.CallFiber2(); - test_control.Exit(); -} - /** This test checks for fiber thread exchange configuration and validates that fibers are * that a fiber has been successfully transferred from one thread to another and that the TLS * region of the thread is kept while changing fibers. 
@@ -212,14 +174,19 @@ static void ThreadStart2_2(u32 id, TestControl2& test_control) { TEST_CASE("Fibers::InterExchange", "[common]") { TestControl2 test_control{}; test_control.thread_fibers.resize(2); - test_control.fiber1 = - std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_1}, &test_control); - test_control.fiber2 = - std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_2}, &test_control); - test_control.fiber3 = - std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_3}, &test_control); - std::thread thread1(ThreadStart2_1, 0, std::ref(test_control)); - std::thread thread2(ThreadStart2_2, 1, std::ref(test_control)); + test_control.fiber1 = std::make_shared<Fiber>([&test_control] { test_control.DoWork1(); }); + test_control.fiber2 = std::make_shared<Fiber>([&test_control] { test_control.DoWork2(); }); + test_control.fiber3 = std::make_shared<Fiber>([&test_control] { test_control.DoWork3(); }); + std::thread thread1{[&test_control] { + test_control.ExecuteThread(0); + test_control.CallFiber1(); + test_control.Exit(); + }}; + std::thread thread2{[&test_control] { + test_control.ExecuteThread(1); + test_control.CallFiber2(); + test_control.Exit(); + }}; thread1.join(); thread2.join(); REQUIRE(test_control.assert1); @@ -270,16 +237,6 @@ public: std::shared_ptr<Common::Fiber> fiber2; }; -static void WorkControl3_1(void* control) { - auto* test_control = static_cast<TestControl3*>(control); - test_control->DoWork1(); -} - -static void WorkControl3_2(void* control) { - auto* test_control = static_cast<TestControl3*>(control); - test_control->DoWork2(); -} - void TestControl3::ExecuteThread(u32 id) { thread_ids.Register(id); auto thread_fiber = Fiber::ThreadToFiber(); @@ -291,12 +248,6 @@ void TestControl3::Exit() { thread_fibers[id]->Exit(); } -static void ThreadStart3(u32 id, TestControl3& test_control) { - test_control.ExecuteThread(id); - test_control.CallFiber1(); - test_control.Exit(); -} - /** This test checks for one two threads racing for starting the same fiber. * It checks execution occurred in an ordered manner and by no time there were * two contexts at the same time. 
@@ -304,12 +255,15 @@ static void ThreadStart3(u32 id, TestControl3& test_control) { TEST_CASE("Fibers::StartRace", "[common]") { TestControl3 test_control{}; test_control.thread_fibers.resize(2); - test_control.fiber1 = - std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_1}, &test_control); - test_control.fiber2 = - std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_2}, &test_control); - std::thread thread1(ThreadStart3, 0, std::ref(test_control)); - std::thread thread2(ThreadStart3, 1, std::ref(test_control)); + test_control.fiber1 = std::make_shared<Fiber>([&test_control] { test_control.DoWork1(); }); + test_control.fiber2 = std::make_shared<Fiber>([&test_control] { test_control.DoWork2(); }); + const auto race_function{[&test_control](u32 id) { + test_control.ExecuteThread(id); + test_control.CallFiber1(); + test_control.Exit(); + }}; + std::thread thread1([&] { race_function(0); }); + std::thread thread2([&] { race_function(1); }); thread1.join(); thread2.join(); REQUIRE(test_control.value1 == 1); @@ -319,12 +273,10 @@ TEST_CASE("Fibers::StartRace", "[common]") { class TestControl4; -static void WorkControl4(void* control); - class TestControl4 { public: TestControl4() { - fiber1 = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl4}, this); + fiber1 = std::make_shared<Fiber>([this] { DoWork(); }); goal_reached = false; rewinded = false; } @@ -336,7 +288,7 @@ public: } void DoWork() { - fiber1->SetRewindPoint(std::function<void(void*)>{WorkControl4}, this); + fiber1->SetRewindPoint([this] { DoWork(); }); if (rewinded) { goal_reached = true; Fiber::YieldTo(fiber1, *thread_fiber); @@ -351,11 +303,6 @@ public: bool rewinded; }; -static void WorkControl4(void* control) { - auto* test_control = static_cast<TestControl4*>(control); - test_control->DoWork(); -} - TEST_CASE("Fibers::Rewind", "[common]") { TestControl4 test_control{}; test_control.Execute(); diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp index 8358d36b5..e687416a8 100644 --- a/src/tests/core/core_timing.cpp +++ b/src/tests/core/core_timing.cpp @@ -8,6 +8,7 @@ #include <chrono> #include <cstdlib> #include <memory> +#include <mutex> #include <string> #include "core/core.h" @@ -21,13 +22,14 @@ std::array<s64, 5> delays{}; std::bitset<CB_IDS.size()> callbacks_ran_flags; u64 expected_callback = 0; +std::mutex control_mutex; template <unsigned int IDX> void HostCallbackTemplate(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) { + std::unique_lock<std::mutex> lk(control_mutex); static_assert(IDX < CB_IDS.size(), "IDX out of range"); callbacks_ran_flags.set(IDX); REQUIRE(CB_IDS[IDX] == user_data); - REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]); delays[IDX] = ns_late.count(); ++expected_callback; } diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp index 11ce865a7..743ac09f6 100644 --- a/src/video_core/vulkan_common/vulkan_device.cpp +++ b/src/video_core/vulkan_common/vulkan_device.cpp @@ -669,17 +669,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR const bool is_amd = driver_id == VK_DRIVER_ID_AMD_PROPRIETARY || driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE; if (is_amd) { - // TODO(lat9nq): Add an upper bound when AMD fixes their VK_KHR_push_descriptor - const bool has_broken_push_descriptor = VK_VERSION_MAJOR(properties.driverVersion) == 2 && - VK_VERSION_MINOR(properties.driverVersion) == 0 && - VK_VERSION_PATCH(properties.driverVersion) >= 226; - 
if (khr_push_descriptor && has_broken_push_descriptor) { - LOG_WARNING( - Render_Vulkan, - "Disabling AMD driver 2.0.226 and later from broken VK_KHR_push_descriptor"); - khr_push_descriptor = false; - } - // AMD drivers need a higher amount of Sets per Pool in certain circunstances like in XC2. sets_per_pool = 96; // Disable VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT on AMD GCN4 and lower as it is broken. diff --git a/src/yuzu/applets/qt_web_browser.cpp b/src/yuzu/applets/qt_web_browser.cpp index 283c04cd5..790edbb2a 100644 --- a/src/yuzu/applets/qt_web_browser.cpp +++ b/src/yuzu/applets/qt_web_browser.cpp @@ -52,8 +52,8 @@ QtNXWebEngineView::QtNXWebEngineView(QWidget* parent, Core::System& system, : QWebEngineView(parent), input_subsystem{input_subsystem_}, url_interceptor(std::make_unique<UrlRequestInterceptor>()), input_interpreter(std::make_unique<InputInterpreter>(system)), - default_profile{QWebEngineProfile::defaultProfile()}, - global_settings{QWebEngineSettings::globalSettings()} { + default_profile{QWebEngineProfile::defaultProfile()}, global_settings{ + default_profile->settings()} { default_profile->setPersistentStoragePath(QString::fromStdString(Common::FS::PathToUTF8String( Common::FS::GetYuzuPath(Common::FS::YuzuPath::YuzuDir) / "qtwebengine"))); @@ -78,7 +78,7 @@ QtNXWebEngineView::QtNXWebEngineView(QWidget* parent, Core::System& system, default_profile->scripts()->insert(gamepad); default_profile->scripts()->insert(window_nx); - default_profile->setRequestInterceptor(url_interceptor.get()); + default_profile->setUrlRequestInterceptor(url_interceptor.get()); global_settings->setAttribute(QWebEngineSettings::LocalContentCanAccessRemoteUrls, true); global_settings->setAttribute(QWebEngineSettings::FullScreenSupportEnabled, true); |
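Editor's note: the final hunk moves off `QWebEngineSettings::globalSettings()` and `QWebEngineProfile::setRequestInterceptor()`, which are deprecated or removed in newer Qt WebEngine releases, in favour of the per-profile APIs. A minimal sketch of the replacement pattern, assuming Qt 5.13 or later (where `setUrlRequestInterceptor()` exists); the interceptor class is hypothetical:

```cpp
#include <QWebEngineProfile>
#include <QWebEngineSettings>
#include <QWebEngineUrlRequestInfo>
#include <QWebEngineUrlRequestInterceptor>

// Hypothetical interceptor; the patch installs yuzu's UrlRequestInterceptor instead.
class ExampleInterceptor : public QWebEngineUrlRequestInterceptor {
public:
    void interceptRequest(QWebEngineUrlRequestInfo& info) override { Q_UNUSED(info); }
};

void ConfigureProfile(ExampleInterceptor* interceptor) {
    QWebEngineProfile* profile = QWebEngineProfile::defaultProfile();

    // Old: QWebEngineSettings::globalSettings() (removed in Qt 6) and
    //      profile->setRequestInterceptor() (deprecated since Qt 5.13).
    // New: read settings from the profile and install the interceptor per profile.
    QWebEngineSettings* settings = profile->settings();
    settings->setAttribute(QWebEngineSettings::FullScreenSupportEnabled, true);
    profile->setUrlRequestInterceptor(interceptor);
}
```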