Diffstat (limited to 'src/core')
-rw-r--r--  src/core/arm/arm_interface.h                                 |  3
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp                    | 37
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.h                      |  4
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp                    | 38
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.h                      |  4
-rw-r--r--  src/core/hle/kernel/kernel.cpp                               | 16
-rw-r--r--  src/core/hle/kernel/physical_core.cpp                        |  1
-rw-r--r--  src/core/hle/kernel/time_manager.cpp                         |  4
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp                |  2
-rw-r--r--  src/core/hle/service/nvflinger/buffer_item_consumer.cpp      |  4
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_consumer.cpp     |  8
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_core.cpp         |  2
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.cpp     | 28
-rw-r--r--  src/core/hle/service/nvflinger/consumer_base.cpp             |  4
-rw-r--r--  src/core/hle/service/nvflinger/graphic_buffer_producer.cpp   |  2
-rw-r--r--  src/core/hle/service/nvflinger/hos_binder_driver_server.cpp  |  4
-rw-r--r--  src/core/perf_stats.cpp                                      | 10
-rw-r--r--  src/core/tools/freezer.cpp                                   | 18
18 files changed, 92 insertions(+), 97 deletions(-)
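Note on the dynarmic hunks below: the per-core booleans svc_called/shutdown are replaced by dynarmic's HaltReason bitmask, so a single Jit::Run() call can report an SVC and an external interrupt at the same time. The following sketch restates that pattern outside the diff; it is illustrative only. CoreSketch and HandleSvc are made-up names, and the include paths are assumptions that vary between dynarmic versions, while HaltReason, Has(), Run() and HaltExecution() are the dynarmic APIs actually used in the diff.

    // Illustrative sketch only (not yuzu code).
    #include <cstdint>
    #include <dynarmic/interface/A64/a64.h>     // Dynarmic::A64::Jit (path assumed)
    #include <dynarmic/interface/halt_reason.h> // Dynarmic::HaltReason, Has() (path assumed)

    // Reuse two user-defined halt bits, mirroring the constants added in the diff.
    constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
    constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;

    struct CoreSketch {
        Dynarmic::A64::Jit* jit{};
        std::uint32_t svc_swi{};

        void Run() {
            while (true) {
                // Run() returns the accumulated halt reasons instead of relying on side flags.
                const Dynarmic::HaltReason hr = jit->Run();
                if (Has(hr, svc_call)) {
                    HandleSvc(svc_swi); // stand-in for Kernel::Svc::Call(system, svc_swi)
                }
                if (Has(hr, break_loop)) {
                    break; // requested by PrepareReschedule()/SignalInterrupt()
                }
            }
        }

        // Safe to call from another thread; mirrors ARM_Dynarmic_*::SignalInterrupt().
        void SignalInterrupt() {
            jit->HaltExecution(break_loop);
        }

        void HandleSvc(std::uint32_t swi); // hypothetical SVC dispatcher
    };

With this in place, PhysicalCore::Interrupt() only has to call SignalInterrupt() on the active ARM interface, which is exactly what the physical_core.cpp hunk adds.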
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index c60322442..dce2f4195 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -171,6 +171,9 @@ public:
     /// Prepare core for thread reschedule (if needed to correctly handle state)
     virtual void PrepareReschedule() = 0;
 
+    /// Signal an interrupt and ask the core to halt as soon as possible.
+    virtual void SignalInterrupt() = 0;
+
     struct BacktraceEntry {
         std::string module;
         u64 address;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 054572445..ab3210d84 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -25,6 +25,9 @@ namespace Core {
 
 using namespace Common::Literals;
 
+constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
+constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
+
 class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
 public:
     explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent_)
@@ -84,15 +87,13 @@ public:
     }
 
     void CallSVC(u32 swi) override {
-        parent.svc_called = true;
         parent.svc_swi = swi;
-        parent.jit->HaltExecution();
+        parent.jit->HaltExecution(svc_call);
     }
 
     void AddTicks(u64 ticks) override {
-        if (parent.uses_wall_clock) {
-            return;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
         // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
         // rough approximation of the amount of executed ticks in the system, it may be thrown off
         // if not all cores are doing a similar amount of work. Instead of doing this, we should
@@ -108,12 +109,8 @@ public:
     }
 
     u64 GetTicksRemaining() override {
-        if (parent.uses_wall_clock) {
-            if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return minimum_run_cycles;
-            }
-            return 0U;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
         return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }
 
@@ -148,6 +145,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
 
     // Timing
     config.wall_clock_cntpct = uses_wall_clock;
+    config.enable_cycle_counting = !uses_wall_clock;
 
     // Code cache size
     config.code_cache_size = 512_MiB;
@@ -230,13 +228,11 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
 void ARM_Dynarmic_32::Run() {
     while (true) {
-        jit->Run();
-        if (!svc_called) {
-            break;
+        const auto hr = jit->Run();
+        if (Has(hr, svc_call)) {
+            Kernel::Svc::Call(system, svc_swi);
         }
-        svc_called = false;
-        Kernel::Svc::Call(system, svc_swi);
-        if (shutdown) {
+        if (Has(hr, break_loop)) {
             break;
         }
     }
 }
@@ -322,8 +318,11 @@ void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
 }
 
 void ARM_Dynarmic_32::PrepareReschedule() {
-    jit->HaltExecution();
-    shutdown = true;
+    jit->HaltExecution(break_loop);
+}
+
+void ARM_Dynarmic_32::SignalInterrupt() {
+    jit->HaltExecution(break_loop);
 }
 
 void ARM_Dynarmic_32::ClearInstructionCache() {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index 5d47b600d..3f68a4ff1 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -57,6 +57,7 @@ public:
     void LoadContext(const ThreadContext64& ctx) override {}
 
     void PrepareReschedule() override;
+    void SignalInterrupt() override;
 
     void ClearExclusiveState() override;
     void ClearInstructionCache() override;
@@ -83,9 +84,6 @@ private:
 
     // SVC callback
     u32 svc_swi{};
-    bool svc_called{};
-
-    bool shutdown{};
 };
 
 } // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 7ff8f9495..68822a1fc 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -26,6 +26,9 @@ namespace Core {
 using Vector = Dynarmic::A64::Vector;
 using namespace Common::Literals;
 
+constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
+constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
+
 class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
 public:
     explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent_)
@@ -106,7 +109,7 @@ public:
             break;
         }
 
-        parent.jit->HaltExecution();
+        parent.jit->HaltExecution(Dynarmic::HaltReason::CacheInvalidation);
     }
 
     void ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) override {
@@ -126,15 +129,12 @@ public:
     }
 
     void CallSVC(u32 swi) override {
-        parent.svc_called = true;
         parent.svc_swi = swi;
-        parent.jit->HaltExecution();
+        parent.jit->HaltExecution(svc_call);
     }
 
     void AddTicks(u64 ticks) override {
-        if (parent.uses_wall_clock) {
-            return;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
 
         // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
         // rough approximation of the amount of executed ticks in the system, it may be thrown off
@@ -149,12 +149,8 @@ public:
     }
 
     u64 GetTicksRemaining() override {
-        if (parent.uses_wall_clock) {
-            if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
-                return minimum_run_cycles;
-            }
-            return 0U;
-        }
+        ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
         return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }
 
@@ -210,6 +206,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
 
     // Timing
     config.wall_clock_cntpct = uses_wall_clock;
+    config.enable_cycle_counting = !uses_wall_clock;
 
     // Code cache size
     config.code_cache_size = 512_MiB;
@@ -292,13 +289,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
 void ARM_Dynarmic_64::Run() {
     while (true) {
-        jit->Run();
-        if (!svc_called) {
-            break;
+        const auto hr = jit->Run();
+        if (Has(hr, svc_call)) {
+            Kernel::Svc::Call(system, svc_swi);
         }
-        svc_called = false;
-        Kernel::Svc::Call(system, svc_swi);
-        if (shutdown) {
+        if (Has(hr, break_loop)) {
             break;
         }
     }
 }
@@ -389,8 +384,11 @@ void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
 }
 
 void ARM_Dynarmic_64::PrepareReschedule() {
-    jit->HaltExecution();
-    shutdown = true;
+    jit->HaltExecution(break_loop);
+}
+
+void ARM_Dynarmic_64::SignalInterrupt() {
+    jit->HaltExecution(break_loop);
 }
 
 void ARM_Dynarmic_64::ClearInstructionCache() {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 0c4e46c64..58bc7fbec 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -51,6 +51,7 @@ public:
     void LoadContext(const ThreadContext64& ctx) override;
 
     void PrepareReschedule() override;
+    void SignalInterrupt() override;
 
     void ClearExclusiveState() override;
     void ClearInstructionCache() override;
@@ -77,9 +78,6 @@ private:
 
     // SVC callback
    u32 svc_swi{};
-    bool svc_called{};
-
-    bool shutdown{};
 };
 
 } // namespace Core
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 6387d0c29..134a0b8e9 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -99,7 +99,7 @@ struct KernelCore::Impl {
         // Close all open server ports.
         std::unordered_set<KServerPort*> server_ports_;
         {
-            std::lock_guard lk(server_ports_lock);
+            std::scoped_lock lk{server_ports_lock};
             server_ports_ = server_ports;
             server_ports.clear();
         }
@@ -157,7 +157,7 @@ struct KernelCore::Impl {
 
         // Close kernel objects that were not freed on shutdown
         {
-            std::lock_guard lk(registered_in_use_objects_lock);
+            std::scoped_lock lk{registered_in_use_objects_lock};
             if (registered_in_use_objects.size()) {
                 for (auto& object : registered_in_use_objects) {
                     object->Close();
@@ -178,7 +178,7 @@ struct KernelCore::Impl {
 
         // Track kernel objects that were not freed on shutdown
         {
-            std::lock_guard lk(registered_objects_lock);
+            std::scoped_lock lk{registered_objects_lock};
             if (registered_objects.size()) {
                 LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
                           registered_objects.size());
@@ -660,7 +660,7 @@ struct KernelCore::Impl {
         KClientPort* port = &search->second(system.ServiceManager(), system);
 
         {
-            std::lock_guard lk(server_ports_lock);
+            std::scoped_lock lk{server_ports_lock};
             server_ports.insert(&port->GetParent()->GetServerPort());
         }
         return port;
@@ -929,22 +929,22 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
 }
 
 void KernelCore::RegisterKernelObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_objects_lock);
+    std::scoped_lock lk{impl->registered_objects_lock};
     impl->registered_objects.insert(object);
 }
 
 void KernelCore::UnregisterKernelObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_objects_lock);
+    std::scoped_lock lk{impl->registered_objects_lock};
     impl->registered_objects.erase(object);
 }
 
 void KernelCore::RegisterInUseObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_in_use_objects_lock);
+    std::scoped_lock lk{impl->registered_in_use_objects_lock};
     impl->registered_in_use_objects.insert(object);
 }
 
 void KernelCore::UnregisterInUseObject(KAutoObject* object) {
-    std::lock_guard lk(impl->registered_in_use_objects_lock);
+    std::scoped_lock lk{impl->registered_in_use_objects_lock};
     impl->registered_in_use_objects.erase(object);
 }
 
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 7477668e4..18a5f40f8 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -58,6 +58,7 @@ bool PhysicalCore::IsInterrupted() const {
 
 void PhysicalCore::Interrupt() {
     guard->lock();
     interrupts[core_index].SetInterrupt(true);
+    arm_interface->SignalInterrupt();
     guard->unlock();
 }
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index aa985d820..5b8fe8eae 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -24,7 +24,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
 }
 
 void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
-    std::lock_guard lock{mutex};
+    std::scoped_lock lock{mutex};
     if (nanoseconds > 0) {
         ASSERT(thread);
         ASSERT(thread->GetState() != ThreadState::Runnable);
@@ -35,7 +35,7 @@ void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
 }
 
 void TimeManager::UnscheduleTimeEvent(KThread* thread) {
-    std::lock_guard lock{mutex};
+    std::scoped_lock lock{mutex};
     system.CoreTiming().UnscheduleEvent(time_manager_event_type,
                                         reinterpret_cast<uintptr_t>(thread));
 }
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index aa6cb34b7..4e17a952e 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -318,7 +318,7 @@ void Controller_NPad::OnRelease() {
 }
 
 void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
-    std::lock_guard lock{mutex};
+    std::scoped_lock lock{mutex};
     auto& controller = GetControllerFromNpadIdType(npad_id);
     const auto controller_type = controller.device->GetNpadStyleIndex();
     if (!controller.device->IsConnected()) {
diff --git a/src/core/hle/service/nvflinger/buffer_item_consumer.cpp b/src/core/hle/service/nvflinger/buffer_item_consumer.cpp
index 93fa1ec10..d7ee5362b 100644
--- a/src/core/hle/service/nvflinger/buffer_item_consumer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_item_consumer.cpp
@@ -21,7 +21,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
         return Status::BadValue;
     }
 
-    std::scoped_lock lock(mutex);
+    std::scoped_lock lock{mutex};
 
     if (const auto status = AcquireBufferLocked(item, present_when); status != Status::NoError) {
         if (status != Status::NoBufferAvailable) {
@@ -40,7 +40,7 @@
 }
 
 Status BufferItemConsumer::ReleaseBuffer(const BufferItem& item, Fence& release_fence) {
-    std::scoped_lock lock(mutex);
+    std::scoped_lock lock{mutex};
 
     if (const auto status = AddReleaseFenceLocked(item.buf, item.graphic_buffer, release_fence);
         status != Status::NoError) {
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
index c527c577e..3ab9a8c05 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
@@ -19,7 +19,7 @@ BufferQueueConsumer::~BufferQueueConsumer() = default;
 
 Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
                                           std::chrono::nanoseconds expected_present) {
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     // Check that the consumer doesn't currently have the maximum number of buffers acquired.
     const s32 num_acquired_buffers{
@@ -120,7 +120,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
 
     std::shared_ptr<IProducerListener> listener;
     {
-        std::scoped_lock lock(core->mutex);
+        std::scoped_lock lock{core->mutex};
 
         // If the frame number has changed because the buffer has been reallocated, we can ignore
         // this ReleaseBuffer for the old buffer.
@@ -180,7 +180,7 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_
 
     LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app);
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -199,7 +199,7 @@ Status BufferQueueConsumer::GetReleasedBuffers(u64* out_slot_mask) {
         return Status::BadValue;
     }
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
diff --git a/src/core/hle/service/nvflinger/buffer_queue_core.cpp b/src/core/hle/service/nvflinger/buffer_queue_core.cpp
index 3a0481786..ec5aabaeb 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_core.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_core.cpp
@@ -15,7 +15,7 @@ BufferQueueCore::BufferQueueCore() = default;
 BufferQueueCore::~BufferQueueCore() = default;
 
 void BufferQueueCore::NotifyShutdown() {
-    std::scoped_lock lock(mutex);
+    std::scoped_lock lock{mutex};
 
     is_shutting_down = true;
 
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index 3d6e990c3..6f604a88e 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -38,7 +38,7 @@ BufferQueueProducer::~BufferQueueProducer() {
 Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) {
     LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -65,7 +65,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
 
     std::shared_ptr<IConsumerListener> listener;
     {
-        std::scoped_lock lock(core->mutex);
+        std::scoped_lock lock{core->mutex};
         core->WaitWhileAllocatingLocked();
 
         if (core->is_abandoned) {
@@ -236,7 +236,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
     Status return_flags = Status::NoError;
     bool attached_by_consumer = false;
     {
-        std::scoped_lock lock(core->mutex);
+        std::scoped_lock lock{core->mutex};
         core->WaitWhileAllocatingLocked();
 
         if (format == PixelFormat::NoFormat) {
@@ -295,7 +295,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
     }
 
     {
-        std::scoped_lock lock(core->mutex);
+        std::scoped_lock lock{core->mutex};
 
         if (core->is_abandoned) {
             LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -320,7 +320,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
 Status BufferQueueProducer::DetachBuffer(s32 slot) {
     LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -356,7 +356,7 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
         return Status::BadValue;
     }
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
     core->WaitWhileAllocatingLocked();
 
     if (core->is_abandoned) {
@@ -399,7 +399,7 @@ Status BufferQueueProducer::AttachBuffer(s32* out_slot,
         return Status::BadValue;
     }
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
     core->WaitWhileAllocatingLocked();
 
     Status return_flags = Status::NoError;
@@ -460,7 +460,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
 
     BufferItem item;
     {
-        std::scoped_lock lock(core->mutex);
+        std::scoped_lock lock{core->mutex};
 
         if (core->is_abandoned) {
             LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -576,7 +576,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
     // Call back without the main BufferQueue lock held, but with the callback lock held so we can
    // ensure that callbacks occur in order
     {
-        std::scoped_lock lock(callback_mutex);
+        std::scoped_lock lock{callback_mutex};
         while (callback_ticket != current_callback_ticket) {
             callback_condition.wait(callback_mutex);
         }
@@ -597,7 +597,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
 void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
     LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -623,7 +623,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
 }
 
 Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     if (out_value == nullptr) {
         LOG_ERROR(Service_NVFlinger, "outValue was nullptr");
@@ -673,7 +673,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
 Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener,
                                     NativeWindowApi api, bool producer_controlled_by_app,
                                     QueueBufferOutput* output) {
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api,
               producer_controlled_by_app);
@@ -730,7 +730,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
 
     std::shared_ptr<IConsumerListener> listener;
     {
-        std::scoped_lock lock(core->mutex);
+        std::scoped_lock lock{core->mutex};
 
         core->WaitWhileAllocatingLocked();
 
@@ -780,7 +780,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
         return Status::BadValue;
     }
 
-    std::scoped_lock lock(core->mutex);
+    std::scoped_lock lock{core->mutex};
 
     slots[slot] = {};
     slots[slot].graphic_buffer = buffer;
diff --git a/src/core/hle/service/nvflinger/consumer_base.cpp b/src/core/hle/service/nvflinger/consumer_base.cpp
index c2c80832c..30fc21acc 100644
--- a/src/core/hle/service/nvflinger/consumer_base.cpp
+++ b/src/core/hle/service/nvflinger/consumer_base.cpp
@@ -18,7 +18,7 @@ ConsumerBase::ConsumerBase(std::unique_ptr<BufferQueueConsumer> consumer_)
     : consumer{std::move(consumer_)} {}
 
 ConsumerBase::~ConsumerBase() {
-    std::scoped_lock lock(mutex);
+    std::scoped_lock lock{mutex};
 
     ASSERT_MSG(is_abandoned, "consumer is not abandoned!");
 }
@@ -44,7 +44,7 @@ void ConsumerBase::OnFrameReplaced(const BufferItem& item) {
 }
 
 void ConsumerBase::OnBuffersReleased() {
-    std::scoped_lock lock(mutex);
+    std::scoped_lock lock{mutex};
 
     LOG_DEBUG(Service_NVFlinger, "called");
 
diff --git a/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp b/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp
index d4da98ddb..04068827e 100644
--- a/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp
+++ b/src/core/hle/service/nvflinger/graphic_buffer_producer.cpp
@@ -4,8 +4,6 @@
 // Parts of this implementation were base on:
 // https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/IGraphicBufferProducer.cpp
 
-#pragma once
-
 #include "core/hle/service/nvflinger/graphic_buffer_producer.h"
 #include "core/hle/service/nvflinger/parcel.h"
 
diff --git a/src/core/hle/service/nvflinger/hos_binder_driver_server.cpp b/src/core/hle/service/nvflinger/hos_binder_driver_server.cpp
index 0c937d682..094ba2542 100644
--- a/src/core/hle/service/nvflinger/hos_binder_driver_server.cpp
+++ b/src/core/hle/service/nvflinger/hos_binder_driver_server.cpp
@@ -14,7 +14,7 @@ HosBinderDriverServer::HosBinderDriverServer(Core::System& system_)
 HosBinderDriverServer::~HosBinderDriverServer() {}
 
 u64 HosBinderDriverServer::RegisterProducer(std::unique_ptr<android::IBinder>&& binder) {
-    std::lock_guard lk{lock};
+    std::scoped_lock lk{lock};
 
     last_id++;
 
@@ -24,7 +24,7 @@ u64 HosBinderDriverServer::RegisterProducer(std::unique_ptr<android::IBinder>&&
 }
 
 android::IBinder* HosBinderDriverServer::TryGetProducer(u64 id) {
-    std::lock_guard lk{lock};
+    std::scoped_lock lk{lock};
 
     if (auto search = producers.find(id); search != producers.end()) {
         return search->second.get();
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index 52c43c857..6ef459b7a 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -53,13 +53,13 @@ PerfStats::~PerfStats() {
 }
 
 void PerfStats::BeginSystemFrame() {
-    std::lock_guard lock{object_mutex};
+    std::scoped_lock lock{object_mutex};
 
     frame_begin = Clock::now();
 }
 
 void PerfStats::EndSystemFrame() {
-    std::lock_guard lock{object_mutex};
+    std::scoped_lock lock{object_mutex};
 
     auto frame_end = Clock::now();
     const auto frame_time = frame_end - frame_begin;
@@ -79,7 +79,7 @@ void PerfStats::EndGameFrame() {
 }
 
 double PerfStats::GetMeanFrametime() const {
-    std::lock_guard lock{object_mutex};
+    std::scoped_lock lock{object_mutex};
 
     if (current_index <= IgnoreFrames) {
         return 0;
@@ -91,7 +91,7 @@ double PerfStats::GetMeanFrametime() const {
 }
 
 PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
-    std::lock_guard lock{object_mutex};
+    std::scoped_lock lock{object_mutex};
 
     const auto now = Clock::now();
     // Walltime elapsed since stats were reset
@@ -120,7 +120,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
 }
 
 double PerfStats::GetLastFrameTimeScale() const {
-    std::lock_guard lock{object_mutex};
+    std::scoped_lock lock{object_mutex};
 
     constexpr double FRAME_LENGTH = 1.0 / 60;
     return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;
diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp
index 032c71aff..c81dc0e52 100644
--- a/src/core/tools/freezer.cpp
+++ b/src/core/tools/freezer.cpp
@@ -80,7 +80,7 @@ bool Freezer::IsActive() const {
 }
 
 void Freezer::Clear() {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     LOG_DEBUG(Common_Memory, "Clearing all frozen memory values.");
 
@@ -88,7 +88,7 @@
 u64 Freezer::Freeze(VAddr address, u32 width) {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     const auto current_value = MemoryReadWidth(memory, width, address);
     entries.push_back({address, width, current_value});
@@ -101,7 +101,7 @@ u64 Freezer::Freeze(VAddr address, u32 width) {
 }
 
 void Freezer::Unfreeze(VAddr address) {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     LOG_DEBUG(Common_Memory, "Unfreezing memory for address={:016X}", address);
 
@@ -109,13 +109,13 @@ void Freezer::Unfreeze(VAddr address) {
 }
 
 bool Freezer::IsFrozen(VAddr address) const {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     return FindEntry(address) != entries.cend();
 }
 
 void Freezer::SetFrozenValue(VAddr address, u64 value) {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     const auto iter = FindEntry(address);
 
@@ -132,7 +132,7 @@ void Freezer::SetFrozenValue(VAddr address, u64 value) {
 }
 
 std::optional<Freezer::Entry> Freezer::GetEntry(VAddr address) const {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     const auto iter = FindEntry(address);
 
@@ -144,7 +144,7 @@ std::optional<Freezer::Entry> Freezer::GetEntry(VAddr address) const {
 }
 
 std::vector<Freezer::Entry> Freezer::GetEntries() const {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     return entries;
 }
@@ -165,7 +165,7 @@ void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
         return;
     }
 
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     for (const auto& entry : entries) {
         LOG_DEBUG(Common_Memory,
@@ -178,7 +178,7 @@ void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
 }
 
 void Freezer::FillEntryReads() {
-    std::lock_guard lock{entries_mutex};
+    std::scoped_lock lock{entries_mutex};
 
     LOG_DEBUG(Common_Memory, "Updating memory freeze entries to current values.");
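Most of the remaining hunks simply swap std::lock_guard for std::scoped_lock with brace initialization. For a single mutex the two are interchangeable; scoped_lock additionally scales to several mutexes with deadlock avoidance. A minimal standalone illustration follows (not part of the diff; the mutex names are borrowed from kernel.cpp but declared locally here, and the int/std::set types are placeholders):

    // Standalone sketch only.
    #include <mutex>
    #include <set>

    std::mutex registered_objects_lock;
    std::mutex registered_in_use_objects_lock;
    std::set<int> registered_objects;
    std::set<int> registered_in_use_objects;

    void register_object(int object) {
        // Single mutex: behaves exactly like std::lock_guard.
        std::scoped_lock lk{registered_objects_lock};
        registered_objects.insert(object);
    }

    void move_to_in_use(int object) {
        // Several mutexes in one statement, locked with deadlock avoidance
        // (equivalent to std::lock + adopt_lock); lock_guard cannot do this.
        std::scoped_lock lk{registered_objects_lock, registered_in_use_objects_lock};
        registered_objects.erase(object);
        registered_in_use_objects.insert(object);
    }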