35 files changed, 870 insertions, 221 deletions
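The heart of this change is the new Common::HeapTracker. Separate-heap pages are no longer host-mapped eagerly: a chained SIGSEGV handler (arm_dynarmic.cpp) forwards faults inside the fastmem virtual range to HeapTracker::DeferredMapSeparateHeap, which commits the host mapping on first touch, and a tick-ordered tree evicts the least-recently-faulted mappings whenever the resident count exceeds the budget derived from /proc/sys/vm/max_map_count. A minimal sketch of the pattern, using hypothetical names, no locking, and std::map in place of the intrusive trees:

#include <cstddef>
#include <map>

namespace {

struct Region {
    std::size_t host_offset{};
    std::size_t size{};
    std::size_t tick{};  // last fault time, used for least-recently-used eviction
    bool resident{};     // whether the host mapping is currently committed
};

std::map<std::size_t, Region> g_regions;  // keyed by guest virtual offset
std::size_t g_tick{};

}  // namespace

// Called from the SIGSEGV handler with the faulting guest offset. Returns true
// if the fault was a deferred mapping and the instruction can be retried.
bool OnDeferredFault(std::size_t offset) {
    auto it = g_regions.upper_bound(offset);
    if (it == g_regions.begin()) {
        return false;  // below every tracked region: a genuine crash
    }
    --it;
    Region& region = it->second;
    if (offset >= it->first + region.size || region.resident) {
        return false;  // outside the region, or already mapped: a genuine crash
    }
    // Commit the host mapping only now (mmap/MapViewOfFile in the real code),
    // then record residency and recency; an eviction pass would run here when
    // the resident count exceeds the max_map_count budget.
    region.resident = true;
    region.tick = g_tick++;
    return true;
}

In the real implementation below, RebuildSeparateHeapAddressSpace unmaps the least-recently-faulted half of the resident set in one pass, which its comments note is cheaper in practice than evicting smaller amounts more often.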
diff --git a/src/audio_core/device/device_session.cpp b/src/audio_core/device/device_session.cpp index c41d9d1ea..ee42ae529 100644 --- a/src/audio_core/device/device_session.cpp +++ b/src/audio_core/device/device_session.cpp @@ -18,9 +18,7 @@ constexpr auto INCREMENT_TIME{5ms};  DeviceSession::DeviceSession(Core::System& system_)      : system{system_}, thread_event{Core::Timing::CreateEvent(                             "AudioOutSampleTick", -                           [this](std::uintptr_t, s64 time, std::chrono::nanoseconds) { -                               return ThreadFunc(); -                           })} {} +                           [this](s64 time, std::chrono::nanoseconds) { return ThreadFunc(); })} {}  DeviceSession::~DeviceSession() {      Finalize(); diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index b58a7073f..8c57d47c6 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -64,6 +64,8 @@ add_library(common STATIC      fs/path_util.cpp      fs/path_util.h      hash.h +    heap_tracker.cpp +    heap_tracker.h      hex_util.cpp      hex_util.h      host_memory.cpp diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp new file mode 100644 index 000000000..683208795 --- /dev/null +++ b/src/common/heap_tracker.cpp @@ -0,0 +1,281 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <fstream> +#include <vector> + +#include "common/heap_tracker.h" +#include "common/logging/log.h" + +namespace Common { + +namespace { + +s64 GetMaxPermissibleResidentMapCount() { +    // Default value. +    s64 value = 65530; + +    // Try to read how many mappings we can make. +    std::ifstream s("/proc/sys/vm/max_map_count"); +    s >> value; + +    // Print, for debug. +    LOG_INFO(HW_Memory, "Current maximum map count: {}", value); + +    // Allow 20000 maps for other code and to account for split inaccuracy. +    return std::max<s64>(value - 20000, 0); +} + +} // namespace + +HeapTracker::HeapTracker(Common::HostMemory& buffer) +    : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {} +HeapTracker::~HeapTracker() = default; + +void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length, +                      MemoryPermission perm, bool is_separate_heap) { +    // When mapping other memory, map pages immediately. +    if (!is_separate_heap) { +        m_buffer.Map(virtual_offset, host_offset, length, perm, false); +        return; +    } + +    { +        // We are mapping part of a separate heap. +        std::scoped_lock lk{m_lock}; + +        auto* const map = new SeparateHeapMap{ +            .vaddr = virtual_offset, +            .paddr = host_offset, +            .size = length, +            .tick = m_tick++, +            .perm = perm, +            .is_resident = false, +        }; + +        // Insert into mappings. +        m_map_count++; +        m_mappings.insert(*map); +    } + +    // Finally, map. +    this->DeferredMapSeparateHeap(virtual_offset); +} + +void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) { +    // If this is a separate heap... +    if (is_separate_heap) { +        std::scoped_lock lk{m_lock}; + +        const SeparateHeapMap key{ +            .vaddr = virtual_offset, +        }; + +        // Split at the boundaries of the region we are removing.
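+        // (Any mapping that straddles an edge of [virtual_offset, virtual_offset + size)
+        //  is cut in two first, so the loop below can delete whole nodes without
+        //  disturbing memory outside the requested range.)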
+        this->SplitHeapMapLocked(virtual_offset); +        this->SplitHeapMapLocked(virtual_offset + size); + +        // Erase all mappings in range. +        auto it = m_mappings.find(key); +        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) { +            // Get underlying item. +            auto* const item = std::addressof(*it); + +            // If resident, erase from resident map. +            if (item->is_resident) { +                ASSERT(--m_resident_map_count >= 0); +                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item)); +            } + +            // Erase from map. +            ASSERT(--m_map_count >= 0); +            it = m_mappings.erase(it); + +            // Free the item. +            delete item; +        } +    } + +    // Unmap pages. +    m_buffer.Unmap(virtual_offset, size, false); +} + +void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) { +    // Ensure no rebuild occurs while reprotecting. +    std::shared_lock lk{m_rebuild_lock}; + +    // Split at the boundaries of the region we are reprotecting. +    this->SplitHeapMap(virtual_offset, size); + +    // Declare tracking variables. +    const VAddr end = virtual_offset + size; +    VAddr cur = virtual_offset; + +    while (cur < end) { +        VAddr next = cur; +        bool should_protect = false; + +        { +            std::scoped_lock lk2{m_lock}; + +            const SeparateHeapMap key{ +                .vaddr = next, +            }; + +            // Try to get the next mapping corresponding to this address. +            const auto it = m_mappings.nfind(key); + +            if (it == m_mappings.end()) { +                // There are no separate heap mappings remaining. +                next = end; +                should_protect = true; +            } else if (it->vaddr == cur) { +                // We are in range. +                // Update permission bits. +                it->perm = perm; + +                // Determine next address and whether we should protect. +                next = cur + it->size; +                should_protect = it->is_resident; +            } else /* if (it->vaddr > cur) */ { +                // We weren't in range, but there is a block coming up that will be. +                next = it->vaddr; +                should_protect = true; +            } +        } + +        // Clamp to end. +        next = std::min(next, end); + +        // Reprotect, if we need to. +        if (should_protect) { +            m_buffer.Protect(cur, next - cur, perm); +        } + +        // Advance. +        cur = next; +    } +} + +bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) { +    if (m_buffer.IsInVirtualRange(fault_address)) { +        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer()); +    } + +    return false; +} + +bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) { +    bool rebuild_required = false; + +    { +        std::scoped_lock lk{m_lock}; + +        // Check to ensure this was a non-resident separate heap mapping. +        const auto it = this->GetNearestHeapMapLocked(virtual_offset); +        if (it == m_mappings.end() || it->is_resident) { +            return false; +        } + +        // Update tick before possible rebuild. +        it->tick = m_tick++; + +        // Check if we need to rebuild. 
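+        // (m_max_resident_map_count is the vm.max_map_count budget computed above,
+        //  minus headroom; exceeding it triggers an eviction pass over the
+        //  least-recently-faulted mappings.)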
+        if (m_resident_map_count > m_max_resident_map_count) { +            rebuild_required = true; +        } + +        // Map the area. +        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false); + +        // This map is now resident. +        it->is_resident = true; +        m_resident_map_count++; +        m_resident_mappings.insert(*it); +    } + +    if (rebuild_required) { +        // A rebuild was required, so perform it now. +        this->RebuildSeparateHeapAddressSpace(); +    } + +    return true; +} + +void HeapTracker::RebuildSeparateHeapAddressSpace() { +    std::scoped_lock lk{m_rebuild_lock, m_lock}; + +    ASSERT(!m_resident_mappings.empty()); + +    // Dump half of the mappings. +    // +    // Despite being worse in theory, this has proven to be better in practice than more +    // regularly dumping a smaller amount, because it significantly reduces average case +    // lock contention. +    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2; +    const size_t evict_count = m_resident_map_count - desired_count; +    auto it = m_resident_mappings.begin(); + +    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) { +        // Unmark and unmap. +        it->is_resident = false; +        m_buffer.Unmap(it->vaddr, it->size, false); + +        // Advance. +        ASSERT(--m_resident_map_count >= 0); +        it = m_resident_mappings.erase(it); +    } +} + +void HeapTracker::SplitHeapMap(VAddr offset, size_t size) { +    std::scoped_lock lk{m_lock}; + +    this->SplitHeapMapLocked(offset); +    this->SplitHeapMapLocked(offset + size); +} + +void HeapTracker::SplitHeapMapLocked(VAddr offset) { +    const auto it = this->GetNearestHeapMapLocked(offset); +    if (it == m_mappings.end() || it->vaddr == offset) { +        // Not contained or no split required. +        return; +    } + +    // Cache the original values. +    auto* const left = std::addressof(*it); +    const size_t orig_size = left->size; + +    // Adjust the left map. +    const size_t left_size = offset - left->vaddr; +    left->size = left_size; + +    // Create the new right map. +    auto* const right = new SeparateHeapMap{ +        .vaddr = left->vaddr + left_size, +        .paddr = left->paddr + left_size, +        .size = orig_size - left_size, +        .tick = left->tick, +        .perm = left->perm, +        .is_resident = left->is_resident, +    }; + +    // Insert the new right map. +    m_map_count++; +    m_mappings.insert(*right); + +    // If resident, also insert into resident map. 
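+    // (The right half keeps the original tick, so both halves retain their place
+    //  in the least-recently-used eviction order.)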
+    if (right->is_resident) { +        m_resident_map_count++; +        m_resident_mappings.insert(*right); +    } +} + +HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) { +    const SeparateHeapMap key{ +        .vaddr = offset, +    }; + +    return m_mappings.find(key); +} + +} // namespace Common diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h new file mode 100644 index 000000000..ee5b0bf43 --- /dev/null +++ b/src/common/heap_tracker.h @@ -0,0 +1,98 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <atomic> +#include <mutex> +#include <set> +#include <shared_mutex> + +#include "common/host_memory.h" +#include "common/intrusive_red_black_tree.h" + +namespace Common { + +struct SeparateHeapMap { +    Common::IntrusiveRedBlackTreeNode addr_node{}; +    Common::IntrusiveRedBlackTreeNode tick_node{}; +    VAddr vaddr{}; +    PAddr paddr{}; +    size_t size{}; +    size_t tick{}; +    MemoryPermission perm{}; +    bool is_resident{}; +}; + +struct SeparateHeapMapAddrComparator { +    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) { +        if (lhs.vaddr < rhs.vaddr) { +            return -1; +        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) { +            return 0; +        } else { +            return 1; +        } +    } +}; + +struct SeparateHeapMapTickComparator { +    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) { +        if (lhs.tick < rhs.tick) { +            return -1; +        } else if (lhs.tick > rhs.tick) { +            return 1; +        } else { +            return SeparateHeapMapAddrComparator::Compare(lhs, rhs); +        } +    } +}; + +class HeapTracker { +public: +    explicit HeapTracker(Common::HostMemory& buffer); +    ~HeapTracker(); + +    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm, +             bool is_separate_heap); +    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap); +    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm); +    u8* VirtualBasePointer() { +        return m_buffer.VirtualBasePointer(); +    } + +    bool DeferredMapSeparateHeap(u8* fault_address); +    bool DeferredMapSeparateHeap(size_t virtual_offset); + +private: +    using AddrTreeTraits = +        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>; +    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>; + +    using TickTreeTraits = +        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>; +    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>; + +    AddrTree m_mappings{}; +    TickTree m_resident_mappings{}; + +private: +    void SplitHeapMap(VAddr offset, size_t size); +    void SplitHeapMapLocked(VAddr offset); + +    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset); + +    void RebuildSeparateHeapAddressSpace(); + +private: +    Common::HostMemory& m_buffer; +    const s64 m_max_resident_map_count; + +    std::shared_mutex m_rebuild_lock{}; +    std::mutex m_lock{}; +    s64 m_map_count{}; +    s64 m_resident_map_count{}; +    size_t m_tick{}; +}; + +} // namespace Common diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp index e540375b8..860c39e6a 100644 --- a/src/common/host_memory.cpp +++ b/src/common/host_memory.cpp @@ 
-679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;  HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;  void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, -                     MemoryPermission perms) { +                     MemoryPermission perms, bool separate_heap) {      ASSERT(virtual_offset % PageAlignment == 0);      ASSERT(host_offset % PageAlignment == 0);      ASSERT(length % PageAlignment == 0); @@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,      impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);  } -void HostMemory::Unmap(size_t virtual_offset, size_t length) { +void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {      ASSERT(virtual_offset % PageAlignment == 0);      ASSERT(length % PageAlignment == 0);      ASSERT(virtual_offset + length <= virtual_size); @@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {      impl->Unmap(virtual_offset + virtual_base_offset, length);  } -void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write, -                         bool execute) { +void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {      ASSERT(virtual_offset % PageAlignment == 0);      ASSERT(length % PageAlignment == 0);      ASSERT(virtual_offset + length <= virtual_size);      if (length == 0 || !virtual_base || !impl) {          return;      } +    const bool read = True(perm & MemoryPermission::Read); +    const bool write = True(perm & MemoryPermission::Write); +    const bool execute = True(perm & MemoryPermission::Execute);      impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);  } diff --git a/src/common/host_memory.h b/src/common/host_memory.h index 747c5850c..72fbb05af 100644 --- a/src/common/host_memory.h +++ b/src/common/host_memory.h @@ -40,11 +40,12 @@ public:      HostMemory(HostMemory&& other) noexcept;      HostMemory& operator=(HostMemory&& other) noexcept; -    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms); +    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms, +             bool separate_heap); -    void Unmap(size_t virtual_offset, size_t length); +    void Unmap(size_t virtual_offset, size_t length, bool separate_heap); -    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false); +    void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);      void EnableDirectMappedAddress(); @@ -64,6 +65,10 @@ public:          return virtual_base;      } +    bool IsInVirtualRange(void* address) const noexcept { +        return address >= virtual_base && address < virtual_base + virtual_size; +    } +  private:      size_t backing_size{};      size_t virtual_size{}; diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 96ab39cb8..367d01dc7 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -978,6 +978,7 @@ endif()  if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)      target_sources(core PRIVATE +        arm/dynarmic/arm_dynarmic.cpp          arm/dynarmic/arm_dynarmic.h          arm/dynarmic/arm_dynarmic_64.cpp          arm/dynarmic/arm_dynarmic_64.h @@ -987,6 +988,8 @@ if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)          arm/dynarmic/dynarmic_cp15.h          
arm/dynarmic/dynarmic_exclusive_monitor.cpp          arm/dynarmic/dynarmic_exclusive_monitor.h +        hle/service/jit/jit_code_memory.cpp +        hle/service/jit/jit_code_memory.h          hle/service/jit/jit_context.cpp          hle/service/jit/jit_context.h          hle/service/jit/jit.cpp diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp new file mode 100644 index 000000000..e6e9fc45b --- /dev/null +++ b/src/core/arm/dynarmic/arm_dynarmic.cpp @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifdef __linux__ + +#include "common/signal_chain.h" + +#include "core/arm/dynarmic/arm_dynarmic.h" +#include "core/hle/kernel/k_process.h" +#include "core/memory.h" + +namespace Core { + +namespace { + +thread_local Core::Memory::Memory* g_current_memory{}; +std::once_flag g_registered{}; +struct sigaction g_old_segv {}; + +void HandleSigSegv(int sig, siginfo_t* info, void* ctx) { +    if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) { +        return; +    } + +    return g_old_segv.sa_sigaction(sig, info, ctx); +} + +} // namespace + +ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) { +    g_current_memory = std::addressof(process->GetMemory()); +} + +ScopedJitExecution::~ScopedJitExecution() { +    g_current_memory = nullptr; +} + +void ScopedJitExecution::RegisterHandler() { +    std::call_once(g_registered, [] { +        struct sigaction sa {}; +        sa.sa_sigaction = &HandleSigSegv; +        sa.sa_flags = SA_SIGINFO | SA_ONSTACK; +        Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv)); +    }); +} + +} // namespace Core + +#endif diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h index eef7c3116..53dd18815 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.h +++ b/src/core/arm/dynarmic/arm_dynarmic.h @@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {      return static_cast<HaltReason>(hr);  } +#ifdef __linux__ + +class ScopedJitExecution { +public: +    explicit ScopedJitExecution(Kernel::KProcess* process); +    ~ScopedJitExecution(); +    static void RegisterHandler(); +}; + +#else + +class ScopedJitExecution { +public: +    explicit ScopedJitExecution(Kernel::KProcess* process) {} +    ~ScopedJitExecution() {} +    static void RegisterHandler() {} +}; + +#endif +  } // namespace Core diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp index c78cfd528..36478f722 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp @@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {  }  HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) { +    ScopedJitExecution sj(thread->GetOwnerProcess()); +      m_jit->ClearExclusiveState();      return TranslateHaltReason(m_jit->Run());  }  HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) { +    ScopedJitExecution sj(thread->GetOwnerProcess()); +      m_jit->ClearExclusiveState();      return TranslateHaltReason(m_jit->Step());  } @@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc        m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {      auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();      m_jit = MakeJit(&page_table_impl); +    
ScopedJitExecution::RegisterHandler();  }  ArmDynarmic32::~ArmDynarmic32() = default; diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp index f351b13d9..c811c8ad5 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp @@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa  }  HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) { +    ScopedJitExecution sj(thread->GetOwnerProcess()); +      m_jit->ClearExclusiveState();      return TranslateHaltReason(m_jit->Run());  }  HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) { +    ScopedJitExecution sj(thread->GetOwnerProcess()); +      m_jit->ClearExclusiveState();      return TranslateHaltReason(m_jit->Step());  } @@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc      auto& page_table = process->GetPageTable().GetBasePageTable();      auto& page_table_impl = page_table.GetImpl();      m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth()); +    ScopedJitExecution::RegisterHandler();  }  ArmDynarmic64::~ArmDynarmic64() = default; diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index d6b5abc68..fc536413b 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp @@ -29,7 +29,6 @@ std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callbac  struct CoreTiming::Event {      s64 time;      u64 fifo_order; -    std::uintptr_t user_data;      std::weak_ptr<EventType> type;      s64 reschedule_time;      heap_t::handle_type handle{}; @@ -67,17 +66,15 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {      event_fifo_id = 0;      shutting_down = false;      cpu_ticks = 0; -    const auto empty_timed_callback = [](std::uintptr_t, u64, std::chrono::nanoseconds) -        -> std::optional<std::chrono::nanoseconds> { return std::nullopt; }; -    ev_lost = CreateEvent("_lost_event", empty_timed_callback);      if (is_multicore) {          timer_thread = std::make_unique<std::jthread>(ThreadEntry, std::ref(*this));      }  }  void CoreTiming::ClearPendingEvents() { -    std::scoped_lock lock{basic_lock}; +    std::scoped_lock lock{advance_lock, basic_lock};      event_queue.clear(); +    event.Set();  }  void CoreTiming::Pause(bool is_paused) { @@ -119,14 +116,12 @@ bool CoreTiming::HasPendingEvents() const {  }  void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future, -                               const std::shared_ptr<EventType>& event_type, -                               std::uintptr_t user_data, bool absolute_time) { +                               const std::shared_ptr<EventType>& event_type, bool absolute_time) {      {          std::scoped_lock scope{basic_lock};          const auto next_time{absolute_time ? 
ns_into_future : GetGlobalTimeNs() + ns_into_future}; -        auto h{event_queue.emplace( -            Event{next_time.count(), event_fifo_id++, user_data, event_type, 0})}; +        auto h{event_queue.emplace(Event{next_time.count(), event_fifo_id++, event_type, 0})};          (*h).handle = h;      } @@ -136,13 +131,13 @@ void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,  void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,                                        std::chrono::nanoseconds resched_time,                                        const std::shared_ptr<EventType>& event_type, -                                      std::uintptr_t user_data, bool absolute_time) { +                                      bool absolute_time) {      {          std::scoped_lock scope{basic_lock};          const auto next_time{absolute_time ? start_time : GetGlobalTimeNs() + start_time}; -        auto h{event_queue.emplace(Event{next_time.count(), event_fifo_id++, user_data, event_type, -                                         resched_time.count()})}; +        auto h{event_queue.emplace( +            Event{next_time.count(), event_fifo_id++, event_type, resched_time.count()})};          (*h).handle = h;      } @@ -150,14 +145,14 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,  }  void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, -                                 std::uintptr_t user_data, bool wait) { +                                 UnscheduleEventType type) {      {          std::scoped_lock lk{basic_lock};          std::vector<heap_t::handle_type> to_remove;          for (auto itr = event_queue.begin(); itr != event_queue.end(); itr++) {              const Event& e = *itr; -            if (e.type.lock().get() == event_type.get() && e.user_data == user_data) { +            if (e.type.lock().get() == event_type.get()) {                  to_remove.push_back(itr->handle);              }          } @@ -165,10 +160,12 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,          for (auto h : to_remove) {              event_queue.erase(h);          } + +        event_type->sequence_number++;      }      // Force any in-progress events to finish -    if (wait) { +    if (type == UnscheduleEventType::Wait) {          std::scoped_lock lk{advance_lock};      }  } @@ -208,28 +205,31 @@ std::optional<s64> CoreTiming::Advance() {          const Event& evt = event_queue.top();          if (const auto event_type{evt.type.lock()}) { -            if (evt.reschedule_time == 0) { -                const auto evt_user_data = evt.user_data; -                const auto evt_time = evt.time; +            const auto evt_time = evt.time; +            const auto evt_sequence_num = event_type->sequence_number; +            if (evt.reschedule_time == 0) {                  event_queue.pop();                  basic_lock.unlock();                  event_type->callback( -                    evt_user_data, evt_time, -                    std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time}); +                    evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});                  basic_lock.lock();              } else {                  basic_lock.unlock();                  const auto new_schedule_time{event_type->callback( -                    evt.user_data, evt.time, -                    std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt.time})}; +                    
evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time})};                  basic_lock.lock(); +                if (evt_sequence_num != event_type->sequence_number) { +                    // Heap handle is invalidated after external modification. +                    continue; +                } +                  const auto next_schedule_time{new_schedule_time.has_value()                                                    ? new_schedule_time.value().count()                                                    : evt.reschedule_time}; @@ -241,8 +241,8 @@ std::optional<s64> CoreTiming::Advance() {                      next_time = pause_end_time + next_schedule_time;                  } -                event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.user_data, -                                                     evt.type, next_schedule_time, evt.handle}); +                event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.type, +                                                     next_schedule_time, evt.handle});              }          } diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 21548f0a9..7e4dff7f3 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -22,17 +22,25 @@ namespace Core::Timing {  /// A callback that may be scheduled for a particular core timing event.  using TimedCallback = std::function<std::optional<std::chrono::nanoseconds>( -    std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)>; +    s64 time, std::chrono::nanoseconds ns_late)>;  /// Contains the characteristics of a particular event.  struct EventType {      explicit EventType(TimedCallback&& callback_, std::string&& name_) -        : callback{std::move(callback_)}, name{std::move(name_)} {} +        : callback{std::move(callback_)}, name{std::move(name_)}, sequence_number{0} {}      /// The event's callback function.      TimedCallback callback;      /// A pointer to the name of the event.      const std::string name; +    /// A monotonic sequence number, incremented when this event is +    /// changed externally. 
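+    /// Advance() snapshots this value before running the callback and skips
+    /// rescheduling if it changed, since the event's heap handle is invalidated
+    /// by external modification.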
+    size_t sequence_number; +}; + +enum class UnscheduleEventType { +    Wait, +    NoWait,  };  /** @@ -89,23 +97,17 @@ public:      /// Schedules an event in core timing      void ScheduleEvent(std::chrono::nanoseconds ns_into_future, -                       const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data = 0, -                       bool absolute_time = false); +                       const std::shared_ptr<EventType>& event_type, bool absolute_time = false);      /// Schedules an event which will automatically re-schedule itself with the given time, until      /// unscheduled      void ScheduleLoopingEvent(std::chrono::nanoseconds start_time,                                std::chrono::nanoseconds resched_time,                                const std::shared_ptr<EventType>& event_type, -                              std::uintptr_t user_data = 0, bool absolute_time = false); +                              bool absolute_time = false); -    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data, -                         bool wait = true); - -    void UnscheduleEventWithoutWait(const std::shared_ptr<EventType>& event_type, -                                    std::uintptr_t user_data) { -        UnscheduleEvent(event_type, user_data, false); -    } +    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, +                         UnscheduleEventType type = UnscheduleEventType::Wait);      void AddTicks(u64 ticks_to_add); @@ -158,7 +160,6 @@ private:      heap_t event_queue;      u64 event_fifo_id = 0; -    std::shared_ptr<EventType> ev_lost;      Common::Event event{};      Common::Event pause_event{};      mutable std::mutex basic_lock; diff --git a/src/core/file_sys/ips_layer.cpp b/src/core/file_sys/ips_layer.cpp index 7be1322cc..31033634c 100644 --- a/src/core/file_sys/ips_layer.cpp +++ b/src/core/file_sys/ips_layer.cpp @@ -73,6 +73,9 @@ VirtualFile PatchIPS(const VirtualFile& in, const VirtualFile& ips) {          return nullptr;      auto in_data = in->ReadAllBytes(); +    if (in_data.size() == 0) { +        return nullptr; +    }      std::vector<u8> temp(type == IPSFileType::IPS ? 3 : 4);      u64 offset = 5; // After header @@ -88,6 +91,10 @@ VirtualFile PatchIPS(const VirtualFile& in, const VirtualFile& ips) {          else              real_offset = (temp[0] << 16) | (temp[1] << 8) | temp[2]; +        if (real_offset > in_data.size()) { +            return nullptr; +        } +          u16 data_size{};          if (ips->ReadObject(&data_size, offset) != sizeof(u16))              return nullptr; diff --git a/src/core/hle/kernel/k_hardware_timer.cpp b/src/core/hle/kernel/k_hardware_timer.cpp index 8e2e40307..4e947dd6b 100644 --- a/src/core/hle/kernel/k_hardware_timer.cpp +++ b/src/core/hle/kernel/k_hardware_timer.cpp @@ -10,15 +10,15 @@ namespace Kernel {  void KHardwareTimer::Initialize() {      // Create the timing callback to register with CoreTiming. 
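    // (The callback now captures `this` directly; previously the timer pointer was
    //  smuggled through the std::uintptr_t user_data parameter via reinterpret_cast.)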
-    m_event_type = Core::Timing::CreateEvent( -        "KHardwareTimer::Callback", [](std::uintptr_t timer_handle, s64, std::chrono::nanoseconds) { -            reinterpret_cast<KHardwareTimer*>(timer_handle)->DoTask(); -            return std::nullopt; -        }); +    m_event_type = Core::Timing::CreateEvent("KHardwareTimer::Callback", +                                             [this](s64, std::chrono::nanoseconds) { +                                                 this->DoTask(); +                                                 return std::nullopt; +                                             });  }  void KHardwareTimer::Finalize() { -    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, reinterpret_cast<uintptr_t>(this)); +    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type);      m_wakeup_time = std::numeric_limits<s64>::max();      m_event_type.reset();  } @@ -57,13 +57,12 @@ void KHardwareTimer::EnableInterrupt(s64 wakeup_time) {      m_wakeup_time = wakeup_time;      m_kernel.System().CoreTiming().ScheduleEvent(std::chrono::nanoseconds{m_wakeup_time}, -                                                 m_event_type, reinterpret_cast<uintptr_t>(this), -                                                 true); +                                                 m_event_type, true);  }  void KHardwareTimer::DisableInterrupt() { -    m_kernel.System().CoreTiming().UnscheduleEventWithoutWait(m_event_type, -                                                              reinterpret_cast<uintptr_t>(this)); +    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, +                                                   Core::Timing::UnscheduleEventType::NoWait);      m_wakeup_time = std::numeric_limits<s64>::max();  } diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp index 423289145..8c1549559 100644 --- a/src/core/hle/kernel/k_page_table_base.cpp +++ b/src/core/hle/kernel/k_page_table_base.cpp @@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool  void KPageTableBase::Finalize() {      auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {          if (Settings::IsFastmemEnabled()) { -            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); +            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);          }      }; @@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {                                  // Unmap.                                  R_ASSERT(this->Operate(updater.GetPageList(), cur_address,                                                         cur_pages, 0, false, unmap_properties, -                                                       OperationType::Unmap, true)); +                                                       OperationType::UnmapPhysical, true));                              }                              // Check if we're done. @@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {                              // Map the papges.                              
R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,                                                  cur_pg, map_properties, -                                                OperationType::MapFirstGroup, false)); +                                                OperationType::MapFirstGroupPhysical, false));                          }                      } @@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)              // Unmap.              R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false, -                                   unmap_properties, OperationType::Unmap, false)); +                                   unmap_properties, OperationType::UnmapPhysical, false));          }          // Check if we're done. @@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a      // or free them to the page list, and so it goes unused (along with page properties).      switch (operation) { -    case OperationType::Unmap: { +    case OperationType::Unmap: +    case OperationType::UnmapPhysical: { +        const bool separate_heap = operation == OperationType::UnmapPhysical; +          // Ensure that any pages we track are closed on exit.          KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());          SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); @@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a          this->MakePageGroup(pages_to_close, virt_addr, num_pages);          // Unmap. -        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize); +        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);          R_SUCCEED();      } @@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a          ASSERT(virt_addr != 0);          ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));          m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr, -                                  ConvertToMemoryPermission(properties.perm)); +                                  ConvertToMemoryPermission(properties.perm), false);          // Open references to pages, if we should.          if (this->IsHeapPhysicalAddress(phys_addr)) { @@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a      switch (operation) {      case OperationType::MapGroup: -    case OperationType::MapFirstGroup: { +    case OperationType::MapFirstGroup: +    case OperationType::MapFirstGroupPhysical: { +        const bool separate_heap = operation == OperationType::MapFirstGroupPhysical; +          // We want to maintain a new reference to every page in the group. -        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); +        KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);          for (const auto& node : page_group) {              const size_t size{node.GetNumPages() * PageSize};              // Map the pages.              
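            // (separate_heap is true only for MapFirstGroupPhysical, which routes
            //  these pages through HeapTracker's deferred-mapping path.)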
m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(), -                                      ConvertToMemoryPermission(properties.perm)); +                                      ConvertToMemoryPermission(properties.perm), separate_heap);              virt_addr += size;          } diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h index 556d230b3..077cafc96 100644 --- a/src/core/hle/kernel/k_page_table_base.h +++ b/src/core/hle/kernel/k_page_table_base.h @@ -104,6 +104,9 @@ protected:          ChangePermissionsAndRefresh = 5,          ChangePermissionsAndRefreshAndFlush = 6,          Separate = 7, + +        MapFirstGroupPhysical = 65000, +        UnmapPhysical = 65001,      };      static constexpr size_t MaxPhysicalMapAlignment = 1_GiB; diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index d6869c228..068e71dff 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {          auto& buffer = m_kernel.System().DeviceMemory().buffer;          const auto& code = code_set.CodeSegment();          const auto& patch = code_set.PatchSegment(); -        buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true); -        buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true); +        buffer.Protect(GetInteger(base_addr + code.addr), code.size, +                       Common::MemoryPermission::Read | Common::MemoryPermission::Execute); +        buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, +                       Common::MemoryPermission::Read | Common::MemoryPermission::Execute);          ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);      }  #endif diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index c14d2d2f3..1030f0c12 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -238,7 +238,7 @@ struct KernelCore::Impl {      void InitializePreemption(KernelCore& kernel) {          preemption_event = Core::Timing::CreateEvent(              "PreemptionCallback", -            [this, &kernel](std::uintptr_t, s64 time, +            [this, &kernel](s64 time,                              std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {                  {                      KScopedSchedulerLock lock(kernel); diff --git a/src/core/hle/service/hid/hidbus.cpp b/src/core/hle/service/hid/hidbus.cpp index d12f9beb0..ffa7e144d 100644 --- a/src/core/hle/service/hid/hidbus.cpp +++ b/src/core/hle/service/hid/hidbus.cpp @@ -49,10 +49,10 @@ HidBus::HidBus(Core::System& system_)      // Register update callbacks      hidbus_update_event = Core::Timing::CreateEvent(          "Hidbus::UpdateCallback", -        [this](std::uintptr_t user_data, s64 time, +        [this](s64 time,                 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {              const auto guard = LockService(); -            UpdateHidbus(user_data, ns_late); +            UpdateHidbus(ns_late);              return std::nullopt;          }); @@ -61,10 +61,10 @@ HidBus::HidBus(Core::System& system_)  }  HidBus::~HidBus() { -    system.CoreTiming().UnscheduleEvent(hidbus_update_event, 0); +    system.CoreTiming().UnscheduleEvent(hidbus_update_event);  } -void HidBus::UpdateHidbus(std::uintptr_t user_data, std::chrono::nanoseconds 
ns_late) { +void HidBus::UpdateHidbus(std::chrono::nanoseconds ns_late) {      if (is_hidbus_enabled) {          for (std::size_t i = 0; i < devices.size(); ++i) {              if (!devices[i].is_device_initializated) { diff --git a/src/core/hle/service/hid/hidbus.h b/src/core/hle/service/hid/hidbus.h index c29b5e882..85a1df133 100644 --- a/src/core/hle/service/hid/hidbus.h +++ b/src/core/hle/service/hid/hidbus.h @@ -108,7 +108,7 @@ private:      void DisableJoyPollingReceiveMode(HLERequestContext& ctx);      void SetStatusManagerType(HLERequestContext& ctx); -    void UpdateHidbus(std::uintptr_t user_data, std::chrono::nanoseconds ns_late); +    void UpdateHidbus(std::chrono::nanoseconds ns_late);      std::optional<std::size_t> GetDeviceIndexFromHandle(BusHandle handle) const;      template <typename T> diff --git a/src/core/hle/service/hid/resource_manager.cpp b/src/core/hle/service/hid/resource_manager.cpp index 6c6cbd802..afc61f70d 100644 --- a/src/core/hle/service/hid/resource_manager.cpp +++ b/src/core/hle/service/hid/resource_manager.cpp @@ -227,8 +227,7 @@ void ResourceManager::EnableTouchScreen(u64 aruid, bool is_enabled) {      applet_resource->EnableTouchScreen(aruid, is_enabled);  } -void ResourceManager::UpdateControllers(std::uintptr_t user_data, -                                        std::chrono::nanoseconds ns_late) { +void ResourceManager::UpdateControllers(std::chrono::nanoseconds ns_late) {      auto& core_timing = system.CoreTiming();      debug_pad->OnUpdate(core_timing);      digitizer->OnUpdate(core_timing); @@ -241,20 +240,19 @@ void ResourceManager::UpdateControllers(std::uintptr_t user_data,      capture_button->OnUpdate(core_timing);  } -void ResourceManager::UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) { +void ResourceManager::UpdateNpad(std::chrono::nanoseconds ns_late) {      auto& core_timing = system.CoreTiming();      npad->OnUpdate(core_timing);  } -void ResourceManager::UpdateMouseKeyboard(std::uintptr_t user_data, -                                          std::chrono::nanoseconds ns_late) { +void ResourceManager::UpdateMouseKeyboard(std::chrono::nanoseconds ns_late) {      auto& core_timing = system.CoreTiming();      mouse->OnUpdate(core_timing);      debug_mouse->OnUpdate(core_timing);      keyboard->OnUpdate(core_timing);  } -void ResourceManager::UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) { +void ResourceManager::UpdateMotion(std::chrono::nanoseconds ns_late) {      auto& core_timing = system.CoreTiming();      six_axis->OnUpdate(core_timing);      seven_six_axis->OnUpdate(core_timing); @@ -273,34 +271,34 @@ IAppletResource::IAppletResource(Core::System& system_, std::shared_ptr<Resource      // Register update callbacks      npad_update_event = Core::Timing::CreateEvent(          "HID::UpdatePadCallback", -        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late) -            -> std::optional<std::chrono::nanoseconds> { +        [this, resource]( +            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {              const auto guard = LockService(); -            resource->UpdateNpad(user_data, ns_late); +            resource->UpdateNpad(ns_late);              return std::nullopt;          });      default_update_event = Core::Timing::CreateEvent(          "HID::UpdateDefaultCallback", -        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late) -            -> 
std::optional<std::chrono::nanoseconds> { +        [this, resource]( +            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {              const auto guard = LockService(); -            resource->UpdateControllers(user_data, ns_late); +            resource->UpdateControllers(ns_late);              return std::nullopt;          });      mouse_keyboard_update_event = Core::Timing::CreateEvent(          "HID::UpdateMouseKeyboardCallback", -        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late) -            -> std::optional<std::chrono::nanoseconds> { +        [this, resource]( +            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {              const auto guard = LockService(); -            resource->UpdateMouseKeyboard(user_data, ns_late); +            resource->UpdateMouseKeyboard(ns_late);              return std::nullopt;          });      motion_update_event = Core::Timing::CreateEvent(          "HID::UpdateMotionCallback", -        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late) -            -> std::optional<std::chrono::nanoseconds> { +        [this, resource]( +            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {              const auto guard = LockService(); -            resource->UpdateMotion(user_data, ns_late); +            resource->UpdateMotion(ns_late);              return std::nullopt;          }); @@ -314,10 +312,10 @@ IAppletResource::IAppletResource(Core::System& system_, std::shared_ptr<Resource  }  IAppletResource::~IAppletResource() { -    system.CoreTiming().UnscheduleEvent(npad_update_event, 0); -    system.CoreTiming().UnscheduleEvent(default_update_event, 0); -    system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event, 0); -    system.CoreTiming().UnscheduleEvent(motion_update_event, 0); +    system.CoreTiming().UnscheduleEvent(npad_update_event); +    system.CoreTiming().UnscheduleEvent(default_update_event); +    system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event); +    system.CoreTiming().UnscheduleEvent(motion_update_event);      resource_manager->FreeAppletResourceId(aruid);  } diff --git a/src/core/hle/service/hid/resource_manager.h b/src/core/hle/service/hid/resource_manager.h index 5ad7cb564..5a6596099 100644 --- a/src/core/hle/service/hid/resource_manager.h +++ b/src/core/hle/service/hid/resource_manager.h @@ -81,10 +81,10 @@ public:      void EnablePadInput(u64 aruid, bool is_enabled);      void EnableTouchScreen(u64 aruid, bool is_enabled); -    void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late); -    void UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late); -    void UpdateMouseKeyboard(std::uintptr_t user_data, std::chrono::nanoseconds ns_late); -    void UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late); +    void UpdateControllers(std::chrono::nanoseconds ns_late); +    void UpdateNpad(std::chrono::nanoseconds ns_late); +    void UpdateMouseKeyboard(std::chrono::nanoseconds ns_late); +    void UpdateMotion(std::chrono::nanoseconds ns_late);  private:      Result CreateAppletResourceImpl(u64 aruid); diff --git a/src/core/hle/service/jit/jit.cpp b/src/core/hle/service/jit/jit.cpp index a94d05e19..77aa6d7d1 100644 --- a/src/core/hle/service/jit/jit.cpp +++ b/src/core/hle/service/jit/jit.cpp @@ -4,11 +4,11 @@  #include "core/arm/debug.h"  #include 
"core/arm/symbols.h"  #include "core/core.h" -#include "core/hle/kernel/k_code_memory.h"  #include "core/hle/kernel/k_transfer_memory.h"  #include "core/hle/result.h"  #include "core/hle/service/ipc_helpers.h"  #include "core/hle/service/jit/jit.h" +#include "core/hle/service/jit/jit_code_memory.h"  #include "core/hle/service/jit/jit_context.h"  #include "core/hle/service/server_manager.h"  #include "core/hle/service/service.h" @@ -23,10 +23,12 @@ struct CodeRange {  class IJitEnvironment final : public ServiceFramework<IJitEnvironment> {  public: -    explicit IJitEnvironment(Core::System& system_, Kernel::KProcess& process_, CodeRange user_rx, -                             CodeRange user_ro) -        : ServiceFramework{system_, "IJitEnvironment"}, process{&process_}, -          context{process->GetMemory()} { +    explicit IJitEnvironment(Core::System& system_, +                             Kernel::KScopedAutoObject<Kernel::KProcess>&& process_, +                             CodeMemory&& user_rx_, CodeMemory&& user_ro_) +        : ServiceFramework{system_, "IJitEnvironment"}, process{std::move(process_)}, +          user_rx{std::move(user_rx_)}, user_ro{std::move(user_ro_)}, +          context{system_.ApplicationMemory()} {          // clang-format off          static const FunctionInfo functions[] = {              {0, &IJitEnvironment::GenerateCode, "GenerateCode"}, @@ -39,10 +41,13 @@ public:          RegisterHandlers(functions);          // Identity map user code range into sysmodule context -        configuration.user_ro_memory = user_ro; -        configuration.user_rx_memory = user_rx; -        configuration.sys_ro_memory = user_ro; -        configuration.sys_rx_memory = user_rx; +        configuration.user_rx_memory.size = user_rx.GetSize(); +        configuration.user_rx_memory.offset = user_rx.GetAddress(); +        configuration.user_ro_memory.size = user_ro.GetSize(); +        configuration.user_ro_memory.offset = user_ro.GetAddress(); + +        configuration.sys_rx_memory = configuration.user_rx_memory; +        configuration.sys_ro_memory = configuration.user_ro_memory;      }      void GenerateCode(HLERequestContext& ctx) { @@ -318,6 +323,8 @@ private:      }      Kernel::KScopedAutoObject<Kernel::KProcess> process; +    CodeMemory user_rx; +    CodeMemory user_ro;      GuestCallbacks callbacks;      JITConfiguration configuration;      JITContext context; @@ -335,6 +342,7 @@ public:          RegisterHandlers(functions);      } +private:      void CreateJitEnvironment(HLERequestContext& ctx) {          LOG_DEBUG(Service_JIT, "called"); @@ -380,20 +388,35 @@ public:              return;          } -        const CodeRange user_rx{ -            .offset = GetInteger(rx_mem->GetSourceAddress()), -            .size = parameters.rx_size, -        }; +        CodeMemory rx, ro; +        Result res; -        const CodeRange user_ro{ -            .offset = GetInteger(ro_mem->GetSourceAddress()), -            .size = parameters.ro_size, -        }; +        res = rx.Initialize(*process, *rx_mem, parameters.rx_size, +                            Kernel::Svc::MemoryPermission::ReadExecute, generate_random); +        if (R_FAILED(res)) { +            LOG_ERROR(Service_JIT, "rx_mem could not be mapped for handle=0x{:08X}", rx_mem_handle); +            IPC::ResponseBuilder rb{ctx, 2}; +            rb.Push(res); +            return; +        } + +        res = ro.Initialize(*process, *ro_mem, parameters.ro_size, +                            Kernel::Svc::MemoryPermission::Read, 
generate_random); +        if (R_FAILED(res)) { +            LOG_ERROR(Service_JIT, "ro_mem could not be mapped for handle=0x{:08X}", ro_mem_handle); +            IPC::ResponseBuilder rb{ctx, 2}; +            rb.Push(res); +            return; +        }          IPC::ResponseBuilder rb{ctx, 2, 0, 1};          rb.Push(ResultSuccess); -        rb.PushIpcInterface<IJitEnvironment>(system, *process, user_rx, user_ro); +        rb.PushIpcInterface<IJitEnvironment>(system, std::move(process), std::move(rx), +                                             std::move(ro));      } + +private: +    std::mt19937_64 generate_random{};  };  void LoopProcess(Core::System& system) { diff --git a/src/core/hle/service/jit/jit_code_memory.cpp b/src/core/hle/service/jit/jit_code_memory.cpp new file mode 100644 index 000000000..2b480488a --- /dev/null +++ b/src/core/hle/service/jit/jit_code_memory.cpp @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/hle/service/jit/jit_code_memory.h" + +namespace Service::JIT { + +Result CodeMemory::Initialize(Kernel::KProcess& process, Kernel::KCodeMemory& code_memory, +                              size_t size, Kernel::Svc::MemoryPermission perm, +                              std::mt19937_64& generate_random) { +    auto& page_table = process.GetPageTable(); +    const u64 alias_code_start = +        GetInteger(page_table.GetAliasCodeRegionStart()) / Kernel::PageSize; +    const u64 alias_code_size = page_table.GetAliasCodeRegionSize() / Kernel::PageSize; + +    // NOTE: This will retry indefinitely until mapping the code memory succeeds. +    while (true) { +        // Generate a new trial address. +        const u64 mapped_address = +            (alias_code_start + (generate_random() % alias_code_size)) * Kernel::PageSize; + +        // Try to map the address +        R_TRY_CATCH(code_memory.MapToOwner(mapped_address, size, perm)) { +            R_CATCH(Kernel::ResultInvalidMemoryRegion) { +                // If we could not map here, retry. +                continue; +            } +        } +        R_END_TRY_CATCH; + +        // Set members. +        m_code_memory = std::addressof(code_memory); +        m_size = size; +        m_address = mapped_address; +        m_perm = perm; + +        // Open a new reference to the code memory. +        m_code_memory->Open(); + +        // We succeeded. 
+        R_SUCCEED(); +    } +} + +void CodeMemory::Finalize() { +    if (m_code_memory) { +        R_ASSERT(m_code_memory->UnmapFromOwner(m_address, m_size)); +        m_code_memory->Close(); +    } + +    m_code_memory = nullptr; +} + +} // namespace Service::JIT diff --git a/src/core/hle/service/jit/jit_code_memory.h b/src/core/hle/service/jit/jit_code_memory.h new file mode 100644 index 000000000..6376d4c4e --- /dev/null +++ b/src/core/hle/service/jit/jit_code_memory.h @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <random> + +#include "core/hle/kernel/k_code_memory.h" + +namespace Service::JIT { + +class CodeMemory { +public: +    YUZU_NON_COPYABLE(CodeMemory); + +    explicit CodeMemory() = default; + +    CodeMemory(CodeMemory&& rhs) { +        std::swap(m_code_memory, rhs.m_code_memory); +        std::swap(m_size, rhs.m_size); +        std::swap(m_address, rhs.m_address); +        std::swap(m_perm, rhs.m_perm); +    } + +    ~CodeMemory() { +        this->Finalize(); +    } + +public: +    Result Initialize(Kernel::KProcess& process, Kernel::KCodeMemory& code_memory, size_t size, +                      Kernel::Svc::MemoryPermission perm, std::mt19937_64& generate_random); +    void Finalize(); + +    size_t GetSize() const { +        return m_size; +    } + +    u64 GetAddress() const { +        return m_address; +    } + +private: +    Kernel::KCodeMemory* m_code_memory{}; +    size_t m_size{}; +    u64 m_address{}; +    Kernel::Svc::MemoryPermission m_perm{}; +}; + +} // namespace Service::JIT diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp index 6352b09a9..aa8aaa2d9 100644 --- a/src/core/hle/service/nvnflinger/nvnflinger.cpp +++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp @@ -67,7 +67,7 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_      // Schedule the screen composition events      multi_composition_event = Core::Timing::CreateEvent(          "ScreenComposition", -        [this](std::uintptr_t, s64 time, +        [this](s64 time,                 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {              vsync_signal.Set();              return std::chrono::nanoseconds(GetNextTicks()); @@ -75,7 +75,7 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_      single_composition_event = Core::Timing::CreateEvent(          "ScreenComposition", -        [this](std::uintptr_t, s64 time, +        [this](s64 time,                 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {              const auto lock_guard = Lock();              Compose(); @@ -93,11 +93,11 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_  Nvnflinger::~Nvnflinger() {      if (system.IsMulticore()) { -        system.CoreTiming().UnscheduleEvent(multi_composition_event, {}); +        system.CoreTiming().UnscheduleEvent(multi_composition_event);          vsync_thread.request_stop();          vsync_signal.Set();      } else { -        system.CoreTiming().UnscheduleEvent(single_composition_event, {}); +        system.CoreTiming().UnscheduleEvent(single_composition_event);      }      ShutdownLayers(); diff --git a/src/core/memory.cpp b/src/core/memory.cpp index c7eb32c19..8176a41be 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -10,6 +10,7 @@  #include "common/assert.h"  
#include "common/atomic_ops.h"  #include "common/common_types.h" +#include "common/heap_tracker.h"  #include "common/logging/log.h"  #include "common/page_table.h"  #include "common/scope_exit.h" @@ -52,10 +53,18 @@ struct Memory::Impl {          } else {              current_page_table->fastmem_arena = nullptr;          } + +#ifdef __linux__ +        heap_tracker.emplace(system.DeviceMemory().buffer); +        buffer = std::addressof(*heap_tracker); +#else +        buffer = std::addressof(system.DeviceMemory().buffer); +#endif      }      void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, -                         Common::PhysicalAddress target, Common::MemoryPermission perms) { +                         Common::PhysicalAddress target, Common::MemoryPermission perms, +                         bool separate_heap) {          ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);          ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));          ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", @@ -64,19 +73,20 @@ struct Memory::Impl {                   Common::PageType::Memory);          if (current_page_table->fastmem_arena) { -            system.DeviceMemory().buffer.Map(GetInteger(base), -                                             GetInteger(target) - DramMemoryMap::Base, size, perms); +            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms, +                        separate_heap);          }      } -    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) { +    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, +                     bool separate_heap) {          ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);          ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));          MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,                   Common::PageType::Unmapped);          if (current_page_table->fastmem_arena) { -            system.DeviceMemory().buffer.Unmap(GetInteger(base), size); +            buffer->Unmap(GetInteger(base), size, separate_heap);          }      } @@ -89,11 +99,6 @@ struct Memory::Impl {              return;          } -        const bool is_r = True(perms & Common::MemoryPermission::Read); -        const bool is_w = True(perms & Common::MemoryPermission::Write); -        const bool is_x = -            True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled(); -          u64 protect_bytes{};          u64 protect_begin{};          for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) { @@ -102,8 +107,7 @@ struct Memory::Impl {              switch (page_type) {              case Common::PageType::RasterizerCachedMemory:                  if (protect_bytes > 0) { -                    system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, -                                                         is_x); +                    buffer->Protect(protect_begin, protect_bytes, perms);                      protect_bytes = 0;                  }                  break; @@ -116,7 +120,7 @@ struct Memory::Impl {          }          if (protect_bytes > 0) { -            system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x); +            
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
 #include "common/assert.h"
 #include "common/atomic_ops.h"
 #include "common/common_types.h"
+#include "common/heap_tracker.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
 #include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
         } else {
             current_page_table->fastmem_arena = nullptr;
         }
+
+#ifdef __linux__
+        heap_tracker.emplace(system.DeviceMemory().buffer);
+        buffer = std::addressof(*heap_tracker);
+#else
+        buffer = std::addressof(system.DeviceMemory().buffer);
+#endif
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms) {
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
                  Common::PageType::Memory);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Map(GetInteger(base),
-                                             GetInteger(target) - DramMemoryMap::Base, size, perms);
+            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
+                        separate_heap);
         }
     }
 
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
 
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
+            buffer->Unmap(GetInteger(base), size, separate_heap);
         }
     }
@@ -89,11 +99,6 @@ struct Memory::Impl {
             return;
         }
 
-        const bool is_r = True(perms & Common::MemoryPermission::Read);
-        const bool is_w = True(perms & Common::MemoryPermission::Write);
-        const bool is_x =
-            True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
-
         u64 protect_bytes{};
         u64 protect_begin{};
         for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
             switch (page_type) {
             case Common::PageType::RasterizerCachedMemory:
                 if (protect_bytes > 0) {
-                    system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
-                                                         is_x);
+                    buffer->Protect(protect_begin, protect_bytes, perms);
                     protect_bytes = 0;
                 }
                 break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
         }
 
         if (protect_bytes > 0) {
-            system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+            buffer->Protect(protect_begin, protect_bytes, perms);
         }
     }
@@ -486,7 +490,9 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
+            const auto perm{debug ? Common::MemoryPermission{}
+                                  : Common::MemoryPermission::ReadWrite};
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            const bool is_read_enable =
-                !Settings::values.use_reactive_flushing.GetValue() || !cached;
-            system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+            Common::MemoryPermission perm{};
+            if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
+                perm |= Common::MemoryPermission::Read;
+            }
+            if (!cached) {
+                perm |= Common::MemoryPermission::Write;
+            }
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -856,6 +867,13 @@ struct Memory::Impl {
     std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
+
+    std::optional<Common::HeapTracker> heap_tracker;
+#ifdef __linux__
+    Common::HeapTracker* buffer{};
+#else
+    Common::HostMemory* buffer{};
+#endif
 };
 
 Memory::Memory(Core::System& system_) : system{system_} {
@@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
 }
 
 void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                             Common::PhysicalAddress target, Common::MemoryPermission perms) {
-    impl->MapMemoryRegion(page_table, base, size, target, perms);
+                             Common::PhysicalAddress target, Common::MemoryPermission perms,
+                             bool separate_heap) {
+    impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
 }
 
-void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
-    impl->UnmapRegion(page_table, base, size);
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         bool separate_heap) {
+    impl->UnmapRegion(page_table, base, size, separate_heap);
 }
 
 void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
 }
 
 bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
-    bool mapped = true;
+    [[maybe_unused]] bool mapped = true;
+    [[maybe_unused]] bool rasterizer = false;
+
     u8* const ptr = impl->GetPointerImpl(
         GetInteger(vaddr),
         [&] {
@@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
                       GetInteger(vaddr));
             mapped = false;
         },
-        [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
+        [&] {
+            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
+            rasterizer = true;
+        });
+
+#ifdef __linux__
+    if (!rasterizer && mapped) {
+        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
+    }
+#endif
+
     return mapped && ptr != nullptr;
 }
 
+bool Memory::InvalidateSeparateHeap(void* fault_address) {
+#ifdef __linux__
+    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
++#else
+    return false;
+#endif
+}
+
 } // namespace Core::Memory
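The InvalidateNCE change is the visible half of the Linux heap-tracking scheme: a fault on an address that is mapped in the page table but not yet resident in the fastmem arena becomes a DeferredMapSeparateHeap call rather than an error, while rasterizer-cached pages still invalidate the GPU instead. A toy model of the residency bookkeeping behind that call (illustrative only; the real HeapTracker also splits ranges, tracks ticks, and enforces the resident-map budget):

#include <cstddef>
#include <cstdio>
#include <map>

// Toy model: virtual offset -> residency flag for separate-heap regions.
std::map<std::size_t, bool> regions;

// Returns true when the fault was ours to handle (a known, non-resident
// heap region), mirroring the bool contract of DeferredMapSeparateHeap.
bool ensure_resident(std::size_t virtual_offset) {
    const auto it = regions.find(virtual_offset);
    if (it == regions.end() || it->second) {
        return false; // unknown address, or already mapped
    }
    it->second = true; // the real code performs the host mapping here
    return true;
}

int main() {
    regions[0x5000] = false;                      // mapped lazily, not yet resident
    std::printf("%d\n", ensure_resident(0x5000)); // 1: first touch maps it in
    std::printf("%d\n", ensure_resident(0x5000)); // 0: already resident
    std::printf("%d\n", ensure_resident(0x9000)); // 0: not a heap mapping
}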
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -86,7 +86,8 @@ public:
      * @param perms      The permissions to map the memory with.
      */
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms);
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap);
 
     /**
      * Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
      * @param base       The address to begin unmapping at.
      * @param size       The amount of bytes to unmap.
      */
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap);
 
     /**
      * Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
     void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
+    bool InvalidateSeparateHeap(void* fault_address);
     void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
 
 private:
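The new InvalidateSeparateHeap entry point takes a raw host pointer, which implies it is meant to be tried from the host fault handler before a fault is declared fatal. None of the signal plumbing appears in this diff, so the Linux-only sketch below is hypothetical; it only illustrates the "attempt deferred map, otherwise re-raise" ordering:

#include <csignal>
#include <signal.h>

// Hypothetical forwarder standing in for a call to
// Core::Memory::Memory::InvalidateSeparateHeap(fault_address).
static bool TryMapFaultAddress(void* fault_address) {
    (void)fault_address;
    return false; // would return true when a deferred heap page was mapped
}

static void FaultHandler(int sig, siginfo_t* info, void*) {
    if (TryMapFaultAddress(info->si_addr)) {
        return; // page is now resident; the faulting instruction retries
    }
    std::signal(sig, SIG_DFL); // genuine crash: restore default and re-raise
    std::raise(sig);
}

int main() {
    struct sigaction sa {};
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = FaultHandler;
    sigaction(SIGSEGV, &sa, nullptr);
}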
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index 3fc4024dc..7bc5b5ae5 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -190,15 +190,15 @@ CheatEngine::CheatEngine(System& system_, std::vector<CheatEntry> cheats_,
 }
 
 CheatEngine::~CheatEngine() {
-    core_timing.UnscheduleEvent(event, 0);
+    core_timing.UnscheduleEvent(event);
 }
 
 void CheatEngine::Initialize() {
     event = Core::Timing::CreateEvent(
         "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
-        [this](std::uintptr_t user_data, s64 time,
+        [this](s64 time,
               std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
-            FrameCallback(user_data, ns_late);
+            FrameCallback(ns_late);
             return std::nullopt;
         });
     core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event);
@@ -239,7 +239,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> reload_cheats) {
 
 MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));
 
-void CheatEngine::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) {
+void CheatEngine::FrameCallback(std::chrono::nanoseconds ns_late) {
     if (is_pending_reload.exchange(false)) {
         vm.LoadProgram(cheats);
     }
diff --git a/src/core/memory/cheat_engine.h b/src/core/memory/cheat_engine.h
index 284abdd28..ced2168d1 100644
--- a/src/core/memory/cheat_engine.h
+++ b/src/core/memory/cheat_engine.h
@@ -70,7 +70,7 @@ public:
     void Reload(std::vector<CheatEntry> reload_cheats);
 
 private:
-    void FrameCallback(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
+    void FrameCallback(std::chrono::nanoseconds ns_late);
 
     DmntCheatVm vm;
     CheatProcessMetadata metadata;
TEST_CASE("HostMemory: Simple map", "[common]") {  TEST_CASE("HostMemory: Simple mirror map", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x5000, 0x3000, 0x2000, PERMS); -    mem.Map(0x8000, 0x4000, 0x1000, PERMS); +    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP); +    mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP);      volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;      volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000; @@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") {  TEST_CASE("HostMemory: Simple unmap", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x5000, 0x3000, 0x2000, PERMS); +    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);      volatile u8* const data = mem.VirtualBasePointer() + 0x5000;      data[75] = 50;      REQUIRE(data[75] == 50); -    mem.Unmap(0x5000, 0x2000); +    mem.Unmap(0x5000, 0x2000, HEAP);  }  TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x5000, 0x3000, 0x2000, PERMS); +    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);      volatile u8* const data = mem.VirtualBasePointer() + 0x5000;      data[0] = 50;      REQUIRE(data[0] == 50); -    mem.Unmap(0x5000, 0x2000); +    mem.Unmap(0x5000, 0x2000, HEAP); -    mem.Map(0x5000, 0x3000, 0x2000, PERMS); +    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);      REQUIRE(data[0] == 50); -    mem.Map(0x7000, 0x2000, 0x5000, PERMS); +    mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP);      REQUIRE(data[0x3000] == 50);  }  TEST_CASE("HostMemory: Nieche allocation", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x0000, 0, 0x20000, PERMS); -    mem.Unmap(0x0000, 0x4000); -    mem.Map(0x1000, 0, 0x2000, PERMS); -    mem.Map(0x3000, 0, 0x1000, PERMS); -    mem.Map(0, 0, 0x1000, PERMS); +    mem.Map(0x0000, 0, 0x20000, PERMS, HEAP); +    mem.Unmap(0x0000, 0x4000, HEAP); +    mem.Map(0x1000, 0, 0x2000, PERMS, HEAP); +    mem.Map(0x3000, 0, 0x1000, PERMS, HEAP); +    mem.Map(0, 0, 0x1000, PERMS, HEAP);  }  TEST_CASE("HostMemory: Full unmap", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x8000, 0, 0x4000, PERMS); -    mem.Unmap(0x8000, 0x4000); -    mem.Map(0x6000, 0, 0x16000, PERMS); +    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP); +    mem.Unmap(0x8000, 0x4000, HEAP); +    mem.Map(0x6000, 0, 0x16000, PERMS, HEAP);  }  TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x0000, 0, 0x4000, PERMS); -    mem.Unmap(0x2000, 0x4000); -    mem.Map(0x2000, 0x80000, 0x4000, PERMS); +    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP); +    mem.Unmap(0x2000, 0x4000, HEAP); +    mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP);  }  TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x8000, 0, 0x4000, PERMS); -    mem.Unmap(0x6000, 0x4000); -    mem.Map(0x8000, 0, 0x2000, PERMS); +    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP); +    mem.Unmap(0x6000, 0x4000, HEAP); +    mem.Map(0x8000, 0, 0x2000, PERMS, HEAP);  }  TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {      HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); -    mem.Map(0x0000, 0, 0x4000, PERMS); -    mem.Map(0x4000, 0, 0x1b000, PERMS); -    mem.Unmap(0x3000, 0x1c000); -    mem.Map(0x3000, 0, 0x20000, PERMS); +    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP); +    mem.Map(0x4000, 0, 0x1b000, 
diff --git a/src/tests/common/host_memory.cpp b/src/tests/common/host_memory.cpp
index 1a28e862b..cb040c942 100644
--- a/src/tests/common/host_memory.cpp
+++ b/src/tests/common/host_memory.cpp
@@ -12,6 +12,7 @@ using namespace Common::Literals;
 static constexpr size_t VIRTUAL_SIZE = 1ULL << 39;
 static constexpr size_t BACKING_SIZE = 4_GiB;
 static constexpr auto PERMS = Common::MemoryPermission::ReadWrite;
+static constexpr auto HEAP = false;
 
 TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
     { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
@@ -20,7 +21,7 @@ TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
 
 TEST_CASE("HostMemory: Simple map", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x8000, 0x1000, PERMS);
+    mem.Map(0x5000, 0x8000, 0x1000, PERMS, HEAP);
 
     volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
     data[0] = 50;
@@ -29,8 +30,8 @@ TEST_CASE("HostMemory: Simple map", "[common]") {
 
 TEST_CASE("HostMemory: Simple mirror map", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
-    mem.Map(0x8000, 0x4000, 0x1000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
+    mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP);
 
     volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;
     volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000;
@@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") {
 
 TEST_CASE("HostMemory: Simple unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
 
     volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
     data[75] = 50;
     REQUIRE(data[75] == 50);
 
-    mem.Unmap(0x5000, 0x2000);
+    mem.Unmap(0x5000, 0x2000, HEAP);
 }
 
 TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
 
     volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
     data[0] = 50;
     REQUIRE(data[0] == 50);
 
-    mem.Unmap(0x5000, 0x2000);
+    mem.Unmap(0x5000, 0x2000, HEAP);
 
-    mem.Map(0x5000, 0x3000, 0x2000, PERMS);
+    mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
     REQUIRE(data[0] == 50);
 
-    mem.Map(0x7000, 0x2000, 0x5000, PERMS);
+    mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP);
     REQUIRE(data[0x3000] == 50);
 }
 
 TEST_CASE("HostMemory: Nieche allocation", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x20000, PERMS);
-    mem.Unmap(0x0000, 0x4000);
-    mem.Map(0x1000, 0, 0x2000, PERMS);
-    mem.Map(0x3000, 0, 0x1000, PERMS);
-    mem.Map(0, 0, 0x1000, PERMS);
+    mem.Map(0x0000, 0, 0x20000, PERMS, HEAP);
+    mem.Unmap(0x0000, 0x4000, HEAP);
+    mem.Map(0x1000, 0, 0x2000, PERMS, HEAP);
+    mem.Map(0x3000, 0, 0x1000, PERMS, HEAP);
+    mem.Map(0, 0, 0x1000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Full unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x8000, 0x4000);
-    mem.Map(0x6000, 0, 0x16000, PERMS);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x8000, 0x4000, HEAP);
+    mem.Map(0x6000, 0, 0x16000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x4000, PERMS);
-    mem.Unmap(0x2000, 0x4000);
-    mem.Map(0x2000, 0x80000, 0x4000, PERMS);
+    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x2000, 0x4000, HEAP);
+    mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x6000, 0x4000);
-    mem.Map(0x8000, 0, 0x2000, PERMS);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x6000, 0x4000, HEAP);
+    mem.Map(0x8000, 0, 0x2000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x4000, PERMS);
-    mem.Map(0x4000, 0, 0x1b000, PERMS);
-    mem.Unmap(0x3000, 0x1c000);
-    mem.Map(0x3000, 0, 0x20000, PERMS);
+    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x4000, 0, 0x1b000, PERMS, HEAP);
+    mem.Unmap(0x3000, 0x1c000, HEAP);
+    mem.Map(0x3000, 0, 0x20000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Unmap between placeholders", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x0000, 0, 0x4000, PERMS);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
-    mem.Unmap(0x2000, 0x4000);
-    mem.Map(0x2000, 0, 0x4000, PERMS);
+    mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x2000, 0x4000, HEAP);
+    mem.Map(0x2000, 0, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Unmap to origin", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x4000, 0x4000);
-    mem.Map(0, 0, 0x4000, PERMS);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x4000, 0x4000, HEAP);
+    mem.Map(0, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Unmap to right", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0, 0x4000, PERMS);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
-    mem.Unmap(0x8000, 0x4000);
-    mem.Map(0x8000, 0, 0x4000, PERMS);
+    mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
+    mem.Unmap(0x8000, 0x4000, HEAP);
+    mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
 }
 
 TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x1000] = 17;
 
-    mem.Unmap(0x6000, 0x2000);
+    mem.Unmap(0x6000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x1000] == 17);
 }
 
 TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x3000] = 19;
     ptr[0x3fff] = 12;
 
-    mem.Unmap(0x4000, 0x2000);
+    mem.Unmap(0x4000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x3000] == 19);
     REQUIRE(ptr[0x3fff] == 12);
@@ -157,13 +158,13 @@ TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
 
 TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x4000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x0000] = 19;
     ptr[0x3fff] = 12;
 
-    mem.Unmap(0x1000, 0x2000);
+    mem.Unmap(0x1000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x0000] == 19);
     REQUIRE(ptr[0x3fff] == 12);
@@ -171,14 +172,14 @@ TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
 
 TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") {
     HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
-    mem.Map(0x4000, 0x10000, 0x2000, PERMS);
-    mem.Map(0x6000, 0x20000, 0x2000, PERMS);
+    mem.Map(0x4000, 0x10000, 0x2000, PERMS, HEAP);
+    mem.Map(0x6000, 0x20000, 0x2000, PERMS, HEAP);
 
     volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
     ptr[0x0000] = 19;
     ptr[0x3fff] = 12;
 
-    mem.Unmap(0x5000, 0x2000);
+    mem.Unmap(0x5000, 0x2000, HEAP);
 
     REQUIRE(ptr[0x0000] == 19);
     REQUIRE(ptr[0x3fff] == 12);
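Note that the tests pin HEAP to false, so they only exercise the widened signatures, not separate-heap behavior itself. A variant flagged as separate heap could look like the hypothetical test below (sketch only; how HostMemory treats the flag on its own, without a tracker layered on top, is outside this diff):

TEST_CASE("HostMemory: Separate-heap map (hypothetical)", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);

    // Identical call shape, but the final argument marks the mapping as
    // separate heap, the case a tracker such as Common::HeapTracker defers.
    mem.Map(0x5000, 0x8000, 0x1000, PERMS, true);

    volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
    data[0] = 50;
    REQUIRE(data[0] == 50);

    mem.Unmap(0x5000, 0x1000, true);
}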
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index f08afbf9a..81898a1d3 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -16,20 +16,16 @@
 
 namespace {
 // Numbers are chosen randomly to make sure the correct one is given.
-constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
 constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}};
 std::array<s64, 5> delays{};
-
-std::bitset<CB_IDS.size()> callbacks_ran_flags;
+std::bitset<5> callbacks_ran_flags;
 u64 expected_callback = 0;
 
 template <unsigned int IDX>
-std::optional<std::chrono::nanoseconds> HostCallbackTemplate(std::uintptr_t user_data, s64 time,
+std::optional<std::chrono::nanoseconds> HostCallbackTemplate(s64 time,
                                                              std::chrono::nanoseconds ns_late) {
-    static_assert(IDX < CB_IDS.size(), "IDX out of range");
+    static_assert(IDX < callbacks_ran_flags.size(), "IDX out of range");
     callbacks_ran_flags.set(IDX);
-    REQUIRE(CB_IDS[IDX] == user_data);
-    REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]);
     delays[IDX] = ns_late.count();
     ++expected_callback;
     return std::nullopt;
@@ -76,7 +72,7 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
         const u64 order = calls_order[i];
         const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
 
-        core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]);
+        core_timing.ScheduleEvent(future_ns, events[order]);
     }
     /// test pause
     REQUIRE(callbacks_ran_flags.none());
@@ -118,7 +114,7 @@ TEST_CASE("CoreTiming[BasicOrderNoPausing]", "[core]") {
     for (std::size_t i = 0; i < events.size(); i++) {
         const u64 order = calls_order[i];
         const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
-        core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]);
+        core_timing.ScheduleEvent(future_ns, events[order]);
    }
 
     const u64 end = core_timing.GetGlobalTimeNs().count();
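With the user_data argument removed, the test callbacks can no longer compare a runtime ID against CB_IDS; identity is instead fixed at compile time by the template index, which is why the bitset alone still verifies that every callback ran. Reduced to a standalone sketch:

#include <bitset>
#include <cstdio>
#include <string>

std::bitset<5> ran;

template <unsigned int IDX>
void Callback() {
    // The compile-time index replaces the old runtime user_data ID.
    static_assert(IDX < ran.size(), "IDX out of range");
    ran.set(IDX);
}

int main() {
    Callback<2>();
    Callback<0>();
    Callback<4>();
    std::printf("ran: %s\n", ran.to_string().c_str()); // prints "ran: 10101"
}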
