65 files changed, 3612 insertions, 1205 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b625743ea..c6fc5dd9e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -218,11 +218,11 @@ if(ENABLE_QT)
     set(QT_VERSION 5.15)
     # Check for system Qt on Linux, fallback to bundled Qt
-    if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+    if (UNIX AND NOT APPLE)
         if (NOT YUZU_USE_BUNDLED_QT)
             find_package(Qt5 ${QT_VERSION} COMPONENTS Widgets DBus Multimedia)
         endif()
-        if (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT)
+        if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT))
             # Check for dependencies, then enable bundled Qt download
             # Check that the system GLIBCXX version is compatible
@@ -323,7 +323,7 @@ if(ENABLE_QT)
         set(YUZU_QT_NO_CMAKE_SYSTEM_PATH "NO_CMAKE_SYSTEM_PATH")
     endif()
-    if ((${CMAKE_SYSTEM_NAME} STREQUAL "Linux") AND YUZU_USE_BUNDLED_QT)
+    if (UNIX AND NOT APPLE AND YUZU_USE_BUNDLED_QT)
         find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia DBus ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
     else()
         find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 0a560ebb7..8173462cb 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -151,6 +151,7 @@ void UpdateRescalingInfo() {
         ASSERT(false);
         info.up_scale = 1;
         info.down_shift = 0;
+        break;
     }
     info.up_factor = static_cast<f32>(info.up_scale) / (1U << info.down_shift);
     info.down_factor = static_cast<f32>(1U << info.down_shift) / info.up_scale;
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 113e663b5..f6e082c36 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,11 +190,13 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_debug.h
     hle/kernel/k_dynamic_page_manager.h
     hle/kernel/k_dynamic_resource_manager.h
     hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
+    hle/kernel/k_event_info.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
     hle/kernel/k_interrupt_manager.cpp
@@ -222,6 +224,8 @@ add_library(core STATIC
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
+    hle/kernel/k_page_table_manager.h
+    hle/kernel/k_page_table_slab_heap.h
     hle/kernel/k_port.cpp
     hle/kernel/k_port.h
     hle/kernel/k_priority_queue.h
@@ -254,6 +258,8 @@ add_library(core STATIC
     hle/kernel/k_synchronization_object.cpp
     hle/kernel/k_synchronization_object.h
     hle/kernel/k_system_control.h
+    hle/kernel/k_system_resource.cpp
+    hle/kernel/k_system_resource.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
     hle/kernel/k_thread_local_page.cpp
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h
index 3bb111748..a86bec252 100644
--- a/src/core/hle/ipc_helpers.h
+++ b/src/core/hle/ipc_helpers.h
@@ -149,7 +149,7 @@ public:
             context->AddDomainObject(std::move(iface));
         } else {
             kernel.CurrentProcess()->GetResourceLimit()->Reserve(
-                Kernel::LimitableResource::Sessions, 1);
+                Kernel::LimitableResource::SessionCountMax, 1);

             auto* session = Kernel::KSession::Create(kernel);
             session->Initialize(nullptr, iface->GetServiceName());
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index fe375769e..4b717d091 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -9,6 +9,10 @@ namespace Kernel::Board::Nintendo::Nx {

 class KSystemControl {
 public:
+    // This can be overridden as needed.
+    static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB
+
+public:
     class Init {
     public:
         // Initialization.
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index fd354d484..06010b8d1 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -27,16 +27,12 @@ namespace Kernel {

 SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
                                              ServiceThreadType thread_type)
-    : kernel{kernel_} {
-    if (thread_type == ServiceThreadType::CreateNew) {
-        service_thread = kernel.CreateServiceThread(service_name_);
-    } else {
-        service_thread = kernel.GetDefaultServiceThread();
-    }
-}
+    : kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew
+                                          ? kernel.CreateServiceThread(service_name_)
+                                          : kernel.GetDefaultServiceThread()} {}

 SessionRequestHandler::~SessionRequestHandler() {
-    kernel.ReleaseServiceThread(service_thread.lock());
+    kernel.ReleaseServiceThread(service_thread);
 }

 void SessionRequestHandler::AcceptSession(KServerPort* server_port) {
@@ -49,7 +45,7 @@ void SessionRequestHandler::AcceptSession(KServerPort* server_port) {

 void SessionRequestHandler::RegisterSession(KServerSession* server_session,
                                             std::shared_ptr<SessionRequestManager> manager) {
     manager->SetSessionHandler(shared_from_this());
-    service_thread.lock()->RegisterServerSession(server_session, manager);
+    service_thread.RegisterServerSession(server_session, manager);
     server_session->Close();
 }
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 67da8e7e1..d87be72d6 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -82,13 +82,13 @@ public:
     void RegisterSession(KServerSession* server_session,
                          std::shared_ptr<SessionRequestManager> manager);

-    std::weak_ptr<ServiceThread> GetServiceThread() const {
+    ServiceThread& GetServiceThread() const {
         return service_thread;
     }

 protected:
     KernelCore& kernel;
-    std::weak_ptr<ServiceThread> service_thread;
+    ServiceThread& service_thread;
 };

 using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
@@ -154,7 +154,7 @@ public:
         session_handler = std::move(handler);
     }

-    std::weak_ptr<ServiceThread> GetServiceThread() const {
+    ServiceThread& GetServiceThread() const {
         return session_handler->GetServiceThread();
     }
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 477e4e407..bda098511 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -10,7 +10,9 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_code_memory.h"
+#include "core/hle/kernel/k_debug.h"
 #include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_event_info.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_page_buffer.h"
@@ -22,6 +24,7 @@
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_shared_memory_info.h"
 #include "core/hle/kernel/k_system_control.h"
+#include "core/hle/kernel/k_system_resource.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_transfer_memory.h"
@@ -44,7 +47,10 @@ namespace Kernel::Init {
     HANDLER(KThreadLocalPage,                                                                      \
             (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8),             \
             ##__VA_ARGS__)                                                                         \
-    HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
+    HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)                           \
+    HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__)                 \
+    HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__)                                           \
+    HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)

 namespace {
@@ -73,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5;
 constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
 constexpr size_t SlabCountKIoPool = 1;
 constexpr size_t SlabCountKIoRegion = 6;
+constexpr size_t SlabcountKSessionRequestMappings = 40;

-constexpr size_t SlabCountExtraKThread = 160;
+constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;
+
+namespace test {
+
+static_assert(KernelPageBufferHeapSize ==
+              2 * PageSize + (SlabCountKProcess + SlabCountKThread +
+                              (SlabCountKProcess + SlabCountKThread) / 8) *
+                                 PageSize);
+static_assert(KernelPageBufferAdditionalSize ==
+              (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);
+
+} // namespace test

 /// Helper function to translate from the slab virtual address to the reserved location in physical
 /// memory.
@@ -109,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
 }

 size_t CalculateSlabHeapGapSize() {
-    constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
+    constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
     static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
     return KernelSlabHeapGapSize;
 }
@@ -134,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
         .num_KDebug = SlabCountKDebug,
         .num_KIoPool = SlabCountKIoPool,
         .num_KIoRegion = SlabCountKIoRegion,
+        .num_KSessionRequestMappings = SlabcountKSessionRequestMappings,
     };
 }
@@ -164,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
     return size;
 }

-void InitializeKPageBufferSlabHeap(Core::System& system) {
-    auto& kernel = system.Kernel();
-
-    const auto& counts = kernel.SlabResourceCounts();
-    const size_t num_pages =
-        counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
-    const size_t slab_size = num_pages * PageSize;
-
-    // Reserve memory from the system resource limit.
-    ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
-
-    // Allocate memory for the slab.
-    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
-        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
-    const PAddr slab_address =
-        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
-    ASSERT(slab_address != 0);
-
-    // Initialize the slabheap.
-    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
-                                    slab_size);
-}
-
 void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     auto& kernel = system.Kernel();
@@ -258,3 +254,30 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
 }

 } // namespace Kernel::Init
+
+namespace Kernel {
+
+void KPageBufferSlabHeap::Initialize(Core::System& system) {
+    auto& kernel = system.Kernel();
+    const auto& counts = kernel.SlabResourceCounts();
+    const size_t num_pages =
+        counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+    const size_t slab_size = num_pages * PageSize;
+
+    // Reserve memory from the system resource limit.
+    ASSERT(
+        kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemoryMax, slab_size));
+
+    // Allocate memory for the slab.
+    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
+        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
+    const PAddr slab_address =
+        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
+    ASSERT(slab_address != 0);
+
+    // Initialize the slabheap.
+    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
+                                    slab_size);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h
index 13be63c87..5e22821bc 100644
--- a/src/core/hle/kernel/init/init_slab_setup.h
+++ b/src/core/hle/kernel/init/init_slab_setup.h
@@ -33,11 +33,11 @@ struct KSlabResourceCounts {
     size_t num_KDebug;
     size_t num_KIoPool;
     size_t num_KIoRegion;
+    size_t num_KSessionRequestMappings;
 };

 void InitializeSlabResourceCounts(KernelCore& kernel);
 size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
-void InitializeKPageBufferSlabHeap(Core::System& system);
 void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);

 } // namespace Kernel::Init
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index 10265c23c..a850db3c4 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -16,6 +16,7 @@
 #include "core/hle/kernel/k_session.h"
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_system_resource.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_transfer_memory.h"
@@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject,
 // static_assert(std::is_final_v<KCodeMemory> &&
 //              std::is_base_of_v<KAutoObject, KCodeMemory>);

+static_assert(std::is_base_of_v<KAutoObject, KSystemResource>);
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index ab20e00ff..e75b1c035 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
@@ -10,6 +10,8 @@ namespace Kernel {

 class KAutoObject;

+class KSystemResource;
+
 class KClassTokenGenerator {
 public:
     using TokenBaseType = u16;
@@ -58,7 +60,7 @@ private:
         if constexpr (std::is_same<T, KAutoObject>::value) {
             static_assert(T::ObjectType == ObjectType::KAutoObject);
             return 0;
-        } else if constexpr (!std::is_final<T>::value) {
+        } else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) {
             static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
                           T::ObjectType < ObjectType::BaseClassesEnd);
             constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
@@ -108,6 +110,8 @@ public:
         KSessionRequest,
         KCodeMemory,

+        KSystemResource,
+
         // NOTE: True order for these has not been determined yet.
         KAlpha,
         KBeta,
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index eaa2e094c..2ec623a58 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -61,7 +61,7 @@ bool KClientPort::IsSignaled() const {
 Result KClientPort::CreateSession(KClientSession** out) {
     // Reserve a new session from the resource limit.
     KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
-                                                   LimitableResource::Sessions);
+                                                   LimitableResource::SessionCountMax);
     R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);

     // Update the session counts.
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h
new file mode 100644
index 000000000..e3a0689c8
--- /dev/null
+++ b/src/core/hle/kernel/k_debug.h
@@ -0,0 +1,20 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/slab_helpers.h"
+
+namespace Kernel {
+
+class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> {
+    KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
+
+public:
+    explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+
+    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index 9076c8fa3..ac80d60a1 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -3,6 +3,8 @@

 #pragma once

+#include <vector>
+
 #include "common/alignment.h"
 #include "common/common_types.h"
 #include "core/hle/kernel/k_page_bitmap.h"
@@ -33,28 +35,36 @@ public:
         return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
     }

-    Result Initialize(VAddr addr, size_t sz) {
+    Result Initialize(VAddr memory, size_t size, size_t align) {
         // We need to have positive size.
-        R_UNLESS(sz > 0, ResultOutOfMemory);
-        m_backing_memory.resize(sz);
+        R_UNLESS(size > 0, ResultOutOfMemory);
+        m_backing_memory.resize(size);
+
+        // Set addresses.
+        m_address = memory;
+        m_aligned_address = Common::AlignDown(memory, align);

-        // Calculate management overhead.
-        const size_t management_size =
-            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
-        const size_t allocatable_size = sz - management_size;
+        // Calculate extents.
+        const size_t managed_size = m_address + size - m_aligned_address;
+        const size_t overhead_size = Common::AlignUp(
+            KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)),
+            sizeof(PageBuffer));
+        R_UNLESS(overhead_size < size, ResultOutOfMemory);

         // Set tracking fields.
-        m_address = addr;
-        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
-        m_count = allocatable_size / sizeof(PageBuffer);
-        R_UNLESS(m_count > 0, ResultOutOfMemory);
+        m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer));
+        m_count = m_size / sizeof(PageBuffer);

         // Clear the management region.
-        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
-        std::memset(management_ptr, 0, management_size);
+        u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size);
+        std::memset(management_ptr, 0, overhead_size);

         // Initialize the bitmap.
-        m_page_bitmap.Initialize(management_ptr, m_count);
+        const size_t allocatable_region_size =
+            (m_address + size - overhead_size) - m_aligned_address;
+        ASSERT(allocatable_region_size >= sizeof(PageBuffer));
+
+        m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));

         // Free the pages to the bitmap.
         for (size_t i = 0; i < m_count; i++) {
@@ -62,7 +72,8 @@ public:
             std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);

             // Set the bit for the free page.
-            m_page_bitmap.SetBit(i);
+            m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) /
+                                 sizeof(PageBuffer));
         }

         R_SUCCEED();
@@ -101,7 +112,28 @@ public:
         m_page_bitmap.ClearBit(offset);
         m_peak = std::max(m_peak, (++m_used));

-        return GetPointer<PageBuffer>(m_address) + offset;
+        return GetPointer<PageBuffer>(m_aligned_address) + offset;
+    }
+
+    PageBuffer* Allocate(size_t count) {
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Find a random free block.
+        s64 soffset = m_page_bitmap.FindFreeRange(count);
+        if (soffset < 0) [[likely]] {
+            return nullptr;
+        }
+
+        const size_t offset = static_cast<size_t>(soffset);
+
+        // Update our tracking.
+        m_page_bitmap.ClearRange(offset, count);
+        m_used += count;
+        m_peak = std::max(m_peak, m_used);
+
+        return GetPointer<PageBuffer>(m_aligned_address) + offset;
     }

     void Free(PageBuffer* pb) {
@@ -113,7 +145,7 @@ public:
         KScopedSpinLock lk(m_lock);

         // Set the bit for the free page.
-        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
+        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
         m_page_bitmap.SetBit(offset);

         // Decrement our used count.
@@ -127,6 +159,7 @@ private:
     size_t m_peak{};
     size_t m_count{};
     VAddr m_address{};
+    VAddr m_aligned_address{};
     size_t m_size{};

     // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
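The KDynamicPageManager rework above keys its free-page bitmap to an address aligned down from the raw region start, so that bitmap offsets translate directly into naturally aligned pages for the new multi-page Allocate(count) path. The following is a minimal standalone sketch of that index math; the names (AlignDown, PageRegion) are illustrative only and not part of the emulator's code:

    #include <cassert>
    #include <cstdint>

    // Bit indices are computed relative to the aligned base, not the raw
    // region start, so Initialize(), Allocate(), and Free() agree on numbering.
    constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
        return value & ~(align - 1);
    }

    struct PageRegion {
        std::uint64_t address;         // raw start of the backing region (m_address)
        std::uint64_t aligned_address; // start rounded down to align (m_aligned_address)
        std::uint64_t page_size;       // sizeof(PageBuffer)

        // Bit set for page i of the raw region, as in the SetBit call above.
        std::uint64_t BitIndexForPage(std::uint64_t i) const {
            return (address + i * page_size - aligned_address) / page_size;
        }

        // Address recovered from a bitmap offset, mirroring
        // GetPointer<PageBuffer>(m_aligned_address) + offset.
        std::uint64_t AddressForBitIndex(std::uint64_t offset) const {
            return aligned_address + offset * page_size;
        }
    };

    int main() {
        // A region beginning three pages past a 16-page alignment boundary.
        const PageRegion r{0x13000, AlignDown(0x13000, 0x10000), 0x1000};
        assert(r.BitIndexForPage(0) == 3);          // first raw page maps to bit 3
        assert(r.AddressForBitIndex(3) == 0x13000); // and back again
        return 0;
    }

Under this scheme a run of count consecutive bits always corresponds to count physically consecutive pages measured from the aligned base, which is what lets FindFreeRange(count) satisfy aligned multi-page requests.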
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h index 1ce517e8e..b6a27d648 100644 --- a/src/core/hle/kernel/k_dynamic_resource_manager.h +++ b/src/core/hle/kernel/k_dynamic_resource_manager.h @@ -6,6 +6,7 @@  #include "common/common_funcs.h"  #include "core/hle/kernel/k_dynamic_slab_heap.h"  #include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_page_group.h"  namespace Kernel { @@ -51,8 +52,10 @@ private:      DynamicSlabType* m_slab_heap{};  }; +class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {};  class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; +using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType;  using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;  } // namespace Kernel diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index 78ca59463..27f70e5c5 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp @@ -50,7 +50,7 @@ Result KEvent::Clear() {  void KEvent::PostDestroy(uintptr_t arg) {      // Release the event count resource the owner process holds.      KProcess* owner = reinterpret_cast<KProcess*>(arg); -    owner->GetResourceLimit()->Release(LimitableResource::Events, 1); +    owner->GetResourceLimit()->Release(LimitableResource::EventCountMax, 1);      owner->Close();  } diff --git a/src/core/hle/kernel/k_event_info.h b/src/core/hle/kernel/k_event_info.h new file mode 100644 index 000000000..25b3ff594 --- /dev/null +++ b/src/core/hle/kernel/k_event_info.h @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <array> + +#include <boost/intrusive/list.hpp> + +#include "core/hle/kernel/slab_helpers.h" +#include "core/hle/kernel/svc_types.h" + +namespace Kernel { + +class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> { +public: +    struct InfoCreateThread { +        u32 thread_id{}; +        uintptr_t tls_address{}; +    }; + +    struct InfoExitProcess { +        Svc::ProcessExitReason reason{}; +    }; + +    struct InfoExitThread { +        Svc::ThreadExitReason reason{}; +    }; + +    struct InfoException { +        Svc::DebugException exception_type{}; +        s32 exception_data_count{}; +        uintptr_t exception_address{}; +        std::array<uintptr_t, 4> exception_data{}; +    }; + +    struct InfoSystemCall { +        s64 tick{}; +        s32 id{}; +    }; + +public: +    KEventInfo() = default; +    ~KEventInfo() = default; + +public: +    Svc::DebugEvent event{}; +    u32 thread_id{}; +    u32 flags{}; +    bool is_attached{}; +    bool continue_flag{}; +    bool ignore_continue{}; +    bool close_once{}; +    union { +        InfoCreateThread create_thread; +        InfoExitProcess exit_process; +        InfoExitThread exit_thread; +        InfoException exception; +        InfoSystemCall system_call; +    } info{}; +    KThread* debug_thread{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp index e830ca46e..1c7a766c8 100644 --- a/src/core/hle/kernel/k_handle_table.cpp +++ b/src/core/hle/kernel/k_handle_table.cpp @@ -5,14 +5,11 @@  namespace Kernel { -KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {} -KHandleTable::~KHandleTable() = default; -  Result KHandleTable::Finalize() {      // Get the 
table and clear our record of it.      u16 saved_table_size = 0;      { -        KScopedDisableDispatch dd(kernel); +        KScopedDisableDispatch dd{m_kernel};          KScopedSpinLock lk(m_lock);          std::swap(m_table_size, saved_table_size); @@ -25,28 +22,28 @@ Result KHandleTable::Finalize() {          }      } -    return ResultSuccess; +    R_SUCCEED();  }  bool KHandleTable::Remove(Handle handle) {      // Don't allow removal of a pseudo-handle. -    if (Svc::IsPseudoHandle(handle)) { +    if (Svc::IsPseudoHandle(handle)) [[unlikely]] {          return false;      }      // Handles must not have reserved bits set.      const auto handle_pack = HandlePack(handle); -    if (handle_pack.reserved != 0) { +    if (handle_pack.reserved != 0) [[unlikely]] {          return false;      }      // Find the object and free the entry.      KAutoObject* obj = nullptr;      { -        KScopedDisableDispatch dd(kernel); +        KScopedDisableDispatch dd{m_kernel};          KScopedSpinLock lk(m_lock); -        if (this->IsValidHandle(handle)) { +        if (this->IsValidHandle(handle)) [[likely]] {              const auto index = handle_pack.index;              obj = m_objects[index]; @@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) {      }      // Close the object. -    kernel.UnregisterInUseObject(obj); +    m_kernel.UnregisterInUseObject(obj);      obj->Close();      return true;  }  Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { -    KScopedDisableDispatch dd(kernel); +    KScopedDisableDispatch dd{m_kernel};      KScopedSpinLock lk(m_lock);      // Never exceed our capacity. @@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {          *out_handle = EncodeHandle(static_cast<u16>(index), linear_id);      } -    return ResultSuccess; +    R_SUCCEED();  }  Result KHandleTable::Reserve(Handle* out_handle) { -    KScopedDisableDispatch dd(kernel); +    KScopedDisableDispatch dd{m_kernel};      KScopedSpinLock lk(m_lock);      // Never exceed our capacity.      R_UNLESS(m_count < m_table_size, ResultOutOfHandles);      *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); -    return ResultSuccess; +    R_SUCCEED();  }  void KHandleTable::Unreserve(Handle handle) { -    KScopedDisableDispatch dd(kernel); +    KScopedDisableDispatch dd{m_kernel};      KScopedSpinLock lk(m_lock);      // Unpack the handle. @@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) {      ASSERT(reserved == 0);      ASSERT(linear_id != 0); -    if (index < m_table_size) { +    if (index < m_table_size) [[likely]] {          // NOTE: This code does not check the linear id.          ASSERT(m_objects[index] == nullptr);          this->FreeEntry(index); @@ -116,7 +113,7 @@ void KHandleTable::Unreserve(Handle handle) {  }  void KHandleTable::Register(Handle handle, KAutoObject* obj) { -    KScopedDisableDispatch dd(kernel); +    KScopedDisableDispatch dd{m_kernel};      KScopedSpinLock lk(m_lock);      // Unpack the handle. @@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) {      ASSERT(reserved == 0);      ASSERT(linear_id != 0); -    if (index < m_table_size) { +    if (index < m_table_size) [[likely]] {          // Set the entry.          
ASSERT(m_objects[index] == nullptr); diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h index 0864a737c..65cae3b27 100644 --- a/src/core/hle/kernel/k_handle_table.h +++ b/src/core/hle/kernel/k_handle_table.h @@ -21,33 +21,38 @@ namespace Kernel {  class KernelCore;  class KHandleTable { -public:      YUZU_NON_COPYABLE(KHandleTable);      YUZU_NON_MOVEABLE(KHandleTable); +public:      static constexpr size_t MaxTableSize = 1024; -    explicit KHandleTable(KernelCore& kernel_); -    ~KHandleTable(); +public: +    explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}      Result Initialize(s32 size) { +        // Check that the table size is valid.          R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); +        // Lock. +        KScopedDisableDispatch dd{m_kernel}; +        KScopedSpinLock lk(m_lock); +          // Initialize all fields.          m_max_count = 0; -        m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size); +        m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);          m_next_linear_id = MinLinearId;          m_count = 0;          m_free_head_index = -1;          // Free all entries. -        for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) { +        for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {              m_objects[i] = nullptr; -            m_entry_infos[i].next_free_index = i - 1; +            m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);              m_free_head_index = i;          } -        return ResultSuccess; +        R_SUCCEED();      }      size_t GetTableSize() const { @@ -66,13 +71,13 @@ public:      template <typename T = KAutoObject>      KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {          // Lock and look up in table. -        KScopedDisableDispatch dd(kernel); +        KScopedDisableDispatch dd{m_kernel};          KScopedSpinLock lk(m_lock);          if constexpr (std::is_same_v<T, KAutoObject>) {              return this->GetObjectImpl(handle);          } else { -            if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) { +            if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {                  return obj->DynamicCast<T*>();              } else {                  return nullptr; @@ -85,13 +90,13 @@ public:          // Handle pseudo-handles.          if constexpr (std::derived_from<KProcess, T>) {              if (handle == Svc::PseudoHandle::CurrentProcess) { -                auto* const cur_process = kernel.CurrentProcess(); +                auto* const cur_process = m_kernel.CurrentProcess();                  ASSERT(cur_process != nullptr);                  return cur_process;              }          } else if constexpr (std::derived_from<KThread, T>) {              if (handle == Svc::PseudoHandle::CurrentThread) { -                auto* const cur_thread = GetCurrentThreadPointer(kernel); +                auto* const cur_thread = GetCurrentThreadPointer(m_kernel);                  ASSERT(cur_thread != nullptr);                  return cur_thread;              } @@ -100,6 +105,37 @@ public:          return this->template GetObjectWithoutPseudoHandle<T>(handle);      } +    KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const { +        // Lock and look up in table. 
+        KScopedDisableDispatch dd{m_kernel}; +        KScopedSpinLock lk(m_lock); + +        return this->GetObjectImpl(handle); +    } + +    KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const { +        // Handle pseudo-handles. +        ASSERT(cur_thread != nullptr); +        if (handle == Svc::PseudoHandle::CurrentProcess) { +            auto* const cur_process = +                static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess())); +            ASSERT(cur_process != nullptr); +            return cur_process; +        } +        if (handle == Svc::PseudoHandle::CurrentThread) { +            return static_cast<KAutoObject*>(cur_thread); +        } + +        return GetObjectForIpcWithoutPseudoHandle(handle); +    } + +    KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const { +        KScopedDisableDispatch dd{m_kernel}; +        KScopedSpinLock lk(m_lock); + +        return this->GetObjectByIndexImpl(out_handle, index); +    } +      Result Reserve(Handle* out_handle);      void Unreserve(Handle handle); @@ -112,7 +148,7 @@ public:          size_t num_opened;          {              // Lock the table. -            KScopedDisableDispatch dd(kernel); +            KScopedDisableDispatch dd{m_kernel};              KScopedSpinLock lk(m_lock);              for (num_opened = 0; num_opened < num_handles; num_opened++) {                  // Get the current handle. @@ -120,13 +156,13 @@ public:                  // Get the object for the current handle.                  KAutoObject* cur_object = this->GetObjectImpl(cur_handle); -                if (cur_object == nullptr) { +                if (cur_object == nullptr) [[unlikely]] {                      break;                  }                  // Cast the current object to the desired type.                  T* cur_t = cur_object->DynamicCast<T*>(); -                if (cur_t == nullptr) { +                if (cur_t == nullptr) [[unlikely]] {                      break;                  } @@ -137,7 +173,7 @@ public:          }          // If we converted every object, succeed. -        if (num_opened == num_handles) { +        if (num_opened == num_handles) [[likely]] {              return true;          } @@ -191,21 +227,21 @@ private:          ASSERT(reserved == 0);          // Validate our indexing information. -        if (raw_value == 0) { +        if (raw_value == 0) [[unlikely]] {              return false;          } -        if (linear_id == 0) { +        if (linear_id == 0) [[unlikely]] {              return false;          } -        if (index >= m_table_size) { +        if (index >= m_table_size) [[unlikely]] {              return false;          }          // Check that there's an object, and our serial id is correct. -        if (m_objects[index] == nullptr) { +        if (m_objects[index] == nullptr) [[unlikely]] {              return false;          } -        if (m_entry_infos[index].GetLinearId() != linear_id) { +        if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {              return false;          } @@ -215,11 +251,11 @@ private:      KAutoObject* GetObjectImpl(Handle handle) const {          // Handles must not have reserved bits set.          
const auto handle_pack = HandlePack(handle); -        if (handle_pack.reserved != 0) { +        if (handle_pack.reserved != 0) [[unlikely]] {              return nullptr;          } -        if (this->IsValidHandle(handle)) { +        if (this->IsValidHandle(handle)) [[likely]] {              return m_objects[handle_pack.index];          } else {              return nullptr; @@ -227,9 +263,8 @@ private:      }      KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { -          // Index must be in bounds. -        if (index >= m_table_size) { +        if (index >= m_table_size) [[unlikely]] {              return nullptr;          } @@ -244,18 +279,15 @@ private:  private:      union HandlePack { -        HandlePack() = default; -        HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} +        constexpr HandlePack() = default; +        constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} -        u32 raw; +        u32 raw{};          BitField<0, 15, u32> index;          BitField<15, 15, u32> linear_id;          BitField<30, 2, u32> reserved;      }; -    static constexpr u16 MinLinearId = 1; -    static constexpr u16 MaxLinearId = 0x7FFF; -      static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {          HandlePack handle{};          handle.index.Assign(index); @@ -264,6 +296,10 @@ private:          return handle.raw;      } +private: +    static constexpr u16 MinLinearId = 1; +    static constexpr u16 MaxLinearId = 0x7FFF; +      union EntryInfo {          u16 linear_id;          s16 next_free_index; @@ -271,21 +307,21 @@ private:          constexpr u16 GetLinearId() const {              return linear_id;          } -        constexpr s16 GetNextFreeIndex() const { +        constexpr s32 GetNextFreeIndex() const {              return next_free_index;          }      };  private: +    KernelCore& m_kernel;      std::array<EntryInfo, MaxTableSize> m_entry_infos{};      std::array<KAutoObject*, MaxTableSize> m_objects{}; -    s32 m_free_head_index{-1}; +    mutable KSpinLock m_lock; +    s32 m_free_head_index{};      u16 m_table_size{};      u16 m_max_count{}; -    u16 m_next_linear_id{MinLinearId}; +    u16 m_next_linear_id{};      u16 m_count{}; -    mutable KSpinLock m_lock; -    KernelCore& kernel;  };  } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index 9444f6bd2..3b6e7baff 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h @@ -35,26 +35,32 @@ enum class KMemoryState : u32 {      FlagCanMapProcess = (1 << 23),      FlagCanChangeAttribute = (1 << 24),      FlagCanCodeMemory = (1 << 25), +    FlagLinearMapped = (1 << 26),      FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |                  FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |                  FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | -                FlagReferenceCounted | FlagCanChangeAttribute, +                FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped,      FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |                  FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | -                FlagCanAlignedDeviceMap | FlagReferenceCounted, +                FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped, -    FlagsMisc = FlagMapped | FlagReferenceCounted | 
FlagCanQueryPhysical | FlagCanDeviceMap, +    FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap | +                FlagLinearMapped,      Free = static_cast<u32>(Svc::MemoryState::Free), -    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped, +    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap | +         FlagCanAlignedDeviceMap,      Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,      Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,      CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |                 FlagCanCodeMemory, -    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,      Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, +    Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted | +             FlagLinearMapped, + +    // Alias was removed after 1.0.0.      AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |                  FlagCanCodeAlias, @@ -67,18 +73,18 @@ enum class KMemoryState : u32 {      Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |              FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, -    ThreadLocal = -        static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, +    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped, -    Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc | +    Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |                   FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |                   FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, -    SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc | +    SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |                         FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,      SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | -                 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, +                 FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc | +                 FlagCanUseNonDeviceIpc,      Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), @@ -91,69 +97,69 @@ enum class KMemoryState : u32 {      Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,      GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | -                    FlagReferenceCounted | FlagCanDebug, -    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted, +                    FlagReferenceCounted | FlagCanDebug | FlagLinearMapped, +    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted | +              FlagLinearMapped,      Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped, + +    Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted | +               FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap | +               FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,  };  
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);  static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); -static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001); +static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);  static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); -static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03); -static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04); -static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05); -static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006); -static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08); -static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09); -static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A); -static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B); -static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C); -static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D); -static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E); -static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F); +static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03); +static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04); +static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05); +static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006); + +static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08); +static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09); +static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A); +static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B); +static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C); +static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D); +static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E); +static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);  static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); -static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811); -static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812); +static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811); +static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);  static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); -static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214); -static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015); +static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214); +static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);  static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); +static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);  enum class KMemoryPermission : u8 {      None = 0,      All = static_cast<u8>(~None), -    Read = 1 << 0, -    Write = 1 << 1, -    Execute = 1 << 2, - -    ReadAndWrite = Read | Write, -    ReadAndExecute = Read | Execute, - -    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | -                               Svc::MemoryPermission::Execute), -      KernelShift = 3, -    KernelRead = Read << KernelShift, -    KernelWrite = Write << KernelShift, -    KernelExecute = Execute << KernelShift, +    KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << 
KernelShift, +    KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift, +    KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift,      NotMapped = (1 << (2 * KernelShift)),      KernelReadWrite = KernelRead | KernelWrite,      KernelReadExecute = KernelRead | KernelExecute, -    UserRead = Read | KernelRead, -    UserWrite = Write | KernelWrite, -    UserExecute = Execute, +    UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead, +    UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite, +    UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute),      UserReadWrite = UserRead | UserWrite,      UserReadExecute = UserRead | UserExecute, -    IpcLockChangeMask = NotMapped | UserReadWrite +    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | +                               Svc::MemoryPermission::Execute), + +    IpcLockChangeMask = NotMapped | UserReadWrite,  };  DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); @@ -210,13 +216,15 @@ struct KMemoryInfo {      constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {          return { -            .addr = m_address, +            .base_address = m_address,              .size = m_size,              .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), -            .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), -            .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), -            .ipc_refcount = m_ipc_lock_count, -            .device_refcount = m_device_use_count, +            .attribute = +                static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), +            .permission = +                static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), +            .ipc_count = m_ipc_lock_count, +            .device_count = m_device_use_count,              .padding = {},          };      } @@ -468,6 +476,7 @@ public:      constexpr void UpdateDeviceDisableMergeStateForShareLeft(          [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { +        // New permission/right aren't used.          if (left) {              m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(                  m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); @@ -478,6 +487,7 @@ public:      constexpr void UpdateDeviceDisableMergeStateForShareRight(          [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { +        // New permission/left aren't used.          if (right) {              m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(                  m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); @@ -494,6 +504,8 @@ public:      constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,                                   bool right) { +        // New permission isn't used. +          // We must either be shared or have a zero lock count.          
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||                 m_device_use_count == 0); @@ -509,6 +521,7 @@ public:      constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(          [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { +        // New permission/right aren't used.          if (left) {              if (!m_device_disable_merge_left_count) { @@ -528,6 +541,8 @@ public:      constexpr void UpdateDeviceDisableMergeStateForUnshareRight(          [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { +        // New permission/left aren't used. +          if (right) {              const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;              ASSERT(old_device_disable_merge_right_count > 0); @@ -546,6 +561,8 @@ public:      constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,                                     bool right) { +        // New permission isn't used. +          // We must be shared.          ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); @@ -563,6 +580,7 @@ public:      constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,                                          bool right) { +        // New permission isn't used.          // We must be shared.          ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); @@ -613,6 +631,8 @@ public:      constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,                                  [[maybe_unused]] bool right) { +        // New permission isn't used. +          // We must be locked.          ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp index 55dc296d0..72c3ee4b7 100644 --- a/src/core/hle/kernel/k_memory_layout.cpp +++ b/src/core/hle/kernel/k_memory_layout.cpp @@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_      }  } -size_t KMemoryLayout::GetResourceRegionSizeForInit() { -    // Calculate resource region size based on whether we allow extra threads. -    const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); -    size_t resource_region_size = -        KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0); - -    return resource_region_size; +size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) { +    return KernelResourceSize + KSystemControl::SecureAppletMemorySize + +           (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0);  }  } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 884fc623a..fd6e1d3e6 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;  constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;  // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. 
-constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000; +constexpr size_t KernelPageBufferHeapSize = 0x3E0000; +constexpr size_t KernelSlabHeapAdditionalSize = 0x148000; +constexpr size_t KernelPageBufferAdditionalSize = 0x33C000; -constexpr std::size_t KernelResourceSize = -    KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; +constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + +                                           KernelSlabHeapSize + KernelPageBufferHeapSize;  constexpr bool IsKernelAddressKey(VAddr key) {      return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; @@ -168,6 +170,11 @@ public:              KMemoryRegionType_VirtualDramKernelTraceBuffer));      } +    const KMemoryRegion& GetSecureAppletMemoryRegion() { +        return Dereference(GetVirtualMemoryRegionTree().FindByType( +            KMemoryRegionType_VirtualDramKernelSecureAppletMemory)); +    } +      const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {          return Dereference(FindVirtualLinear(address));      } @@ -229,7 +236,7 @@ public:      void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,                                             VAddr linear_virtual_start); -    static size_t GetResourceRegionSizeForInit(); +    static size_t GetResourceRegionSizeForInit(bool use_extra_resource);      auto GetKernelRegionExtents() const {          return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); @@ -279,6 +286,10 @@ public:          return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(              KMemoryRegionType_DramKernelSlab);      } +    auto GetKernelSecureAppletMemoryRegionPhysicalExtents() { +        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( +            KMemoryRegionType_DramKernelSecureAppletMemory); +    }      auto GetKernelPageTableHeapRegionPhysicalExtents() const {          return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(              KMemoryRegionType_DramKernelPtHeap); diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 646711505..c4bf306e8 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {      } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {          return KMemoryManager::Pool::SystemNonSecure;      } else { -        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool"); -        return {}; +        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");      }  }  } // namespace -KMemoryManager::KMemoryManager(Core::System& system_) -    : system{system_}, pool_locks{ -                           KLightLock{system_.Kernel()}, -                           KLightLock{system_.Kernel()}, -                           KLightLock{system_.Kernel()}, -                           KLightLock{system_.Kernel()}, -                       } {} +KMemoryManager::KMemoryManager(Core::System& system) +    : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()}, +      m_pool_locks{ +          KLightLock{system.Kernel()}, +          KLightLock{system.Kernel()}, +          KLightLock{system.Kernel()}, +          KLightLock{system.Kernel()}, +      } {}  void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {      // Clear 
the management region to zero.      const VAddr management_region_end = management_region + management_region_size; +    // std::memset(GetVoidPointer(management_region), 0, management_region_size);      // Reset our manager count. -    num_managers = 0; +    m_num_managers = 0;      // Traverse the virtual memory layout tree, initializing each manager as appropriate. -    while (num_managers != MaxManagerCount) { +    while (m_num_managers != MaxManagerCount) {          // Locate the region that should initialize the current manager.          PAddr region_address = 0;          size_t region_size = 0;          Pool region_pool = Pool::Count; -        for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { +        for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {              // We only care about regions that we need to create managers for.              if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {                  continue;              }              // We want to initialize the managers in order. -            if (it.GetAttributes() != num_managers) { +            if (it.GetAttributes() != m_num_managers) {                  continue;              } @@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio          }          // Initialize a new manager for the region. -        Impl* manager = std::addressof(managers[num_managers++]); -        ASSERT(num_managers <= managers.size()); +        Impl* manager = std::addressof(m_managers[m_num_managers++]); +        ASSERT(m_num_managers <= m_managers.size());          const size_t cur_size = manager->Initialize(region_address, region_size, management_region,                                                      management_region_end, region_pool); @@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio          // Insert the manager into the pool list.          const auto region_pool_index = static_cast<u32>(region_pool); -        if (pool_managers_tail[region_pool_index] == nullptr) { -            pool_managers_head[region_pool_index] = manager; +        if (m_pool_managers_tail[region_pool_index] == nullptr) { +            m_pool_managers_head[region_pool_index] = manager;          } else { -            pool_managers_tail[region_pool_index]->SetNext(manager); -            manager->SetPrev(pool_managers_tail[region_pool_index]); +            m_pool_managers_tail[region_pool_index]->SetNext(manager); +            manager->SetPrev(m_pool_managers_tail[region_pool_index]);          } -        pool_managers_tail[region_pool_index] = manager; +        m_pool_managers_tail[region_pool_index] = manager;      }      // Free each region to its corresponding heap. @@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio      const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();      const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;      const PAddr ini_last = ini_end - 1; -    for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { +    for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {          if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {              // Get the manager for the region. 
-            auto index = it.GetAttributes(); -            auto& manager = managers[index]; +            auto& manager = m_managers[it.GetAttributes()];              const PAddr cur_start = it.GetAddress();              const PAddr cur_last = it.GetLastAddress(); @@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio      }      // Update the used size for all managers. -    for (size_t i = 0; i < num_managers; ++i) { -        managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); +    for (size_t i = 0; i < m_num_managers; ++i) { +        m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);      }  } +Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { +    UNREACHABLE(); +} + +void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) { +    UNREACHABLE(); +} +  PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {      // Early return if we're allocating no pages.      if (num_pages == 0) { @@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p      // Lock the pool that we're allocating from.      const auto [pool, dir] = DecodeOption(option); -    KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]); +    KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);      // Choose a heap based on our page size request.      const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); @@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p      PAddr allocated_block = 0;      for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;           chosen_manager = this->GetNextManager(chosen_manager, dir)) { -        allocated_block = chosen_manager->AllocateBlock(heap_index, true); +        allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);          if (allocated_block != 0) {              break;          } @@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p          return 0;      } -    // If we allocated more than we need, free some. -    const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index); -    if (allocated_pages > num_pages) { -        chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); +    // Maintain the optimized memory bitmap, if we should. +    if (m_has_optimized_process[static_cast<size_t>(pool)]) { +        UNIMPLEMENTED();      }      // Open the first reference to the pages. @@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p  }  Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, -                                             Direction dir, bool random) { +                                             Direction dir, bool unoptimized, bool random) {      // Choose a heap based on our page size request.      const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);      R_UNLESS(0 <= heap_index, ResultOutOfMemory);      // Ensure that we don't leave anything un-freed. 
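// The ON_RESULT_FAILURE blocks introduced below behave like the scope
// guards they replace: the body runs only when the enclosing scope exits
// with a failing Result, and is skipped on success. A minimal sketch of
// the older equivalent pattern (FreeAll is a stand-in name, not a real
// helper):
//
//     auto guard = SCOPE_GUARD({ FreeAll(); });  // fires on any early exit
//     R_TRY(Allocate());                         // failure runs the guard
//     guard.Cancel();                            // success keeps the pages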
-    auto group_guard = SCOPE_GUARD({ +    ON_RESULT_FAILURE {          for (const auto& it : out->Nodes()) { -            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress()); -            const size_t num_pages_to_free = +            auto& manager = this->GetManager(it.GetAddress()); +            const size_t node_num_pages =                  std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); -            manager.Free(it.GetAddress(), num_pages_to_free); +            manager.Free(it.GetAddress(), node_num_pages);          } -    }); +        out->Finalize(); +    };      // Keep allocating until we've allocated all our pages.      for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { @@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,                      break;                  } -                // Safely add it to our group. -                { -                    auto block_guard = -                        SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); }); -                    R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); -                    block_guard.Cancel(); +                // Ensure we don't leak the block if we fail. +                ON_RESULT_FAILURE_2 { +                    cur_manager->Free(allocated_block, pages_per_alloc); +                }; + +                // Add the block to our group. +                R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); + +                // Maintain the optimized memory bitmap, if we should. +                if (unoptimized) { +                    UNIMPLEMENTED();                  }                  num_pages -= pages_per_alloc; @@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,      R_UNLESS(num_pages == 0, ResultOutOfMemory);      // We succeeded! -    group_guard.Cancel(); -    return ResultSuccess; +    R_SUCCEED();  }  Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) { @@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op      // Lock the pool that we're allocating from.      const auto [pool, dir] = DecodeOption(option); -    KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); +    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);      // Allocate the page group. -    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); +    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, +                                      m_has_optimized_process[static_cast<size_t>(pool)], true));      // Open the first reference to the pages.      for (const auto& block : out->Nodes()) { @@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op          size_t remaining_pages = block.GetNumPages();          while (remaining_pages > 0) {              // Get the manager for the current address. -            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); +            auto& manager = this->GetManager(cur_address);              // Process part or all of the block.              
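// (The clamp below bounds each step to the current manager: a block can
//  straddle manager boundaries, and GetPageOffsetToEnd(addr) is the number
//  of pages from addr to that manager's end, the same quantity as the
//  (manager.GetEndAddress() - addr) / PageSize expression used in the
//  cleanup handler above.)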
const size_t cur_pages = @@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op          }      } -    return ResultSuccess; +    R_SUCCEED();  } -Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, -                                                 u64 process_id, u8 fill_pattern) { +Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, +                                          u64 process_id, u8 fill_pattern) {      ASSERT(out != nullptr);      ASSERT(out->GetNumPages() == 0); @@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag      const auto [pool, dir] = DecodeOption(option);      // Allocate the memory. +    bool optimized;      {          // Lock the pool that we're allocating from. -        KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); +        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); + +        // Check if we have an optimized process. +        const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)]; +        const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;          // Allocate the page group. -        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); +        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, +                                          false)); -        // Open the first reference to the pages. -        for (const auto& block : out->Nodes()) { -            PAddr cur_address = block.GetAddress(); -            size_t remaining_pages = block.GetNumPages(); -            while (remaining_pages > 0) { -                // Get the manager for the current address. -                auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); - -                // Process part or all of the block. -                const size_t cur_pages = -                    std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); -                manager.OpenFirst(cur_address, cur_pages); - -                // Advance. -                cur_address += cur_pages * PageSize; -                remaining_pages -= cur_pages; -            } -        } +        // Set whether we should optimize. +        optimized = has_optimized && is_optimized;      } -    // Set all the allocated memory. -    for (const auto& block : out->Nodes()) { -        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, -                    block.GetSize()); -    } +    // Perform optimized memory tracking, if we should. +    if (optimized) { +        // Iterate over the allocated blocks. +        for (const auto& block : out->Nodes()) { +            // Get the block extents. +            const PAddr block_address = block.GetAddress(); +            const size_t block_pages = block.GetNumPages(); -    return ResultSuccess; -} +            // If it has no pages, we don't need to do anything. +            if (block_pages == 0) { +                continue; +            } -void KMemoryManager::Open(PAddr address, size_t num_pages) { -    // Repeatedly open references until we've done so for all pages. 
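// (Removed here but not dropped: Open, OpenFirst and Close reappear later
//  in this diff as inline members of KMemoryManager in k_memory_manager.h,
//  with the same clamp-and-advance walk, taking the owning pool's lock for
//  each manager-sized chunk.)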
-    while (num_pages) { -        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); -        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); +            // Fill all the pages that we need to fill. +            bool any_new = false; +            { +                PAddr cur_address = block_address; +                size_t remaining_pages = block_pages; +                while (remaining_pages > 0) { +                    // Get the manager for the current address. +                    auto& manager = this->GetManager(cur_address); + +                    // Process part or all of the block. +                    const size_t cur_pages = +                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); +                    any_new = +                        manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern); + +                    // Advance. +                    cur_address += cur_pages * PageSize; +                    remaining_pages -= cur_pages; +                } +            } -        { -            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); -            manager.Open(address, cur_pages); +            // If there are new pages, update tracking for the allocation. +            if (any_new) { +                // Update tracking for the allocation. +                PAddr cur_address = block_address; +                size_t remaining_pages = block_pages; +                while (remaining_pages > 0) { +                    // Get the manager for the current address. +                    auto& manager = this->GetManager(cur_address); + +                    // Lock the pool for the manager. +                    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); + +                    // Track some or all of the current pages. +                    const size_t cur_pages = +                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); +                    manager.TrackOptimizedAllocation(cur_address, cur_pages); + +                    // Advance. +                    cur_address += cur_pages * PageSize; +                    remaining_pages -= cur_pages; +                } +            }          } - -        num_pages -= cur_pages; -        address += cur_pages * PageSize; -    } -} - -void KMemoryManager::Close(PAddr address, size_t num_pages) { -    // Repeatedly close references until we've done so for all pages. -    while (num_pages) { -        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); -        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); - -        { -            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); -            manager.Close(address, cur_pages); +    } else { +        // Set all the allocated memory. 
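// (Condensed shape of AllocateForProcess's two fill strategies, using
//  stand-in names rather than real helpers:
//
//      if (optimized) {
//          if (FillOnlyNewPages(blocks, fill_pattern))  // no pool lock held
//              TrackUnderPoolLocks(blocks);             // per-manager lock
//      } else {
//          MemsetEveryBlock(blocks, fill_pattern);      // this branch
//      }
//  )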
+        for (const auto& block : out->Nodes()) { +            std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, +                        block.GetSize());          } - -        num_pages -= cur_pages; -        address += cur_pages * PageSize;      } -} -void KMemoryManager::Close(const KPageGroup& pg) { -    for (const auto& node : pg.Nodes()) { -        Close(node.GetAddress(), node.GetNumPages()); -    } -} -void KMemoryManager::Open(const KPageGroup& pg) { -    for (const auto& node : pg.Nodes()) { -        Open(node.GetAddress(), node.GetNumPages()); -    } +    R_SUCCEED();  }  size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, @@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage      ASSERT(Common::IsAligned(total_management_size, PageSize));      // Setup region. -    pool = p; -    management_region = management; -    page_reference_counts.resize( +    m_pool = p; +    m_management_region = management; +    m_page_reference_counts.resize(          Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); -    ASSERT(Common::IsAligned(management_region, PageSize)); +    ASSERT(Common::IsAligned(m_management_region, PageSize));      // Initialize the manager's KPageHeap. -    heap.Initialize(address, size, management + manager_size, page_heap_size); +    m_heap.Initialize(address, size, management + manager_size, page_heap_size);      return total_management_size;  } +void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) { +    UNREACHABLE(); +} + +void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) { +    UNREACHABLE(); +} + +bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages, +                                                      u8 fill_pattern) { +    UNREACHABLE(); +} +  size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {      const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);      const size_t optimize_map_size = diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index dcb9b6348..401d4e644 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h @@ -21,11 +21,8 @@ namespace Kernel {  class KPageGroup; -class KMemoryManager final { +class KMemoryManager {  public: -    YUZU_NON_COPYABLE(KMemoryManager); -    YUZU_NON_MOVEABLE(KMemoryManager); -      enum class Pool : u32 {          Application = 0,          Applet = 1, @@ -45,16 +42,85 @@ public:      enum class Direction : u32 {          FromFront = 0,          FromBack = 1, -          Shift = 0,          Mask = (0xF << Shift),      }; -    explicit KMemoryManager(Core::System& system_); +    static constexpr size_t MaxManagerCount = 10; + +    explicit KMemoryManager(Core::System& system);      void Initialize(VAddr management_region, size_t management_region_size); -    constexpr size_t GetSize(Pool pool) const { +    Result InitializeOptimizedMemory(u64 process_id, Pool pool); +    void FinalizeOptimizedMemory(u64 process_id, Pool pool); + +    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); +    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); +    Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, +                              u8 fill_pattern); + +    
Pool GetPool(PAddr address) const { +        return this->GetManager(address).GetPool(); +    } + +    void Open(PAddr address, size_t num_pages) { +        // Repeatedly open references until we've done so for all pages. +        while (num_pages) { +            auto& manager = this->GetManager(address); +            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); + +            { +                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); +                manager.Open(address, cur_pages); +            } + +            num_pages -= cur_pages; +            address += cur_pages * PageSize; +        } +    } + +    void OpenFirst(PAddr address, size_t num_pages) { +        // Repeatedly open references until we've done so for all pages. +        while (num_pages) { +            auto& manager = this->GetManager(address); +            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); + +            { +                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); +                manager.OpenFirst(address, cur_pages); +            } + +            num_pages -= cur_pages; +            address += cur_pages * PageSize; +        } +    } + +    void Close(PAddr address, size_t num_pages) { +        // Repeatedly close references until we've done so for all pages. +        while (num_pages) { +            auto& manager = this->GetManager(address); +            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); + +            { +                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); +                manager.Close(address, cur_pages); +            } + +            num_pages -= cur_pages; +            address += cur_pages * PageSize; +        } +    } + +    size_t GetSize() { +        size_t total = 0; +        for (size_t i = 0; i < m_num_managers; i++) { +            total += m_managers[i].GetSize(); +        } +        return total; +    } + +    size_t GetSize(Pool pool) {          constexpr Direction GetSizeDirection = Direction::FromFront;          size_t total = 0;          for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; @@ -64,18 +130,36 @@ public:          return total;      } -    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); -    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); -    Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, -                                     u8 fill_pattern); +    size_t GetFreeSize() { +        size_t total = 0; +        for (size_t i = 0; i < m_num_managers; i++) { +            KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]); +            total += m_managers[i].GetFreeSize(); +        } +        return total; +    } -    static constexpr size_t MaxManagerCount = 10; +    size_t GetFreeSize(Pool pool) { +        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); + +        constexpr Direction GetSizeDirection = Direction::FromFront; +        size_t total = 0; +        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; +             manager = this->GetNextManager(manager, GetSizeDirection)) { +            total += manager->GetFreeSize(); +        } +        return total; +    } -    void Close(PAddr address, size_t num_pages); -    void Close(const 
KPageGroup& pg); +    void DumpFreeList(Pool pool) { +        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); -    void Open(PAddr address, size_t num_pages); -    void Open(const KPageGroup& pg); +        constexpr Direction DumpDirection = Direction::FromFront; +        for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr; +             manager = this->GetNextManager(manager, DumpDirection)) { +            manager->DumpFreeList(); +        } +    }  public:      static size_t CalculateManagementOverheadSize(size_t region_size) { @@ -88,14 +172,13 @@ public:      }      static constexpr Pool GetPool(u32 option) { -        return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >> +        return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>                                   static_cast<u32>(Pool::Shift));      }      static constexpr Direction GetDirection(u32 option) { -        return static_cast<Direction>( -            (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >> -            static_cast<u32>(Direction::Shift)); +        return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >> +                                      static_cast<u32>(Direction::Shift));      }      static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) { @@ -103,74 +186,88 @@ public:      }  private: -    class Impl final { +    class Impl {      public: -        YUZU_NON_COPYABLE(Impl); -        YUZU_NON_MOVEABLE(Impl); +        static size_t CalculateManagementOverheadSize(size_t region_size); + +        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { +            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / +                    Common::BitSize<u64>()) * +                   sizeof(u64); +        } +    public:          Impl() = default; -        ~Impl() = default;          size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,                            Pool p); -        VAddr AllocateBlock(s32 index, bool random) { -            return heap.AllocateBlock(index, random); +        PAddr AllocateBlock(s32 index, bool random) { +            return m_heap.AllocateBlock(index, random);          } - -        void Free(VAddr addr, size_t num_pages) { -            heap.Free(addr, num_pages); +        PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { +            return m_heap.AllocateAligned(index, num_pages, align_pages); +        } +        void Free(PAddr addr, size_t num_pages) { +            m_heap.Free(addr, num_pages);          }          void SetInitialUsedHeapSize(size_t reserved_size) { -            heap.SetInitialUsedSize(reserved_size); +            m_heap.SetInitialUsedSize(reserved_size);          } -        constexpr Pool GetPool() const { -            return pool; +        void InitializeOptimizedMemory() { +            UNIMPLEMENTED();          } +        void TrackUnoptimizedAllocation(PAddr block, size_t num_pages); +        void TrackOptimizedAllocation(PAddr block, size_t num_pages); + +        bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern); + +        constexpr Pool GetPool() const { +            return m_pool; +        }          constexpr size_t GetSize() const { -            return heap.GetSize(); +            return m_heap.GetSize(); +        } +        constexpr PAddr GetEndAddress() const { +            
return m_heap.GetEndAddress();          } -        constexpr VAddr GetAddress() const { -            return heap.GetAddress(); +        size_t GetFreeSize() const { +            return m_heap.GetFreeSize();          } -        constexpr VAddr GetEndAddress() const { -            return heap.GetEndAddress(); +        void DumpFreeList() const { +            UNIMPLEMENTED();          }          constexpr size_t GetPageOffset(PAddr address) const { -            return heap.GetPageOffset(address); +            return m_heap.GetPageOffset(address);          } -          constexpr size_t GetPageOffsetToEnd(PAddr address) const { -            return heap.GetPageOffsetToEnd(address); +            return m_heap.GetPageOffsetToEnd(address);          }          constexpr void SetNext(Impl* n) { -            next = n; +            m_next = n;          } -          constexpr void SetPrev(Impl* n) { -            prev = n; +            m_prev = n;          } -          constexpr Impl* GetNext() const { -            return next; +            return m_next;          } -          constexpr Impl* GetPrev() const { -            return prev; +            return m_prev;          }          void OpenFirst(PAddr address, size_t num_pages) {              size_t index = this->GetPageOffset(address);              const size_t end = index + num_pages;              while (index < end) { -                const RefCount ref_count = (++page_reference_counts[index]); +                const RefCount ref_count = (++m_page_reference_counts[index]);                  ASSERT(ref_count == 1);                  index++; @@ -181,7 +278,7 @@ private:              size_t index = this->GetPageOffset(address);              const size_t end = index + num_pages;              while (index < end) { -                const RefCount ref_count = (++page_reference_counts[index]); +                const RefCount ref_count = (++m_page_reference_counts[index]);                  ASSERT(ref_count > 1);                  index++; @@ -195,8 +292,8 @@ private:              size_t free_start = 0;              size_t free_count = 0;              while (index < end) { -                ASSERT(page_reference_counts[index] > 0); -                const RefCount ref_count = (--page_reference_counts[index]); +                ASSERT(m_page_reference_counts[index] > 0); +                const RefCount ref_count = (--m_page_reference_counts[index]);                  // Keep track of how many zero refcounts we see in a row, to minimize calls to free.                  
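// Worked example: if pages 5..7 all reach refcount zero during this walk,
// free_start/free_count batch them so a single
// Free(m_heap.GetAddress() + 5 * PageSize, 3) is issued instead of three
// one-page frees.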
if (ref_count == 0) { @@ -208,7 +305,7 @@ private:                      }                  } else {                      if (free_count > 0) { -                        this->Free(heap.GetAddress() + free_start * PageSize, free_count); +                        this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);                          free_count = 0;                      }                  } @@ -217,44 +314,36 @@ private:              }              if (free_count > 0) { -                this->Free(heap.GetAddress() + free_start * PageSize, free_count); +                this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);              }          } -        static size_t CalculateManagementOverheadSize(size_t region_size); - -        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { -            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / -                    Common::BitSize<u64>()) * -                   sizeof(u64); -        } -      private:          using RefCount = u16; -        KPageHeap heap; -        std::vector<RefCount> page_reference_counts; -        VAddr management_region{}; -        Pool pool{}; -        Impl* next{}; -        Impl* prev{}; +        KPageHeap m_heap; +        std::vector<RefCount> m_page_reference_counts; +        VAddr m_management_region{}; +        Pool m_pool{}; +        Impl* m_next{}; +        Impl* m_prev{};      };  private: -    Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) { -        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; +    Impl& GetManager(PAddr address) { +        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];      } -    const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const { -        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; +    const Impl& GetManager(PAddr address) const { +        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];      } -    constexpr Impl* GetFirstManager(Pool pool, Direction dir) const { -        return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)] -                                          : pool_managers_head[static_cast<size_t>(pool)]; +    constexpr Impl* GetFirstManager(Pool pool, Direction dir) { +        return dir == Direction::FromBack ? 
m_pool_managers_tail[static_cast<size_t>(pool)] +                                          : m_pool_managers_head[static_cast<size_t>(pool)];      } -    constexpr Impl* GetNextManager(Impl* cur, Direction dir) const { +    constexpr Impl* GetNextManager(Impl* cur, Direction dir) {          if (dir == Direction::FromBack) {              return cur->GetPrev();          } else { @@ -263,15 +352,21 @@ private:      }      Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir, -                                 bool random); +                                 bool unoptimized, bool random);  private: -    Core::System& system; -    std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks; -    std::array<Impl*, MaxManagerCount> pool_managers_head{}; -    std::array<Impl*, MaxManagerCount> pool_managers_tail{}; -    std::array<Impl, MaxManagerCount> managers; -    size_t num_managers{}; +    template <typename T> +    using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>; + +    Core::System& m_system; +    const KMemoryLayout& m_memory_layout; +    PoolArray<KLightLock> m_pool_locks; +    std::array<Impl*, MaxManagerCount> m_pool_managers_head{}; +    std::array<Impl*, MaxManagerCount> m_pool_managers_tail{}; +    std::array<Impl, MaxManagerCount> m_managers; +    size_t m_num_managers{}; +    PoolArray<u64> m_optimized_process_ids{}; +    PoolArray<bool> m_has_optimized_process{};  };  } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index 7e2fcccdc..e5630c1ac 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -142,32 +142,38 @@ private:  } // namespace impl -constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); -constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); -constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); +constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); + +constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); +constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);  static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);  static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); -constexpr auto KMemoryRegionType_DramKernelBase = +// constexpr inline auto KMemoryRegionType_CoreLocalRegion = +// KMemoryRegionType_None.DeriveInitial(2).Finalize(); +// static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4); + +constexpr inline auto KMemoryRegionType_DramKernelBase =      KMemoryRegionType_Dram.DeriveSparse(0, 3, 0)          .SetAttribute(KMemoryRegionAttr_NoUserMap)          .SetAttribute(KMemoryRegionAttr_CarveoutProtected); -constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); -constexpr auto KMemoryRegionType_DramHeapBase = +constexpr inline auto KMemoryRegionType_DramReservedBase = +    KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); +constexpr inline auto KMemoryRegionType_DramHeapBase =      KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);  static_assert(KMemoryRegionType_DramKernelBase.GetValue() ==                (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));  static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16));  
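// The constexpr -> constexpr inline churn throughout this header is a
// C++17 linkage fix: a plain namespace-scope constexpr variable has
// internal linkage, so every translation unit including the header gets
// its own copy, whereas an inline variable is one entity program-wide.
// Sketch (Derive() is a stand-in):
//
//     constexpr auto A = Derive();         // one A per includer
//     constexpr inline auto B = Derive();  // one B for the whole program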
static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); -constexpr auto KMemoryRegionType_DramKernelCode = +constexpr inline auto KMemoryRegionType_DramKernelCode =      KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); -constexpr auto KMemoryRegionType_DramKernelSlab = +constexpr inline auto KMemoryRegionType_DramKernelSlab =      KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); -constexpr auto KMemoryRegionType_DramKernelPtHeap = +constexpr inline auto KMemoryRegionType_DramKernelPtHeap =      KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute(          KMemoryRegionAttr_LinearMapped); -constexpr auto KMemoryRegionType_DramKernelInitPt = +constexpr inline auto KMemoryRegionType_DramKernelInitPt =      KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute(          KMemoryRegionAttr_LinearMapped);  static_assert(KMemoryRegionType_DramKernelCode.GetValue() == @@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==                (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |                 KMemoryRegionAttr_LinearMapped)); -constexpr auto KMemoryRegionType_DramReservedEarly = +constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory = +    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute( +        KMemoryRegionAttr_LinearMapped); +static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == +              (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | +               KMemoryRegionAttr_LinearMapped)); + +constexpr inline auto KMemoryRegionType_DramReservedEarly =      KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);  static_assert(KMemoryRegionType_DramReservedEarly.GetValue() ==                (0x16 | KMemoryRegionAttr_NoUserMap)); -constexpr auto KMemoryRegionType_KernelTraceBuffer = +constexpr inline auto KMemoryRegionType_KernelTraceBuffer =      KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0)          .SetAttribute(KMemoryRegionAttr_LinearMapped)          .SetAttribute(KMemoryRegionAttr_UserReadOnly); -constexpr auto KMemoryRegionType_OnMemoryBootImage = +constexpr inline auto KMemoryRegionType_OnMemoryBootImage =      KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); -constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); +constexpr inline auto KMemoryRegionType_DTB = +    KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);  static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() ==                (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly));  static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156);  static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); -constexpr auto KMemoryRegionType_DramPoolPartition = +constexpr inline auto KMemoryRegionType_DramPoolPartition =      KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);  static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==                (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); -constexpr auto KMemoryRegionType_DramPoolManagement = +constexpr inline auto KMemoryRegionType_DramPoolManagement =      KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(          KMemoryRegionAttr_CarveoutProtected); -constexpr auto KMemoryRegionType_DramUserPool = +constexpr inline 
auto KMemoryRegionType_DramUserPool =      KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();  static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==                (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | @@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==  static_assert(KMemoryRegionType_DramUserPool.GetValue() ==                (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); -constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0); -constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1); -constexpr auto KMemoryRegionType_DramSystemNonSecurePool = +constexpr inline auto KMemoryRegionType_DramApplicationPool = +    KMemoryRegionType_DramUserPool.Derive(4, 0); +constexpr inline auto KMemoryRegionType_DramAppletPool = +    KMemoryRegionType_DramUserPool.Derive(4, 1); +constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =      KMemoryRegionType_DramUserPool.Derive(4, 2); -constexpr auto KMemoryRegionType_DramSystemPool = +constexpr inline auto KMemoryRegionType_DramSystemPool =      KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);  static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==                (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); @@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==                (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |                 KMemoryRegionAttr_CarveoutProtected)); -constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); -constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap = +constexpr inline auto KMemoryRegionType_VirtualDramHeapBase = +    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); +constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =      KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); -constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer = +constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =      KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);  static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);  static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);  static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);  // UNUSED: .DeriveSparse(2, 2, 0); -constexpr auto KMemoryRegionType_VirtualDramUnknownDebug = +constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =      KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);  static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); -constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = +constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = +    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0); +static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62)); + +constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =      KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); -constexpr auto KMemoryRegionType_VirtualDramPoolManagement = +constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =      KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); -constexpr auto KMemoryRegionType_VirtualDramUserPool = +constexpr inline auto KMemoryRegionType_VirtualDramUserPool =      
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);  static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);  static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);  static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); -// NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying -// to understand why Nintendo made this choice. +// NOTE: For unknown reason, the pools are derived out-of-order here. +// It's worth eventually trying to understand why Nintendo made this choice.  // UNUSED: .Derive(6, 0);  // UNUSED: .Derive(6, 1); -constexpr auto KMemoryRegionType_VirtualDramAppletPool = +constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =      KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); -constexpr auto KMemoryRegionType_VirtualDramApplicationPool = +constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =      KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); -constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool = +constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =      KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); -constexpr auto KMemoryRegionType_VirtualDramSystemPool = +constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =      KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);  static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);  static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);  static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);  static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); -constexpr auto KMemoryRegionType_ArchDeviceBase = +constexpr inline auto KMemoryRegionType_ArchDeviceBase =      KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); -constexpr auto KMemoryRegionType_BoardDeviceBase = +constexpr inline auto KMemoryRegionType_BoardDeviceBase =      KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();  static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5);  static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); @@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);  #error "Unimplemented"  #else  // Default to no architecture devices. -constexpr auto NumArchitectureDeviceRegions = 0; +constexpr inline auto NumArchitectureDeviceRegions = 0;  #endif  static_assert(NumArchitectureDeviceRegions >= 0); @@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0);  #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc"  #else  // Default to no board devices. 
-constexpr auto NumBoardDeviceRegions = 0; +constexpr inline auto NumBoardDeviceRegions = 0;  #endif  static_assert(NumBoardDeviceRegions >= 0); -constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); -constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); -constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); -constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); +constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); +constexpr inline auto KMemoryRegionType_KernelStack = +    KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); +constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); +constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);  static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19);  static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29);  static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49);  static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); -constexpr auto KMemoryRegionType_KernelMiscDerivedBase = +constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase =      KMemoryRegionType_KernelMisc.DeriveTransition();  static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149);  // UNUSED: .Derive(7, 0); -constexpr auto KMemoryRegionType_KernelMiscMainStack = +constexpr inline auto KMemoryRegionType_KernelMiscMainStack =      KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); -constexpr auto KMemoryRegionType_KernelMiscMappedDevice = +constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice =      KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); -constexpr auto KMemoryRegionType_KernelMiscExceptionStack = +constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack =      KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); -constexpr auto KMemoryRegionType_KernelMiscUnknownDebug = +constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug =      KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);  // UNUSED: .Derive(7, 5); -constexpr auto KMemoryRegionType_KernelMiscIdleStack = +constexpr inline auto KMemoryRegionType_KernelMiscIdleStack =      KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);  static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49);  static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); @@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);  static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549);  static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); -constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); +constexpr inline auto KMemoryRegionType_KernelTemp = +    KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);  static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);  constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { @@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {          return KMemoryRegionType_VirtualDramKernelTraceBuffer;      } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {          return KMemoryRegionType_VirtualDramKernelPtHeap; +    } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) 
{ +        return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;      } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {          return KMemoryRegionType_VirtualDramUnknownDebug;      } else { diff --git a/src/core/hle/kernel/k_page_bitmap.h b/src/core/hle/kernel/k_page_bitmap.h index c97b3dc0b..0ff987732 100644 --- a/src/core/hle/kernel/k_page_bitmap.h +++ b/src/core/hle/kernel/k_page_bitmap.h @@ -16,107 +16,126 @@  namespace Kernel {  class KPageBitmap { -private: +public:      class RandomBitGenerator { -    private: -        Common::TinyMT rng{}; -        u32 entropy{}; -        u32 bits_available{}; +    public: +        RandomBitGenerator() { +            m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); +        } + +        u64 SelectRandomBit(u64 bitmap) { +            u64 selected = 0; + +            for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0; +                 cur_num_bits /= 2) { +                const u64 high = (bitmap >> cur_num_bits); +                const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits))); + +                // Choose high if we have high and (don't have low or select high randomly). +                if (high && (low == 0 || this->GenerateRandomBit())) { +                    bitmap = high; +                    selected += cur_num_bits; +                } else { +                    bitmap = low; +                    selected += 0; +                } +            } + +            return selected; +        } + +        u64 GenerateRandom(u64 max) { +            // Determine the number of bits we need. +            const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max)); + +            // Generate a random value of the desired bitwidth. +            const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed)); + +            // Adjust the value to be in range. +            return rnd - ((rnd / max) * max); +        }      private:          void RefreshEntropy() { -            entropy = rng.GenerateRandomU32(); -            bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>()); +            m_entropy = m_rng.GenerateRandomU32(); +            m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>());          }          bool GenerateRandomBit() { -            if (bits_available == 0) { +            if (m_bits_available == 0) {                  this->RefreshEntropy();              } -            const bool rnd_bit = (entropy & 1) != 0; -            entropy >>= 1; -            --bits_available; +            const bool rnd_bit = (m_entropy & 1) != 0; +            m_entropy >>= 1; +            --m_bits_available;              return rnd_bit;          } -    public: -        RandomBitGenerator() { -            rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); -        } +        u64 GenerateRandomBits(u32 num_bits) { +            u64 result = 0; -        std::size_t SelectRandomBit(u64 bitmap) { -            u64 selected = 0; +            // Iteratively add random bits to our result. +            while (num_bits > 0) { +                // Ensure we have random bits to take from. 
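// (Aside on GenerateRandom above: "rnd - ((rnd / max) * max)" is simply
//  rnd % max, and bits_needed is one past max's bit width; e.g. for
//  max == 5, bits_needed = 1 + (64 - countl_zero(5)) = 4, so rnd is drawn
//  from [0, 16) before the reduction. The modulo is slightly biased for
//  non-power-of-two max values, which this randomized selection tolerates.)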
+                if (m_bits_available == 0) { +                    this->RefreshEntropy(); +                } -            u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; -            u64 cur_mask = (1ULL << cur_num_bits) - 1; +                // Determine how many bits to take this round. +                const auto cur_bits = std::min(num_bits, m_bits_available); -            while (cur_num_bits) { -                const u64 low = (bitmap >> 0) & cur_mask; -                const u64 high = (bitmap >> cur_num_bits) & cur_mask; +                // Generate mask for our current bits. +                const u64 mask = (static_cast<u64>(1) << cur_bits) - 1; -                bool choose_low; -                if (high == 0) { -                    // If only low val is set, choose low. -                    choose_low = true; -                } else if (low == 0) { -                    // If only high val is set, choose high. -                    choose_low = false; -                } else { -                    // If both are set, choose random. -                    choose_low = this->GenerateRandomBit(); -                } +                // Add bits to output from our entropy. +                result <<= cur_bits; +                result |= (m_entropy & mask); -                // If we chose low, proceed with low. -                if (choose_low) { -                    bitmap = low; -                    selected += 0; -                } else { -                    bitmap = high; -                    selected += cur_num_bits; -                } +                // Remove bits from our entropy. +                m_entropy >>= cur_bits; +                m_bits_available -= cur_bits; -                // Proceed. -                cur_num_bits /= 2; -                cur_mask >>= cur_num_bits; +                // Advance. +                num_bits -= cur_bits;              } -            return selected; +            return result;          } + +    private: +        Common::TinyMT m_rng; +        u32 m_entropy{}; +        u32 m_bits_available{};      };  public: -    static constexpr std::size_t MaxDepth = 4; - -private: -    std::array<u64*, MaxDepth> bit_storages{}; -    RandomBitGenerator rng{}; -    std::size_t num_bits{}; -    std::size_t used_depths{}; +    static constexpr size_t MaxDepth = 4;  public:      KPageBitmap() = default; -    constexpr std::size_t GetNumBits() const { -        return num_bits; +    constexpr size_t GetNumBits() const { +        return m_num_bits;      }      constexpr s32 GetHighestDepthIndex() const { -        return static_cast<s32>(used_depths) - 1; +        return static_cast<s32>(m_used_depths) - 1;      } -    u64* Initialize(u64* storage, std::size_t size) { +    u64* Initialize(u64* storage, size_t size) {          // Initially, everything is un-set. -        num_bits = 0; +        m_num_bits = 0;          // Calculate the needed bitmap depth. -        used_depths = static_cast<std::size_t>(GetRequiredDepth(size)); -        ASSERT(used_depths <= MaxDepth); +        m_used_depths = static_cast<size_t>(GetRequiredDepth(size)); +        ASSERT(m_used_depths <= MaxDepth);          // Set the bitmap pointers.          
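// Worked example: for size == 4096 bits, GetRequiredDepth returns 3 and
// the loop below carves the caller's storage into 64 + 1 + 1 u64 words.
// The highest depth index is the finest level (one bit per tracked entry);
// each shallower level keeps one summary bit per u64 word of the level
// beneath it, and the new m_end_storages pointers record where each level
// ends so FindFreeRange can iterate whole storages.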
for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { -            bit_storages[depth] = storage; +            m_bit_storages[depth] = storage;              size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();              storage += size; +            m_end_storages[depth] = storage;          }          return storage; @@ -128,19 +147,19 @@ public:          if (random) {              do { -                const u64 v = bit_storages[depth][offset]; +                const u64 v = m_bit_storages[depth][offset];                  if (v == 0) {                      // If depth is bigger than zero, then a previous level indicated a block was                      // free.                      ASSERT(depth == 0);                      return -1;                  } -                offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v); +                offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v);                  ++depth; -            } while (depth < static_cast<s32>(used_depths)); +            } while (depth < static_cast<s32>(m_used_depths));          } else {              do { -                const u64 v = bit_storages[depth][offset]; +                const u64 v = m_bit_storages[depth][offset];                  if (v == 0) {                      // If depth is bigger than zero, then a previous level indicated a block was                      // free. @@ -149,28 +168,69 @@ public:                  }                  offset = offset * Common::BitSize<u64>() + std::countr_zero(v);                  ++depth; -            } while (depth < static_cast<s32>(used_depths)); +            } while (depth < static_cast<s32>(m_used_depths));          }          return static_cast<s64>(offset);      } -    void SetBit(std::size_t offset) { +    s64 FindFreeRange(size_t count) { +        // Check that it is possible to find a range. +        const u64* const storage_start = m_bit_storages[m_used_depths - 1]; +        const u64* const storage_end = m_end_storages[m_used_depths - 1]; + +        // If we don't have a storage to iterate (or want more blocks than fit in a single storage), +        // we can't find a free range. +        if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) { +            return -1; +        } + +        // Walk the storages to select a random free range. +        const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1); +        const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1); + +        const u64 free_mask = (static_cast<u64>(1) << count) - 1; + +        size_t num_valid_options = 0; +        s64 chosen_offset = -1; +        for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) { +            u64 storage = storage_start[storage_index]; +            for (size_t option = 0; option < options_per_storage; ++option) { +                if ((storage & free_mask) == free_mask) { +                    // We've found a new valid option. +                    ++num_valid_options; + +                    // Select the Kth valid option with probability 1/K. This leads to an overall +                    // uniform distribution. +                    if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) { +                        // This is our first option, so select it. 
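// (This branch is also taken whenever the 1/K draw above replaces an
//  earlier choice, not just for the first valid option. Uniformity check:
//  option i survives with probability
//      (1/i) * prod_{j=i+1..n} (1 - 1/j) = (1/i) * (i/n) = 1/n.)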
+                        chosen_offset = storage_index * Common::BitSize<u64>() + option * count;
+                    }
+                }
+                storage >>= count;
+            }
+        }
+
+        // Return the random offset we chose.
+        return chosen_offset;
+    }
+
+    void SetBit(size_t offset) {
         this->SetBit(this->GetHighestDepthIndex(), offset);
-        num_bits++;
+        m_num_bits++;
     }

-    void ClearBit(std::size_t offset) {
+    void ClearBit(size_t offset) {
         this->ClearBit(this->GetHighestDepthIndex(), offset);
-        num_bits--;
+        m_num_bits--;
     }

-    bool ClearRange(std::size_t offset, std::size_t count) {
+    bool ClearRange(size_t offset, size_t count) {
         s32 depth = this->GetHighestDepthIndex();
-        u64* bits = bit_storages[depth];
-        std::size_t bit_ind = offset / Common::BitSize<u64>();
-        if (count < Common::BitSize<u64>()) {
-            const std::size_t shift = offset % Common::BitSize<u64>();
+        u64* bits = m_bit_storages[depth];
+        size_t bit_ind = offset / Common::BitSize<u64>();
+        if (count < Common::BitSize<u64>()) [[likely]] {
+            const size_t shift = offset % Common::BitSize<u64>();
             ASSERT(shift + count <= Common::BitSize<u64>());
             // Check that all the bits are set.
             const u64 mask = ((u64(1) << count) - 1) << shift;
@@ -189,8 +249,8 @@
             ASSERT(offset % Common::BitSize<u64>() == 0);
             ASSERT(count % Common::BitSize<u64>() == 0);
             // Check that all the bits are set.
-            std::size_t remaining = count;
-            std::size_t i = 0;
+            size_t remaining = count;
+            size_t i = 0;
             do {
                 if (bits[bit_ind + i++] != ~u64(0)) {
                     return false;
@@ -209,18 +269,18 @@
             } while (remaining > 0);
         }

-        num_bits -= count;
+        m_num_bits -= count;
         return true;
     }

 private:
-    void SetBit(s32 depth, std::size_t offset) {
+    void SetBit(s32 depth, size_t offset) {
         while (depth >= 0) {
-            std::size_t ind = offset / Common::BitSize<u64>();
-            std::size_t which = offset % Common::BitSize<u64>();
+            size_t ind = offset / Common::BitSize<u64>();
+            size_t which = offset % Common::BitSize<u64>();
             const u64 mask = u64(1) << which;
-            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64* bit = std::addressof(m_bit_storages[depth][ind]);
             u64 v = *bit;
             ASSERT((v & mask) == 0);
             *bit = v | mask;
@@ -232,13 +292,13 @@
         }
     }

-    void ClearBit(s32 depth, std::size_t offset) {
+    void ClearBit(s32 depth, size_t offset) {
         while (depth >= 0) {
-            std::size_t ind = offset / Common::BitSize<u64>();
-            std::size_t which = offset % Common::BitSize<u64>();
+            size_t ind = offset / Common::BitSize<u64>();
+            size_t which = offset % Common::BitSize<u64>();
             const u64 mask = u64(1) << which;
-            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64* bit = std::addressof(m_bit_storages[depth][ind]);
             u64 v = *bit;
             ASSERT((v & mask) != 0);
             v &= ~mask;
@@ -252,7 +312,7 @@
         }
     }

 private:
-    static constexpr s32 GetRequiredDepth(std::size_t region_size) {
+    static constexpr s32 
GetRequiredDepth(size_t region_size) {          s32 depth = 0;          while (true) {              region_size /= Common::BitSize<u64>(); @@ -264,8 +324,8 @@ private:      }  public: -    static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) { -        std::size_t overhead_bits = 0; +    static constexpr size_t CalculateManagementOverheadSize(size_t region_size) { +        size_t overhead_bits = 0;          for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {              region_size =                  Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>(); @@ -273,6 +333,13 @@ public:          }          return overhead_bits * sizeof(u64);      } + +private: +    std::array<u64*, MaxDepth> m_bit_storages{}; +    std::array<u64*, MaxDepth> m_end_storages{}; +    RandomBitGenerator m_rng; +    size_t m_num_bits{}; +    size_t m_used_depths{};  };  } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index aef06e213..cfedaae61 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h @@ -11,6 +11,16 @@  namespace Kernel { +class KernelCore; + +class KPageBufferSlabHeap : protected impl::KSlabHeapImpl { +public: +    static constexpr size_t BufferSize = PageSize; + +public: +    void Initialize(Core::System& system); +}; +  class KPageBuffer final : public KSlabAllocated<KPageBuffer> {  public:      explicit KPageBuffer(KernelCore&) {} @@ -21,8 +31,6 @@ public:  private:      [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};  }; - -static_assert(sizeof(KPageBuffer) == PageSize); -static_assert(alignof(KPageBuffer) == PageSize); +static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);  } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h index 968753992..316f172f2 100644 --- a/src/core/hle/kernel/k_page_group.h +++ b/src/core/hle/kernel/k_page_group.h @@ -5,6 +5,7 @@  #include <list> +#include "common/alignment.h"  #include "common/assert.h"  #include "common/common_types.h"  #include "core/hle/kernel/memory_types.h" @@ -12,6 +13,89 @@  namespace Kernel { +class KPageGroup; + +class KBlockInfo { +private: +    friend class KPageGroup; + +public: +    constexpr KBlockInfo() = default; + +    constexpr void Initialize(PAddr addr, size_t np) { +        ASSERT(Common::IsAligned(addr, PageSize)); +        ASSERT(static_cast<u32>(np) == np); + +        m_page_index = static_cast<u32>(addr / PageSize); +        m_num_pages = static_cast<u32>(np); +    } + +    constexpr PAddr GetAddress() const { +        return m_page_index * PageSize; +    } +    constexpr size_t GetNumPages() const { +        return m_num_pages; +    } +    constexpr size_t GetSize() const { +        return this->GetNumPages() * PageSize; +    } +    constexpr PAddr GetEndAddress() const { +        return (m_page_index + m_num_pages) * PageSize; +    } +    constexpr PAddr GetLastAddress() const { +        return this->GetEndAddress() - 1; +    } + +    constexpr KBlockInfo* GetNext() const { +        return m_next; +    } + +    constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const { +        return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages; +    } + +    constexpr bool operator==(const KBlockInfo& rhs) const { +        return this->IsEquivalentTo(rhs); +    } + +    constexpr bool operator!=(const KBlockInfo& rhs) const { +        return !(*this
== rhs); +    } + +    constexpr bool IsStrictlyBefore(PAddr addr) const { +        const PAddr end = this->GetEndAddress(); + +        if (m_page_index != 0 && end == 0) { +            return false; +        } + +        return end < addr; +    } + +    constexpr bool operator<(PAddr addr) const { +        return this->IsStrictlyBefore(addr); +    } + +    constexpr bool TryConcatenate(PAddr addr, size_t np) { +        if (addr != 0 && addr == this->GetEndAddress()) { +            m_num_pages += static_cast<u32>(np); +            return true; +        } +        return false; +    } + +private: +    constexpr void SetNext(KBlockInfo* next) { +        m_next = next; +    } + +private: +    KBlockInfo* m_next{}; +    u32 m_page_index{}; +    u32 m_num_pages{}; +}; +static_assert(sizeof(KBlockInfo) <= 0x10); +  class KPageGroup final {  public:      class Node final { @@ -92,6 +176,8 @@ public:          return nodes.empty();      } +    void Finalize() {} +  private:      std::list<Node> nodes;  }; diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp index 5ede60168..7b02c7d8b 100644 --- a/src/core/hle/kernel/k_page_heap.cpp +++ b/src/core/hle/kernel/k_page_heap.cpp @@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {      return num_free;  } -PAddr KPageHeap::AllocateBlock(s32 index, bool random) { +PAddr KPageHeap::AllocateByLinearSearch(s32 index) {      const size_t needed_size = m_blocks[index].GetSize();      for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { -        if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { +        if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {              if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {                  this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);              } @@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) {      return 0;  } +PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) { +    // Get the size and required alignment. +    const size_t needed_size = num_pages * PageSize; +    const size_t align_size = align_pages * PageSize; + +    // Determine meta-alignment of our desired alignment size. +    const size_t align_shift = std::countr_zero(align_size); + +    // Decide on a block to allocate from. +    constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4; +    { +        // By default, we'll want to look at all blocks larger than our current one. +        s32 max_blocks = static_cast<s32>(m_num_blocks); + +        // Determine the maximum block we should try to allocate from. +        size_t possible_alignments = 0; +        for (s32 i = index; i < max_blocks; ++i) { +            // Add the possible alignments from blocks at the current size. +            possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * +                                   m_blocks[i].GetNumFreeBlocks(); + +            // If there are enough possible alignments, we don't need to look at larger blocks. +            if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) { +                max_blocks = i + 1; +                break; +            } +        } + +        // If we have any possible alignments which require a larger block, we need to pick one. +        if (possible_alignments > 0 && index + 1 < max_blocks) { +            // Select a random alignment from the possibilities. 
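The block-selection pass that follows is standard weighted random choice done in two sweeps: total the weights, draw one value in [0, total), then walk the same weights again until the running sum exceeds the draw. A sketch of the pattern in isolation (SelectWeighted is a hypothetical name, not a yuzu function):

    #include <cassert>
    #include <cstddef>
    #include <numeric>
    #include <random>
    #include <vector>

    // Picks an index with probability proportional to weights[i].
    std::size_t SelectWeighted(const std::vector<std::size_t>& weights, std::mt19937_64& rng) {
        const std::size_t total = std::accumulate(weights.begin(), weights.end(), std::size_t{0});
        assert(total > 0);

        // One draw from [0, total), then locate the bucket it lands in.
        const std::size_t rnd = std::uniform_int_distribution<std::size_t>{0, total - 1}(rng);
        std::size_t cumulative = 0;
        for (std::size_t i = 0; i < weights.size(); ++i) {
            cumulative += weights[i];
            if (rnd < cumulative) {
                return i;
            }
        }
        return weights.size() - 1; // unreachable while total > 0
    }

In the allocator, each block size's weight is the number of distinct aligned placements its free blocks can satisfy, which is why larger free blocks are proportionally more likely to be chosen.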
+            const size_t rnd = m_rng.GenerateRandom(possible_alignments); + +            // Determine which block corresponds to the random alignment we chose. +            possible_alignments = 0; +            for (s32 i = index; i < max_blocks; ++i) { +                // Add the possible alignments from blocks at the current size. +                possible_alignments += +                    (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * +                    m_blocks[i].GetNumFreeBlocks(); + +                // If the current block gets us to our random choice, use the current block. +                if (rnd < possible_alignments) { +                    index = i; +                    break; +                } +            } +        } +    } + +    // Pop a block from the index we selected. +    if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) { +        // Determine how much size we have left over. +        if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; +            leftover_size > 0) { +            // Determine how many valid alignments we can have. +            const size_t possible_alignments = 1 + (leftover_size >> align_shift); + +            // Select a random valid alignment. +            const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift; + +            // Free memory before the random offset. +            if (random_offset != 0) { +                this->Free(addr, random_offset / PageSize); +            } + +            // Advance our block by the random offset. +            addr += random_offset; + +            // Free memory after our allocated block. +            if (random_offset != leftover_size) { +                this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize); +            } +        } + +        // Return the block we allocated. +        return addr; +    } + +    return 0; +} +  void KPageHeap::FreeBlock(PAddr block, s32 index) {      do {          block = m_blocks[index++].PushBlock(block); diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h index 0917a8bed..9021edcf7 100644 --- a/src/core/hle/kernel/k_page_heap.h +++ b/src/core/hle/kernel/k_page_heap.h @@ -14,13 +14,9 @@  namespace Kernel { -class KPageHeap final { +class KPageHeap {  public: -    YUZU_NON_COPYABLE(KPageHeap); -    YUZU_NON_MOVEABLE(KPageHeap); -      KPageHeap() = default; -    ~KPageHeap() = default;      constexpr PAddr GetAddress() const {          return m_heap_address; @@ -57,7 +53,20 @@ public:          m_initial_used_size = m_heap_size - free_size - reserved_size;      } -    PAddr AllocateBlock(s32 index, bool random); +    PAddr AllocateBlock(s32 index, bool random) { +        if (random) { +            const size_t block_pages = m_blocks[index].GetNumPages(); +            return this->AllocateByRandom(index, block_pages, block_pages); +        } else { +            return this->AllocateByLinearSearch(index); +        } +    } + +    PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { +        // TODO: linear search support? 
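Once an oversized block is popped, the allocation is carved out of it at a random aligned offset, and the pieces before and after are freed back to the heap. A sketch of just that split arithmetic, with hypothetical names, assuming align_size is a power of two dividing both sizes (as it is for page-heap blocks):

    #include <cstddef>
    #include <random>

    struct Carve {
        std::size_t head_size; // freed before the allocation
        std::size_t tail_size; // freed after the allocation
        std::size_t offset;    // where the allocation starts inside the block
    };

    Carve CarveAtRandomOffset(std::size_t block_size, std::size_t needed_size,
                              std::size_t align_size, std::mt19937_64& rng) {
        const std::size_t leftover = block_size - needed_size;
        // Every aligned start position within the block is a valid choice.
        const std::size_t possible = 1 + leftover / align_size;
        const std::size_t offset =
            std::uniform_int_distribution<std::size_t>{0, possible - 1}(rng) * align_size;
        return Carve{offset, leftover - offset, offset};
    }

head_size + needed_size + tail_size always equals block_size, so no memory is lost regardless of the offset drawn.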
+        return this->AllocateByRandom(index, num_pages, align_pages); +    } +      void Free(PAddr addr, size_t num_pages);      static size_t CalculateManagementOverheadSize(size_t region_size) { @@ -68,7 +77,7 @@ public:      static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {          const size_t target_pages = std::max(num_pages, align_pages);          for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { -            if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { +            if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {                  return static_cast<s32>(i);              }          } @@ -77,7 +86,7 @@ public:      static constexpr s32 GetBlockIndex(size_t num_pages) {          for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { -            if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { +            if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {                  return i;              }          } @@ -85,7 +94,7 @@ public:      }      static constexpr size_t GetBlockSize(size_t index) { -        return size_t(1) << MemoryBlockPageShifts[index]; +        return static_cast<size_t>(1) << MemoryBlockPageShifts[index];      }      static constexpr size_t GetBlockNumPages(size_t index) { @@ -93,13 +102,9 @@ public:      }  private: -    class Block final { +    class Block {      public: -        YUZU_NON_COPYABLE(Block); -        YUZU_NON_MOVEABLE(Block); -          Block() = default; -        ~Block() = default;          constexpr size_t GetShift() const {              return m_block_shift; @@ -201,6 +206,9 @@ private:      };  private: +    PAddr AllocateByLinearSearch(s32 index); +    PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages); +      static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,                                                    size_t num_block_shifts); @@ -209,7 +217,8 @@ private:      size_t m_heap_size{};      size_t m_initial_used_size{};      size_t m_num_blocks{}; -    std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; +    std::array<Block, NumMemoryBlockPageShifts> m_blocks; +    KPageBitmap::RandomBitGenerator m_rng;      std::vector<u64> m_management_data;  }; diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 307e491cb..5387bf5fe 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -15,6 +15,7 @@  #include "core/hle/kernel/k_resource_limit.h"  #include "core/hle/kernel/k_scoped_resource_reservation.h"  #include "core/hle/kernel/k_system_control.h" +#include "core/hle/kernel/k_system_resource.h"  #include "core/hle/kernel/kernel.h"  #include "core/hle/kernel/svc_results.h"  #include "core/memory.h" @@ -23,6 +24,61 @@ namespace Kernel {  namespace { +class KScopedLightLockPair { +    YUZU_NON_COPYABLE(KScopedLightLockPair); +    YUZU_NON_MOVEABLE(KScopedLightLockPair); + +private: +    KLightLock* m_lower; +    KLightLock* m_upper; + +public: +    KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { +        // Ensure our locks are in a consistent order. 
+        if (std::addressof(lhs) <= std::addressof(rhs)) { +            m_lower = std::addressof(lhs); +            m_upper = std::addressof(rhs); +        } else { +            m_lower = std::addressof(rhs); +            m_upper = std::addressof(lhs); +        } + +        // Acquire both locks. +        m_lower->Lock(); +        if (m_lower != m_upper) { +            m_upper->Lock(); +        } +    } + +    ~KScopedLightLockPair() { +        // Unlock the upper lock. +        if (m_upper != nullptr && m_upper != m_lower) { +            m_upper->Unlock(); +        } + +        // Unlock the lower lock. +        if (m_lower != nullptr) { +            m_lower->Unlock(); +        } +    } + +public: +    // Utility. +    void TryUnlockHalf(KLightLock& lock) { +        // Only allow unlocking if the lock is half the pair. +        if (m_lower != m_upper) { +            // We want to be sure the lock is one we own. +            if (m_lower == std::addressof(lock)) { +                lock.Unlock(); +                m_lower = nullptr; +            } else if (m_upper == std::addressof(lock)) { +                lock.Unlock(); +                m_upper = nullptr; +            } +        } +    } +}; +  using namespace Common::Literals;  constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { @@ -49,9 +105,10 @@ KPageTable::KPageTable(Core::System& system_)  KPageTable::~KPageTable() = default;  Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, -                                        VAddr code_addr, size_t code_size, -                                        KMemoryBlockSlabManager* mem_block_slab_manager, -                                        KMemoryManager::Pool pool) { +                                        bool enable_das_merge, bool from_back, +                                        KMemoryManager::Pool pool, VAddr code_addr, +                                        size_t code_size, KSystemResource* system_resource, +                                        KResourceLimit* resource_limit) {      const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {          return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); @@ -112,11 +169,13 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type      // Set other basic fields      m_enable_aslr = enable_aslr; -    m_enable_device_address_space_merge = false; +    m_enable_device_address_space_merge = enable_das_merge;      m_address_space_start = start;      m_address_space_end = end;      m_is_kernel = false; -    m_memory_block_slab_manager = mem_block_slab_manager; +    m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); +    m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); +    m_resource_limit = resource_limit;      // Determine the region we can place our undetermineds in      VAddr alloc_start{}; @@ -215,10 +274,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type          }      } -    // Set heap members +    // Set heap and fill members.      
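KScopedLightLockPair above sidesteps lock-order inversion by always taking the lock at the lower address first, so two threads locking the same pair in opposite argument orders cannot deadlock; it also tolerates being handed the same lock twice. The same idiom over standard mutexes (a sketch, not yuzu's KLightLock API):

    #include <memory>
    #include <mutex>

    class ScopedMutexPair {
    public:
        ScopedMutexPair(std::mutex& lhs, std::mutex& rhs)
            : m_lower{std::addressof(lhs) <= std::addressof(rhs) ? &lhs : &rhs},
              m_upper{std::addressof(lhs) <= std::addressof(rhs) ? &rhs : &lhs} {
            m_lower->lock();
            if (m_upper != m_lower) { // same mutex passed twice: lock only once
                m_upper->lock();
            }
        }
        ~ScopedMutexPair() {
            if (m_upper != m_lower) {
                m_upper->unlock();
            }
            m_lower->unlock();
        }
        ScopedMutexPair(const ScopedMutexPair&) = delete;
        ScopedMutexPair& operator=(const ScopedMutexPair&) = delete;

    private:
        std::mutex* m_lower;
        std::mutex* m_upper;
    };

std::scoped_lock{lhs, rhs} gives the same guarantee through std::lock's deadlock-avoidance algorithm; the hand-rolled version exists here because KLightLock is not a standard lockable and the kernel needs the TryUnlockHalf utility shown above.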
m_current_heap_end = m_heap_region_start;      m_max_heap_size = 0; -    m_max_physical_memory_size = 0; +    m_mapped_physical_memory_size = 0; +    m_mapped_unsafe_physical_memory = 0; +    m_mapped_insecure_memory = 0; +    m_mapped_ipc_server_memory = 0; + +    m_heap_fill_value = 0; +    m_ipc_fill_value = 0; +    m_stack_fill_value = 0; + +    // Set allocation option. +    m_allocate_option = +        KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack +                                                     : KMemoryManager::Direction::FromFront);      // Ensure that our regions are inside our address space      auto IsInAddressSpace = [&](VAddr addr) { @@ -267,6 +338,16 @@ void KPageTable::Finalize() {          m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);      }); +    // Release any insecure mapped memory. +    if (m_mapped_insecure_memory) { +        UNIMPLEMENTED(); +    } + +    // Release any ipc server memory. +    if (m_mapped_ipc_server_memory) { +        UNIMPLEMENTED(); +    } +      // Close the backing page table, as the destructor is not called for guest objects.      m_page_table_impl.reset();  } @@ -650,7 +731,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu  Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,                                        VAddr src_addr) { -    KScopedLightLock lk(m_general_lock); +    // Acquire the table locks. +    KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);      const size_t num_pages{size / PageSize}; @@ -686,9 +768,753 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s      R_SUCCEED();  } +Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, +                                     VAddr address, size_t size, KMemoryPermission test_perm, +                                     KMemoryState dst_state) { +    // Validate pre-conditions. +    ASSERT(this->IsLockedByCurrentThread()); +    ASSERT(test_perm == KMemoryPermission::UserReadWrite || +           test_perm == KMemoryPermission::UserRead); + +    // Check that the address is in range. +    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + +    // Get the source permission. +    const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) +                              ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped +                              : KMemoryPermission::UserRead; + +    // Get aligned extents. +    const VAddr aligned_src_start = Common::AlignDown((address), PageSize); +    const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize); +    const VAddr mapping_src_start = Common::AlignUp((address), PageSize); +    const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize); + +    const auto aligned_src_last = (aligned_src_end)-1; +    const auto mapping_src_last = (mapping_src_end)-1; + +    // Get the test state and attribute mask.
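The four extents computed above follow one rule: the aligned_* pair rounds outward (start down, end up) to cover every page the buffer touches, while the mapping_* pair rounds inward to cover only pages wholly inside the buffer. The difference between the two is exactly the partial head/tail pages that must be copied into fresh pages rather than aliased. A worked check with hypothetical values:

    #include <cassert>
    #include <cstdint>

    constexpr std::uint64_t PageSize = 0x1000;

    constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) { // a: power of two
        return v & ~(a - 1);
    }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
        return AlignDown(v + a - 1, a);
    }

    int main() {
        // A 0x2800-byte message starting 0x800 bytes into a page.
        const std::uint64_t address = 0x10000800, size = 0x2800;

        const auto aligned_start = AlignDown(address, PageSize);        // 0x10000000
        const auto aligned_end   = AlignUp(address + size, PageSize);   // 0x10003000
        const auto mapping_start = AlignUp(address, PageSize);          // 0x10001000
        const auto mapping_end   = AlignDown(address + size, PageSize); // 0x10003000

        // Two whole pages can be aliased directly; one partial head page
        // must be allocated and filled, and there is no partial tail.
        assert(mapping_end - mapping_start == 2 * PageSize);
        assert(mapping_start - aligned_start == PageSize);
        assert(aligned_end - mapping_end == 0);
        return 0;
    }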
+    KMemoryState test_state; +    KMemoryAttribute test_attr_mask; +    switch (dst_state) { +    case KMemoryState::Ipc: +        test_state = KMemoryState::FlagCanUseIpc; +        test_attr_mask = +            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; +        break; +    case KMemoryState::NonSecureIpc: +        test_state = KMemoryState::FlagCanUseNonSecureIpc; +        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; +        break; +    case KMemoryState::NonDeviceIpc: +        test_state = KMemoryState::FlagCanUseNonDeviceIpc; +        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; +        break; +    default: +        R_THROW(ResultInvalidCombination); +    } + +    // Ensure that on failure, we roll back appropriately. +    size_t mapped_size = 0; +    ON_RESULT_FAILURE { +        if (mapped_size > 0) { +            this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, +                                                          src_perm); +        } +    }; + +    size_t blocks_needed = 0; + +    // Iterate, mapping as needed. +    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); +    while (true) { +        const KMemoryInfo info = it->GetMemoryInfo(); + +        // Validate the current block. +        R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, +                                     test_attr_mask, KMemoryAttribute::None)); + +        if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() && +            info.GetAddress() < (mapping_src_end)) { +            const auto cur_start = +                info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start); +            const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() +                                                                           : (mapping_src_end); +            const size_t cur_size = cur_end - cur_start; + +            if (info.GetAddress() < (mapping_src_start)) { +                ++blocks_needed; +            } +            if (mapping_src_last < info.GetLastAddress()) { +                ++blocks_needed; +            } + +            // Set the permissions on the block, if we need to. +            if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { +                R_TRY(Operate(cur_start, cur_size / PageSize, src_perm, +                              OperationType::ChangePermissions)); +            } + +            // Note that we mapped this part. +            mapped_size += cur_size; +        } + +        // If the block is at the end, we're done. +        if (aligned_src_last <= info.GetLastAddress()) { +            break; +        } + +        // Advance. 
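cur_start/cur_end in the loop above are the intersection of the current memory block with the mapping window; clamping each end is the usual way to intersect half-open ranges. In isolation (Range and Intersect are illustrative names, not yuzu types):

    #include <algorithm>
    #include <cstdint>
    #include <optional>

    struct Range {
        std::uint64_t start;
        std::uint64_t end; // exclusive
    };

    // Intersection of two half-open ranges, or nullopt when they are disjoint.
    std::optional<Range> Intersect(const Range& a, const Range& b) {
        const std::uint64_t start = std::max(a.start, b.start);
        const std::uint64_t end = std::min(a.end, b.end);
        if (start >= end) {
            return std::nullopt;
        }
        return Range{start, end};
    }

The kernel code additionally counts one needed allocator block each time the window cuts through a memory block, since every cut splits one KMemoryBlock into two.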
+        ++it; +        ASSERT(it != m_memory_block_manager.end()); +    } + +    if (out_blocks_needed != nullptr) { +        ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); +        *out_blocks_needed = blocks_needed; +    } + +    R_SUCCEED(); +} + +Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, +                                     KMemoryPermission test_perm, KMemoryState dst_state, +                                     KPageTable& src_page_table, bool send) { +    ASSERT(this->IsLockedByCurrentThread()); +    ASSERT(src_page_table.IsLockedByCurrentThread()); + +    // Check that we can theoretically map. +    const VAddr region_start = m_alias_region_start; +    const size_t region_size = m_alias_region_end - m_alias_region_start; +    R_UNLESS(size < region_size, ResultOutOfAddressSpace); + +    // Get aligned source extents. +    const VAddr src_start = src_addr; +    const VAddr src_end = src_addr + size; +    const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize); +    const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize); +    const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize); +    const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize); +    const size_t aligned_src_size = aligned_src_end - aligned_src_start; +    const size_t mapping_src_size = +        (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; + +    // Select a random address to map at. +    VAddr dst_addr = +        this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, +                           PageSize, 0, this->GetNumGuardPages()); + +    R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); + +    // Check that we can perform the operation we're about to perform. +    ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); + +    // Create an update allocator. +    Result allocator_result; +    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), +                                                 m_memory_block_slab_manager); +    R_TRY(allocator_result); + +    // We're going to perform an update, so create a helper. +    KScopedPageTableUpdater updater(this); + +    // Reserve space for any partial pages we allocate. +    const size_t unmapped_size = aligned_src_size - mapping_src_size; +    KScopedResourceReservation memory_reservation( +        m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size); +    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); + +    // Ensure that we manage page references correctly. +    PAddr start_partial_page = 0; +    PAddr end_partial_page = 0; +    VAddr cur_mapped_addr = dst_addr; + +    // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll +    // free on scope exit. +    SCOPE_EXIT({ +        if (start_partial_page != 0) { +            m_system.Kernel().MemoryManager().Close(start_partial_page, 1); +        } +        if (end_partial_page != 0) { +            m_system.Kernel().MemoryManager().Close(end_partial_page, 1); +        } +    }); + +    ON_RESULT_FAILURE { +        if (cur_mapped_addr != dst_addr) { +            // HACK: Manually close the pages. 
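SetupForIpcServer leans on SCOPE_EXIT and ON_RESULT_FAILURE so that partially-built state is torn down on any early return and kept only on success. A generic scope-guard sketch of the idiom (yuzu's macros wrap something equivalent; this is not their exact implementation):

    #include <utility>

    // Runs a callable at scope exit unless Cancel() was called first:
    // the shape used for "roll back on failure, commit on success".
    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F&& f) : m_f{std::forward<F>(f)} {}
        ~ScopeGuard() {
            if (m_active) {
                m_f();
            }
        }
        void Cancel() { m_active = false; }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F m_f;
        bool m_active = true;
    };

    // Usage sketch (hypothetical helper name):
    //     ScopeGuard rollback{[&] { UnmapEverythingMappedSoFar(); }};
    //     ... early-returning operations ...
    //     rollback.Cancel(); // reached only on success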
+            HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize); + +            ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize, +                           KMemoryPermission::None, OperationType::Unmap) +                       .IsSuccess()); +        } +    }; + +    // Allocate the start page as needed. +    if (aligned_src_start < mapping_src_start) { +        start_partial_page = +            m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); +        R_UNLESS(start_partial_page != 0, ResultOutOfMemory); +    } + +    // Allocate the end page as needed. +    if (mapping_src_end < aligned_src_end && +        (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { +        end_partial_page = +            m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); +        R_UNLESS(end_partial_page != 0, ResultOutOfMemory); +    } + +    // Get the implementation. +    auto& src_impl = src_page_table.PageTableImpl(); + +    // Get the fill value for partial pages. +    const auto fill_val = m_ipc_fill_value; + +    // Begin traversal. +    Common::PageTable::TraversalContext context; +    Common::PageTable::TraversalEntry next_entry; +    bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start); +    ASSERT(traverse_valid); + +    // Prepare tracking variables. +    PAddr cur_block_addr = next_entry.phys_addr; +    size_t cur_block_size = +        next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1)); +    size_t tot_block_size = cur_block_size; + +    // Map the start page, if we have one. +    if (start_partial_page != 0) { +        // Ensure the page holds correct data. +        const VAddr start_partial_virt = +            GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page); +        if (send) { +            const size_t partial_offset = src_start - aligned_src_start; +            size_t copy_size, clear_size; +            if (src_end < mapping_src_start) { +                copy_size = size; +                clear_size = mapping_src_start - src_end; +            } else { +                copy_size = mapping_src_start - src_start; +                clear_size = 0; +            } + +            std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, +                        partial_offset); +            std::memcpy( +                m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset), +                m_system.Memory().GetPointer<void>( +                    GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) + +                    partial_offset), +                copy_size); +            if (clear_size > 0) { +                std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset + +                                                               copy_size), +                            fill_val, clear_size); +            } +        } else { +            std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize); +        } + +        // Map the page. +        R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page)); + +        // HACK: Manually open the pages. +        HACK_OpenPages(start_partial_page, 1); + +        // Update tracking extents. 
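The start-page handling above assembles a fresh page from three spans: fill value up to where the message begins, bytes copied from the source page, and fill value after the copied region. The composition in isolation, with hypothetical names (mirroring how partial_offset applies to both source and destination):

    #include <cstddef>
    #include <cstring>

    // Compose a partial page: [0, offset) = fill, [offset, offset + copy_size) =
    // source bytes, [offset + copy_size, offset + copy_size + clear_size) = fill.
    void FillPartialPage(unsigned char* page, const unsigned char* src_page, std::size_t offset,
                         std::size_t copy_size, std::size_t clear_size, unsigned char fill) {
        std::memset(page, fill, offset);
        std::memcpy(page + offset, src_page + offset, copy_size);
        std::memset(page + offset + copy_size, fill, clear_size);
    }

Filling rather than leaving stale contents matters because the rest of the partial page is visible to the receiving process.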
+        cur_mapped_addr += PageSize; +        cur_block_addr += PageSize; +        cur_block_size -= PageSize; + +        // If the block's size was one page, we may need to continue traversal. +        if (cur_block_size == 0 && aligned_src_size > PageSize) { +            traverse_valid = src_impl.ContinueTraversal(next_entry, context); +            ASSERT(traverse_valid); + +            cur_block_addr = next_entry.phys_addr; +            cur_block_size = next_entry.block_size; +            tot_block_size += next_entry.block_size; +        } +    } + +    // Map the remaining pages. +    while (aligned_src_start + tot_block_size < mapping_src_end) { +        // Continue the traversal. +        traverse_valid = src_impl.ContinueTraversal(next_entry, context); +        ASSERT(traverse_valid); + +        // Process the block. +        if (next_entry.phys_addr != cur_block_addr + cur_block_size) { +            // Map the block we've been processing so far. +            R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map, +                          cur_block_addr)); + +            // HACK: Manually open the pages. +            HACK_OpenPages(cur_block_addr, cur_block_size / PageSize); + +            // Update tracking extents. +            cur_mapped_addr += cur_block_size; +            cur_block_addr = next_entry.phys_addr; +            cur_block_size = next_entry.block_size; +        } else { +            cur_block_size += next_entry.block_size; +        } +        tot_block_size += next_entry.block_size; +    } + +    // Handle the last direct-mapped page. +    if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; +        mapped_block_end < mapping_src_end) { +        const size_t last_block_size = mapping_src_end - mapped_block_end; + +        // Map the last block. +        R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map, +                      cur_block_addr)); + +        // HACK: Manually open the pages. +        HACK_OpenPages(cur_block_addr, last_block_size / PageSize); + +        // Update tracking extents. +        cur_mapped_addr += last_block_size; +        cur_block_addr += last_block_size; +        if (mapped_block_end + cur_block_size < aligned_src_end && +            cur_block_size == last_block_size) { +            traverse_valid = src_impl.ContinueTraversal(next_entry, context); +            ASSERT(traverse_valid); + +            cur_block_addr = next_entry.phys_addr; +        } +    } + +    // Map the end page, if we have one. +    if (end_partial_page != 0) { +        // Ensure the page holds correct data. +        const VAddr end_partial_virt = +            GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page); +        if (send) { +            const size_t copy_size = src_end - mapping_src_end; +            std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt), +                        m_system.Memory().GetPointer<void>(GetHeapVirtualAddress( +                            m_system.Kernel().MemoryLayout(), cur_block_addr)), +                        copy_size); +            std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val, +                        PageSize - copy_size); +        } else { +            std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize); +        } + +        // Map the page. 
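The traversal loop above is run-length coalescing: while the next physical block begins exactly where the current run ends, the run grows; otherwise the run is flushed as one Map operation and a new run starts. The same pattern stripped to its core (PhysRange and CoalesceRuns are illustrative names):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    struct PhysRange {
        std::uint64_t addr;
        std::uint64_t size;
    };

    // Merges physically contiguous entries, invoking `flush` once per maximal run.
    void CoalesceRuns(const std::vector<PhysRange>& entries,
                      const std::function<void(PhysRange)>& flush) {
        if (entries.empty()) {
            return;
        }
        PhysRange run = entries.front();
        for (std::size_t i = 1; i < entries.size(); ++i) {
            if (entries[i].addr == run.addr + run.size) {
                run.size += entries[i].size; // contiguous: extend the run
            } else {
                flush(run); // gap: map what we have, start a new run
                run = entries[i];
            }
        }
        flush(run);
    }

Batching this way keeps the number of Operate calls proportional to the number of discontiguous physical runs rather than to the page count.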
+        R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page)); + +        // HACK: Manually open the pages. +        HACK_OpenPages(end_partial_page, 1); +    } + +    // Update memory blocks to reflect our changes +    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, +                                  dst_state, test_perm, KMemoryAttribute::None, +                                  KMemoryBlockDisableMergeAttribute::Normal, +                                  KMemoryBlockDisableMergeAttribute::None); + +    // Set the output address. +    *out_addr = dst_addr + (src_start - aligned_src_start); + +    // We succeeded. +    memory_reservation.Commit(); +    R_SUCCEED(); +} + +Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, +                               KPageTable& src_page_table, KMemoryPermission test_perm, +                               KMemoryState dst_state, bool send) { +    // For convenience, alias this. +    KPageTable& dst_page_table = *this; + +    // Acquire the table locks. +    KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); + +    // We're going to perform an update, so create a helper. +    KScopedPageTableUpdater updater(std::addressof(src_page_table)); + +    // Perform client setup. +    size_t num_allocator_blocks; +    R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), +                                           std::addressof(num_allocator_blocks), src_addr, size, +                                           test_perm, dst_state)); + +    // Create an update allocator. +    Result allocator_result; +    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), +                                                 src_page_table.m_memory_block_slab_manager, +                                                 num_allocator_blocks); +    R_TRY(allocator_result); + +    // Get the mapped extents. +    const VAddr src_map_start = Common::AlignUp((src_addr), PageSize); +    const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize); +    const size_t src_map_size = src_map_end - src_map_start; + +    // Ensure that we clean up appropriately if we fail after this. +    const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) +                              ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped +                              : KMemoryPermission::UserRead; +    ON_RESULT_FAILURE { +        if (src_map_end > src_map_start) { +            src_page_table.CleanupForIpcClientOnServerSetupFailure( +                updater.GetPageList(), src_map_start, src_map_size, src_perm); +        } +    }; + +    // Perform server setup. +    R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, +                                           src_page_table, send)); + +    // If anything was mapped, ipc-lock the pages. +    if (src_map_start < src_map_end) { +        // Get the source permission. 
+        src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, +                                                         (src_map_end - src_map_start) / PageSize, +                                                         &KMemoryBlock::LockForIpc, src_perm); +    } + +    R_SUCCEED(); +} + +Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) { +    // Validate the address. +    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + +    // Lock the table. +    KScopedLightLock lk(m_general_lock); + +    // Validate the memory state. +    size_t num_allocator_blocks; +    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, +                                 KMemoryState::All, dst_state, KMemoryPermission::UserRead, +                                 KMemoryPermission::UserRead, KMemoryAttribute::All, +                                 KMemoryAttribute::None)); + +    // Create an update allocator. +    Result allocator_result; +    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), +                                                 m_memory_block_slab_manager, num_allocator_blocks); +    R_TRY(allocator_result); + +    // We're going to perform an update, so create a helper. +    KScopedPageTableUpdater updater(this); + +    // Get aligned extents. +    const VAddr aligned_start = Common::AlignDown((address), PageSize); +    const VAddr aligned_end = Common::AlignUp((address) + size, PageSize); +    const size_t aligned_size = aligned_end - aligned_start; +    const size_t aligned_num_pages = aligned_size / PageSize; + +    // HACK: Manually close the pages. +    HACK_ClosePages(aligned_start, aligned_num_pages); + +    // Unmap the pages. +    R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap)); + +    // Update memory blocks. +    m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, +                                  KMemoryState::None, KMemoryPermission::None, +                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, +                                  KMemoryBlockDisableMergeAttribute::Normal); + +    // Release from the resource limit as relevant. +    const VAddr mapping_start = Common::AlignUp((address), PageSize); +    const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); +    const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; +    m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size); + +    R_SUCCEED(); +} + +Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) { +    // Validate the address. +    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + +    // Get aligned source extents. +    const VAddr mapping_start = Common::AlignUp((address), PageSize); +    const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); +    const VAddr mapping_last = mapping_end - 1; +    const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; + +    // If nothing was mapped, we're actually done immediately. +    R_SUCCEED_IF(mapping_size == 0); + +    // Get the test state and attribute mask. 
+    KMemoryState test_state; +    KMemoryAttribute test_attr_mask; +    switch (dst_state) { +    case KMemoryState::Ipc: +        test_state = KMemoryState::FlagCanUseIpc; +        test_attr_mask = +            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; +        break; +    case KMemoryState::NonSecureIpc: +        test_state = KMemoryState::FlagCanUseNonSecureIpc; +        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; +        break; +    case KMemoryState::NonDeviceIpc: +        test_state = KMemoryState::FlagCanUseNonDeviceIpc; +        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; +        break; +    default: +        R_THROW(ResultInvalidCombination); +    } + +    // Lock the table. +    // NOTE: Nintendo does this *after* creating the updater below, but this does not follow +    // convention elsewhere in KPageTable. +    KScopedLightLock lk(m_general_lock); + +    // We're going to perform an update, so create a helper. +    KScopedPageTableUpdater updater(this); + +    // Ensure that on failure, we roll back appropriately. +    size_t mapped_size = 0; +    ON_RESULT_FAILURE { +        if (mapped_size > 0) { +            // Determine where the mapping ends. +            const auto mapped_end = (mapping_start) + mapped_size; +            const auto mapped_last = mapped_end - 1; + +            // Get current and next iterators. +            KMemoryBlockManager::const_iterator start_it = +                m_memory_block_manager.FindIterator(mapping_start); +            KMemoryBlockManager::const_iterator next_it = start_it; +            ++next_it; + +            // Get the current block info. +            KMemoryInfo cur_info = start_it->GetMemoryInfo(); + +            // Create tracking variables. +            VAddr cur_address = cur_info.GetAddress(); +            size_t cur_size = cur_info.GetSize(); +            bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); +            bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; +            bool first = +                cur_info.GetIpcDisableMergeCount() == 1 && +                (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == +                    KMemoryBlockDisableMergeAttribute::None; + +            while (((cur_address) + cur_size - 1) < mapped_last) { +                // Check that we have a next block. +                ASSERT(next_it != m_memory_block_manager.end()); + +                // Get the next info. +                const KMemoryInfo next_info = next_it->GetMemoryInfo(); + +                // Check if we can consolidate the next block's permission set with the current one. + +                const bool next_perm_eq = +                    next_info.GetPermission() == next_info.GetOriginalPermission(); +                const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; +                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && +                    cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { +                    // We can consolidate the reprotection for the current and next block into a +                    // single call. +                    cur_size += next_info.GetSize(); +                } else { +                    // We have to operate on the current block. 
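Both this rollback path and the main reprotection loop below batch adjacent blocks whose required permission change is identical, so a whole run of blocks costs one ChangePermissions operation. Stripped of the memory-state bookkeeping, the batching looks like this (hypothetical types):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    struct Block {
        std::uint64_t address;
        std::uint64_t size;
        int new_perm; // permission the block must end up with
    };

    // Applies one reprotect per maximal run of adjacent blocks sharing new_perm.
    void ApplyBatched(const std::vector<Block>& blocks,
                      const std::function<void(std::uint64_t, std::uint64_t, int)>& reprotect) {
        std::size_t i = 0;
        while (i < blocks.size()) {
            const std::uint64_t start = blocks[i].address;
            std::uint64_t size = blocks[i].size;
            const int perm = blocks[i].new_perm;
            // Extend the run while the next block is adjacent and agrees.
            while (i + 1 < blocks.size() && blocks[i + 1].address == start + size &&
                   blocks[i + 1].new_perm == perm) {
                size += blocks[i + 1].size;
                ++i;
            }
            reprotect(start, size, perm);
            ++i;
        }
    }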
+                    if ((cur_needs_set_perm || first) && !cur_perm_eq) { +                        ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), +                                       OperationType::ChangePermissions) +                                   .IsSuccess()); +                    } + +                    // Advance. +                    cur_address = next_info.GetAddress(); +                    cur_size = next_info.GetSize(); +                    first = false; +                } + +                // Advance. +                cur_info = next_info; +                cur_perm_eq = next_perm_eq; +                cur_needs_set_perm = next_needs_set_perm; +                ++next_it; +            } + +            // Process the last block. +            if ((first || cur_needs_set_perm) && !cur_perm_eq) { +                ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), +                               OperationType::ChangePermissions) +                           .IsSuccess()); +            } +        } +    }; + +    // Iterate, reprotecting as needed. +    { +        // Get current and next iterators. +        KMemoryBlockManager::const_iterator start_it = +            m_memory_block_manager.FindIterator(mapping_start); +        KMemoryBlockManager::const_iterator next_it = start_it; +        ++next_it; + +        // Validate the current block. +        KMemoryInfo cur_info = start_it->GetMemoryInfo(); +        ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None, +                                      KMemoryPermission::None, +                                      test_attr_mask | KMemoryAttribute::IpcLocked, +                                      KMemoryAttribute::IpcLocked) +                   .IsSuccess()); + +        // Create tracking variables. +        VAddr cur_address = cur_info.GetAddress(); +        size_t cur_size = cur_info.GetSize(); +        bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); +        bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; +        bool first = +            cur_info.GetIpcDisableMergeCount() == 1 && +            (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == +                KMemoryBlockDisableMergeAttribute::None; + +        while ((cur_address + cur_size - 1) < mapping_last) { +            // Check that we have a next block. +            ASSERT(next_it != m_memory_block_manager.end()); + +            // Get the next info. +            const KMemoryInfo next_info = next_it->GetMemoryInfo(); + +            // Validate the next block. +            ASSERT(this->CheckMemoryState(next_info, test_state, test_state, +                                          KMemoryPermission::None, KMemoryPermission::None, +                                          test_attr_mask | KMemoryAttribute::IpcLocked, +                                          KMemoryAttribute::IpcLocked) +                       .IsSuccess()); + +            // Check if we can consolidate the next block's permission set with the current one. 
+            const bool next_perm_eq = +                next_info.GetPermission() == next_info.GetOriginalPermission(); +            const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; +            if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && +                cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { +                // We can consolidate the reprotection for the current and next block into a single +                // call. +                cur_size += next_info.GetSize(); +            } else { +                // We have to operate on the current block. +                if ((cur_needs_set_perm || first) && !cur_perm_eq) { +                    R_TRY(Operate(cur_address, cur_size / PageSize, +                                  cur_needs_set_perm ? cur_info.GetOriginalPermission() +                                                     : cur_info.GetPermission(), +                                  OperationType::ChangePermissions)); +                } + +                // Mark that we mapped the block. +                mapped_size += cur_size; + +                // Advance. +                cur_address = next_info.GetAddress(); +                cur_size = next_info.GetSize(); +                first = false; +            } + +            // Advance. +            cur_info = next_info; +            cur_perm_eq = next_perm_eq; +            cur_needs_set_perm = next_needs_set_perm; +            ++next_it; +        } + +        // Process the last block. +        const auto lock_count = +            cur_info.GetIpcLockCount() + +            (next_it != m_memory_block_manager.end() +                 ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) +                 : 0); +        if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { +            R_TRY(Operate(cur_address, cur_size / PageSize, +                          cur_needs_set_perm ? cur_info.GetOriginalPermission() +                                             : cur_info.GetPermission(), +                          OperationType::ChangePermissions)); +        } +    } + +    // Create an update allocator. +    // NOTE: Guaranteed zero blocks needed here. +    Result allocator_result; +    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), +                                                 m_memory_block_slab_manager, 0); +    R_TRY(allocator_result); + +    // Unlock the pages. +    m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, +                                      mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, +                                      KMemoryPermission::None); + +    R_SUCCEED(); +} + +void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list, +                                                         VAddr address, size_t size, +                                                         KMemoryPermission prot_perm) { +    ASSERT(this->IsLockedByCurrentThread()); +    ASSERT(Common::IsAligned(address, PageSize)); +    ASSERT(Common::IsAligned(size, PageSize)); + +    // Get the mapped extents. +    const VAddr src_map_start = address; +    const VAddr src_map_end = address + size; +    const VAddr src_map_last = src_map_end - 1; + +    // This function is only invoked when there's something to do. +    ASSERT(src_map_end > src_map_start); + +    // Iterate over blocks, fixing permissions. 
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); +    while (true) { +        const KMemoryInfo info = it->GetMemoryInfo(); + +        const auto cur_start = +            info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start; +        const auto cur_end = +            src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); + +        // If we can, fix the protections on the block. +        if ((info.GetIpcLockCount() == 0 && +             (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || +            (info.GetIpcLockCount() != 0 && +             (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { +            // Check if we actually need to fix the protections on the block. +            if (cur_end == src_map_end || info.GetAddress() <= src_map_start || +                (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { +                ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(), +                               OperationType::ChangePermissions) +                           .IsSuccess()); +            } +        } + +        // If we're past the end of the region, we're done. +        if (src_map_last <= info.GetLastAddress()) { +            break; +        } + +        // Advance. +        ++it; +        ASSERT(it != m_memory_block_manager.end()); +    } +} + +void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) { +    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages); +} + +void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) { +    for (size_t index = 0; index < num_pages; ++index) { +        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize)); +        m_system.Kernel().MemoryManager().Close(paddr, 1); +    } +} +  Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {      // Lock the physical memory lock. -    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); +    KScopedLightLock phys_lk(m_map_physical_memory_lock);      // Calculate the last address for convenience.      const VAddr last_address = address + size - 1; @@ -742,15 +1568,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {          {              // Reserve the memory from the process resource limit.              KScopedResourceReservation memory_reservation( -                m_system.Kernel().CurrentProcess()->GetResourceLimit(), -                LimitableResource::PhysicalMemory, size - mapped_size); +                m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size);              R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);              // Allocate pages for the new memory.              KPageGroup pg; -            R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( -                &pg, (size - mapped_size) / PageSize, -                KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); +            R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( +                &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); + +            // If we fail in the next bit (or retry), we need to cleanup the pages. +            // auto pg_guard = SCOPE_GUARD { +            //    pg.OpenFirst(); +            //    pg.Close(); +            //};              // Map the memory.              
{ @@ -810,15 +1640,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {                  // Create an update allocator.                  ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); -                Result allocator_result{ResultSuccess}; +                Result allocator_result;                  KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),                                                               m_memory_block_slab_manager,                                                               num_allocator_blocks);                  R_TRY(allocator_result); +                // We're going to perform an update, so create a helper. +                KScopedPageTableUpdater updater(this); + +                // Prepare to iterate over the memory. +                auto pg_it = pg.Nodes().begin(); +                PAddr pg_phys_addr = pg_it->GetAddress(); +                size_t pg_pages = pg_it->GetNumPages(); +                  // Reset the current tracking address, and make sure we clean up on failure. +                // pg_guard.Cancel();                  cur_address = address; -                auto unmap_guard = detail::ScopeExit([&] { +                ON_RESULT_FAILURE {                      if (cur_address > address) {                          const VAddr last_unmap_address = cur_address - 1; @@ -841,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {                                               last_unmap_address + 1 - cur_address) /                                      PageSize; +                                // HACK: Manually close the pages. +                                HACK_ClosePages(cur_address, cur_pages); +                                  // Unmap.                                  ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,                                                 OperationType::Unmap) @@ -857,12 +1699,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {                              ++it;                          }                      } -                }); -                // Iterate over the memory. -                auto pg_it = pg.Nodes().begin(); -                PAddr pg_phys_addr = pg_it->GetAddress(); -                size_t pg_pages = pg_it->GetNumPages(); +                    // Release any remaining unmapped memory. +                    m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); +                    m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); +                    for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) { +                        m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), +                                                                    pg_it->GetNumPages()); +                        m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), +                                                                pg_it->GetNumPages()); +                    } +                };                  auto it = m_memory_block_manager.FindIterator(cur_address);                  while (true) { @@ -897,6 +1744,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {                              R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,                                            OperationType::Map, pg_phys_addr)); +                            // HACK: Manually open the pages. 
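The HACK_OpenPages / HACK_ClosePages calls threaded through these paths balance per-page reference counts by hand until page groups carry the references themselves (the commented-out pg_guard above points in that direction). A toy model of the open/close contract the hacks maintain, not yuzu's KMemoryManager API:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    // Toy per-page refcount table: a page is returned to the allocator when
    // its count reaches zero.
    class PageRefTable {
    public:
        void Open(std::uint64_t page, std::uint64_t count = 1) {
            m_refs[page] += count;
        }
        void Close(std::uint64_t page) {
            const auto it = m_refs.find(page);
            assert(it != m_refs.end() && it->second > 0);
            if (--it->second == 0) {
                m_refs.erase(it); // last reference: page is free again
            }
        }

    private:
        std::unordered_map<std::uint64_t, std::uint64_t> m_refs;
    };

The invariant the patch must keep is that every successful Map is paired with an Open and every Unmap with a Close; the failure paths above close pages before unmapping for exactly this reason.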
+                            HACK_OpenPages(pg_phys_addr, cur_pages); +                              // Advance.                              cur_address += cur_pages * PageSize;                              map_pages -= cur_pages; @@ -928,9 +1778,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {                      KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,                      KMemoryPermission::UserReadWrite, KMemoryAttribute::None); -                // Cancel our guard. -                unmap_guard.Cancel(); -                  R_SUCCEED();              }          } @@ -939,7 +1786,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {  Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {      // Lock the physical memory lock. -    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); +    KScopedLightLock phys_lk(m_map_physical_memory_lock);      // Lock the table.      KScopedLightLock lk(m_general_lock); @@ -948,8 +1795,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {      const VAddr last_address = address + size - 1;      // Define iteration variables. -    VAddr cur_address = 0; -    size_t mapped_size = 0; +    VAddr map_start_address = 0; +    VAddr map_last_address = 0; + +    VAddr cur_address; +    size_t mapped_size;      size_t num_allocator_blocks = 0;      // Check if the memory is mapped. @@ -975,27 +1825,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {              if (is_normal) {                  R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); +                if (map_start_address == 0) { +                    map_start_address = cur_address; +                } +                map_last_address = +                    (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; +                  if (info.GetAddress() < address) {                      ++num_allocator_blocks;                  }                  if (last_address < info.GetLastAddress()) {                      ++num_allocator_blocks;                  } + +                mapped_size += (map_last_address + 1 - cur_address);              }              // Check if we're done.              if (last_address <= info.GetLastAddress()) { -                if (is_normal) { -                    mapped_size += (last_address + 1 - cur_address); -                }                  break;              } -            // Track the memory if it's mapped. -            if (is_normal) { -                mapped_size += VAddr(info.GetEndAddress()) - cur_address; -            } -              // Advance.              cur_address = info.GetEndAddress();              ++it; @@ -1005,125 +1855,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {          R_SUCCEED_IF(mapped_size == 0);      } -    // Make a page group for the unmap region. -    KPageGroup pg; -    { -        auto& impl = this->PageTableImpl(); - -        // Begin traversal. 
-        Common::PageTable::TraversalContext context; -        Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; -        bool cur_valid = false; -        Common::PageTable::TraversalEntry next_entry; -        bool next_valid = false; -        size_t tot_size = 0; - -        cur_address = address; -        next_valid = impl.BeginTraversal(next_entry, context, cur_address); -        next_entry.block_size = -            (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1))); - -        // Iterate, building the group. -        while (true) { -            if ((!next_valid && !cur_valid) || -                (next_valid && cur_valid && -                 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { -                cur_entry.block_size += next_entry.block_size; -            } else { -                if (cur_valid) { -                    // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); -                    R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize)); -                } - -                // Update tracking variables. -                tot_size += cur_entry.block_size; -                cur_entry = next_entry; -                cur_valid = next_valid; -            } - -            if (cur_entry.block_size + tot_size >= size) { -                break; -            } - -            next_valid = impl.ContinueTraversal(next_entry, context); -        } - -        // Add the last block. -        if (cur_valid) { -            // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); -            R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize)); -        } -    } -    ASSERT(pg.GetNumPages() == mapped_size / PageSize); -      // Create an update allocator.      ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); -    Result allocator_result{ResultSuccess}; +    Result allocator_result;      KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),                                                   m_memory_block_slab_manager, num_allocator_blocks);      R_TRY(allocator_result); -    // Reset the current tracking address, and make sure we clean up on failure. -    cur_address = address; -    auto remap_guard = detail::ScopeExit([&] { -        if (cur_address > address) { -            const VAddr last_map_address = cur_address - 1; -            cur_address = address; - -            // Iterate over the memory we unmapped. -            auto it = m_memory_block_manager.FindIterator(cur_address); -            auto pg_it = pg.Nodes().begin(); -            PAddr pg_phys_addr = pg_it->GetAddress(); -            size_t pg_pages = pg_it->GetNumPages(); - -            while (true) { -                // Get the memory info for the pages we unmapped, convert to property. -                const KMemoryInfo info = it->GetMemoryInfo(); - -                // If the memory is normal, we unmapped it and need to re-map it. -                if (info.GetState() == KMemoryState::Normal) { -                    // Determine the range to map. -                    size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, -                                                last_map_address + 1 - cur_address) / -                                       PageSize; - -                    // While we have pages to map, map them. -                    while (map_pages > 0) { -                        // Check if we're at the end of the physical block. 
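Both here and in MapPhysicalMemory above, the rewrite trades a hand-cancelled detail::ScopeExit guard for an ON_RESULT_FAILURE block that fires only when the function fails. A minimal, self-contained sketch of that idea; SimpleResult and OnResultFailure are illustrative names, not the kernel's actual Result and macro machinery:

    #include <cstdio>
    #include <utility>

    struct SimpleResult {
        int raw;
        bool IsSuccess() const { return raw == 0; }
    };

    template <typename F>
    class OnResultFailure {
    public:
        OnResultFailure(const SimpleResult& result, F&& f)
            : m_result{result}, m_f{std::forward<F>(f)} {}
        ~OnResultFailure() {
            // Run the cleanup only if the tracked result ended up failing.
            if (!m_result.IsSuccess()) {
                m_f();
            }
        }

    private:
        const SimpleResult& m_result;
        F m_f;
    };

    SimpleResult MapSomething(bool fail) {
        SimpleResult result{0};
        OnResultFailure cleanup(result, [] { std::puts("rolling back partial map"); });
        if (fail) {
            result.raw = -1; // failure path: the guard's destructor rolls back
            return result;
        }
        return result; // success path: no explicit cancellation needed
    }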
-                        if (pg_pages == 0) { -                            // Ensure there are more pages to map. -                            ASSERT(pg_it != pg.Nodes().end()); - -                            // Advance our physical block. -                            ++pg_it; -                            pg_phys_addr = pg_it->GetAddress(); -                            pg_pages = pg_it->GetNumPages(); -                        } - -                        // Map whatever we can. -                        const size_t cur_pages = std::min(pg_pages, map_pages); -                        ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(), -                                             OperationType::Map, pg_phys_addr) == ResultSuccess); +    // We're going to perform an update, so create a helper. +    KScopedPageTableUpdater updater(this); -                        // Advance. -                        cur_address += cur_pages * PageSize; -                        map_pages -= cur_pages; +    // Separate the mapping. +    R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize, +                  KMemoryPermission::None, OperationType::Separate)); -                        pg_phys_addr += cur_pages * PageSize; -                        pg_pages -= cur_pages; -                    } -                } - -                // Check if we're done. -                if (last_map_address <= info.GetLastAddress()) { -                    break; -                } - -                // Advance. -                ++it; -            } -        } -    }); +    // Reset the current tracking address, and make sure we clean up on failure. +    cur_address = address;      // Iterate over the memory, unmapping as we go.      auto it = m_memory_block_manager.FindIterator(cur_address); @@ -1141,8 +1888,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {                                                last_address + 1 - cur_address) /                                       PageSize; +            // HACK: Manually close the pages. +            HACK_ClosePages(cur_address, cur_pages); +              // Unmap. -            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)); +            ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) +                       .IsSuccess());          }          // Check if we're done. @@ -1157,8 +1908,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {      // Release the memory resource.      m_mapped_physical_memory_size -= mapped_size; -    auto process{m_system.Kernel().CurrentProcess()}; -    process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); +    m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size);      // Update memory blocks.      m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, @@ -1166,14 +1916,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {                                    KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,                                    KMemoryBlockDisableMergeAttribute::None); -    // TODO(bunnei): This is a workaround until the next set of changes, where we add reference -    // counting for mapped pages. Until then, we must manually close the reference to the page -    // group. -    m_system.Kernel().MemoryManager().Close(pg); -      // We succeeded. 
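The reworked size check in UnmapPhysicalMemory (further up) now accumulates mapped_size in a single pass by clamping each Normal block to the queried range. The same accounting reduced to a free-standing sketch, with Block standing in for KMemoryInfo:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Block {
        uint64_t address;      // first byte of the block
        uint64_t last_address; // last byte of the block (inclusive)
        bool is_normal;        // stands in for state == KMemoryState::Normal
    };

    uint64_t CountMappedBytes(const std::vector<Block>& blocks, uint64_t address, uint64_t size) {
        const uint64_t last_address = address + size - 1;
        uint64_t mapped = 0;
        for (const Block& b : blocks) {
            if (b.last_address < address || b.address > last_address || !b.is_normal) {
                continue; // outside the queried range, or not normal memory
            }
            const uint64_t lo = std::max(b.address, address);
            const uint64_t hi = std::min(b.last_address, last_address);
            mapped += hi + 1 - lo; // inclusive bounds, hence the +1
        }
        return mapped;
    }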
-    remap_guard.Cancel(); -      R_SUCCEED();  } @@ -1749,8 +2492,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {                            OperationType::Unmap));              // Release the memory from the resource limit. -            m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( -                LimitableResource::PhysicalMemory, num_pages * PageSize); +            m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize);              // Apply the memory block update.              m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, @@ -1780,8 +2522,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {      // Reserve memory for the heap extension.      KScopedResourceReservation memory_reservation( -        m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, -        allocation_size); +        m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size);      R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);      // Allocate pages for the heap extension. @@ -1869,7 +2610,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_          R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));      } else {          KPageGroup page_group; -        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( +        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(              &page_group, needed_num_pages,              KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));          R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); @@ -1883,8 +2624,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_      return addr;  } -Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, -                                                bool is_aligned) { +Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, +                                                KMemoryPermission perm, bool is_aligned, +                                                bool check_heap) {      // Lightly validate the range before doing anything else.      const size_t num_pages = size / PageSize;      R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); @@ -1894,15 +2636,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem      // Check the memory state.      const auto test_state = -        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); +        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | +        (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);      size_t num_allocator_blocks; -    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, +    KMemoryState old_state; +    R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, +                                 std::addressof(num_allocator_blocks), address, size, test_state,                                   test_state, perm, perm,                                   KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,                                   KMemoryAttribute::None, KMemoryAttribute::DeviceShared));      // Create an update allocator. 
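The device-map lock paths now build their required-state mask from flags, OR-ing in FlagReferenceCounted only when check_heap is set. A reduced sketch of that composition with illustrative bit values:

    #include <cstdint>

    enum class MemState : uint32_t {
        None = 0,
        FlagCanDeviceMap = 1u << 0,
        FlagCanAlignedDeviceMap = 1u << 1,
        FlagReferenceCounted = 1u << 2,
    };

    constexpr MemState operator|(MemState a, MemState b) {
        return static_cast<MemState>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
    }

    constexpr MemState MakeTestState(bool is_aligned, bool check_heap) {
        return (is_aligned ? MemState::FlagCanAlignedDeviceMap : MemState::FlagCanDeviceMap) |
               (check_heap ? MemState::FlagReferenceCounted : MemState::None);
    }

    static_assert(MakeTestState(false, true) ==
                  (MemState::FlagCanDeviceMap | MemState::FlagReferenceCounted));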
-    Result allocator_result{ResultSuccess}; +    Result allocator_result;      KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),                                                   m_memory_block_slab_manager, num_allocator_blocks);      R_TRY(allocator_result); @@ -1911,10 +2656,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem      m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,                                        &KMemoryBlock::ShareToDevice, KMemoryPermission::None); +    // Set whether the locked memory was io. +    *out_is_io = old_state == KMemoryState::Io; +      R_SUCCEED();  } -Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { +Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) {      // Lightly validate the range before doing anything else.      const size_t num_pages = size / PageSize;      R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); @@ -1923,16 +2671,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {      KScopedLightLock lk(m_general_lock);      // Check the memory state. +    const auto test_state = KMemoryState::FlagCanDeviceMap | +                            (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);      size_t num_allocator_blocks;      R_TRY(this->CheckMemoryStateContiguous( -        std::addressof(num_allocator_blocks), address, size, -        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, -        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, +        std::addressof(num_allocator_blocks), address, size, test_state, test_state,          KMemoryPermission::None, KMemoryPermission::None,          KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));      // Create an update allocator. 
-    Result allocator_result{ResultSuccess}; +    Result allocator_result;      KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),                                                   m_memory_block_slab_manager, num_allocator_blocks);      R_TRY(allocator_result); @@ -1976,13 +2724,28 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {      R_SUCCEED();  } +Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) { +    R_RETURN(this->LockMemoryAndOpen( +        nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, +        KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, +        KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, +        KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, +        KMemoryAttribute::Locked)); +} + +Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) { +    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, +                                KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, +                                KMemoryPermission::None, KMemoryAttribute::All, +                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, +                                KMemoryAttribute::Locked, nullptr)); +} +  Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {      R_RETURN(this->LockMemoryAndOpen(          out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,          KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, -        KMemoryAttribute::None, -        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | -                                       KMemoryPermission::KernelReadWrite), +        KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,          KMemoryAttribute::Locked));  } @@ -2066,6 +2829,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,          m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);          break;      } +    case OperationType::Separate: { +        // HACK: Unimplemented. 
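Given how UnmapPhysicalMemory calls it on [map_start_address, map_last_address], Separate is presumably meant to split larger mapping blocks at the range boundaries so the range can then be unmapped page-by-page; it is stubbed here. A toy model of such boundary splitting, purely illustrative and not yuzu code:

    #include <cstdint>
    #include <iterator>
    #include <list>

    struct Extent {
        uint64_t addr;
        uint64_t size;
    };

    // Split the extent containing `at` into [addr, at) and [at, end), leaving
    // extents that already begin or end at `at` untouched.
    void SplitAt(std::list<Extent>& extents, uint64_t at) {
        for (auto it = extents.begin(); it != extents.end(); ++it) {
            if (it->addr < at && at < it->addr + it->size) {
                const Extent tail{at, it->addr + it->size - at};
                it->size = at - it->addr;
                extents.insert(std::next(it), tail);
                return;
            }
        }
    }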
+        break; +    }      case OperationType::ChangePermissions:      case OperationType::ChangePermissionsAndRefresh:          break; @@ -2075,6 +2842,17 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,      R_SUCCEED();  } +void KPageTable::FinalizeUpdate(PageLinkedList* page_list) { +    while (page_list->Peek()) { +        [[maybe_unused]] auto page = page_list->Pop(); + +        // TODO(bunnei): Free pages once they are allocated in guest memory +        // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); +        // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); +        // this->GetPageTableManager().Free(page); +    } +} +  VAddr KPageTable::GetRegionAddress(KMemoryState state) const {      switch (state) {      case KMemoryState::Free: @@ -2101,6 +2879,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {      case KMemoryState::GeneratedCode:      case KMemoryState::CodeOut:      case KMemoryState::Coverage: +    case KMemoryState::Insecure:          return m_alias_code_region_start;      case KMemoryState::Code:      case KMemoryState::CodeData: @@ -2136,6 +2915,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const {      case KMemoryState::GeneratedCode:      case KMemoryState::CodeOut:      case KMemoryState::Coverage: +    case KMemoryState::Insecure:          return m_alias_code_region_end - m_alias_code_region_start;      case KMemoryState::Code:      case KMemoryState::CodeData: @@ -2177,6 +2957,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {      case KMemoryState::GeneratedCode:      case KMemoryState::CodeOut:      case KMemoryState::Coverage: +    case KMemoryState::Insecure:          return is_in_region && !is_in_heap && !is_in_alias;      case KMemoryState::Normal:          ASSERT(is_in_heap); diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index c6aeacd96..950850291 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -16,6 +16,7 @@  #include "core/hle/kernel/k_memory_layout.h"  #include "core/hle/kernel/k_memory_manager.h"  #include "core/hle/result.h" +#include "core/memory.h"  namespace Core {  class System; @@ -23,7 +24,10 @@ class System;  namespace Kernel { +class KBlockInfoManager;  class KMemoryBlockManager; +class KResourceLimit; +class KSystemResource;  class KPageTable final {  public: @@ -36,9 +40,9 @@ public:      ~KPageTable();      Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, -                                VAddr code_addr, size_t code_size, -                                KMemoryBlockSlabManager* mem_block_slab_manager, -                                KMemoryManager::Pool pool); +                                bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, +                                VAddr code_addr, size_t code_size, KSystemResource* system_resource, +                                KResourceLimit* resource_limit);      void Finalize(); @@ -74,12 +78,20 @@ public:                                            KMemoryState state, KMemoryPermission perm,                                            PAddr map_addr = 0); -    Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, -                                        bool is_aligned); -    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); +    Result LockForMapDeviceAddressSpace(bool* 
out_is_io, VAddr address, size_t size, +                                        KMemoryPermission perm, bool is_aligned, bool check_heap); +    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);      Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); +    Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size); +    Result UnlockForIpcUserBuffer(VAddr address, size_t size); + +    Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table, +                       KMemoryPermission test_perm, KMemoryState dst_state, bool send); +    Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state); +    Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state); +      Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);      Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);      Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, @@ -97,13 +109,54 @@ public:      bool CanContain(VAddr addr, size_t size, KMemoryState state) const; +protected: +    struct PageLinkedList { +    private: +        struct Node { +            Node* m_next; +            std::array<u8, PageSize - sizeof(Node*)> m_buffer; +        }; + +    public: +        constexpr PageLinkedList() = default; + +        void Push(Node* n) { +            ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); +            n->m_next = m_root; +            m_root = n; +        } + +        void Push(Core::Memory::Memory& memory, VAddr addr) { +            this->Push(memory.GetPointer<Node>(addr)); +        } + +        Node* Peek() const { +            return m_root; +        } + +        Node* Pop() { +            Node* const r = m_root; + +            m_root = r->m_next; +            r->m_next = nullptr; + +            return r; +        } + +    private: +        Node* m_root{}; +    }; +    static_assert(std::is_trivially_destructible<PageLinkedList>::value); +  private:      enum class OperationType : u32 { -        Map, -        MapGroup, -        Unmap, -        ChangePermissions, -        ChangePermissionsAndRefresh, +        Map = 0, +        MapFirst = 1, +        MapGroup = 2, +        Unmap = 3, +        ChangePermissions = 4, +        ChangePermissionsAndRefresh = 5, +        Separate = 6,      };      static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = @@ -123,6 +176,7 @@ private:                     OperationType operation);      Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,                     PAddr map_addr = 0); +    void FinalizeUpdate(PageLinkedList* page_list);      VAddr GetRegionAddress(KMemoryState state) const;      size_t GetRegionSize(KMemoryState state) const; @@ -199,6 +253,18 @@ private:          return *out != 0;      } +    Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address, +                             size_t size, KMemoryPermission test_perm, KMemoryState dst_state); +    Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, +                             KMemoryPermission test_perm, KMemoryState dst_state, +                             KPageTable& src_page_table, bool send); +    void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, +                                                 size_t size, KMemoryPermission prot_perm); + +    // HACK: These will be 
removed once we automatically manage page reference counts. +    void HACK_OpenPages(PAddr phys_addr, size_t num_pages); +    void HACK_ClosePages(VAddr virt_addr, size_t num_pages); +      mutable KLightLock m_general_lock;      mutable KLightLock m_map_physical_memory_lock; @@ -316,6 +382,31 @@ public:                 addr + size - 1 <= m_address_space_end - 1;      } +public: +    static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) { +        return layout.GetLinearVirtualAddress(addr); +    } + +    static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { +        return layout.GetLinearPhysicalAddress(addr); +    } + +    static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) { +        return GetLinearMappedVirtualAddress(layout, addr); +    } + +    static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { +        return GetLinearMappedPhysicalAddress(layout, addr); +    } + +    static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) { +        return GetLinearMappedVirtualAddress(layout, addr); +    } + +    static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) { +        return GetLinearMappedPhysicalAddress(layout, addr); +    } +  private:      constexpr bool IsKernel() const {          return m_is_kernel; @@ -331,6 +422,24 @@ private:      }  private: +    class KScopedPageTableUpdater { +    private: +        KPageTable* m_pt{}; +        PageLinkedList m_ll; + +    public: +        explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} +        explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} +        ~KScopedPageTableUpdater() { +            m_pt->FinalizeUpdate(this->GetPageList()); +        } + +        PageLinkedList* GetPageList() { +            return &m_ll; +        } +    }; + +private:      VAddr m_address_space_start{};      VAddr m_address_space_end{};      VAddr m_heap_region_start{}; @@ -347,20 +456,27 @@ private:      VAddr m_alias_code_region_start{};      VAddr m_alias_code_region_end{}; -    size_t m_mapped_physical_memory_size{};      size_t m_max_heap_size{}; -    size_t m_max_physical_memory_size{}; +    size_t m_mapped_physical_memory_size{}; +    size_t m_mapped_unsafe_physical_memory{}; +    size_t m_mapped_insecure_memory{}; +    size_t m_mapped_ipc_server_memory{};      size_t m_address_space_width{};      KMemoryBlockManager m_memory_block_manager; +    u32 m_allocate_option{};      bool m_is_kernel{};      bool m_enable_aslr{};      bool m_enable_device_address_space_merge{};      KMemoryBlockSlabManager* m_memory_block_slab_manager{}; +    KBlockInfoManager* m_block_info_manager{}; +    KResourceLimit* m_resource_limit{};      u32 m_heap_fill_value{}; +    u32 m_ipc_fill_value{}; +    u32 m_stack_fill_value{};      const KMemoryRegion* m_cached_physical_heap_region{};      KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h new file mode 100644 index 000000000..91a45cde3 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_manager.h @@ -0,0 +1,55 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <atomic> + +#include "common/common_types.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" +#include 
"core/hle/kernel/k_page_table_slab_heap.h" + +namespace Kernel { + +class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> { +public: +    using RefCount = KPageTableSlabHeap::RefCount; +    static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize; + +public: +    KPageTableManager() = default; + +    void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) { +        m_pt_heap = pt_heap; + +        static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>); +        BaseHeap::Initialize(page_allocator, pt_heap); +    } + +    VAddr Allocate() { +        return VAddr(BaseHeap::Allocate()); +    } + +    RefCount GetRefCount(VAddr addr) const { +        return m_pt_heap->GetRefCount(addr); +    } + +    void Open(VAddr addr, int count) { +        return m_pt_heap->Open(addr, count); +    } + +    bool Close(VAddr addr, int count) { +        return m_pt_heap->Close(addr, count); +    } + +    bool IsInPageTableHeap(VAddr addr) const { +        return m_pt_heap->IsInRange(addr); +    } + +private: +    using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>; + +    KPageTableSlabHeap* m_pt_heap{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h new file mode 100644 index 000000000..a9543cbd0 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_slab_heap.h @@ -0,0 +1,93 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <array> +#include <vector> + +#include "common/common_types.h" +#include "core/hle/kernel/k_dynamic_slab_heap.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +namespace impl { + +class PageTablePage { +public: +    // Do not initialize anything. +    PageTablePage() = default; + +private: +    std::array<u8, PageSize> m_buffer{}; +}; +static_assert(sizeof(PageTablePage) == PageSize); + +} // namespace impl + +class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> { +public: +    using RefCount = u16; +    static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); +    static_assert(PageTableSize == PageSize); + +public: +    KPageTableSlabHeap() = default; + +    static constexpr size_t CalculateReferenceCountSize(size_t size) { +        return (size / PageSize) * sizeof(RefCount); +    } + +    void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) { +        BaseHeap::Initialize(page_allocator, object_count); +        this->Initialize(rc); +    } + +    RefCount GetRefCount(VAddr addr) { +        ASSERT(this->IsInRange(addr)); +        return *this->GetRefCountPointer(addr); +    } + +    void Open(VAddr addr, int count) { +        ASSERT(this->IsInRange(addr)); + +        *this->GetRefCountPointer(addr) += static_cast<RefCount>(count); + +        ASSERT(this->GetRefCount(addr) > 0); +    } + +    bool Close(VAddr addr, int count) { +        ASSERT(this->IsInRange(addr)); +        ASSERT(this->GetRefCount(addr) >= count); + +        *this->GetRefCountPointer(addr) -= static_cast<RefCount>(count); +        return this->GetRefCount(addr) == 0; +    } + +    bool IsInPageTableHeap(VAddr addr) const { +        return this->IsInRange(addr); +    } + +private: +    void Initialize([[maybe_unused]] RefCount* rc) { +        // TODO(bunnei): Use rc once we support kernel virtual memory allocations. 
+        const auto count = this->GetSize() / PageSize; +        m_ref_counts.resize(count); + +        for (size_t i = 0; i < count; i++) { +            m_ref_counts[i] = 0; +        } +    } + +    RefCount* GetRefCountPointer(VAddr addr) { +        return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize); +    } + +private: +    using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>; + +    std::vector<RefCount> m_ref_counts; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 8c3495e5a..55a9c5fae 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -38,7 +38,7 @@ namespace {   */  void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {      const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); -    ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1)); +    ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));      KThread* thread = KThread::Create(system.Kernel());      SCOPE_EXIT({ thread->Close(); }); @@ -124,7 +124,7 @@ void KProcess::DecrementRunningThreadCount() {  }  u64 KProcess::GetTotalPhysicalMemoryAvailable() { -    const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + +    const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +                         page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +                         main_thread_stack_size};      if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); @@ -349,8 +349,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:      // We currently do not support process-specific system resource      UNIMPLEMENTED_IF(system_resource_size != 0); -    KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, -                                                  code_size + system_resource_size); +    KScopedResourceReservation memory_reservation( +        resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size);      if (!memory_reservation.Succeeded()) {          LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",                    code_size + system_resource_size); @@ -358,8 +358,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:      }      // Initialize process address space      if (const Result result{page_table.InitializeForProcess( -            metadata.GetAddressSpaceType(), false, 0x8000000, code_size, -            &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; +            metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, +            0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)};          result.IsError()) {          R_RETURN(result);      } @@ -406,8 +406,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:  void KProcess::Run(s32 main_thread_priority, u64 stack_size) {      AllocateMainThreadStack(stack_size); -    resource_limit->Reserve(LimitableResource::Threads, 1); -    resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); +    
resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size);      const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};      ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); @@ -442,7 +442,7 @@ void KProcess::PrepareForTermination() {      plr_address = 0;      if (resource_limit) { -        resource_limit->Release(LimitableResource::PhysicalMemory, +        resource_limit->Release(LimitableResource::PhysicalMemoryMax,                                  main_thread_stack_size + image_size);      } diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp index 010dcf99e..b9d22b414 100644 --- a/src/core/hle/kernel/k_resource_limit.cpp +++ b/src/core/hle/kernel/k_resource_limit.cpp @@ -159,12 +159,13 @@ KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical      // TODO(bunnei): These values are the system defaults, the limits for service processes are      // lower. These should use the correct limit values. -    ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, physical_memory_size) +    ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemoryMax, physical_memory_size)                 .IsSuccess()); -    ASSERT(resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess()); -    ASSERT(resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess()); -    ASSERT(resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200).IsSuccess()); -    ASSERT(resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess()); +    ASSERT(resource_limit->SetLimitValue(LimitableResource::ThreadCountMax, 800).IsSuccess()); +    ASSERT(resource_limit->SetLimitValue(LimitableResource::EventCountMax, 900).IsSuccess()); +    ASSERT( +        resource_limit->SetLimitValue(LimitableResource::TransferMemoryCountMax, 200).IsSuccess()); +    ASSERT(resource_limit->SetLimitValue(LimitableResource::SessionCountMax, 1133).IsSuccess());      return resource_limit;  } diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h index 65c98c979..2573d1b7c 100644 --- a/src/core/hle/kernel/k_resource_limit.h +++ b/src/core/hle/kernel/k_resource_limit.h @@ -16,15 +16,8 @@ class CoreTiming;  namespace Kernel {  class KernelCore; -enum class LimitableResource : u32 { -    PhysicalMemory = 0, -    Threads = 1, -    Events = 2, -    TransferMemory = 3, -    Sessions = 4, - -    Count, -}; + +using LimitableResource = Svc::LimitableResource;  constexpr bool IsValidResourceType(LimitableResource type) {      return type < LimitableResource::Count; diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index b1cabbca0..d6676904b 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -384,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) {  void KScheduler::ScheduleImpl() {      // First, clear the needs scheduling bool. -    m_state.needs_scheduling.store(false, std::memory_order_seq_cst); +    m_state.needs_scheduling.store(false, std::memory_order_relaxed); +    std::atomic_thread_fence(std::memory_order_seq_cst);      // Load the appropriate thread pointers for scheduling.      
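The scheduler change just above replaces a seq_cst store with a relaxed store followed by a standalone seq_cst fence; unlike the plain store, the fence also orders the store against the loads that come after it. A reduced sketch of the pattern:

    #include <atomic>

    std::atomic<bool> needs_scheduling{true};
    std::atomic<int> highest_priority_thread{0};

    int ClearFlagThenSample() {
        // Clear the flag with a cheap relaxed store...
        needs_scheduling.store(false, std::memory_order_relaxed);
        // ...then fence, so the store cannot be reordered past the load below
        // and a concurrent requester's update cannot be missed.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        return highest_priority_thread.load(std::memory_order_relaxed);
    }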
KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; @@ -400,7 +401,8 @@ void KScheduler::ScheduleImpl() {      // If there aren't, we want to check if the highest priority thread is the same as the current      // thread.      if (highest_priority_thread == cur_thread) { -        // If they're the same, then we can just return. +        // If they're the same, then we can just issue a memory barrier and return. +        std::atomic_thread_fence(std::memory_order_seq_cst);          return;      } @@ -476,7 +478,8 @@ void KScheduler::ScheduleImplFiber() {          // We failed to successfully do the context switch, and need to retry.          // Clear needs_scheduling. -        m_state.needs_scheduling.store(false, std::memory_order_seq_cst); +        m_state.needs_scheduling.store(false, std::memory_order_relaxed); +        std::atomic_thread_fence(std::memory_order_seq_cst);          // Refresh the highest priority thread.          highest_priority_thread = m_state.highest_priority_thread; diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 73314b45e..129d60472 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -60,6 +60,9 @@ public:          // Release an instance of the lock.          if ((--lock_count) == 0) { +            // Perform a memory barrier here. +            std::atomic_thread_fence(std::memory_order_seq_cst); +              // We're no longer going to hold the lock. Take note of what cores need scheduling.              const u64 cores_needing_scheduling =                  SchedulerType::UpdateHighestPriorityThreads(kernel); diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp index 7a6534ac3..b6f6fe9d9 100644 --- a/src/core/hle/kernel/k_session.cpp +++ b/src/core/hle/kernel/k_session.cpp @@ -76,7 +76,7 @@ void KSession::OnClientClosed() {  void KSession::PostDestroy(uintptr_t arg) {      // Release the session count resource the owner process holds.      KProcess* owner = reinterpret_cast<KProcess*>(arg); -    owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1); +    owner->GetResourceLimit()->Release(LimitableResource::SessionCountMax, 1);      owner->Close();  } diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index a039cc591..10cd4c43d 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp @@ -14,7 +14,7 @@ namespace Kernel {  KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}  KSharedMemory::~KSharedMemory() { -    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size); +    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);  }  Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, @@ -35,7 +35,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o      KResourceLimit* reslimit = kernel.GetSystemResourceLimit();      // Reserve memory for ourselves. 
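KScopedResourceReservation, used with the renamed PhysicalMemoryMax limit here, follows a reserve/commit pattern: the reservation rolls back on destruction unless it was committed. A minimal sketch with an illustrative Limit type in place of KResourceLimit:

    #include <cstdint>

    class Limit {
    public:
        explicit Limit(uint64_t max) : m_max{max} {}
        bool Reserve(uint64_t amount) {
            if (m_used + amount > m_max) {
                return false;
            }
            m_used += amount;
            return true;
        }
        void Release(uint64_t amount) { m_used -= amount; }

    private:
        uint64_t m_max;
        uint64_t m_used = 0;
    };

    class ScopedReservation {
    public:
        ScopedReservation(Limit& limit, uint64_t amount)
            : m_limit{limit}, m_amount{amount}, m_succeeded{limit.Reserve(amount)} {}
        ~ScopedReservation() {
            if (m_succeeded && !m_committed) {
                m_limit.Release(m_amount); // roll back on failure paths
            }
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; } // keep the reservation

    private:
        Limit& m_limit;
        uint64_t m_amount;
        bool m_succeeded;
        bool m_committed = false;
    };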
-    KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemory, +    KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax,                                                    size_);      R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); @@ -57,7 +57,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o  void KSharedMemory::Finalize() {      // Release the memory reservation. -    resource_limit->Release(LimitableResource::PhysicalMemory, size); +    resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);      resource_limit->Close();      // Perform inherited finalization. diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp new file mode 100644 index 000000000..4cc377a6c --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.cpp @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/hle/kernel/k_system_resource.h" + +namespace Kernel { + +Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size, +                                         [[maybe_unused]] KResourceLimit* resource_limit, +                                         [[maybe_unused]] KMemoryManager::Pool pool) { +    // Unimplemented +    UNREACHABLE(); +} + +void KSecureSystemResource::Finalize() { +    // Unimplemented +    UNREACHABLE(); +} + +size_t KSecureSystemResource::CalculateRequiredSecureMemorySize( +    [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) { +    // Unimplemented +    UNREACHABLE(); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h new file mode 100644 index 000000000..9a991f725 --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.h @@ -0,0 +1,137 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/assert.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" +#include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/kernel/k_page_table_manager.h" +#include "core/hle/kernel/k_resource_limit.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +// NOTE: Nintendo's implementation does not have the "is_secure_resource" field, and instead uses +// virtual IsSecureResource(). 
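Per the NOTE above, Nintendo's kernel identifies secure resources through a virtual query rather than a stored flag. What that alternative shape looks like, reduced to a sketch:

    class SystemResourceBase {
    public:
        virtual ~SystemResourceBase() = default;
        virtual bool IsSecureResource() const { return false; }
    };

    class SecureSystemResource final : public SystemResourceBase {
    public:
        bool IsSecureResource() const override { return true; }
    };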
+ +class KSystemResource : public KAutoObject { +    KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); + +public: +    explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} + +protected: +    void SetSecureResource() { +        m_is_secure_resource = true; +    } + +public: +    virtual void Destroy() override { +        UNREACHABLE_MSG("KSystemResource::Destroy() was called"); +    } + +    bool IsSecureResource() const { +        return m_is_secure_resource; +    } + +    void SetManagers(KMemoryBlockSlabManager& mb, KBlockInfoManager& bi, KPageTableManager& pt) { +        ASSERT(m_p_memory_block_slab_manager == nullptr); +        ASSERT(m_p_block_info_manager == nullptr); +        ASSERT(m_p_page_table_manager == nullptr); + +        m_p_memory_block_slab_manager = std::addressof(mb); +        m_p_block_info_manager = std::addressof(bi); +        m_p_page_table_manager = std::addressof(pt); +    } + +    const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const { +        return *m_p_memory_block_slab_manager; +    } +    const KBlockInfoManager& GetBlockInfoManager() const { +        return *m_p_block_info_manager; +    } +    const KPageTableManager& GetPageTableManager() const { +        return *m_p_page_table_manager; +    } + +    KMemoryBlockSlabManager& GetMemoryBlockSlabManager() { +        return *m_p_memory_block_slab_manager; +    } +    KBlockInfoManager& GetBlockInfoManager() { +        return *m_p_block_info_manager; +    } +    KPageTableManager& GetPageTableManager() { +        return *m_p_page_table_manager; +    } + +    KMemoryBlockSlabManager* GetMemoryBlockSlabManagerPointer() { +        return m_p_memory_block_slab_manager; +    } +    KBlockInfoManager* GetBlockInfoManagerPointer() { +        return m_p_block_info_manager; +    } +    KPageTableManager* GetPageTableManagerPointer() { +        return m_p_page_table_manager; +    } + +private: +    KMemoryBlockSlabManager* m_p_memory_block_slab_manager{}; +    KBlockInfoManager* m_p_block_info_manager{}; +    KPageTableManager* m_p_page_table_manager{}; +    bool m_is_secure_resource{false}; +}; + +class KSecureSystemResource final +    : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { +public: +    explicit KSecureSystemResource(KernelCore& kernel_) +        : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { +        // Mark ourselves as being a secure resource. 
+        this->SetSecureResource(); +    } + +    Result Initialize(size_t size, KResourceLimit* resource_limit, KMemoryManager::Pool pool); +    void Finalize(); + +    bool IsInitialized() const { +        return m_is_initialized; +    } +    static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + +    size_t CalculateRequiredSecureMemorySize() const { +        return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool); +    } + +    size_t GetSize() const { +        return m_resource_size; +    } +    size_t GetUsedSize() const { +        return m_dynamic_page_manager.GetUsed() * PageSize; +    } + +    const KDynamicPageManager& GetDynamicPageManager() const { +        return m_dynamic_page_manager; +    } + +public: +    static size_t CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool); + +private: +    bool m_is_initialized{}; +    KMemoryManager::Pool m_resource_pool{}; +    KDynamicPageManager m_dynamic_page_manager; +    KMemoryBlockSlabManager m_memory_block_slab_manager; +    KBlockInfoManager m_block_info_manager; +    KPageTableManager m_page_table_manager; +    KMemoryBlockSlabHeap m_memory_block_heap; +    KBlockInfoSlabHeap m_block_info_heap; +    KPageTableSlabHeap m_page_table_heap; +    KResourceLimit* m_resource_limit{}; +    VAddr m_resource_address{}; +    size_t m_resource_size{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index cc88d08f0..21207fe99 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -263,9 +263,9 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_      R_SUCCEED();  } -Result KThread::InitializeDummyThread(KThread* thread) { +Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {      // Initialize the thread. -    R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy)); +    R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy));      // Initialize emulation parameters.      thread->stack_parameters.disable_count = 0; @@ -303,7 +303,7 @@ void KThread::PostDestroy(uintptr_t arg) {      const bool resource_limit_release_hint = (arg & 1);      const s64 hint_value = (resource_limit_release_hint ? 0 : 1);      if (owner != nullptr) { -        owner->GetResourceLimit()->Release(LimitableResource::Threads, 1, hint_value); +        owner->GetResourceLimit()->Release(LimitableResource::ThreadCountMax, 1, hint_value);          owner->Close();      }  } @@ -1054,7 +1054,7 @@ void KThread::Exit() {      // Release the thread resource hint, running thread count from parent.      
if (parent != nullptr) { -        parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1); +        parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1);          resource_limit_release_hint = true;          parent->DecrementRunningThreadCount();      } diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 30aa10c9a..f38c92bff 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -415,7 +415,7 @@ public:      static void PostDestroy(uintptr_t arg); -    [[nodiscard]] static Result InitializeDummyThread(KThread* thread); +    [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner);      [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread,                                                       s32 virt_core); diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp index b0320eb73..9f34c2d46 100644 --- a/src/core/hle/kernel/k_transfer_memory.cpp +++ b/src/core/hle/kernel/k_transfer_memory.cpp @@ -37,7 +37,7 @@ void KTransferMemory::Finalize() {  void KTransferMemory::PostDestroy(uintptr_t arg) {      KProcess* owner = reinterpret_cast<KProcess*>(arg); -    owner->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1); +    owner->GetResourceLimit()->Release(LimitableResource::TransferMemoryCountMax, 1);      owner->Close();  } diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 09c36ee09..b77723503 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -28,10 +28,12 @@  #include "core/hle/kernel/k_handle_table.h"  #include "core/hle/kernel/k_memory_layout.h"  #include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/kernel/k_page_buffer.h"  #include "core/hle/kernel/k_process.h"  #include "core/hle/kernel/k_resource_limit.h"  #include "core/hle/kernel/k_scheduler.h"  #include "core/hle/kernel/k_shared_memory.h" +#include "core/hle/kernel/k_system_resource.h"  #include "core/hle/kernel/k_thread.h"  #include "core/hle/kernel/k_worker_task_manager.h"  #include "core/hle/kernel/kernel.h" @@ -47,6 +49,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));  namespace Kernel {  struct KernelCore::Impl { +    static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; +    static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000; +    static constexpr size_t BlockInfoSlabHeapSize = 4000; +    static constexpr size_t ReservedDynamicPageCount = 64; +      explicit Impl(Core::System& system_, KernelCore& kernel_)          : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"},            service_thread_barrier{2}, system{system_} {} @@ -71,7 +78,6 @@ struct KernelCore::Impl {          // Initialize kernel memory and resources.          
InitializeSystemResourceLimit(kernel, system.CoreTiming());          InitializeMemoryLayout(); -        Init::InitializeKPageBufferSlabHeap(system);          InitializeShutdownThreads();          InitializePhysicalCores();          InitializePreemption(kernel); @@ -81,12 +87,13 @@ struct KernelCore::Impl {              const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();              ASSERT(pt_heap_region.GetEndAddress() != 0); -            InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); +            InitializeResourceManagers(kernel, pt_heap_region.GetAddress(), +                                       pt_heap_region.GetSize());          } -        RegisterHostThread(); +        RegisterHostThread(nullptr); -        default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread"); +        default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread");      }      void InitializeCores() { @@ -222,18 +229,22 @@ struct KernelCore::Impl {          const auto kernel_size{sizes.second};          // If setting the default system values fails, then something seriously wrong has occurred. -        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size) +        ASSERT( +            system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemoryMax, total_size) +                .IsSuccess()); +        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::ThreadCountMax, 800) +                   .IsSuccess()); +        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::EventCountMax, 900) +                   .IsSuccess()); +        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemoryCountMax, 200)                     .IsSuccess()); -        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess()); -        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess()); -        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200) +        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::SessionCountMax, 1133)                     .IsSuccess()); -        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess()); -        system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size); +        system_resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, kernel_size);          // Reserve secure applet memory, introduced in firmware 5.0.0          constexpr u64 secure_applet_memory_size{4_MiB}; -        ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory, +        ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemoryMax,                                                secure_applet_memory_size));      } @@ -253,16 +264,82 @@ struct KernelCore::Impl {          system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);      } -    void InitializeResourceManagers(VAddr address, size_t size) { -        dynamic_page_manager = std::make_unique<KDynamicPageManager>(); -        memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); +    void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) { +        // Ensure that the buffer is suitable for our use. 
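InitializeResourceManagers below carves the page-table heap region up front: page-aligned space for one RefCount per page is set aside, and the remainder feeds the shared KDynamicPageManager. The arithmetic as a standalone sketch, with an assumed region size:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t kPageSize = 0x1000;
    using RefCount = std::uint16_t;

    // a must be a power of two.
    constexpr std::size_t AlignUp(std::size_t v, std::size_t a) {
        return (v + a - 1) & ~(a - 1);
    }

    // Mirrors CalculateReferenceCountSize: one RefCount per page.
    constexpr std::size_t ReferenceCountSize(std::size_t region_size) {
        return (region_size / kPageSize) * sizeof(RefCount);
    }

    int main() {
        std::size_t size = 64 * 1024 * 1024; // assumed 64 MiB heap region

        // Reserve page-aligned space for the reference counts...
        const std::size_t rc_size = AlignUp(ReferenceCountSize(size), kPageSize);
        assert(rc_size < size);
        size -= rc_size;

        // ...and everything left becomes dynamic pages for the slab heaps.
        const std::size_t usable_pages = size / kPageSize;
        return usable_pages > 0 ? 0 : 1;
    }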
+        ASSERT(Common::IsAligned(address, PageSize)); +        ASSERT(Common::IsAligned(size, PageSize)); + +        // Ensure that we have space for our reference counts. +        const size_t rc_size = +            Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize); +        ASSERT(rc_size < size); +        size -= rc_size; + +        // Initialize the resource managers' shared page manager. +        resource_manager_page_manager = std::make_unique<KDynamicPageManager>(); +        resource_manager_page_manager->Initialize( +            address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize)); + +        // Initialize the KPageBuffer slab heap. +        page_buffer_slab_heap.Initialize(system); + +        // Initialize the fixed-size slab heaps. +        app_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); +        sys_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); +        block_info_heap = std::make_unique<KBlockInfoSlabHeap>(); +        app_memory_block_heap->Initialize(resource_manager_page_manager.get(), +                                          ApplicationMemoryBlockSlabHeapSize); +        sys_memory_block_heap->Initialize(resource_manager_page_manager.get(), +                                          SystemMemoryBlockSlabHeapSize); +        block_info_heap->Initialize(resource_manager_page_manager.get(), BlockInfoSlabHeapSize); + +        // Reserve all but a fixed number of remaining pages for the page table heap. +        const size_t num_pt_pages = resource_manager_page_manager->GetCount() - +                                    resource_manager_page_manager->GetUsed() - +                                    ReservedDynamicPageCount; +        page_table_heap = std::make_unique<KPageTableSlabHeap>(); + +        // TODO(bunnei): Pass in address once we support kernel virtual memory allocations. +        page_table_heap->Initialize( +            resource_manager_page_manager.get(), num_pt_pages, +            /*GetPointer<KPageTableManager::RefCount>(address + size)*/ nullptr); + +        // Setup the slab managers. +        KDynamicPageManager* const app_dynamic_page_manager = nullptr; +        KDynamicPageManager* const sys_dynamic_page_manager = +            /*KTargetSystem::IsDynamicResourceLimitsEnabled()*/ true                 ? 
resource_manager_page_manager.get() +                : nullptr;          app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); - -        dynamic_page_manager->Initialize(address, size); -        static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; -        memory_block_heap->Initialize(dynamic_page_manager.get(), -                                      ApplicationMemoryBlockSlabHeapSize); -        app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); +        sys_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); +        app_block_info_manager = std::make_unique<KBlockInfoManager>(); +        sys_block_info_manager = std::make_unique<KBlockInfoManager>(); +        app_page_table_manager = std::make_unique<KPageTableManager>(); +        sys_page_table_manager = std::make_unique<KPageTableManager>(); + +        app_memory_block_manager->Initialize(app_dynamic_page_manager, app_memory_block_heap.get()); +        sys_memory_block_manager->Initialize(sys_dynamic_page_manager, sys_memory_block_heap.get()); + +        app_block_info_manager->Initialize(app_dynamic_page_manager, block_info_heap.get()); +        sys_block_info_manager->Initialize(sys_dynamic_page_manager, block_info_heap.get()); + +        app_page_table_manager->Initialize(app_dynamic_page_manager, page_table_heap.get()); +        sys_page_table_manager->Initialize(sys_dynamic_page_manager, page_table_heap.get()); + +        // Check that we have the correct number of dynamic pages available. +        ASSERT(resource_manager_page_manager->GetCount() - +                   resource_manager_page_manager->GetUsed() == +               ReservedDynamicPageCount); + +        // Create the system page table managers. +        app_system_resource = std::make_unique<KSystemResource>(kernel); +        sys_system_resource = std::make_unique<KSystemResource>(kernel); + +        // Set the managers for the system resources. +        app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager, +                                         *app_page_table_manager); +        sys_system_resource->SetManagers(*sys_memory_block_manager, *sys_block_info_manager, +                                         *sys_page_table_manager);      }      void InitializeShutdownThreads() { @@ -300,15 +377,18 @@ struct KernelCore::Impl {      }      // Gets the dummy KThread for the caller, allocating a new one if this is the first time -    KThread* GetHostDummyThread() { +    KThread* GetHostDummyThread(KThread* existing_thread) {          auto initialize = [this](KThread* thread) { -            ASSERT(KThread::InitializeDummyThread(thread).IsSuccess()); +            ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess());              thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));              return thread;          }; -        thread_local auto raw_thread = KThread(system.Kernel()); -        thread_local auto thread = initialize(&raw_thread); +        thread_local KThread raw_thread{system.Kernel()}; +        thread_local KThread* thread = nullptr; +        if (thread == nullptr) { +            thread = (existing_thread == nullptr) ? 
initialize(&raw_thread) : existing_thread; +        }          return thread;      } @@ -323,9 +403,9 @@ struct KernelCore::Impl {      }      /// Registers a new host thread by allocating a host thread ID for it -    void RegisterHostThread() { +    void RegisterHostThread(KThread* existing_thread) {          [[maybe_unused]] const auto this_id = GetHostThreadId(); -        [[maybe_unused]] const auto dummy_thread = GetHostDummyThread(); +        [[maybe_unused]] const auto dummy_thread = GetHostDummyThread(existing_thread);      }      [[nodiscard]] u32 GetCurrentHostThreadID() { @@ -356,7 +436,7 @@ struct KernelCore::Impl {      KThread* GetCurrentEmuThread() {          const auto thread_id = GetCurrentHostThreadID();          if (thread_id >= Core::Hardware::NUM_CPU_CORES) { -            return GetHostDummyThread(); +            return GetHostDummyThread(nullptr);          }          return current_thread; @@ -446,6 +526,9 @@ struct KernelCore::Impl {          ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(              misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); +        // Determine if we'll use extra thread resources. +        const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); +          // Setup the stack region.          constexpr size_t StackRegionSize = 14_MiB;          constexpr size_t StackRegionAlign = KernelAslrAlignment; @@ -456,7 +539,8 @@ struct KernelCore::Impl {              stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));          // Determine the size of the resource region. -        const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit(); +        const size_t resource_region_size = +            memory_layout->GetResourceRegionSizeForInit(use_extra_resources);          // Determine the size of the slab region.          const size_t slab_region_size = @@ -702,33 +786,31 @@ struct KernelCore::Impl {          search->second(system.ServiceManager(), server_port);      } -    std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel, -                                                             const std::string& name) { -        auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, name); +    Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) { +        auto* ptr = new ServiceThread(kernel, name);          service_threads_manager.QueueWork( -            [this, service_thread]() { service_threads.emplace(service_thread); }); +            [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); }); -        return service_thread; +        return *ptr;      } -    void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { -        if (auto strong_ptr = service_thread.lock()) { -            if (strong_ptr == default_service_thread.lock()) { -                // Nothing to do here, the service is using default_service_thread, which will be -                // released on shutdown. 
-                return; -            } +    void ReleaseServiceThread(Kernel::ServiceThread& service_thread) { +        auto* ptr = &service_thread; -            service_threads_manager.QueueWork( -                [this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); }); +        if (ptr == default_service_thread) { +            // Nothing to do here, the service is using default_service_thread, which will be +            // released on shutdown. +            return;          } + +        service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); });      }      void ClearServiceThreads() {          service_threads_manager.QueueWork([this] {              service_threads.clear(); -            default_service_thread.reset(); +            default_service_thread = nullptr;              service_thread_barrier.Sync();          });          service_thread_barrier.Sync(); @@ -751,6 +833,8 @@ struct KernelCore::Impl {      Init::KSlabResourceCounts slab_resource_counts{};      KResourceLimit* system_resource_limit{}; +    KPageBufferSlabHeap page_buffer_slab_heap; +      std::shared_ptr<Core::Timing::EventType> preemption_event;      // This is the kernel's handle table or supervisor handle table which @@ -776,10 +860,20 @@ struct KernelCore::Impl {      // Kernel memory management      std::unique_ptr<KMemoryManager> memory_manager; -    // Dynamic slab managers -    std::unique_ptr<KDynamicPageManager> dynamic_page_manager; -    std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; +    // Resource managers +    std::unique_ptr<KDynamicPageManager> resource_manager_page_manager; +    std::unique_ptr<KPageTableSlabHeap> page_table_heap; +    std::unique_ptr<KMemoryBlockSlabHeap> app_memory_block_heap; +    std::unique_ptr<KMemoryBlockSlabHeap> sys_memory_block_heap; +    std::unique_ptr<KBlockInfoSlabHeap> block_info_heap; +    std::unique_ptr<KPageTableManager> app_page_table_manager; +    std::unique_ptr<KPageTableManager> sys_page_table_manager;      std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; +    std::unique_ptr<KMemoryBlockSlabManager> sys_memory_block_manager; +    std::unique_ptr<KBlockInfoManager> app_block_info_manager; +    std::unique_ptr<KBlockInfoManager> sys_block_info_manager; +    std::unique_ptr<KSystemResource> app_system_resource; +    std::unique_ptr<KSystemResource> sys_system_resource;      // Shared memory for services      Kernel::KSharedMemory* hid_shared_mem{}; @@ -792,8 +886,8 @@ struct KernelCore::Impl {      std::unique_ptr<KMemoryLayout> memory_layout;      // Threads used for services -    std::unordered_set<std::shared_ptr<ServiceThread>> service_threads; -    std::weak_ptr<ServiceThread> default_service_thread; +    std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads; +    ServiceThread* default_service_thread{};      Common::ThreadWorker service_threads_manager;      Common::Barrier service_thread_barrier; @@ -1033,8 +1127,12 @@ void KernelCore::RegisterCoreThread(std::size_t core_id) {      impl->RegisterCoreThread(core_id);  } -void KernelCore::RegisterHostThread() { -    impl->RegisterHostThread(); +void KernelCore::RegisterHostThread(KThread* existing_thread) { +    impl->RegisterHostThread(existing_thread); + +    if (existing_thread != nullptr) { +        ASSERT(GetCurrentEmuThread() == existing_thread); +    }  }  u32 KernelCore::GetCurrentHostThreadID() const { @@ -1057,12 +1155,12 @@ const KMemoryManager& KernelCore::MemoryManager() const {      return 
*impl->memory_manager;  } -KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { -    return *impl->app_memory_block_manager; +KSystemResource& KernelCore::GetSystemSystemResource() { +    return *impl->sys_system_resource;  } -const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { -    return *impl->app_memory_block_manager; +const KSystemResource& KernelCore::GetSystemSystemResource() const { +    return *impl->sys_system_resource;  }  Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { @@ -1109,16 +1207,28 @@ void KernelCore::Suspend(bool suspended) {      const bool should_suspend{exception_exited || suspended};      const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable; -    for (auto* process : GetProcessList()) { -        process->SetActivity(activity); +    std::vector<KScopedAutoObject<KThread>> process_threads; +    { +        KScopedSchedulerLock sl{*this}; + +        if (auto* process = CurrentProcess(); process != nullptr) { +            process->SetActivity(activity); + +            if (!should_suspend) { +                // Runnable now; no need to wait. +                return; +            } -        if (should_suspend) { -            // Wait for execution to stop              for (auto* thread : process->GetThreadList()) { -                thread->WaitUntilSuspended(); +                process_threads.emplace_back(thread);              }          }      } + +    // Wait for execution to stop. +    for (auto& thread : process_threads) { +        thread->WaitUntilSuspended(); +    }  }  void KernelCore::ShutdownCores() { @@ -1150,15 +1260,15 @@ void KernelCore::ExitSVCProfile() {      MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);  } -std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { +Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) {      return impl->CreateServiceThread(*this, name);  } -std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const { -    return impl->default_service_thread; +Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const { +    return *impl->default_service_thread;  } -void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { +void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) {      impl->ReleaseServiceThread(service_thread);  } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 4ae6b3923..2e22fe0f6 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -34,13 +34,16 @@ class KClientPort;  class GlobalSchedulerContext;  class KAutoObjectWithListContainer;  class KClientSession; +class KDebug; +class KDynamicPageManager;  class KEvent; +class KEventInfo;  class KHandleTable;  class KLinkedListNode; -class KMemoryBlockSlabManager;  class KMemoryLayout;  class KMemoryManager;  class KPageBuffer; +class KPageBufferSlabHeap;  class KPort;  class KProcess;  class KResourceLimit; @@ -51,6 +54,7 @@ class KSession;  class KSessionRequest;  class KSharedMemory;  class KSharedMemoryInfo; +class KSecureSystemResource;  class KThread;  class KThreadLocalPage;  class KTransferMemory; @@ -236,7 +240,7 @@ public:      void RegisterCoreThread(std::size_t core_id);      /// Register the current thread as a non CPU core thread. 
-    void RegisterHostThread(); +    void RegisterHostThread(KThread* existing_thread = nullptr);      /// Gets the virtual memory manager for the kernel.      KMemoryManager& MemoryManager(); @@ -244,11 +248,11 @@ public:      /// Gets the virtual memory manager for the kernel.      const KMemoryManager& MemoryManager() const; -    /// Gets the application memory block manager for the kernel. -    KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); +    /// Gets the system resource manager. +    KSystemResource& GetSystemSystemResource(); -    /// Gets the application memory block manager for the kernel. -    const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; +    /// Gets the system resource manager. +    const KSystemResource& GetSystemSystemResource() const;      /// Gets the shared memory object for HID services.      Kernel::KSharedMemory& GetHidSharedMem(); @@ -305,24 +309,24 @@ public:       * See GetDefaultServiceThread.       * @param name String name for the ServerSession creating this thread, used for debug       * purposes. -     * @returns The a weak pointer newly created service thread. +     * @returns A reference to the newly created service thread.       */ -    std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); +    Kernel::ServiceThread& CreateServiceThread(const std::string& name);      /**       * Gets the default host service thread, which executes HLE service requests. Unless service       * requests need to block on the host, the default service thread should be used in favor of       * creating a new service thread. -     * @returns The a weak pointer for the default service thread. +     * @returns A reference to the default service thread.       */ -    std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const; +    Kernel::ServiceThread& GetDefaultServiceThread() const;      /**       * Releases a HLE service thread, instructing KernelCore to free it. This should be called when       * the ServerSession associated with the thread is destroyed.       * @param service_thread Service thread to release.       */ -    void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread); +    void ReleaseServiceThread(Kernel::ServiceThread& service_thread);      /// Workaround for single-core mode when preempting threads while idle.      
bool IsPhantomModeForSingleCore() const; @@ -364,6 +368,12 @@ public:              return slab_heap_container->thread_local_page;          } else if constexpr (std::is_same_v<T, KSessionRequest>) {              return slab_heap_container->session_request; +        } else if constexpr (std::is_same_v<T, KSecureSystemResource>) { +            return slab_heap_container->secure_system_resource; +        } else if constexpr (std::is_same_v<T, KEventInfo>) { +            return slab_heap_container->event_info; +        } else if constexpr (std::is_same_v<T, KDebug>) { +            return slab_heap_container->debug;          }      } @@ -427,6 +437,9 @@ private:          KSlabHeap<KPageBuffer> page_buffer;          KSlabHeap<KThreadLocalPage> thread_local_page;          KSlabHeap<KSessionRequest> session_request; +        KSlabHeap<KSecureSystemResource> secure_system_resource; +        KSlabHeap<KEventInfo> event_info; +        KSlabHeap<KDebug> debug;      };      std::unique_ptr<SlabHeapContainer> slab_heap_container; diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index c8fe42537..f5c2ab23f 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -36,11 +36,12 @@ public:  private:      KernelCore& kernel; -    std::jthread m_thread; +    std::jthread m_host_thread;      std::mutex m_session_mutex;      std::map<KServerSession*, std::shared_ptr<SessionRequestManager>> m_sessions;      KEvent* m_wakeup_event;      KProcess* m_process; +    KThread* m_thread;      std::atomic<bool> m_shutdown_requested;      const std::string m_service_name;  }; @@ -132,7 +133,7 @@ void ServiceThread::Impl::SessionClosed(KServerSession* server_session,  void ServiceThread::Impl::LoopProcess() {      Common::SetCurrentThreadName(m_service_name.c_str()); -    kernel.RegisterHostThread(); +    kernel.RegisterHostThread(m_thread);      while (!m_shutdown_requested.load()) {          WaitAndProcessImpl(); @@ -160,7 +161,7 @@ ServiceThread::Impl::~Impl() {      // Shut down the processing thread.      m_shutdown_requested.store(true);      m_wakeup_event->Signal(); -    m_thread.join(); +    m_host_thread.join();      // Lock mutex.      m_session_mutex.lock(); @@ -177,6 +178,9 @@ ServiceThread::Impl::~Impl() {      m_wakeup_event->GetReadableEvent().Close();      m_wakeup_event->Close(); +    // Close thread. +    m_thread->Close(); +      // Close process.      m_process->Close();  } @@ -189,7 +193,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name)                           KProcess::ProcessType::KernelInternal, kernel.GetSystemResourceLimit());      // Reserve a new event from the process resource limit -    KScopedResourceReservation event_reservation(m_process, LimitableResource::Events); +    KScopedResourceReservation event_reservation(m_process, LimitableResource::EventCountMax);      ASSERT(event_reservation.Succeeded());      // Initialize event. @@ -199,11 +203,19 @@ ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name)      // Commit the event reservation.      event_reservation.Commit(); -    // Register the event. -    KEvent::Register(kernel, m_wakeup_event); +    // Reserve a new thread from the process resource limit +    KScopedResourceReservation thread_reservation(m_process, LimitableResource::ThreadCountMax); +    ASSERT(thread_reservation.Succeeded()); + +    // Initialize thread. 
+    m_thread = KThread::Create(kernel); +    ASSERT(KThread::InitializeDummyThread(m_thread, m_process).IsSuccess()); + +    // Commit the thread reservation. +    thread_reservation.Commit();      // Start thread. -    m_thread = std::jthread([this] { LoopProcess(); }); +    m_host_thread = std::jthread([this] { LoopProcess(); });  }  ServiceThread::ServiceThread(KernelCore& kernel, const std::string& name) diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 06b51e919..0228ce188 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h @@ -53,6 +53,84 @@ public:  };  template <typename Derived, typename Base> +class KAutoObjectWithSlabHeap : public Base { +    static_assert(std::is_base_of<KAutoObject, Base>::value); + +private: +    static Derived* Allocate(KernelCore& kernel) { +        return kernel.SlabHeap<Derived>().Allocate(kernel); +    } + +    static void Free(KernelCore& kernel, Derived* obj) { +        kernel.SlabHeap<Derived>().Free(obj); +    } + +public: +    explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} +    virtual ~KAutoObjectWithSlabHeap() = default; + +    virtual void Destroy() override { +        const bool is_initialized = this->IsInitialized(); +        uintptr_t arg = 0; +        if (is_initialized) { +            arg = this->GetPostDestroyArgument(); +            this->Finalize(); +        } +        Free(kernel, static_cast<Derived*>(this)); +        if (is_initialized) { +            Derived::PostDestroy(arg); +        } +    } + +    virtual bool IsInitialized() const { +        return true; +    } +    virtual uintptr_t GetPostDestroyArgument() const { +        return 0; +    } + +    size_t GetSlabIndex() const { +        return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); +    } + +public: +    static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { +        kernel.SlabHeap<Derived>().Initialize(memory, memory_size); +    } + +    static Derived* Create(KernelCore& kernel) { +        Derived* obj = Allocate(kernel); +        if (obj != nullptr) { +            KAutoObject::Create(obj); +        } +        return obj; +    } + +    static size_t GetObjectSize(KernelCore& kernel) { +        return kernel.SlabHeap<Derived>().GetObjectSize(); +    } + +    static size_t GetSlabHeapSize(KernelCore& kernel) { +        return kernel.SlabHeap<Derived>().GetSlabHeapSize(); +    } + +    static size_t GetPeakIndex(KernelCore& kernel) { +        return kernel.SlabHeap<Derived>().GetPeakIndex(); +    } + +    static uintptr_t GetSlabHeapAddress(KernelCore& kernel) { +        return kernel.SlabHeap<Derived>().GetSlabHeapAddress(); +    } + +    static size_t GetNumRemaining(KernelCore& kernel) { +        return kernel.SlabHeap<Derived>().GetNumRemaining(); +    } + +protected: +    KernelCore& kernel; +}; + +template <typename Derived, typename Base>  class KAutoObjectWithSlabHeapAndContainer : public Base {      static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 4c819f4b6..9962ad171 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -267,7 +267,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien      // Reserve a new session from the process resource limit.      
// FIXME: LimitableResource_SessionCountMax -    KScopedResourceReservation session_reservation(&process, LimitableResource::Sessions); +    KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax);      if (session_reservation.Succeeded()) {          session = T::Create(system.Kernel());      } else { @@ -298,7 +298,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien          // We successfully allocated a session, so add the object we allocated to the resource          // limit. -        // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::Sessions, 1); +        // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::SessionCountMax, 1);      }      // Check that we successfully created a session. @@ -656,27 +656,12 @@ static Result ArbitrateUnlock32(Core::System& system, u32 address) {      return ArbitrateUnlock(system, address);  } -enum class BreakType : u32 { -    Panic = 0, -    AssertionFailed = 1, -    PreNROLoad = 3, -    PostNROLoad = 4, -    PreNROUnload = 5, -    PostNROUnload = 6, -    CppException = 7, -}; - -struct BreakReason { -    union { -        u32 raw; -        BitField<0, 30, BreakType> break_type; -        BitField<31, 1, u32> signal_debugger; -    }; -}; -  /// Break program execution  static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { -    BreakReason break_reason{reason}; +    BreakReason break_reason = +        static_cast<BreakReason>(reason & ~static_cast<u32>(BreakReason::NotificationOnlyFlag)); +    bool notification_only = (reason & static_cast<u32>(BreakReason::NotificationOnlyFlag)) != 0; +      bool has_dumped_buffer{};      std::vector<u8> debug_buffer; @@ -705,57 +690,56 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {          }          has_dumped_buffer = true;      }; -    switch (break_reason.break_type) { -    case BreakType::Panic: -        LOG_CRITICAL(Debug_Emulated, "Signalling debugger, PANIC! info1=0x{:016X}, info2=0x{:016X}", -                     info1, info2); +    switch (break_reason) { +    case BreakReason::Panic: +        LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1, +                     info2);          handle_debug_buffer(info1, info2);          break; -    case BreakType::AssertionFailed: -        LOG_CRITICAL(Debug_Emulated, -                     "Signalling debugger, Assertion failed! info1=0x{:016X}, info2=0x{:016X}", +    case BreakReason::Assert: +        LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}",                       info1, info2);          handle_debug_buffer(info1, info2);          break; -    case BreakType::PreNROLoad: -        LOG_WARNING( -            Debug_Emulated, -            "Signalling debugger, Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", -            info1, info2); +    case BreakReason::User: +        LOG_WARNING(Debug_Emulated, "Userspace Break! 
0x{:016X} with size 0x{:016X}", info1, info2); +        handle_debug_buffer(info1, info2);          break; -    case BreakType::PostNROLoad: -        LOG_WARNING(Debug_Emulated, -                    "Signalling debugger, Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1, -                    info2); +    case BreakReason::PreLoadDll: +        LOG_INFO(Debug_Emulated, +                 "Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1, +                 info2);          break; -    case BreakType::PreNROUnload: -        LOG_WARNING( -            Debug_Emulated, -            "Signalling debugger, Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", -            info1, info2); +    case BreakReason::PostLoadDll: +        LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1, +                 info2);          break; -    case BreakType::PostNROUnload: -        LOG_WARNING(Debug_Emulated, -                    "Signalling debugger, Unloaded an NRO at 0x{:016X} with size 0x{:016X}", info1, -                    info2); +    case BreakReason::PreUnloadDll: +        LOG_INFO(Debug_Emulated, +                 "Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1, +                 info2);          break; -    case BreakType::CppException: +    case BreakReason::PostUnloadDll: +        LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}", +                 info1, info2); +        break; +    case BreakReason::CppException:          LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");          break;      default:          LOG_WARNING(              Debug_Emulated, -            "Signalling debugger, Unknown break reason {}, info1=0x{:016X}, info2=0x{:016X}", -            static_cast<u32>(break_reason.break_type.Value()), info1, info2); +            "Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}", +            reason, info1, info2);          handle_debug_buffer(info1, info2);          break;      } -    system.GetReporter().SaveSvcBreakReport( -        static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger.As<bool>(), -        info1, info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); +    system.GetReporter().SaveSvcBreakReport(reason, notification_only, info1, info2, +                                            has_dumped_buffer ? std::make_optional(debug_buffer) +                                                              : std::nullopt); -    if (!break_reason.signal_debugger) { +    if (!notification_only) {          LOG_CRITICAL(              Debug_Emulated,              "Emulated program broke execution! 
reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", @@ -1716,13 +1700,13 @@ static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address      auto& memory{system.Memory()};      const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; -    memory.Write64(memory_info_address + 0x00, memory_info.addr); +    memory.Write64(memory_info_address + 0x00, memory_info.base_address);      memory.Write64(memory_info_address + 0x08, memory_info.size);      memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff); -    memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attr)); -    memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.perm)); -    memory.Write32(memory_info_address + 0x1c, memory_info.ipc_refcount); -    memory.Write32(memory_info_address + 0x20, memory_info.device_refcount); +    memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attribute)); +    memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.permission)); +    memory.Write32(memory_info_address + 0x1c, memory_info.ipc_count); +    memory.Write32(memory_info_address + 0x20, memory_info.device_count);      memory.Write32(memory_info_address + 0x24, 0);      // Page info appears to be currently unused by the kernel and is always set to zero. @@ -1943,7 +1927,7 @@ static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry      // Reserve a new thread from the process resource limit (waiting up to 100ms).      KScopedResourceReservation thread_reservation( -        kernel.CurrentProcess(), LimitableResource::Threads, 1, +        kernel.CurrentProcess(), LimitableResource::ThreadCountMax, 1,          system.CoreTiming().GetGlobalTimeNs().count() + 100000000);      if (!thread_reservation.Succeeded()) {          LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); @@ -2247,7 +2231,7 @@ static u64 GetSystemTick(Core::System& system) {      auto& core_timing = system.CoreTiming();      // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) -    const u64 result{system.CoreTiming().GetClockTicks()}; +    const u64 result{core_timing.GetClockTicks()};      if (!system.Kernel().IsMulticore()) {          core_timing.AddTicks(400U); @@ -2344,7 +2328,7 @@ static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr addr      // Reserve a new transfer memory from the process resource limit.      KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(), -                                                 LimitableResource::TransferMemory); +                                                 LimitableResource::TransferMemoryCountMax);      R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached);      // Create the transfer memory. @@ -2496,7 +2480,7 @@ static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_r      // Reserve a new event from the process resource limit      KScopedResourceReservation event_reservation(kernel.CurrentProcess(), -                                                 LimitableResource::Events); +                                                 LimitableResource::EventCountMax);      R_UNLESS(event_reservation.Succeeded(), ResultLimitReached);      // Create a new event. 
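The LimitableResource renames above (EventCountMax, ThreadCountMax, TransferMemoryCountMax, SessionCountMax) all feed the same reserve -> create -> commit idiom: a KScopedResourceReservation hands the reserved count back to the process resource limit when it goes out of scope unless Commit() is called, so every early error return releases the reservation automatically. A minimal, self-contained sketch of that RAII shape, using hypothetical stand-in types rather than yuzu's actual classes:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for a process resource limit (illustration only).
    struct ResourceLimit {
        std::size_t used = 0;
        std::size_t max = 2;
        bool Reserve(std::size_t n) {
            if (used + n > max) {
                return false;
            }
            used += n;
            return true;
        }
        void Release(std::size_t n) {
            used -= n;
        }
    };

    // RAII reservation: gives the count back on destruction unless Commit() was called.
    class ScopedResourceReservation {
    public:
        ScopedResourceReservation(ResourceLimit& limit, std::size_t n)
            : m_limit(limit), m_count(n), m_succeeded(limit.Reserve(n)) {}
        ~ScopedResourceReservation() {
            if (m_succeeded && !m_committed) {
                m_limit.Release(m_count); // error path: return the count to the limit
            }
        }
        bool Succeeded() const {
            return m_succeeded;
        }
        void Commit() {
            m_committed = true; // the created object now owns the reserved count
        }

    private:
        ResourceLimit& m_limit;
        std::size_t m_count;
        bool m_succeeded;
        bool m_committed = false;
    };

    // Mirrors the call sites in this change: reserve, create, then commit on success.
    bool CreateEventLike(ResourceLimit& limit) {
        ScopedResourceReservation reservation(limit, 1);
        if (!reservation.Succeeded()) {
            return false; // analogous to R_UNLESS(..., ResultLimitReached)
        }
        // ... create and initialize the kernel object; any early return here
        // destroys the reservation uncommitted and releases the count ...
        reservation.Commit();
        return true;
    }

    int main() {
        ResourceLimit limit;
        const bool a = CreateEventLike(limit); // succeeds
        const bool b = CreateEventLike(limit); // succeeds
        const bool c = CreateEventLike(limit); // fails: limit of 2 reached
        std::printf("%d %d %d\n", a, b, c);    // prints "1 1 0"
    }

The ServiceThread constructor earlier in this diff follows exactly this order: the event and thread reservations are committed only after the corresponding kernel objects have been created and initialized.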
@@ -2539,11 +2523,6 @@ static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out  static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {      LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); -    // This function currently only allows retrieving a process' status. -    enum class InfoType { -        Status, -    }; -      const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();      KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);      if (process.IsNull()) { @@ -2552,9 +2531,9 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand          return ResultInvalidHandle;      } -    const auto info_type = static_cast<InfoType>(type); -    if (info_type != InfoType::Status) { -        LOG_ERROR(Kernel_SVC, "Expected info_type to be Status but got {} instead", type); +    const auto info_type = static_cast<ProcessInfoType>(type); +    if (info_type != ProcessInfoType::ProcessState) { +        LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", type);          return ResultInvalidEnumValue;      } diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h index f27cade33..b7ca53085 100644 --- a/src/core/hle/kernel/svc_results.h +++ b/src/core/hle/kernel/svc_results.h @@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};  constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};  constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};  constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; +constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259};  constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};  } // namespace Kernel diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index abb9847fe..33eebcef6 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -8,6 +8,8 @@  namespace Kernel::Svc { +using Handle = u32; +  enum class MemoryState : u32 {      Free = 0x00,      Io = 0x01, @@ -22,8 +24,8 @@ enum class MemoryState : u32 {      Ipc = 0x0A,      Stack = 0x0B,      ThreadLocal = 0x0C, -    Transferred = 0x0D, -    SharedTransferred = 0x0E, +    Transfered = 0x0D, +    SharedTransfered = 0x0E,      SharedCode = 0x0F,      Inaccessible = 0x10,      NonSecureIpc = 0x11, @@ -32,6 +34,7 @@ enum class MemoryState : u32 {      GeneratedCode = 0x14,      CodeOut = 0x15,      Coverage = 0x16, +    Insecure = 0x17,  };  DECLARE_ENUM_FLAG_OPERATORS(MemoryState); @@ -54,17 +57,6 @@ enum class MemoryPermission : u32 {  };  DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); -struct MemoryInfo { -    u64 addr{}; -    u64 size{}; -    MemoryState state{}; -    MemoryAttribute attr{}; -    MemoryPermission perm{}; -    u32 ipc_refcount{}; -    u32 device_refcount{}; -    u32 padding{}; -}; -  enum class SignalType : u32 {      Signal = 0,      SignalAndIncrementIfEqual = 1, @@ -83,6 +75,13 @@ enum class YieldType : s64 {      ToAnyThread = -2,  }; +enum class ThreadExitReason : u32 { +    ExitThread = 0, +    TerminateThread = 1, +    ExitProcess = 2, +    TerminateProcess = 3, +}; +  enum class ThreadActivity : u32 {      Runnable = 0,      Paused = 1, @@ -108,6 +107,489 @@ enum class ProcessState : u32 {      DebugBreak = 7,  }; +enum class ProcessExitReason : u32 { +    ExitProcess = 0, +    TerminateProcess = 1, +    Exception = 2, +}; +  constexpr inline size_t 
ThreadLocalRegionSize = 0x200; +struct PageInfo { +    u32 flags; +}; + +// Info Types. +enum class InfoType : u32 { +    CoreMask = 0, +    PriorityMask = 1, +    AliasRegionAddress = 2, +    AliasRegionSize = 3, +    HeapRegionAddress = 4, +    HeapRegionSize = 5, +    TotalMemorySize = 6, +    UsedMemorySize = 7, +    DebuggerAttached = 8, +    ResourceLimit = 9, +    IdleTickCount = 10, +    RandomEntropy = 11, +    AslrRegionAddress = 12, +    AslrRegionSize = 13, +    StackRegionAddress = 14, +    StackRegionSize = 15, +    SystemResourceSizeTotal = 16, +    SystemResourceSizeUsed = 17, +    ProgramId = 18, +    InitialProcessIdRange = 19, +    UserExceptionContextAddress = 20, +    TotalNonSystemMemorySize = 21, +    UsedNonSystemMemorySize = 22, +    IsApplication = 23, +    FreeThreadCount = 24, +    ThreadTickCount = 25, +    IsSvcPermitted = 26, + +    MesosphereMeta = 65000, +    MesosphereCurrentProcess = 65001, +}; + +enum class BreakReason : u32 { +    Panic = 0, +    Assert = 1, +    User = 2, +    PreLoadDll = 3, +    PostLoadDll = 4, +    PreUnloadDll = 5, +    PostUnloadDll = 6, +    CppException = 7, + +    NotificationOnlyFlag = 0x80000000, +}; + +enum class DebugEvent : u32 { +    CreateProcess = 0, +    CreateThread = 1, +    ExitProcess = 2, +    ExitThread = 3, +    Exception = 4, +}; + +enum class DebugThreadParam : u32 { +    Priority = 0, +    State = 1, +    IdealCore = 2, +    CurrentCore = 3, +    AffinityMask = 4, +}; + +enum class DebugException : u32 { +    UndefinedInstruction = 0, +    InstructionAbort = 1, +    DataAbort = 2, +    AlignmentFault = 3, +    DebuggerAttached = 4, +    BreakPoint = 5, +    UserBreak = 6, +    DebuggerBreak = 7, +    UndefinedSystemCall = 8, +    MemorySystemError = 9, +}; + +enum class DebugEventFlag : u32 { +    Stopped = (1u << 0), +}; + +enum class BreakPointType : u32 { +    HardwareInstruction = 0, +    HardwareData = 1, +}; + +enum class HardwareBreakPointRegisterName : u32 { +    I0 = 0, +    I1 = 1, +    I2 = 2, +    I3 = 3, +    I4 = 4, +    I5 = 5, +    I6 = 6, +    I7 = 7, +    I8 = 8, +    I9 = 9, +    I10 = 10, +    I11 = 11, +    I12 = 12, +    I13 = 13, +    I14 = 14, +    I15 = 15, +    D0 = 16, +    D1 = 17, +    D2 = 18, +    D3 = 19, +    D4 = 20, +    D5 = 21, +    D6 = 22, +    D7 = 23, +    D8 = 24, +    D9 = 25, +    D10 = 26, +    D11 = 27, +    D12 = 28, +    D13 = 29, +    D14 = 30, +    D15 = 31, +}; + +namespace lp64 { +struct LastThreadContext { +    u64 fp; +    u64 sp; +    u64 lr; +    u64 pc; +}; + +struct PhysicalMemoryInfo { +    PAddr physical_address; +    u64 virtual_address; +    u64 size; +}; + +struct DebugInfoCreateProcess { +    u64 program_id; +    u64 process_id; +    std::array<char, 0xC> name; +    u32 flags; +    u64 user_exception_context_address; // 5.0.0+ +}; + +struct DebugInfoCreateThread { +    u64 thread_id; +    u64 tls_address; +    // Removed in 11.0.0 u64 entrypoint; +}; + +struct DebugInfoExitProcess { +    ProcessExitReason reason; +}; + +struct DebugInfoExitThread { +    ThreadExitReason reason; +}; + +struct DebugInfoUndefinedInstructionException { +    u32 insn; +}; + +struct DebugInfoDataAbortException { +    u64 address; +}; + +struct DebugInfoAlignmentFaultException { +    u64 address; +}; + +struct DebugInfoBreakPointException { +    BreakPointType type; +    u64 address; +}; + +struct DebugInfoUserBreakException { +    BreakReason break_reason; +    u64 address; +    u64 size; +}; + +struct DebugInfoDebuggerBreakException { +    std::array<u64, 4> 
active_thread_ids; +}; + +struct DebugInfoUndefinedSystemCallException { +    u32 id; +}; + +union DebugInfoSpecificException { +    DebugInfoUndefinedInstructionException undefined_instruction; +    DebugInfoDataAbortException data_abort; +    DebugInfoAlignmentFaultException alignment_fault; +    DebugInfoBreakPointException break_point; +    DebugInfoUserBreakException user_break; +    DebugInfoDebuggerBreakException debugger_break; +    DebugInfoUndefinedSystemCallException undefined_system_call; +    u64 raw; +}; + +struct DebugInfoException { +    DebugException type; +    u64 address; +    DebugInfoSpecificException specific; +}; + +union DebugInfo { +    DebugInfoCreateProcess create_process; +    DebugInfoCreateThread create_thread; +    DebugInfoExitProcess exit_process; +    DebugInfoExitThread exit_thread; +    DebugInfoException exception; +}; + +struct DebugEventInfo { +    DebugEvent type; +    u32 flags; +    u64 thread_id; +    DebugInfo info; +}; +static_assert(sizeof(DebugEventInfo) >= 0x40); + +struct SecureMonitorArguments { +    std::array<u64, 8> r; +}; +static_assert(sizeof(SecureMonitorArguments) == 0x40); +} // namespace lp64 + +namespace ilp32 { +struct LastThreadContext { +    u32 fp; +    u32 sp; +    u32 lr; +    u32 pc; +}; + +struct PhysicalMemoryInfo { +    PAddr physical_address; +    u32 virtual_address; +    u32 size; +}; + +struct DebugInfoCreateProcess { +    u64 program_id; +    u64 process_id; +    std::array<char, 0xC> name; +    u32 flags; +    u32 user_exception_context_address; // 5.0.0+ +}; + +struct DebugInfoCreateThread { +    u64 thread_id; +    u32 tls_address; +    // Removed in 11.0.0 u32 entrypoint; +}; + +struct DebugInfoExitProcess { +    ProcessExitReason reason; +}; + +struct DebugInfoExitThread { +    ThreadExitReason reason; +}; + +struct DebugInfoUndefinedInstructionException { +    u32 insn; +}; + +struct DebugInfoDataAbortException { +    u32 address; +}; + +struct DebugInfoAlignmentFaultException { +    u32 address; +}; + +struct DebugInfoBreakPointException { +    BreakPointType type; +    u32 address; +}; + +struct DebugInfoUserBreakException { +    BreakReason break_reason; +    u32 address; +    u32 size; +}; + +struct DebugInfoDebuggerBreakException { +    std::array<u64, 4> active_thread_ids; +}; + +struct DebugInfoUndefinedSystemCallException { +    u32 id; +}; + +union DebugInfoSpecificException { +    DebugInfoUndefinedInstructionException undefined_instruction; +    DebugInfoDataAbortException data_abort; +    DebugInfoAlignmentFaultException alignment_fault; +    DebugInfoBreakPointException break_point; +    DebugInfoUserBreakException user_break; +    DebugInfoDebuggerBreakException debugger_break; +    DebugInfoUndefinedSystemCallException undefined_system_call; +    u64 raw; +}; + +struct DebugInfoException { +    DebugException type; +    u32 address; +    DebugInfoSpecificException specific; +}; + +union DebugInfo { +    DebugInfoCreateProcess create_process; +    DebugInfoCreateThread create_thread; +    DebugInfoExitProcess exit_process; +    DebugInfoExitThread exit_thread; +    DebugInfoException exception; +}; + +struct DebugEventInfo { +    DebugEvent type; +    u32 flags; +    u64 thread_id; +    DebugInfo info; +}; + +struct SecureMonitorArguments { +    std::array<u32, 8> r; +}; +static_assert(sizeof(SecureMonitorArguments) == 0x20); +} // namespace ilp32 + +struct ThreadContext { +    std::array<u64, 29> r; +    u64 fp; +    u64 lr; +    u64 sp; +    u64 pc; +    u32 pstate; +    u32 padding; +    
std::array<u128, 32> v; +    u32 fpcr; +    u32 fpsr; +    u64 tpidr; +}; +static_assert(sizeof(ThreadContext) == 0x320); + +struct MemoryInfo { +    u64 base_address; +    u64 size; +    MemoryState state; +    MemoryAttribute attribute; +    MemoryPermission permission; +    u32 ipc_count; +    u32 device_count; +    u32 padding; +}; + +enum class LimitableResource : u32 { +    PhysicalMemoryMax = 0, +    ThreadCountMax = 1, +    EventCountMax = 2, +    TransferMemoryCountMax = 3, +    SessionCountMax = 4, +    Count, +}; + +enum class IoPoolType : u32 { +    // Not supported. +    Count = 0, +}; + +enum class MemoryMapping : u32 { +    IoRegister = 0, +    Uncached = 1, +    Memory = 2, +}; + +enum class KernelDebugType : u32 { +    Thread = 0, +    ThreadCallStack = 1, +    KernelObject = 2, +    Handle_ = 3, +    Memory = 4, +    PageTable = 5, +    CpuUtilization = 6, +    Process = 7, +    SuspendProcess = 8, +    ResumeProcess = 9, +    Port = 10, +}; + +enum class KernelTraceState : u32 { +    Disabled = 0, +    Enabled = 1, +}; + +enum class CodeMemoryOperation : u32 { +    Map = 0, +    MapToOwner = 1, +    Unmap = 2, +    UnmapFromOwner = 3, +}; + +enum class InterruptType : u32 { +    Edge = 0, +    Level = 1, +}; + +enum class DeviceName { +    Afi = 0, +    Avpc = 1, +    Dc = 2, +    Dcb = 3, +    Hc = 4, +    Hda = 5, +    Isp2 = 6, +    MsencNvenc = 7, +    Nv = 8, +    Nv2 = 9, +    Ppcs = 10, +    Sata = 11, +    Vi = 12, +    Vic = 13, +    XusbHost = 14, +    XusbDev = 15, +    Tsec = 16, +    Ppcs1 = 17, +    Dc1 = 18, +    Sdmmc1a = 19, +    Sdmmc2a = 20, +    Sdmmc3a = 21, +    Sdmmc4a = 22, +    Isp2b = 23, +    Gpu = 24, +    Gpub = 25, +    Ppcs2 = 26, +    Nvdec = 27, +    Ape = 28, +    Se = 29, +    Nvjpg = 30, +    Hc1 = 31, +    Se1 = 32, +    Axiap = 33, +    Etr = 34, +    Tsecb = 35, +    Tsec1 = 36, +    Tsecb1 = 37, +    Nvdec1 = 38, +    Count, +}; + +enum class SystemInfoType : u32 { +    TotalPhysicalMemorySize = 0, +    UsedPhysicalMemorySize = 1, +    InitialProcessIdRange = 2, +}; + +enum class ProcessInfoType : u32 { +    ProcessState = 0, +}; + +struct CreateProcessParameter { +    std::array<char, 12> name; +    u32 version; +    u64 program_id; +    u64 code_address; +    s32 code_num_pages; +    u32 flags; +    Handle reslimit; +    s32 system_resource_num_pages; +}; +static_assert(sizeof(CreateProcessParameter) == 0x30); +  } // namespace Kernel::Svc diff --git a/src/core/hle/result.h b/src/core/hle/result.h index ef4b2d417..56c990728 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h @@ -423,16 +423,17 @@ constexpr void UpdateCurrentResultReference<const Result>(Result result_referenc  } // namespace ResultImpl  #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE)                                \ -    [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE =                                   \ +    [[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) =                          \          std::same_as<decltype(__TmpCurrentResultReference), Result&>;                              \ -    [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference;                  \ -    [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess;                          \ -    Result& __TmpCurrentResultReference =                                                          \ -        HasPrevRef_##COUNTER_VALUE ? 
PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE +    [[maybe_unused]] Result CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference;        \ +    [[maybe_unused]] Result CONCAT2(__tmp_result_, COUNTER_VALUE) = ResultSuccess;                 \ +    Result& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE)                      \ +                                              ? CONCAT2(PrevRef_, COUNTER_VALUE)                   \ +                                              : CONCAT2(__tmp_result_, COUNTER_VALUE)  #define ON_RESULT_RETURN_IMPL(...)                                                                 \      static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>);                   \ -    auto RESULT_GUARD_STATE_##__COUNTER__ =                                                        \ +    auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) =                                               \          ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>(                              \              __TmpCurrentResultReference) +                                                         \          [&]() diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp index af133af93..42991928e 100644 --- a/src/core/hle/service/kernel_helpers.cpp +++ b/src/core/hle/service/kernel_helpers.cpp @@ -31,7 +31,7 @@ ServiceContext::~ServiceContext() {  Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) {      // Reserve a new event from the process resource limit      Kernel::KScopedResourceReservation event_reservation(process, -                                                         Kernel::LimitableResource::Events); +                                                         Kernel::LimitableResource::EventCountMax);      if (!event_reservation.Succeeded()) {          LOG_CRITICAL(Service, "Resource limit reached!");          return {}; diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 44388655d..fa29db758 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)          LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);          return result;      } +    bool is_out_io{};      ASSERT(system.CurrentProcess()                 ->PageTable() -               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, -                                             Kernel::KMemoryPermission::None, true) +               .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, +                                             handle_description->size, +                                             Kernel::KMemoryPermission::None, true, false)                 .IsSuccess());      std::memcpy(output.data(), ¶ms, sizeof(params));      return result; diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp index 69e0fe808..1cf9dd1c4 100644 --- a/src/core/hle/service/sm/sm_controller.cpp +++ b/src/core/hle/service/sm/sm_controller.cpp @@ -34,8 +34,8 @@ void Controller::CloneCurrentObject(Kernel::HLERequestContext& ctx) {      // once this is a proper process      // Reserve a new session from the process resource limit. 
-    Kernel::KScopedResourceReservation session_reservation(&process, -                                                           Kernel::LimitableResource::Sessions); +    Kernel::KScopedResourceReservation session_reservation( +        &process, Kernel::LimitableResource::SessionCountMax);      ASSERT(session_reservation.Succeeded());      // Create the session. diff --git a/src/core/internal_network/socket_proxy.cpp b/src/core/internal_network/socket_proxy.cpp index 7d5d37bbc..1e1c42cea 100644 --- a/src/core/internal_network/socket_proxy.cpp +++ b/src/core/internal_network/socket_proxy.cpp @@ -11,6 +11,10 @@  #include "core/internal_network/network_interface.h"  #include "core/internal_network/socket_proxy.h" +#if YUZU_UNIX +#include <sys/socket.h> +#endif +  namespace Network {  ProxySocket::ProxySocket(RoomNetwork& room_network_) noexcept : room_network{room_network_} {} diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index f9794dfe4..4a2f2c1fd 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -631,47 +631,40 @@ void Maxwell3D::ProcessDeferredDraw() {          Instance,      };      DrawMode draw_mode{DrawMode::Undefined}; -    u32 instance_count = 1; - -    u32 index = 0; -    u32 method = 0;      u32 method_count = static_cast<u32>(deferred_draw_method.size()); -    for (; index < method_count && -           (method = deferred_draw_method[index]) != MAXWELL3D_REG_INDEX(draw.begin); -         ++index) -        ; - -    if (MAXWELL3D_REG_INDEX(draw.begin) != method) { -        return; -    } - -    // The minimum number of methods for drawing must be greater than or equal to -    // 3[draw.begin->vertex(index)count(first)->draw.end] to avoid errors in index mode drawing -    if ((method_count - index) < 3) { +    u32 method = deferred_draw_method[method_count - 1]; +    if (MAXWELL3D_REG_INDEX(draw.end) != method) {          return;      }      draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) ||                          (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged)                      ? 
DrawMode::Instance                      : DrawMode::General; - -    // Drawing will only begin with draw.begin or index_buffer method, other methods directly -    // clear -    if (draw_mode == DrawMode::Undefined) { -        deferred_draw_method.clear(); -        return; -    } - +    u32 instance_count = 0;      if (draw_mode == DrawMode::Instance) { -        ASSERT_MSG(deferred_draw_method.size() % 4 == 0, "Instance mode method size error"); -        instance_count = static_cast<u32>(method_count - index) / 4; +        u32 vertex_buffer_count = 0; +        u32 index_buffer_count = 0; +        for (u32 index = 0; index < method_count; ++index) { +            method = deferred_draw_method[index]; +            if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count)) { +                instance_count = ++vertex_buffer_count; +            } else if (method == MAXWELL3D_REG_INDEX(index_buffer.count)) { +                instance_count = ++index_buffer_count; +            } +        } +        ASSERT_MSG(!(vertex_buffer_count && index_buffer_count), +                   "Instance both indexed and direct?");      } else { -        method = deferred_draw_method[index + 1]; -        if (MAXWELL3D_REG_INDEX(draw_inline_index) == method || -            MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method || -            MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { -            regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4); -            regs.index_buffer.format = Regs::IndexFormat::UnsignedInt; +        instance_count = 1; +        for (u32 index = 0; index < method_count; ++index) { +            method = deferred_draw_method[index]; +            if (MAXWELL3D_REG_INDEX(draw_inline_index) == method || +                MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method || +                MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { +                regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4); +                regs.index_buffer.format = Regs::IndexFormat::UnsignedInt; +                break; +            }          }      } diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 384350dbd..8c8dfcca6 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -45,7 +45,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64      kind_valus.fill(PTEKind::INVALID);      big_kinds.resize(big_page_table_size / 32, kind_valus);      entries.resize(page_table_size / 32, 0); -    kinds.resize(big_page_table_size / 32, kind_valus); +    kinds.resize(page_table_size / 32, kind_valus);  }  MemoryManager::~MemoryManager() = default; diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 81b6c372d..1aa116cea 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -467,7 +467,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {  }  void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling, -                                     const RenderAreaPushConstant& render_are) { +                                     const RenderAreaPushConstant& render_area) {      texture_cache.UpdateRenderTargets(false);      scheduler.RequestRenderpass(texture_cache.GetFramebuffer()); @@ -484,8 +484,8 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& 
rescaling,      const void* const descriptor_data{update_descriptor_queue.UpdateData()};      scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(),                        is_rescaling, update_rescaling, -                      uses_render_area = render_are.uses_render_area, -                      render_area_data = render_are.words](vk::CommandBuffer cmdbuf) { +                      uses_render_area = render_area.uses_render_area, +                      render_area_data = render_area.words](vk::CommandBuffer cmdbuf) {          if (bind_pipeline) {              cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);          } @@ -494,7 +494,7 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling,                               rescaling_data.data());          if (update_rescaling) {              const f32 config_down_factor{Settings::values.resolution_info.down_factor}; -            const f32 scale_down_factor{is_rescaling ? config_down_factor : 2.0f}; +            const f32 scale_down_factor{is_rescaling ? config_down_factor : 1.0f};              cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_ALL_GRAPHICS,                                   RESCALING_LAYOUT_DOWN_FACTOR_OFFSET, sizeof(scale_down_factor),                                   &scale_down_factor); diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt index 29d506c47..239f12382 100644 --- a/src/yuzu/CMakeLists.txt +++ b/src/yuzu/CMakeLists.txt @@ -315,7 +315,7 @@ target_include_directories(yuzu PRIVATE ../../externals/Vulkan-Headers/include)  if (NOT WIN32)      target_include_directories(yuzu PRIVATE ${Qt5Gui_PRIVATE_INCLUDE_DIRS})  endif() -if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") +if (UNIX AND NOT APPLE)      target_link_libraries(yuzu PRIVATE Qt::DBus)  endif() diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 6acfb7b06..d88efacd7 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp @@ -401,224 +401,127 @@ void GRenderWindow::closeEvent(QCloseEvent* event) {  }  int GRenderWindow::QtKeyToSwitchKey(Qt::Key qt_key) { -    switch (qt_key) { -    case Qt::Key_A: -        return Settings::NativeKeyboard::A; -    case Qt::Key_B: -        return Settings::NativeKeyboard::B; -    case Qt::Key_C: -        return Settings::NativeKeyboard::C; -    case Qt::Key_D: -        return Settings::NativeKeyboard::D; -    case Qt::Key_E: -        return Settings::NativeKeyboard::E; -    case Qt::Key_F: -        return Settings::NativeKeyboard::F; -    case Qt::Key_G: -        return Settings::NativeKeyboard::G; -    case Qt::Key_H: -        return Settings::NativeKeyboard::H; -    case Qt::Key_I: -        return Settings::NativeKeyboard::I; -    case Qt::Key_J: -        return Settings::NativeKeyboard::J; -    case Qt::Key_K: -        return Settings::NativeKeyboard::K; -    case Qt::Key_L: -        return Settings::NativeKeyboard::L; -    case Qt::Key_M: -        return Settings::NativeKeyboard::M; -    case Qt::Key_N: -        return Settings::NativeKeyboard::N; -    case Qt::Key_O: -        return Settings::NativeKeyboard::O; -    case Qt::Key_P: -        return Settings::NativeKeyboard::P; -    case Qt::Key_Q: -        return Settings::NativeKeyboard::Q; -    case Qt::Key_R: -        return Settings::NativeKeyboard::R; -    case Qt::Key_S: -        return Settings::NativeKeyboard::S; -    case Qt::Key_T: -        return Settings::NativeKeyboard::T; -    case Qt::Key_U: -        return Settings::NativeKeyboard::U; -    
case Qt::Key_V: -        return Settings::NativeKeyboard::V; -    case Qt::Key_W: -        return Settings::NativeKeyboard::W; -    case Qt::Key_X: -        return Settings::NativeKeyboard::X; -    case Qt::Key_Y: -        return Settings::NativeKeyboard::Y; -    case Qt::Key_Z: -        return Settings::NativeKeyboard::Z; -    case Qt::Key_1: -        return Settings::NativeKeyboard::N1; -    case Qt::Key_2: -        return Settings::NativeKeyboard::N2; -    case Qt::Key_3: -        return Settings::NativeKeyboard::N3; -    case Qt::Key_4: -        return Settings::NativeKeyboard::N4; -    case Qt::Key_5: -        return Settings::NativeKeyboard::N5; -    case Qt::Key_6: -        return Settings::NativeKeyboard::N6; -    case Qt::Key_7: -        return Settings::NativeKeyboard::N7; -    case Qt::Key_8: -        return Settings::NativeKeyboard::N8; -    case Qt::Key_9: -        return Settings::NativeKeyboard::N9; -    case Qt::Key_0: -        return Settings::NativeKeyboard::N0; -    case Qt::Key_Return: -        return Settings::NativeKeyboard::Return; -    case Qt::Key_Escape: -        return Settings::NativeKeyboard::Escape; -    case Qt::Key_Backspace: -        return Settings::NativeKeyboard::Backspace; -    case Qt::Key_Tab: -        return Settings::NativeKeyboard::Tab; -    case Qt::Key_Space: -        return Settings::NativeKeyboard::Space; -    case Qt::Key_Minus: -        return Settings::NativeKeyboard::Minus; -    case Qt::Key_Plus: -    case Qt::Key_questiondown: -        return Settings::NativeKeyboard::Plus; -    case Qt::Key_BracketLeft: -    case Qt::Key_BraceLeft: -        return Settings::NativeKeyboard::OpenBracket; -    case Qt::Key_BracketRight: -    case Qt::Key_BraceRight: -        return Settings::NativeKeyboard::CloseBracket; -    case Qt::Key_Bar: -        return Settings::NativeKeyboard::Pipe; -    case Qt::Key_Dead_Tilde: -        return Settings::NativeKeyboard::Tilde; -    case Qt::Key_Ntilde: -    case Qt::Key_Semicolon: -        return Settings::NativeKeyboard::Semicolon; -    case Qt::Key_Apostrophe: -        return Settings::NativeKeyboard::Quote; -    case Qt::Key_Dead_Grave: -        return Settings::NativeKeyboard::Backquote; -    case Qt::Key_Comma: -        return Settings::NativeKeyboard::Comma; -    case Qt::Key_Period: -        return Settings::NativeKeyboard::Period; -    case Qt::Key_Slash: -        return Settings::NativeKeyboard::Slash; -    case Qt::Key_CapsLock: -        return Settings::NativeKeyboard::CapsLock; -    case Qt::Key_F1: -        return Settings::NativeKeyboard::F1; -    case Qt::Key_F2: -        return Settings::NativeKeyboard::F2; -    case Qt::Key_F3: -        return Settings::NativeKeyboard::F3; -    case Qt::Key_F4: -        return Settings::NativeKeyboard::F4; -    case Qt::Key_F5: -        return Settings::NativeKeyboard::F5; -    case Qt::Key_F6: -        return Settings::NativeKeyboard::F6; -    case Qt::Key_F7: -        return Settings::NativeKeyboard::F7; -    case Qt::Key_F8: -        return Settings::NativeKeyboard::F8; -    case Qt::Key_F9: -        return Settings::NativeKeyboard::F9; -    case Qt::Key_F10: -        return Settings::NativeKeyboard::F10; -    case Qt::Key_F11: -        return Settings::NativeKeyboard::F11; -    case Qt::Key_F12: -        return Settings::NativeKeyboard::F12; -    case Qt::Key_Print: -        return Settings::NativeKeyboard::PrintScreen; -    case Qt::Key_ScrollLock: -        return Settings::NativeKeyboard::ScrollLock; -    case Qt::Key_Pause: -        return 
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 6acfb7b06..d88efacd7 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -401,224 +401,127 @@ void GRenderWindow::closeEvent(QCloseEvent* event) {
 }
 
 int GRenderWindow::QtKeyToSwitchKey(Qt::Key qt_key) {
-    switch (qt_key) {
-    case Qt::Key_A:
-        return Settings::NativeKeyboard::A;
-    case Qt::Key_B:
-        return Settings::NativeKeyboard::B;
-    case Qt::Key_C:
-        return Settings::NativeKeyboard::C;
-    case Qt::Key_D:
-        return Settings::NativeKeyboard::D;
-    case Qt::Key_E:
-        return Settings::NativeKeyboard::E;
-    case Qt::Key_F:
-        return Settings::NativeKeyboard::F;
-    case Qt::Key_G:
-        return Settings::NativeKeyboard::G;
-    case Qt::Key_H:
-        return Settings::NativeKeyboard::H;
-    case Qt::Key_I:
-        return Settings::NativeKeyboard::I;
-    case Qt::Key_J:
-        return Settings::NativeKeyboard::J;
-    case Qt::Key_K:
-        return Settings::NativeKeyboard::K;
-    case Qt::Key_L:
-        return Settings::NativeKeyboard::L;
-    case Qt::Key_M:
-        return Settings::NativeKeyboard::M;
-    case Qt::Key_N:
-        return Settings::NativeKeyboard::N;
-    case Qt::Key_O:
-        return Settings::NativeKeyboard::O;
-    case Qt::Key_P:
-        return Settings::NativeKeyboard::P;
-    case Qt::Key_Q:
-        return Settings::NativeKeyboard::Q;
-    case Qt::Key_R:
-        return Settings::NativeKeyboard::R;
-    case Qt::Key_S:
-        return Settings::NativeKeyboard::S;
-    case Qt::Key_T:
-        return Settings::NativeKeyboard::T;
-    case Qt::Key_U:
-        return Settings::NativeKeyboard::U;
-    case Qt::Key_V:
-        return Settings::NativeKeyboard::V;
-    case Qt::Key_W:
-        return Settings::NativeKeyboard::W;
-    case Qt::Key_X:
-        return Settings::NativeKeyboard::X;
-    case Qt::Key_Y:
-        return Settings::NativeKeyboard::Y;
-    case Qt::Key_Z:
-        return Settings::NativeKeyboard::Z;
-    case Qt::Key_1:
-        return Settings::NativeKeyboard::N1;
-    case Qt::Key_2:
-        return Settings::NativeKeyboard::N2;
-    case Qt::Key_3:
-        return Settings::NativeKeyboard::N3;
-    case Qt::Key_4:
-        return Settings::NativeKeyboard::N4;
-    case Qt::Key_5:
-        return Settings::NativeKeyboard::N5;
-    case Qt::Key_6:
-        return Settings::NativeKeyboard::N6;
-    case Qt::Key_7:
-        return Settings::NativeKeyboard::N7;
-    case Qt::Key_8:
-        return Settings::NativeKeyboard::N8;
-    case Qt::Key_9:
-        return Settings::NativeKeyboard::N9;
-    case Qt::Key_0:
-        return Settings::NativeKeyboard::N0;
-    case Qt::Key_Return:
-        return Settings::NativeKeyboard::Return;
-    case Qt::Key_Escape:
-        return Settings::NativeKeyboard::Escape;
-    case Qt::Key_Backspace:
-        return Settings::NativeKeyboard::Backspace;
-    case Qt::Key_Tab:
-        return Settings::NativeKeyboard::Tab;
-    case Qt::Key_Space:
-        return Settings::NativeKeyboard::Space;
-    case Qt::Key_Minus:
-        return Settings::NativeKeyboard::Minus;
-    case Qt::Key_Plus:
-    case Qt::Key_questiondown:
-        return Settings::NativeKeyboard::Plus;
-    case Qt::Key_BracketLeft:
-    case Qt::Key_BraceLeft:
-        return Settings::NativeKeyboard::OpenBracket;
-    case Qt::Key_BracketRight:
-    case Qt::Key_BraceRight:
-        return Settings::NativeKeyboard::CloseBracket;
-    case Qt::Key_Bar:
-        return Settings::NativeKeyboard::Pipe;
-    case Qt::Key_Dead_Tilde:
-        return Settings::NativeKeyboard::Tilde;
-    case Qt::Key_Ntilde:
-    case Qt::Key_Semicolon:
-        return Settings::NativeKeyboard::Semicolon;
-    case Qt::Key_Apostrophe:
-        return Settings::NativeKeyboard::Quote;
-    case Qt::Key_Dead_Grave:
-        return Settings::NativeKeyboard::Backquote;
-    case Qt::Key_Comma:
-        return Settings::NativeKeyboard::Comma;
-    case Qt::Key_Period:
-        return Settings::NativeKeyboard::Period;
-    case Qt::Key_Slash:
-        return Settings::NativeKeyboard::Slash;
-    case Qt::Key_CapsLock:
-        return Settings::NativeKeyboard::CapsLock;
-    case Qt::Key_F1:
-        return Settings::NativeKeyboard::F1;
-    case Qt::Key_F2:
-        return Settings::NativeKeyboard::F2;
-    case Qt::Key_F3:
-        return Settings::NativeKeyboard::F3;
-    case Qt::Key_F4:
-        return Settings::NativeKeyboard::F4;
-    case Qt::Key_F5:
-        return Settings::NativeKeyboard::F5;
-    case Qt::Key_F6:
-        return Settings::NativeKeyboard::F6;
-    case Qt::Key_F7:
-        return Settings::NativeKeyboard::F7;
-    case Qt::Key_F8:
-        return Settings::NativeKeyboard::F8;
-    case Qt::Key_F9:
-        return Settings::NativeKeyboard::F9;
-    case Qt::Key_F10:
-        return Settings::NativeKeyboard::F10;
-    case Qt::Key_F11:
-        return Settings::NativeKeyboard::F11;
-    case Qt::Key_F12:
-        return Settings::NativeKeyboard::F12;
-    case Qt::Key_Print:
-        return Settings::NativeKeyboard::PrintScreen;
-    case Qt::Key_ScrollLock:
-        return Settings::NativeKeyboard::ScrollLock;
-    case Qt::Key_Pause:
-        return Settings::NativeKeyboard::Pause;
-    case Qt::Key_Insert:
-        return Settings::NativeKeyboard::Insert;
-    case Qt::Key_Home:
-        return Settings::NativeKeyboard::Home;
-    case Qt::Key_PageUp:
-        return Settings::NativeKeyboard::PageUp;
-    case Qt::Key_Delete:
-        return Settings::NativeKeyboard::Delete;
-    case Qt::Key_End:
-        return Settings::NativeKeyboard::End;
-    case Qt::Key_PageDown:
-        return Settings::NativeKeyboard::PageDown;
-    case Qt::Key_Right:
-        return Settings::NativeKeyboard::Right;
-    case Qt::Key_Left:
-        return Settings::NativeKeyboard::Left;
-    case Qt::Key_Down:
-        return Settings::NativeKeyboard::Down;
-    case Qt::Key_Up:
-        return Settings::NativeKeyboard::Up;
-    case Qt::Key_NumLock:
-        return Settings::NativeKeyboard::NumLock;
-    // Numpad keys are missing here
-    case Qt::Key_F13:
-        return Settings::NativeKeyboard::F13;
-    case Qt::Key_F14:
-        return Settings::NativeKeyboard::F14;
-    case Qt::Key_F15:
-        return Settings::NativeKeyboard::F15;
-    case Qt::Key_F16:
-        return Settings::NativeKeyboard::F16;
-    case Qt::Key_F17:
-        return Settings::NativeKeyboard::F17;
-    case Qt::Key_F18:
-        return Settings::NativeKeyboard::F18;
-    case Qt::Key_F19:
-        return Settings::NativeKeyboard::F19;
-    case Qt::Key_F20:
-        return Settings::NativeKeyboard::F20;
-    case Qt::Key_F21:
-        return Settings::NativeKeyboard::F21;
-    case Qt::Key_F22:
-        return Settings::NativeKeyboard::F22;
-    case Qt::Key_F23:
-        return Settings::NativeKeyboard::F23;
-    case Qt::Key_F24:
-        return Settings::NativeKeyboard::F24;
-    // case Qt:::
-    //    return Settings::NativeKeyboard::KPComma;
-    // case Qt:::
-    //    return Settings::NativeKeyboard::Ro;
-    case Qt::Key_Hiragana_Katakana:
-        return Settings::NativeKeyboard::KatakanaHiragana;
-    case Qt::Key_yen:
-        return Settings::NativeKeyboard::Yen;
-    case Qt::Key_Henkan:
-        return Settings::NativeKeyboard::Henkan;
-    case Qt::Key_Muhenkan:
-        return Settings::NativeKeyboard::Muhenkan;
-    // case Qt:::
-    //    return Settings::NativeKeyboard::NumPadCommaPc98;
-    case Qt::Key_Hangul:
-        return Settings::NativeKeyboard::HangulEnglish;
-    case Qt::Key_Hangul_Hanja:
-        return Settings::NativeKeyboard::Hanja;
-    case Qt::Key_Katakana:
-        return Settings::NativeKeyboard::KatakanaKey;
-    case Qt::Key_Hiragana:
-        return Settings::NativeKeyboard::HiraganaKey;
-    case Qt::Key_Zenkaku_Hankaku:
-        return Settings::NativeKeyboard::ZenkakuHankaku;
-    // Modifier keys are handled by the modifier property
-    default:
-        return Settings::NativeKeyboard::None;
+    static constexpr std::array<std::pair<Qt::Key, Settings::NativeKeyboard::Keys>, 106> key_map = {
+        std::pair<Qt::Key, Settings::NativeKeyboard::Keys>{Qt::Key_A, Settings::NativeKeyboard::A},
+        {Qt::Key_A, Settings::NativeKeyboard::A},
+        {Qt::Key_B, Settings::NativeKeyboard::B},
+        {Qt::Key_C, Settings::NativeKeyboard::C},
+        {Qt::Key_D, Settings::NativeKeyboard::D},
+        {Qt::Key_E, Settings::NativeKeyboard::E},
+        {Qt::Key_F, Settings::NativeKeyboard::F},
+        {Qt::Key_G, Settings::NativeKeyboard::G},
+        {Qt::Key_H, Settings::NativeKeyboard::H},
+        {Qt::Key_I, Settings::NativeKeyboard::I},
+        {Qt::Key_J, Settings::NativeKeyboard::J},
+        {Qt::Key_K, Settings::NativeKeyboard::K},
+        {Qt::Key_L, Settings::NativeKeyboard::L},
+        {Qt::Key_M, Settings::NativeKeyboard::M},
+        {Qt::Key_N, Settings::NativeKeyboard::N},
+        {Qt::Key_O, Settings::NativeKeyboard::O},
+        {Qt::Key_P, Settings::NativeKeyboard::P},
+        {Qt::Key_Q, Settings::NativeKeyboard::Q},
+        {Qt::Key_R, Settings::NativeKeyboard::R},
+        {Qt::Key_S, Settings::NativeKeyboard::S},
+        {Qt::Key_T, Settings::NativeKeyboard::T},
+        {Qt::Key_U, Settings::NativeKeyboard::U},
+        {Qt::Key_V, Settings::NativeKeyboard::V},
+        {Qt::Key_W, Settings::NativeKeyboard::W},
+        {Qt::Key_X, Settings::NativeKeyboard::X},
+        {Qt::Key_Y, Settings::NativeKeyboard::Y},
+        {Qt::Key_Z, Settings::NativeKeyboard::Z},
+        {Qt::Key_1, Settings::NativeKeyboard::N1},
+        {Qt::Key_2, Settings::NativeKeyboard::N2},
+        {Qt::Key_3, Settings::NativeKeyboard::N3},
+        {Qt::Key_4, Settings::NativeKeyboard::N4},
+        {Qt::Key_5, Settings::NativeKeyboard::N5},
+        {Qt::Key_6, Settings::NativeKeyboard::N6},
+        {Qt::Key_7, Settings::NativeKeyboard::N7},
+        {Qt::Key_8, Settings::NativeKeyboard::N8},
+        {Qt::Key_9, Settings::NativeKeyboard::N9},
+        {Qt::Key_0, Settings::NativeKeyboard::N0},
+        {Qt::Key_Return, Settings::NativeKeyboard::Return},
+        {Qt::Key_Escape, Settings::NativeKeyboard::Escape},
+        {Qt::Key_Backspace, Settings::NativeKeyboard::Backspace},
+        {Qt::Key_Tab, Settings::NativeKeyboard::Tab},
+        {Qt::Key_Space, Settings::NativeKeyboard::Space},
+        {Qt::Key_Minus, Settings::NativeKeyboard::Minus},
+        {Qt::Key_Plus, Settings::NativeKeyboard::Plus},
+        {Qt::Key_questiondown, Settings::NativeKeyboard::Plus},
+        {Qt::Key_BracketLeft, Settings::NativeKeyboard::OpenBracket},
+        {Qt::Key_BraceLeft, Settings::NativeKeyboard::OpenBracket},
+        {Qt::Key_BracketRight, Settings::NativeKeyboard::CloseBracket},
+        {Qt::Key_BraceRight, Settings::NativeKeyboard::CloseBracket},
+        {Qt::Key_Bar, Settings::NativeKeyboard::Pipe},
+        {Qt::Key_Dead_Tilde, Settings::NativeKeyboard::Tilde},
+        {Qt::Key_Ntilde, Settings::NativeKeyboard::Semicolon},
+        {Qt::Key_Semicolon, Settings::NativeKeyboard::Semicolon},
+        {Qt::Key_Apostrophe, Settings::NativeKeyboard::Quote},
+        {Qt::Key_Dead_Grave, Settings::NativeKeyboard::Backquote},
+        {Qt::Key_Comma, Settings::NativeKeyboard::Comma},
+        {Qt::Key_Period, Settings::NativeKeyboard::Period},
+        {Qt::Key_Slash, Settings::NativeKeyboard::Slash},
+        {Qt::Key_CapsLock, Settings::NativeKeyboard::CapsLockKey},
+        {Qt::Key_F1, Settings::NativeKeyboard::F1},
+        {Qt::Key_F2, Settings::NativeKeyboard::F2},
+        {Qt::Key_F3, Settings::NativeKeyboard::F3},
+        {Qt::Key_F4, Settings::NativeKeyboard::F4},
+        {Qt::Key_F5, Settings::NativeKeyboard::F5},
+        {Qt::Key_F6, Settings::NativeKeyboard::F6},
+        {Qt::Key_F7, Settings::NativeKeyboard::F7},
+        {Qt::Key_F8, Settings::NativeKeyboard::F8},
+        {Qt::Key_F9, Settings::NativeKeyboard::F9},
+        {Qt::Key_F10, Settings::NativeKeyboard::F10},
+        {Qt::Key_F11, Settings::NativeKeyboard::F11},
+        {Qt::Key_F12, Settings::NativeKeyboard::F12},
+        {Qt::Key_Print, Settings::NativeKeyboard::PrintScreen},
+        {Qt::Key_ScrollLock, Settings::NativeKeyboard::ScrollLockKey},
+        {Qt::Key_Pause, Settings::NativeKeyboard::Pause},
+        {Qt::Key_Insert, Settings::NativeKeyboard::Insert},
+        {Qt::Key_Home, Settings::NativeKeyboard::Home},
+        {Qt::Key_PageUp, Settings::NativeKeyboard::PageUp},
+        {Qt::Key_Delete, Settings::NativeKeyboard::Delete},
+        {Qt::Key_End, Settings::NativeKeyboard::End},
+        {Qt::Key_PageDown, Settings::NativeKeyboard::PageDown},
+        {Qt::Key_Right, Settings::NativeKeyboard::Right},
+        {Qt::Key_Left, Settings::NativeKeyboard::Left},
+        {Qt::Key_Down, Settings::NativeKeyboard::Down},
+        {Qt::Key_Up, Settings::NativeKeyboard::Up},
+        {Qt::Key_NumLock, Settings::NativeKeyboard::NumLockKey},
+        // Numpad keys are missing here
+        {Qt::Key_F13, Settings::NativeKeyboard::F13},
+        {Qt::Key_F14, Settings::NativeKeyboard::F14},
+        {Qt::Key_F15, Settings::NativeKeyboard::F15},
+        {Qt::Key_F16, Settings::NativeKeyboard::F16},
+        {Qt::Key_F17, Settings::NativeKeyboard::F17},
+        {Qt::Key_F18, Settings::NativeKeyboard::F18},
+        {Qt::Key_F19, Settings::NativeKeyboard::F19},
+        {Qt::Key_F20, Settings::NativeKeyboard::F20},
+        {Qt::Key_F21, Settings::NativeKeyboard::F21},
+        {Qt::Key_F22, Settings::NativeKeyboard::F22},
+        {Qt::Key_F23, Settings::NativeKeyboard::F23},
+        {Qt::Key_F24, Settings::NativeKeyboard::F24},
+        // {Qt::..., Settings::NativeKeyboard::KPComma},
+        // {Qt::..., Settings::NativeKeyboard::Ro},
+        {Qt::Key_Hiragana_Katakana, Settings::NativeKeyboard::KatakanaHiragana},
+        {Qt::Key_yen, Settings::NativeKeyboard::Yen},
+        {Qt::Key_Henkan, Settings::NativeKeyboard::Henkan},
+        {Qt::Key_Muhenkan, Settings::NativeKeyboard::Muhenkan},
+        // {Qt::..., Settings::NativeKeyboard::NumPadCommaPc98},
+        {Qt::Key_Hangul, Settings::NativeKeyboard::HangulEnglish},
+        {Qt::Key_Hangul_Hanja, Settings::NativeKeyboard::Hanja},
+        {Qt::Key_Katakana, Settings::NativeKeyboard::KatakanaKey},
+        {Qt::Key_Hiragana, Settings::NativeKeyboard::HiraganaKey},
+        {Qt::Key_Zenkaku_Hankaku, Settings::NativeKeyboard::ZenkakuHankaku},
+        // Modifier keys are handled by the modifier property
+    };
+
+    for (const auto& [qkey, nkey] : key_map) {
+        if (qt_key == qkey) {
+            return nkey;
+        }
     }
+
+    return Settings::NativeKeyboard::None;
 }
 
 int GRenderWindow::QtModifierToSwitchModifier(Qt::KeyboardModifiers qt_modifiers) {
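Replacing the 200-line switch with a constexpr table keeps the mapping as data in one place, and a linear scan over 106 entries is negligible for key events. The duplicated Qt::Key_A entry at the top appears to exist so the initializer count matches the declared array size of 106. The hand-rolled loop could equally be expressed with std::find_if; a sketch of that variant over a reduced table, where the enum values stand in for Settings::NativeKeyboard:

#include <algorithm>
#include <array>
#include <utility>

enum class SwitchKey { None, A, B };

// Reduced stand-in for the 106-entry key_map in the hunk above.
constexpr std::array<std::pair<int, SwitchKey>, 2> kKeyMap{{
    {0x41, SwitchKey::A}, // Qt::Key_A
    {0x42, SwitchKey::B}, // Qt::Key_B
}};

SwitchKey QtKeyToSwitchKey(int qt_key) {
    const auto it =
        std::find_if(kKeyMap.begin(), kKeyMap.end(),
                     [qt_key](const auto& entry) { return entry.first == qt_key; });
    return it != kKeyMap.end() ? it->second : SwitchKey::None;
}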
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 57fd10dab..c27f8196d 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -9,7 +9,7 @@
 #ifdef __APPLE__
 #include <unistd.h> // for chdir
 #endif
-#ifdef __linux__
+#ifdef __unix__
 #include <csignal>
 #include <sys/socket.h>
 #endif
@@ -275,7 +275,7 @@ static void OverrideWindowsFont() {
 #endif
 
 bool GMainWindow::CheckDarkMode() {
-#ifdef __linux__
+#ifdef __unix__
     const QPalette test_palette(qApp->palette());
     const QColor text_color = test_palette.color(QPalette::Active, QPalette::Text);
     const QColor window_color = test_palette.color(QPalette::Active, QPalette::Window);
@@ -283,7 +283,7 @@ bool GMainWindow::CheckDarkMode() {
 #else
     // TODO: Windows
     return false;
-#endif // __linux__
+#endif // __unix__
 }
 
 GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan)
@@ -291,7 +291,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
       input_subsystem{std::make_shared<InputCommon::InputSubsystem>()}, config{std::move(config_)},
       vfs{std::make_shared<FileSys::RealVfsFilesystem>()},
       provider{std::make_unique<FileSys::ManualContentProvider>()} {
-#ifdef __linux__
+#ifdef __unix__
     SetupSigInterrupts();
 #endif
     system->Initialize();
@@ -509,7 +509,7 @@ GMainWindow::~GMainWindow() {
         delete render_window;
     }
 
-#ifdef __linux__
+#ifdef __unix__
     ::close(sig_interrupt_fds[0]);
     ::close(sig_interrupt_fds[1]);
 #endif
@@ -1379,7 +1379,7 @@ void GMainWindow::OnDisplayTitleBars(bool show) {
 }
 
 void GMainWindow::SetupPrepareForSleep() {
-#ifdef __linux__
+#ifdef __unix__
     auto bus = QDBusConnection::systemBus();
     if (bus.isConnected()) {
         const bool success = bus.connect(
@@ -1393,7 +1393,7 @@ void GMainWindow::SetupPrepareForSleep() {
     } else {
         LOG_WARNING(Frontend, "QDBusConnection system bus is not connected");
     }
-#endif // __linux__
+#endif // __unix__
 }
 
 void GMainWindow::OnPrepareForSleep(bool prepare_sleep) {
@@ -1415,7 +1415,7 @@ void GMainWindow::OnPrepareForSleep(bool prepare_sleep) {
     }
 }
 
-#ifdef __linux__
+#ifdef __unix__
 static std::optional<QDBusObjectPath> HoldWakeLockLinux(u32 window_id = 0) {
     if (!QDBusConnection::sessionBus().isConnected()) {
         return {};
@@ -1500,14 +1500,14 @@ void GMainWindow::OnSigInterruptNotifierActivated() {
     emit SigInterrupt();
 }
 
-#endif // __linux__
+#endif // __unix__
 
 void GMainWindow::PreventOSSleep() {
 #ifdef _WIN32
     SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_DISPLAY_REQUIRED);
 #elif defined(HAVE_SDL2)
     SDL_DisableScreenSaver();
-#ifdef __linux__
+#ifdef __unix__
     auto reply = HoldWakeLockLinux(winId());
     if (reply) {
         wake_lock = std::move(reply.value());
@@ -1521,7 +1521,7 @@ void GMainWindow::AllowOSSleep() {
     SetThreadExecutionState(ES_CONTINUOUS);
 #elif defined(HAVE_SDL2)
     SDL_EnableScreenSaver();
-#ifdef __linux__
+#ifdef __unix__
     if (!wake_lock.path().isEmpty()) {
         ReleaseWakeLockLinux(wake_lock);
     }
@@ -4082,7 +4082,7 @@ void GMainWindow::SetDiscordEnabled([[maybe_unused]] bool state) {
 }
 
 void GMainWindow::changeEvent(QEvent* event) {
-#ifdef __linux__
+#ifdef __unix__
     // PaletteChange event appears to only reach so far into the GUI, explicitly asking to
    // UpdateUITheme is a decent work around
     if (event->type() == QEvent::PaletteChange) {
@@ -4097,7 +4097,7 @@ void GMainWindow::changeEvent(QEvent* event) {
         }
         last_window_color = window_color;
     }
-#endif // __linux__
+#endif // __unix__
 
     QWidget::changeEvent(event);
 }
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index 0ab0d54b3..b73f550dd 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -15,7 +15,7 @@
 #include "yuzu/compatibility_list.h"
 #include "yuzu/hotkeys.h"
 
-#ifdef __linux__
+#ifdef __unix__
 #include <QVariant>
 #include <QtDBus/QDBusInterface>
 #include <QtDBus/QtDBus>
@@ -255,7 +255,7 @@ private:
     void changeEvent(QEvent* event) override;
     void closeEvent(QCloseEvent* event) override;
 
-#ifdef __linux__
+#ifdef __unix__
     void SetupSigInterrupts();
     static void HandleSigInterrupt(int);
     void OnSigInterruptNotifierActivated();
@@ -436,7 +436,7 @@ private:
     // True if TAS recording dialog is visible
     bool is_tas_recording_dialog_active{};
 
-#ifdef __linux__
+#ifdef __unix__
     QSocketNotifier* sig_interrupt_notifier;
     static std::array<int, 3> sig_interrupt_fds;
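The __linux__ to __unix__ widening also covers SetupSigInterrupts and the sig_interrupt_fds members declared at the end of main.h. That machinery is the classic self-pipe trick: a signal handler may only call async-signal-safe functions, so it writes a byte into a socketpair and a QSocketNotifier turns the wakeup into an ordinary event-loop callback. A sketch of the pattern, assuming Qt 5 and with names of our own choosing (yuzu's actual members differ, e.g. it keeps three descriptors):

#include <array>
#include <csignal>
#include <sys/socket.h>
#include <unistd.h>

#include <QObject>
#include <QSocketNotifier>

static std::array<int, 2> sig_fds{};

// Runs in signal context: only async-signal-safe calls are allowed here,
// so just forward one byte into the socket pair.
static void HandleSignal(int) {
    const char byte = 1;
    (void)::write(sig_fds[0], &byte, sizeof(byte));
}

// Runs at startup on the Qt side: the notifier fires on the event loop
// whenever the handler wrote to the pair, where full Qt API is safe again.
void SetupSignalForwarding(QObject* parent) {
    ::socketpair(AF_UNIX, SOCK_STREAM, 0, sig_fds.data());
    std::signal(SIGINT, HandleSignal);
    std::signal(SIGTERM, HandleSignal);
    auto* notifier = new QSocketNotifier(sig_fds[1], QSocketNotifier::Read, parent);
    QObject::connect(notifier, &QSocketNotifier::activated, parent, [] {
        char byte{};
        (void)::read(sig_fds[1], &byte, sizeof(byte));
        // ...trigger a clean shutdown from the GUI thread here...
    });
}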
