author     Liam <byteslice@airmail.cc>    2022-12-23 21:32:13 -0500
committer  Liam <byteslice@airmail.cc>    2022-12-25 12:55:21 -0500
commit     3392fdac9be7f1ecf35f9dbbb8f686b208e52bec
tree       6daa6c464720572d90c7af242ce739d3a4ba0991
parent     9933121256a259dfecc282358880278fd0c156f3
k_page_group: synchronize
-rw-r--r--  src/core/CMakeLists.txt                  |   1
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp    |  29
-rw-r--r--  src/core/hle/kernel/k_code_memory.h      |   6
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp |   8
-rw-r--r--  src/core/hle/kernel/k_page_group.cpp     | 121
-rw-r--r--  src/core/hle/kernel/k_page_group.h       | 162
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp     |  39
-rw-r--r--  src/core/hle/kernel/k_page_table.h       |   5
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp  |  19
-rw-r--r--  src/core/hle/kernel/memory_types.h       |   3
-rw-r--r--  src/core/hle/kernel/svc.cpp              |   2

11 files changed, 270 insertions(+), 125 deletions(-)
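Before the file-by-file diff, it may help to see the shape of the change: KPageGroup drops its std::list<Node> storage in favor of an intrusive singly linked list of KBlockInfo records (allocated from a KBlockInfoManager slab), and exposes begin()/end() so call sites range-for over the group directly instead of going through Nodes(). The following is a minimal standalone sketch of that pattern, not yuzu's actual code; every type and name in it is an illustrative stand-in.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

// Stand-in page size; yuzu's kernel defines its own PageSize constant.
constexpr std::size_t PageSize = 0x1000;

// A block records a run of physically contiguous pages and carries its own
// "next" pointer, so the group needs no separate node allocations.
struct BlockInfo {
    BlockInfo* next{};
    std::uint64_t address{};
    std::size_t num_pages{};
};

class PageGroup {
public:
    // Forward iterator walking the intrusive list; this is what lets callers
    // write `for (const auto& block : group)` instead of `group.Nodes()`.
    class Iterator {
    public:
        using iterator_category = std::forward_iterator_tag;
        using value_type = const BlockInfo;
        using difference_type = std::ptrdiff_t;
        using pointer = value_type*;
        using reference = value_type&;

        explicit Iterator(pointer n) : m_node(n) {}
        bool operator==(const Iterator& rhs) const { return m_node == rhs.m_node; }
        bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }
        reference operator*() const { return *m_node; }
        Iterator& operator++() {
            m_node = m_node->next;
            return *this;
        }

    private:
        pointer m_node;
    };

    // Append a block, merging with the tail when physically contiguous.
    // In the real commit the block would come from a slab allocator
    // (KBlockInfoManager), not from the caller's stack.
    void AddBlock(BlockInfo* block) {
        if (m_last != nullptr &&
            m_last->address + m_last->num_pages * PageSize == block->address) {
            m_last->num_pages += block->num_pages;
            return;
        }
        if (m_last != nullptr) {
            m_last->next = block;
        } else {
            m_first = block;
        }
        m_last = block;
    }

    Iterator begin() const { return Iterator{m_first}; }
    Iterator end() const { return Iterator{nullptr}; }

private:
    BlockInfo* m_first{};
    BlockInfo* m_last{};
};

int main() {
    BlockInfo a{nullptr, 0x1000, 2};
    BlockInfo b{nullptr, 0x3000, 1}; // contiguous with a, so it gets merged
    PageGroup group;
    group.AddBlock(&a);
    group.AddBlock(&b);

    std::size_t total = 0;
    for (const auto& block : group) { // new style; previously group.Nodes()
        total += block.num_pages;
    }
    assert(total == 3);
}
```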
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0252c8c31..5afdeb5ff 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -226,6 +226,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
+    hle/kernel/k_page_group.cpp
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4b1c134d4..d9da1e600 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -27,13 +27,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group = {};
+    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
+    R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
 
     // Clear the memory.
-    for (const auto& block : m_page_group.Nodes()) {
+    for (const auto& block : *m_page_group) {
         std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
     }
 
@@ -51,12 +51,13 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
-        const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
+        const size_t size = m_page_group->GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, *m_page_group);
     }
 
     // Close the page group.
-    m_page_group = {};
+    m_page_group->Close();
+    m_page_group->Finalize();
 
     // Close our reference to our owner.
     m_owner->Close();
@@ -64,7 +65,7 @@ void KCodeMemory::Finalize() {
 
 Result KCodeMemory::Map(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -74,7 +75,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
     // Map the memory.
     R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
-        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+        address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
     m_is_mapped = true;
@@ -84,13 +85,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
                                                           KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
@@ -101,7 +102,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
@@ -125,7 +126,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
     // Map the memory.
     R_TRY(
-        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
 
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -135,13 +136,13 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Validate the size.
-    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+    R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
 
     // Lock ourselves.
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
 
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index 2e7e1436a..5b260b385 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <optional>
+
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
@@ -49,11 +51,11 @@ public:
         return m_address;
     }
     size_t GetSize() const {
-        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+        return m_is_initialized ? m_page_group->GetNumPages() * PageSize : 0;
     }
 
 private:
-    KPageGroup m_page_group{};
+    std::optional<KPageGroup> m_page_group{};
     KProcess* m_owner{};
     VAddr m_address{};
     KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index bd33571da..cd6ea388e 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -223,7 +223,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
 
     // Ensure that we don't leave anything un-freed.
     ON_RESULT_FAILURE {
-        for (const auto& it : out->Nodes()) {
+        for (const auto& it : *out) {
             auto& manager = this->GetManager(it.GetAddress());
             const size_t node_num_pages = std::min<u64>(
                 it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@@ -285,7 +285,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
                                       m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
-    for (const auto& block : out->Nodes()) {
+    for (const auto& block : *out) {
         PAddr cur_address = block.GetAddress();
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
@@ -335,7 +335,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
     // Perform optimized memory tracking, if we should.
     if (optimized) {
         // Iterate over the allocated blocks.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             // Get the block extents.
             const PAddr block_address = block.GetAddress();
             const size_t block_pages = block.GetNumPages();
@@ -391,7 +391,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
         }
     } else {
         // Set all the allocated memory.
-        for (const auto& block : out->Nodes()) {
+        for (const auto& block : *out) {
             std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                         block.GetSize());
         }
diff --git a/src/core/hle/kernel/k_page_group.cpp b/src/core/hle/kernel/k_page_group.cpp
new file mode 100644
index 000000000..d8c644a33
--- /dev/null
+++ b/src/core/hle/kernel/k_page_group.cpp
@@ -0,0 +1,121 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+void KPageGroup::Finalize() {
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+void KPageGroup::CloseAndReset() {
+    auto& mm = m_kernel.MemoryManager();
+
+    KBlockInfo* cur = m_first_block;
+    while (cur != nullptr) {
+        KBlockInfo* next = cur->GetNext();
+        mm.Close(cur->GetAddress(), cur->GetNumPages());
+        m_manager->Free(cur);
+        cur = next;
+    }
+
+    m_first_block = nullptr;
+    m_last_block = nullptr;
+}
+
+size_t KPageGroup::GetNumPages() const {
+    size_t num_pages = 0;
+
+    for (const auto& it : *this) {
+        num_pages += it.GetNumPages();
+    }
+
+    return num_pages;
+}
+
+Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
+    // Succeed immediately if we're adding no pages.
+    R_SUCCEED_IF(num_pages == 0);
+
+    // Check for overflow.
+    ASSERT(addr < addr + num_pages * PageSize);
+
+    // Try to just append to the last block.
+    if (m_last_block != nullptr) {
+        R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
+    }
+
+    // Allocate a new block.
+    KBlockInfo* new_block = m_manager->Allocate();
+    R_UNLESS(new_block != nullptr, ResultOutOfResource);
+
+    // Initialize the block.
+    new_block->Initialize(addr, num_pages);
+
+    // Add the block to our list.
+    if (m_last_block != nullptr) {
+        m_last_block->SetNext(new_block);
+    } else {
+        m_first_block = new_block;
+    }
+    m_last_block = new_block;
+
+    R_SUCCEED();
+}
+
+void KPageGroup::Open() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Open(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::OpenFirst() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.OpenFirst(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+void KPageGroup::Close() const {
+    auto& mm = m_kernel.MemoryManager();
+
+    for (const auto& it : *this) {
+        mm.Close(it.GetAddress(), it.GetNumPages());
+    }
+}
+
+bool KPageGroup::IsEquivalentTo(const KPageGroup& rhs) const {
+    auto lit = this->begin();
+    auto rit = rhs.begin();
+    auto lend = this->end();
+    auto rend = rhs.end();
+
+    while (lit != lend && rit != rend) {
+        if (*lit != *rit) {
+            return false;
+        }
+
+        ++lit;
+        ++rit;
+    }
+
+    return lit == lend && rit == rend;
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index 316f172f2..b0b243e7d 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
@@ -13,24 +13,22 @@
 
 namespace Kernel {
 
+class KBlockInfoManager;
 class KPageGroup;
 
 class KBlockInfo {
-private:
-    friend class KPageGroup;
-
 public:
-    constexpr KBlockInfo() = default;
+    constexpr explicit KBlockInfo() : m_next(nullptr) {}
 
-    constexpr void Initialize(PAddr addr, size_t np) {
+    constexpr void Initialize(KPhysicalAddress addr, size_t np) {
         ASSERT(Common::IsAligned(addr, PageSize));
         ASSERT(static_cast<u32>(np) == np);
 
-        m_page_index = static_cast<u32>(addr) / PageSize;
+        m_page_index = static_cast<u32>(addr / PageSize);
         m_num_pages = static_cast<u32>(np);
     }
 
-    constexpr PAddr GetAddress() const {
+    constexpr KPhysicalAddress GetAddress() const {
         return m_page_index * PageSize;
     }
     constexpr size_t GetNumPages() const {
@@ -39,10 +37,10 @@ public:
     constexpr size_t GetSize() const {
         return this->GetNumPages() * PageSize;
     }
-    constexpr PAddr GetEndAddress() const {
+    constexpr KPhysicalAddress GetEndAddress() const {
         return (m_page_index + m_num_pages) * PageSize;
     }
-    constexpr PAddr GetLastAddress() const {
+    constexpr KPhysicalAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -62,8 +60,8 @@ public:
         return !(*this == rhs);
     }
 
-    constexpr bool IsStrictlyBefore(PAddr addr) const {
-        const PAddr end = this->GetEndAddress();
+    constexpr bool IsStrictlyBefore(KPhysicalAddress addr) const {
+        const KPhysicalAddress end = this->GetEndAddress();
 
         if (m_page_index != 0 && end == 0) {
             return false;
@@ -72,11 +70,11 @@ public:
         return end < addr;
     }
 
-    constexpr bool operator<(PAddr addr) const {
+    constexpr bool operator<(KPhysicalAddress addr) const {
         return this->IsStrictlyBefore(addr);
     }
 
-    constexpr bool TryConcatenate(PAddr addr, size_t np) {
+    constexpr bool TryConcatenate(KPhysicalAddress addr, size_t np) {
         if (addr != 0 && addr == this->GetEndAddress()) {
             m_num_pages += static_cast<u32>(np);
             return true;
@@ -90,96 +88,118 @@ private:
     }
 
 private:
+    friend class KPageGroup;
+
     KBlockInfo* m_next{};
     u32 m_page_index{};
     u32 m_num_pages{};
 };
 static_assert(sizeof(KBlockInfo) <= 0x10);
 
-class KPageGroup final {
+class KPageGroup {
 public:
-    class Node final {
+    class Iterator {
     public:
-        constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
+        using iterator_category = std::forward_iterator_tag;
+        using value_type = const KBlockInfo;
+        using difference_type = std::ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type&;
+
+        constexpr explicit Iterator(pointer n) : m_node(n) {}
+
+        constexpr bool operator==(const Iterator& rhs) const {
+            return m_node == rhs.m_node;
+        }
+        constexpr bool operator!=(const Iterator& rhs) const {
+            return !(*this == rhs);
+        }
 
-        constexpr u64 GetAddress() const {
-            return addr;
+        constexpr pointer operator->() const {
+            return m_node;
+        }
+        constexpr reference operator*() const {
+            return *m_node;
         }
-        constexpr std::size_t GetNumPages() const {
-            return num_pages;
+        constexpr Iterator& operator++() {
+            m_node = m_node->GetNext();
+            return *this;
        }
-        constexpr std::size_t GetSize() const {
-            return GetNumPages() * PageSize;
+        constexpr Iterator operator++(int) {
+            const Iterator it{*this};
+            ++(*this);
+            return it;
         }
 
     private:
-        u64 addr{};
-        std::size_t num_pages{};
+        pointer m_node{};
     };
 
-public:
-    KPageGroup() = default;
-    KPageGroup(u64 address, u64 num_pages) {
-        ASSERT(AddBlock(address, num_pages).IsSuccess());
+    explicit KPageGroup(KernelCore& kernel, KBlockInfoManager* m)
+        : m_kernel{kernel}, m_manager{m} {}
+    ~KPageGroup() {
+        this->Finalize();
     }
 
-    constexpr std::list<Node>& Nodes() {
-        return nodes;
-    }
+    void CloseAndReset();
+    void Finalize();
 
-    constexpr const std::list<Node>& Nodes() const {
-        return nodes;
+    Iterator begin() const {
+        return Iterator{m_first_block};
+    }
+    Iterator end() const {
+        return Iterator{nullptr};
+    }
+    bool empty() const {
+        return m_first_block == nullptr;
     }
 
-    std::size_t GetNumPages() const {
-        std::size_t num_pages = 0;
-        for (const Node& node : nodes) {
-            num_pages += node.GetNumPages();
-        }
-        return num_pages;
-    }
-
-    bool IsEqual(KPageGroup& other) const {
-        auto this_node = nodes.begin();
-        auto other_node = other.nodes.begin();
-        while (this_node != nodes.end() && other_node != other.nodes.end()) {
-            if (this_node->GetAddress() != other_node->GetAddress() ||
-                this_node->GetNumPages() != other_node->GetNumPages()) {
-                return false;
-            }
-            this_node = std::next(this_node);
-            other_node = std::next(other_node);
-        }
+    Result AddBlock(KPhysicalAddress addr, size_t num_pages);
+    void Open() const;
+    void OpenFirst() const;
+    void Close() const;
+
+    size_t GetNumPages() const;
+
+    bool IsEquivalentTo(const KPageGroup& rhs) const;
+
+    bool operator==(const KPageGroup& rhs) const {
+        return this->IsEquivalentTo(rhs);
+    }
 
-        return this_node == nodes.end() && other_node == other.nodes.end();
+    bool operator!=(const KPageGroup& rhs) const {
+        return !(*this == rhs);
     }
 
-    Result AddBlock(u64 address, u64 num_pages) {
-        if (!num_pages) {
-            return ResultSuccess;
+private:
+    KernelCore& m_kernel;
+    KBlockInfo* m_first_block{};
+    KBlockInfo* m_last_block{};
+    KBlockInfoManager* m_manager{};
+};
+
+class KScopedPageGroup {
+public:
+    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+        if (m_pg) {
+            m_pg->Open();
         }
-        if (!nodes.empty()) {
-            const auto node = nodes.back();
-            if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
-                address = node.GetAddress();
-                num_pages += node.GetNumPages();
-                nodes.pop_back();
-            }
+    }
+    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    ~KScopedPageGroup() {
+        if (m_pg) {
+            m_pg->Close();
         }
-        nodes.push_back({address, num_pages});
-        return ResultSuccess;
     }
 
-    bool Empty() const {
-        return nodes.empty();
+    void CancelClose() {
+        m_pg = nullptr;
     }
 
-    void Finalize() {}
-
 private:
-    std::list<Node> nodes;
+    const KPageGroup* m_pg{};
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 612fc76fa..83131774c 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -100,7 +100,7 @@ constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType a
 
 KPageTable::KPageTable(Core::System& system_)
     : m_general_lock{system_.Kernel()},
-      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {}
+      m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
 
 KPageTable::~KPageTable() = default;
 
@@ -373,7 +373,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                                  m_memory_block_slab_manager);
 
     // Allocate and open.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, num_pages,
         KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
@@ -432,7 +432,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
         const size_t num_pages = size / PageSize;
 
         // Create page groups for the memory being mapped.
-        KPageGroup pg;
+        KPageGroup pg{m_kernel, m_block_info_manager};
         AddRegionToPages(src_address, num_pages, pg);
 
         // Reprotect the source as kernel-read/not mapped.
@@ -593,7 +593,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     const size_t size = num_pages * PageSize;
 
     // We're making a new group, not adding to an existing one.
-    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
 
     // Begin traversal.
     Common::PageTable::TraversalContext context;
@@ -640,11 +640,10 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
     R_SUCCEED();
 }
 
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages) {
     ASSERT(this->IsLockedByCurrentThread());
 
     const size_t size = num_pages * PageSize;
-    const auto& pg = pg_ll.Nodes();
     const auto& memory_layout = m_system.Kernel().MemoryLayout();
 
     // Empty groups are necessarily invalid.
@@ -1572,7 +1571,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
             // Allocate pages for the new memory.
-            KPageGroup pg;
+            KPageGroup pg{m_kernel, m_block_info_manager};
             R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
                 &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
 
@@ -1650,7 +1649,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                 KScopedPageTableUpdater updater(this);
 
                 // Prepare to iterate over the memory.
-                auto pg_it = pg.Nodes().begin();
+                auto pg_it = pg.begin();
                 PAddr pg_phys_addr = pg_it->GetAddress();
                 size_t pg_pages = pg_it->GetNumPages();
 
@@ -1703,7 +1702,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                     // Release any remaining unmapped memory.
                     m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                     m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
-                    for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+                    for (++pg_it; pg_it != pg.end(); ++pg_it) {
                         m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                     pg_it->GetNumPages());
                         m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
@@ -1731,7 +1730,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                             // Check if we're at the end of the physical block.
                             if (pg_pages == 0) {
                                 // Ensure there are more pages to map.
-                                ASSERT(pg_it != pg.Nodes().end());
+                                ASSERT(pg_it != pg.end());
 
                                 // Advance our physical block.
                                 ++pg_it;
@@ -1955,7 +1954,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
     R_TRY(dst_allocator_result);
 
     // Map the memory.
-    KPageGroup page_linked_list;
+    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
     const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
         KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
@@ -2022,14 +2021,14 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
 
-    KPageGroup src_pages;
-    KPageGroup dst_pages;
+    KPageGroup src_pages{m_kernel, m_block_info_manager};
+    KPageGroup dst_pages{m_kernel, m_block_info_manager};
     const size_t num_pages{size / PageSize};
 
     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
+    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
 
     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -2060,7 +2059,7 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{
                 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
             result.IsError()) {
@@ -2160,7 +2159,7 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
 
     VAddr cur_addr{addr};
 
-    for (const auto& node : page_linked_list.Nodes()) {
+    for (const auto& node : page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
@@ -2527,13 +2526,13 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate pages for the heap extension.
-    KPageGroup pg;
+    KPageGroup pg{m_kernel, m_block_info_manager};
     R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
         &pg, allocation_size / PageSize,
         KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
 
     // Clear all the newly allocated pages.
-    for (const auto& it : pg.Nodes()) {
+    for (const auto& it : pg) {
         std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
                     it.GetSize());
     }
@@ -2610,7 +2609,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
     if (is_map_only) {
         R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
     } else {
-        KPageGroup page_group;
+        KPageGroup page_group{m_kernel, m_block_info_manager};
        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
             &page_group, needed_num_pages,
             KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
@@ -2795,7 +2794,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
     ASSERT(num_pages > 0);
     ASSERT(num_pages == page_group.GetNumPages());
 
-    for (const auto& node : page_group.Nodes()) {
+    for (const auto& node : page_group) {
         const size_t size{node.GetNumPages() * PageSize};
 
         switch (operation) {
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index f1ca785d7..5df5ba1a9 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -107,6 +107,10 @@ public:
         return *m_page_table_impl;
     }
 
+    KBlockInfoManager* GetBlockInfoManager() {
+        return m_block_info_manager;
+    }
+
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
 
 protected:
@@ -488,6 +492,7 @@ private:
     std::unique_ptr<Common::PageTable> m_page_table_impl;
 
     Core::System& m_system;
+    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 0aa68103c..3cf2b5d91 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -13,10 +13,7 @@
 namespace Kernel {
 
 KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
-
-KSharedMemory::~KSharedMemory() {
-    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size);
-}
+KSharedMemory::~KSharedMemory() = default;
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
                                  Svc::MemoryPermission owner_permission_,
@@ -49,7 +46,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     R_UNLESS(physical_address != 0, ResultOutOfMemory);
 
     //! Insert the result into our page group.
-    page_group.emplace(physical_address, num_pages);
+    page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    page_group->AddBlock(physical_address, num_pages);
 
     // Commit our reservation.
     memory_reservation.Commit();
@@ -62,7 +60,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;
 
     // Clear all pages in the memory.
-    for (const auto& block : page_group->Nodes()) {
+    for (const auto& block : *page_group) {
         std::memset(device_memory_.GetPointer<void>(block.GetAddress()), 0, block.GetSize());
     }
 
@@ -71,13 +69,8 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
 
 void KSharedMemory::Finalize() {
     // Close and finalize the page group.
-    // page_group->Close();
-    // page_group->Finalize();
-
-    //! HACK: Manually close.
-    for (const auto& block : page_group->Nodes()) {
-        kernel.MemoryManager().Close(block.GetAddress(), block.GetNumPages());
-    }
+    page_group->Close();
+    page_group->Finalize();
 
     // Release the memory reservation.
     resource_limit->Release(LimitableResource::PhysicalMemoryMax, size);
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index 3975507bd..92b8b37ac 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -14,4 +14,7 @@ constexpr std::size_t PageSize{1 << PageBits};
 
 using Page = std::array<u8, PageSize>;
 
+using KPhysicalAddress = PAddr;
+using KProcessAddress = VAddr;
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 788ee2160..aca442196 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1485,7 +1485,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
              ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KPageGroup pg;
+    KPageGroup pg{system.Kernel(), dst_pt.GetBlockInfoManager()};
     R_TRY(src_pt.MakeAndOpenPageGroup(
         std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
         KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
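One more addition worth calling out: the header now defines KScopedPageGroup, an RAII guard that opens a page group's reference counts on construction and closes them on destruction, with CancelClose() for the success path where the references should outlive the scope. Below is a minimal standalone sketch of that idiom, again with stand-in types rather than yuzu's real classes.

```cpp
#include <cstdio>

// Stand-in for KPageGroup; only the reference-counting surface is modeled.
struct PageGroup {
    void Open() const { std::puts("open refs"); }
    void Close() const { std::puts("close refs"); }
};

class ScopedPageGroup {
public:
    explicit ScopedPageGroup(const PageGroup& pg) : m_pg(&pg) {
        m_pg->Open(); // take references up front
    }
    ~ScopedPageGroup() {
        if (m_pg != nullptr) {
            m_pg->Close(); // drop references if the operation failed
        }
    }
    // Keep the references alive past scope exit (call on success).
    void CancelClose() {
        m_pg = nullptr;
    }

private:
    const PageGroup* m_pg;
};

bool TryMap(const PageGroup& pg) {
    ScopedPageGroup guard(pg);
    const bool mapped = true; // stand-in for the actual mapping operation
    if (!mapped) {
        return false; // guard closes the group on this early exit
    }
    guard.CancelClose(); // success: references stay open with the mapping
    return true;
}

int main() {
    PageGroup pg;
    return TryMap(pg) ? 0 : 1;
}
```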
