| author | bunnei <bunneidev@gmail.com> | 2022-09-09 21:12:37 -0700 |
|---|---|---|
| committer | bunnei <bunneidev@gmail.com> | 2022-10-18 19:13:34 -0700 |
| commit | 2bb41cffca7e5ec6383a59c513ef9d7e2def5f51 (patch) | |
| tree | e76369cb0d11749ce2ab586dba38c745d1e8a14a | |
| parent | 57a77e9ff4b4a63c106c0ac3448a8f1452b5384c (diff) | |
core: hle: kernel: k_memory_block_manager: Update.
| -rw-r--r-- | src/core/hle/kernel/k_memory_block_manager.cpp | 409 |
| -rw-r--r-- | src/core/hle/kernel/k_memory_block_manager.h | 145 |

2 files changed, 380 insertions(+), 174 deletions(-)
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index 3ddb9984f..c908af75a 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -2,221 +2,336 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/memory_types.h"
 
 namespace Kernel {
 
-KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
-    : start_addr{start_addr_}, end_addr{end_addr_} {
-    const u64 num_pages{(end_addr - start_addr) / PageSize};
-    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
-                                   KMemoryPermission::None, KMemoryAttribute::None);
-}
+KMemoryBlockManager::KMemoryBlockManager() = default;
 
-KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
-    auto node{memory_block_tree.begin()};
-    while (node != end()) {
-        const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
-        if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
-            return node;
-        }
-        node = std::next(node);
-    }
-    return end();
+Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+    // Allocate a block to encapsulate the address space, insert it into the tree.
+    KMemoryBlock* start_block = slab_manager->Allocate();
+    R_UNLESS(start_block != nullptr, ResultOutOfResource);
+
+    // Set our start and end.
+    m_start_address = st;
+    m_end_address = nd;
+    ASSERT(Common::IsAligned(m_start_address, PageSize));
+    ASSERT(Common::IsAligned(m_end_address, PageSize));
+
+    // Initialize and insert the block.
+    start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
+                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
+    m_memory_block_tree.insert(*start_block);
+
+    return ResultSuccess;
 }
 
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
-                                        std::size_t num_pages, std::size_t align,
-                                        std::size_t offset, std::size_t guard_pages) {
-    if (num_pages == 0) {
-        return {};
+void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
+                                   HostUnmapCallback&& host_unmap_callback) {
+    // Erase every block until we have none left.
+    auto it = m_memory_block_tree.begin();
+    while (it != m_memory_block_tree.end()) {
+        KMemoryBlock* block = std::addressof(*it);
+        it = m_memory_block_tree.erase(it);
+        host_unmap_callback(block->GetAddress(), block->GetSize());
+        slab_manager->Free(block);
+    }
 
-    const VAddr region_end{region_start + region_num_pages * PageSize};
-    const VAddr region_last{region_end - 1};
-    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
-        const auto info{it->GetMemoryInfo()};
-        if (region_last < info.GetAddress()) {
-            break;
-        }
+    ASSERT(m_memory_block_tree.empty());
+}
 
-        if (info.state != KMemoryState::Free) {
-            continue;
-        }
+VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
+                                        size_t num_pages, size_t alignment, size_t offset,
+                                        size_t guard_pages) const {
+    if (num_pages > 0) {
+        const VAddr region_end = region_start + region_num_pages * PageSize;
+        const VAddr region_last = region_end - 1;
+        for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
+             it++) {
+            const KMemoryInfo info = it->GetMemoryInfo();
+            if (region_last < info.GetAddress()) {
+                break;
+            }
+            if (info.m_state != KMemoryState::Free) {
+                continue;
+            }
 
-        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
-        area += guard_pages * PageSize;
+            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+            area += guard_pages * PageSize;
 
-        const VAddr offset_area{Common::AlignDown(area, align) + offset};
-        area = (area <= offset_area) ? offset_area : offset_area + align;
+            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+            area = (area <= offset_area) ? offset_area : offset_area + alignment;
 
-        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
-        const VAddr area_last{area_end - 1};
+            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
+            const VAddr area_last = area_end - 1;
 
-        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
-            area_last <= info.GetLastAddress()) {
-            return area;
+            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
+                area_last <= info.GetLastAddress()) {
+                return area;
+            }
         }
     }
 
     return {};
 }
 
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
-                                 KMemoryState state, KMemoryPermission perm,
-                                 KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
+void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
+                                            VAddr address, size_t num_pages) {
+    // Find the iterator now that we've updated.
+    iterator it = this->FindIterator(address);
+    if (address != m_start_address) {
+        it--;
+    }
 
-    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
+    // Coalesce blocks that we can.
+    while (true) {
+        iterator prev = it++;
+        if (it == m_memory_block_tree.end()) {
+            break;
+        }
 
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+        if (prev->CanMergeWith(*it)) {
+            KMemoryBlock* block = std::addressof(*it);
+            m_memory_block_tree.erase(it);
+            prev->Add(*block);
+            allocator->Free(block);
+            it = prev;
+        }
 
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
-                node = next_node;
-                continue;
-            }
+        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
+            break;
+        }
+    }
+}
 
-            iterator new_node{node};
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
+                                 KMemoryAttribute attr,
+                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(state, perm, attr)) {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
             }
+        } else {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
 
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
             }
 
-            new_node->Update(state, perm, attribute);
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
 
-            MergeAdjacent(new_node, next_node);
-        }
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
 
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
-        }
+                cur_info = it->GetMemoryInfo();
+            }
 
-        node = next_node;
+            // Update block state.
+            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        }
+        it++;
     }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
-void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                                 KMemoryPermission perm, KMemoryAttribute attribute) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
+void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
+                                        VAddr address, size_t num_pages, KMemoryState test_state,
+                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
+                                        KMemoryState state, KMemoryPermission perm,
+                                        KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+        if (it->HasProperties(test_state, test_perm, test_attr) &&
+            !it->HasProperties(state, perm, attr)) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != cur_address) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
 
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
 
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
 
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
+                cur_info = it->GetMemoryInfo();
             }
 
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
+            // Update block state.
+            it->Update(state, perm, attr, false, 0, 0);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right properties, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
             }
-
-            new_node->Update(state, perm, attribute);
-
-            MergeAdjacent(new_node, next_node);
-        }
-
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
         }
-
-        node = next_node;
+        it++;
     }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
-void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                                     size_t num_pages, MemoryBlockLockFunction lock_func,
                                      KMemoryPermission perm) {
-    const VAddr update_end_addr{addr + num_pages * PageSize};
-    iterator node{memory_block_tree.begin()};
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(address, PageSize));
 
-    while (node != memory_block_tree.end()) {
-        KMemoryBlock* block{&(*node)};
-        iterator next_node{std::next(node)};
-        const VAddr cur_addr{block->GetAddress()};
-        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+    VAddr cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
 
-        if (addr < cur_end_addr && cur_addr < update_end_addr) {
-            iterator new_node{node};
+    const VAddr end_address = address + (num_pages * PageSize);
 
-            if (addr > cur_addr) {
-                memory_block_tree.insert(node, block->Split(addr));
-            }
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
 
-            if (update_end_addr < cur_end_addr) {
-                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
-            }
+        // If we need to, create a new block before and insert it.
+        if (cur_info.m_address != cur_address) {
+            KMemoryBlock* new_block = allocator->Allocate();
 
-            lock_func(new_node, perm);
+            it->Split(new_block, cur_address);
+            it = m_memory_block_tree.insert(*new_block);
+            it++;
 
-            MergeAdjacent(new_node, next_node);
+            cur_info = it->GetMemoryInfo();
+            cur_address = cur_info.GetAddress();
         }
 
-        if (cur_end_addr - 1 >= update_end_addr - 1) {
-            break;
+        if (cur_info.GetSize() > remaining_size) {
+            // If we need to, create a new block after and insert it.
+            KMemoryBlock* new_block = allocator->Allocate();
+
+            it->Split(new_block, cur_address + remaining_size);
+            it = m_memory_block_tree.insert(*new_block);
+
+            cur_info = it->GetMemoryInfo();
         }
 
-        node = next_node;
+        // Call the locked update function.
+        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
+                                          cur_info.GetEndAddress() == end_address);
+        cur_address += cur_info.GetSize();
+        remaining_pages -= cur_info.GetNumPages();
+        it++;
     }
-}
 
-void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
-    const_iterator it{FindIterator(start)};
-    KMemoryInfo info{};
-    do {
-        info = it->GetMemoryInfo();
-        func(info);
-        it = std::next(it);
-    } while (info.addr + info.size - 1 < end - 1 && it != cend());
+    this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
-void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
-    KMemoryBlock* block{&(*it)};
-
-    auto EraseIt = [&](const iterator it_to_erase) {
-        if (next_it == it_to_erase) {
-            next_it = std::next(next_it);
+// Debug.
+bool KMemoryBlockManager::CheckState() const {
+    // Loop over every block, ensuring that we are sorted and coalesced.
+    auto it = m_memory_block_tree.cbegin();
+    auto prev = it++;
+    while (it != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        const KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        // Sequential blocks which can be merged should be merged.
+        if (prev->CanMergeWith(*it)) {
+            return false;
         }
-        memory_block_tree.erase(it_to_erase);
-    };
 
-    if (it != memory_block_tree.begin()) {
-        KMemoryBlock* prev{&(*std::prev(it))};
-
-        if (block->HasSameProperties(*prev)) {
-            const iterator prev_it{std::prev(it)};
+        // Sequential blocks should be sequential.
+        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
+            return false;
+        }
 
-            prev->Add(block->GetNumPages());
-            EraseIt(it);
+        // If the block is ipc locked, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            cur_info.m_ipc_lock_count == 0) {
+            return false;
+        }
 
-            it = prev_it;
-            block = prev;
+        // If the block is device shared, it must have a count.
+        if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            cur_info.m_device_use_count == 0) {
+            return false;
         }
+
+        // Advance the iterator.
+        prev = it++;
     }
 
-    if (it != cend()) {
-        const KMemoryBlock* const next{&(*std::next(it))};
+    // Our loop will miss checking the last block, potentially, so check it.
+    if (prev != m_memory_block_tree.cend()) {
+        const KMemoryInfo prev_info = prev->GetMemoryInfo();
+        // If the block is ipc locked, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
+            prev_info.m_ipc_lock_count == 0) {
+            return false;
+        }
 
-        if (block->HasSameProperties(*next)) {
-            block->Add(next->GetNumPages());
-            EraseIt(std::next(it));
+        // If the block is device shared, it must have a count.
+        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
+            prev_info.m_device_use_count == 0) {
+            return false;
         }
     }
+
+    return true;
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index e14741b89..b4ee4e319 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -4,63 +4,154 @@
 #pragma once
 
 #include <functional>
-#include <list>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_memory_block.h"
 
 namespace Kernel {
 
+class KMemoryBlockManagerUpdateAllocator {
+public:
+    static constexpr size_t MaxBlocks = 2;
+
+private:
+    KMemoryBlock* m_blocks[MaxBlocks];
+    size_t m_index;
+    KMemoryBlockSlabManager* m_slab_manager;
+
+private:
+    Result Initialize(size_t num_blocks) {
+        // Check num blocks.
+        ASSERT(num_blocks <= MaxBlocks);
+
+        // Set index.
+        m_index = MaxBlocks - num_blocks;
+
+        // Allocate the blocks.
+        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
+            m_blocks[m_index + i] = m_slab_manager->Allocate();
+            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
+        }
+
+        return ResultSuccess;
+    }
+
+public:
+    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
+                                       size_t num_blocks = MaxBlocks)
+        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
+        *out_result = this->Initialize(num_blocks);
+    }
+
+    ~KMemoryBlockManagerUpdateAllocator() {
+        for (const auto& block : m_blocks) {
+            if (block != nullptr) {
+                m_slab_manager->Free(block);
+            }
+        }
+    }
+
+    KMemoryBlock* Allocate() {
+        ASSERT(m_index < MaxBlocks);
+        ASSERT(m_blocks[m_index] != nullptr);
+        KMemoryBlock* block = nullptr;
+        std::swap(block, m_blocks[m_index++]);
+        return block;
+    }
+
+    void Free(KMemoryBlock* block) {
+        ASSERT(m_index <= MaxBlocks);
+        ASSERT(block != nullptr);
+        if (m_index == 0) {
+            m_slab_manager->Free(block);
+        } else {
+            m_blocks[--m_index] = block;
+        }
+    }
+};
+
 class KMemoryBlockManager final {
 public:
-    using MemoryBlockTree = std::list<KMemoryBlock>;
+    using MemoryBlockTree =
+        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
+    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
+                                                           bool right);
     using iterator = MemoryBlockTree::iterator;
     using const_iterator = MemoryBlockTree::const_iterator;
 
 public:
-    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
+    KMemoryBlockManager();
+
+    using HostUnmapCallback = std::function<void(VAddr, u64)>;
+
+    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
 
     iterator end() {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator end() const {
-        return memory_block_tree.end();
+        return m_memory_block_tree.end();
     }
     const_iterator cend() const {
-        return memory_block_tree.cend();
+        return m_memory_block_tree.cend();
     }
 
-    iterator FindIterator(VAddr addr);
+    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
+                       size_t alignment, size_t offset, size_t guard_pages) const;
 
-    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
-                       std::size_t align, std::size_t offset, std::size_t guard_pages);
+    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+                KMemoryBlockDisableMergeAttribute set_disable_attr,
+                KMemoryBlockDisableMergeAttribute clear_disable_attr);
+    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
+                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
-                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
-                KMemoryPermission perm, KMemoryAttribute attribute);
+    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
+                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
+                       KMemoryAttribute attr);
 
-    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
-                KMemoryPermission perm = KMemoryPermission::None,
-                KMemoryAttribute attribute = KMemoryAttribute::None);
-
-    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
-    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
-                    KMemoryPermission perm);
+    iterator FindIterator(VAddr address) const {
+        return m_memory_block_tree.find(KMemoryBlock(
+            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
+    }
 
-    using IterateFunc = std::function<void(const KMemoryInfo&)>;
-    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
+    const KMemoryBlock* FindBlock(VAddr address) const {
+        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
+            return std::addressof(*it);
+        }
 
-    KMemoryBlock& FindBlock(VAddr addr) {
-        return *FindIterator(addr);
+        return nullptr;
     }
 
+    // Debug.
+    bool CheckState() const;
+
 private:
-    void MergeAdjacent(iterator it, iterator& next_it);
+    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+                           size_t num_pages);
 
-    [[maybe_unused]] const VAddr start_addr;
-    [[maybe_unused]] const VAddr end_addr;
+    MemoryBlockTree m_memory_block_tree;
+    VAddr m_start_address{};
+    VAddr m_end_address{};
+};
 
-    MemoryBlockTree memory_block_tree;
+class KScopedMemoryBlockManagerAuditor {
+public:
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
+        ASSERT(m_manager->CheckState());
+    }
+    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
+        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
+    ~KScopedMemoryBlockManagerAuditor() {
+        ASSERT(m_manager->CheckState());
+    }
+
+private:
+    KMemoryBlockManager* m_manager;
 };
 
 } // namespace Kernel
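For context on the new allocator: `KMemoryBlockManagerUpdateAllocator` lets a caller pre-allocate the worst-case number of blocks (`MaxBlocks == 2`, one potential split at each end of the updated range) before the tree is touched, so slab exhaustion surfaces as an up-front `ResultOutOfResource` rather than a failure mid-update. Below is a minimal usage sketch; the surrounding `KPageTable` plumbing is not part of this diff, and the helper name `SetRangeToFree` is hypothetical:

```cpp
#include "core/hle/kernel/k_memory_block_manager.h"

namespace Kernel {

// Hypothetical helper showing how a caller is expected to drive Update()
// after this change: allocate first, then mutate the tree.
Result SetRangeToFree(KMemoryBlockManager& manager, KMemoryBlockSlabManager* slab_manager,
                      VAddr address, size_t num_pages) {
    // Pre-allocate the worst-case two blocks; if the slab is exhausted we
    // fail here, before the tree has been modified at all.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), slab_manager);
    R_TRY(allocator_result);

    // Splits draw blocks from the allocator; CoalesceForUpdate() hands merged
    // blocks back to it, and the allocator's destructor returns any leftovers
    // to the slab manager.
    manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
                   KMemoryPermission::None, KMemoryAttribute::None,
                   KMemoryBlockDisableMergeAttribute::None,
                   KMemoryBlockDisableMergeAttribute::None);

    return ResultSuccess;
}

} // namespace Kernel
```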
