| author | Yuri Kunde Schlesner <yuriks@yuriks.net> | 2015-05-29 18:00:17 -0700 |
|---|---|---|
| committer | Yuri Kunde Schlesner <yuriks@yuriks.net> | 2015-05-29 18:00:17 -0700 |
| commit | 8a04c65e20ba1f5c472c026b7a558e1d54324306 (patch) | |
| tree | c11c0c6a472184f25479b0679748ee71653a2985 /src/core/hle | |
| parent | a489a846563fc64f236c7ede69ce0eb34af3521a (diff) | |
| parent | 88365a23e76fe3a4a64c9dc65aa8554e25a20af0 (diff) | |
Merge pull request #810 from yuriks/memmap
Kernel: Add VMManager to manage process address spaces
Diffstat (limited to 'src/core/hle')
| -rw-r--r-- | src/core/hle/kernel/vm_manager.cpp | 245 |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.h | 200 |
2 files changed, 445 insertions, 0 deletions
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
new file mode 100644
index 000000000..b2dd21542
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -0,0 +1,245 @@
+// Copyright 2015 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+
+#include "core/hle/kernel/vm_manager.h"
+#include "core/memory_setup.h"
+
+namespace Kernel {
+
+bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
+    ASSERT(base + size == next.base);
+    if (permissions != next.permissions ||
+            meminfo_state != next.meminfo_state ||
+            type != next.type) {
+        return false;
+    }
+    if (type == VMAType::AllocatedMemoryBlock &&
+            (backing_block != next.backing_block || offset + size != next.offset)) {
+        return false;
+    }
+    if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
+        return false;
+    }
+    if (type == VMAType::MMIO && paddr + size != next.paddr) {
+        return false;
+    }
+    return true;
+}
+
+VMManager::VMManager() {
+    Reset();
+}
+
+void VMManager::Reset() {
+    vma_map.clear();
+
+    // Initialize the map with a single free region covering the entire managed space.
+    VirtualMemoryArea initial_vma;
+    initial_vma.size = MAX_ADDRESS;
+    vma_map.emplace(initial_vma.base, initial_vma);
+
+    UpdatePageTableForVMA(initial_vma);
+}
+
+VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
+    return std::prev(vma_map.upper_bound(target));
+}
+
+ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
+        std::shared_ptr<std::vector<u8>> block, u32 offset, u32 size, MemoryState state) {
+    ASSERT(block != nullptr);
+    ASSERT(offset + size <= block->size());
+
+    // This is the appropriately sized VMA that will turn into our allocation.
+    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
+    VirtualMemoryArea& final_vma = vma_handle->second;
+    ASSERT(final_vma.size == size);
+
+    final_vma.type = VMAType::AllocatedMemoryBlock;
+    final_vma.permissions = VMAPermission::ReadWrite;
+    final_vma.meminfo_state = state;
+    final_vma.backing_block = block;
+    final_vma.offset = offset;
+    UpdatePageTableForVMA(final_vma);
+
+    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
+}
+
+ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state) {
+    ASSERT(memory != nullptr);
+
+    // This is the appropriately sized VMA that will turn into our allocation.
+    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
+    VirtualMemoryArea& final_vma = vma_handle->second;
+    ASSERT(final_vma.size == size);
+
+    final_vma.type = VMAType::BackingMemory;
+    final_vma.permissions = VMAPermission::ReadWrite;
+    final_vma.meminfo_state = state;
+    final_vma.backing_memory = memory;
+    UpdatePageTableForVMA(final_vma);
+
+    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
+}
+
+ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state) {
+    // This is the appropriately sized VMA that will turn into our allocation.
+    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
+    VirtualMemoryArea& final_vma = vma_handle->second;
+    ASSERT(final_vma.size == size);
+
+    final_vma.type = VMAType::MMIO;
+    final_vma.permissions = VMAPermission::ReadWrite;
+    final_vma.meminfo_state = state;
+    final_vma.paddr = paddr;
+    UpdatePageTableForVMA(final_vma);
+
+    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
+}
+
+void VMManager::Unmap(VMAHandle vma_handle) {
+    VMAIter iter = StripIterConstness(vma_handle);
+
+    VirtualMemoryArea& vma = iter->second;
+    vma.type = VMAType::Free;
+    vma.permissions = VMAPermission::None;
+    vma.meminfo_state = MemoryState::Free;
+
+    vma.backing_block = nullptr;
+    vma.offset = 0;
+    vma.backing_memory = nullptr;
+    vma.paddr = 0;
+
+    UpdatePageTableForVMA(vma);
+
+    MergeAdjacent(iter);
+}
+
+void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
+    VMAIter iter = StripIterConstness(vma_handle);
+
+    VirtualMemoryArea& vma = iter->second;
+    vma.permissions = new_perms;
+    UpdatePageTableForVMA(vma);
+
+    MergeAdjacent(iter);
+}
+
+VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
+    // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
+    // non-const access to its container.
+    return vma_map.erase(iter, iter); // Erases an empty range of elements
+}
+
+ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
+    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %8X", size);
+    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base);
+
+    VMAIter vma_handle = StripIterConstness(FindVMA(base));
+    if (vma_handle == vma_map.end()) {
+        // Target address is outside the range managed by the kernel
+        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
+                ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF5
+    }
+
+    VirtualMemoryArea& vma = vma_handle->second;
+    if (vma.type != VMAType::Free) {
+        // Region is already allocated
+        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
+                ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
+    }
+
+    u32 start_in_vma = base - vma.base;
+    u32 end_in_vma = start_in_vma + size;
+
+    if (end_in_vma > vma.size) {
+        // Requested allocation doesn't fit inside VMA
+        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
+                ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
+    }
+
+    if (end_in_vma != vma.size) {
+        // Split VMA at the end of the allocated region
+        SplitVMA(vma_handle, end_in_vma);
+    }
+    if (start_in_vma != 0) {
+        // Split VMA at the start of the allocated region
+        vma_handle = SplitVMA(vma_handle, start_in_vma);
+    }
+
+    return MakeResult<VMAIter>(vma_handle);
+}
+
+VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
+    VirtualMemoryArea& old_vma = vma_handle->second;
+    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
+
+    // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
+    // a bug. This restriction might be removed later.
+    ASSERT(offset_in_vma < old_vma.size);
+    ASSERT(offset_in_vma > 0);
+
+    old_vma.size = offset_in_vma;
+    new_vma.base += offset_in_vma;
+    new_vma.size -= offset_in_vma;
+
+    switch (new_vma.type) {
+    case VMAType::Free:
+        break;
+    case VMAType::AllocatedMemoryBlock:
+        new_vma.offset += offset_in_vma;
+        break;
+    case VMAType::BackingMemory:
+        new_vma.backing_memory += offset_in_vma;
+        break;
+    case VMAType::MMIO:
+        new_vma.paddr += offset_in_vma;
+        break;
+    }
+
+    ASSERT(old_vma.CanBeMergedWith(new_vma));
+
+    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
+}
+
+VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
+    VMAIter next_vma = std::next(iter);
+    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
+        iter->second.size += next_vma->second.size;
+        vma_map.erase(next_vma);
+    }
+
+    if (iter != vma_map.begin()) {
+        VMAIter prev_vma = std::prev(iter);
+        if (prev_vma->second.CanBeMergedWith(iter->second)) {
+            prev_vma->second.size += iter->second.size;
+            vma_map.erase(iter);
+            iter = prev_vma;
+        }
+    }
+
+    return iter;
+}
+
+void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
+    switch (vma.type) {
+    case VMAType::Free:
+        Memory::UnmapRegion(vma.base, vma.size);
+        break;
+    case VMAType::AllocatedMemoryBlock:
+        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
+        break;
+    case VMAType::BackingMemory:
+        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
+        break;
+    case VMAType::MMIO:
+        // TODO(yuriks): Add support for MMIO handlers.
+        Memory::MapIoRegion(vma.base, vma.size);
+        break;
+    }
+}
+
+}
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
new file mode 100644
index 000000000..22b724603
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.h
@@ -0,0 +1,200 @@
+// Copyright 2015 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "common/common_types.h"
+
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+enum class VMAType : u8 {
+    /// VMA represents an unmapped region of the address space.
+    Free,
+    /// VMA is backed by a ref-counted allocated memory block.
+    AllocatedMemoryBlock,
+    /// VMA is backed by a raw, unmanaged pointer.
+    BackingMemory,
+    /// VMA is mapped to MMIO registers at a fixed PAddr.
+    MMIO,
+    // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
+};
+
+/// Permissions for mapped memory blocks
+enum class VMAPermission : u8 {
+    None = 0,
+    Read = 1,
+    Write = 2,
+    Execute = 4,
+
+    ReadWrite = Read | Write,
+    ReadExecute = Read | Execute,
+    WriteExecute = Write | Execute,
+    ReadWriteExecute = Read | Write | Execute,
+};
+
+/// Set of values returned in MemoryInfo.state by svcQueryMemory.
+enum class MemoryState : u8 {
+    Free = 0,
+    Reserved = 1,
+    IO = 2,
+    Static = 3,
+    Code = 4,
+    Private = 5,
+    Shared = 6,
+    Continuous = 7,
+    Aliased = 8,
+    Alias = 9,
+    AliasCode = 10,
+    Locked = 11,
+};
+
+/**
+ * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
+ * with homogeneous attributes across its extents. In this particular implementation each VMA is
+ * also backed by a single host memory allocation.
+ */
+struct VirtualMemoryArea {
+    /// Virtual base address of the region.
+    VAddr base = 0;
+    /// Size of the region.
+    u32 size = 0;
+
+    VMAType type = VMAType::Free;
+    VMAPermission permissions = VMAPermission::None;
+    /// Tag returned by svcQueryMemory. Not otherwise used.
+    MemoryState meminfo_state = MemoryState::Free;
+
+    // Settings for type = AllocatedMemoryBlock
+    /// Memory block backing this VMA.
+    std::shared_ptr<std::vector<u8>> backing_block = nullptr;
+    /// Offset into the backing_block the mapping starts from.
+    u32 offset = 0;
+
+    // Settings for type = BackingMemory
+    /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
+    u8* backing_memory = nullptr;
+
+    // Settings for type = MMIO
+    /// Physical address of the register area this VMA maps to.
+    PAddr paddr = 0;
+
+    /// Tests if this area can be merged to the right with `next`.
+    bool CanBeMergedWith(const VirtualMemoryArea& next) const;
+};
+
+/**
+ * Manages a process' virtual addressing space. This class maintains a list of allocated and free
+ * regions in the address space, along with their attributes, and allows kernel clients to
+ * manipulate it, adjusting the page table to match.
+ *
+ * This is similar in idea and purpose to the VM manager present in operating system kernels, with
+ * the main difference being that it doesn't have to support swapping or memory mapping of files.
+ * The implementation is also simplified by not having to allocate page frames. See these articles
+ * about the Linux kernel for an explanation of the concept and implementation:
+ *  - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
+ *  - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
+ */
+class VMManager {
+    // TODO(yuriks): Make page tables switchable to support multiple VMManagers
+public:
+    /**
+     * The maximum amount of address space managed by the kernel. Addresses above this are never used.
+     * @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
+     */
+    static const u32 MAX_ADDRESS = 0x40000000;
+
+    /**
+     * A map covering the entirety of the managed address space, keyed by the `base` field of each
+     * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
+     * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
+     * merged when possible so that no two similar and adjacent regions exist that have not been
+     * merged.
+     */
+    std::map<VAddr, VirtualMemoryArea> vma_map;
+    using VMAHandle = decltype(vma_map)::const_iterator;
+
+    VMManager();
+
+    /// Clears the address space map, re-initializing with a single free area.
+    void Reset();
+
+    /// Finds the VMA in which the given address is included, or `vma_map.end()`.
+    VMAHandle FindVMA(VAddr target) const;
+
+    // TODO(yuriks): Should these functions actually return the handle?
+
+    /**
+     * Maps part of a ref-counted block of memory at a given address.
+     *
+     * @param target The guest address to start the mapping at.
+     * @param block The block to be mapped.
+     * @param offset Offset into `block` to map from.
+     * @param size Size of the mapping.
+     * @param state MemoryState tag to attach to the VMA.
+     */
+    ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
+            u32 offset, u32 size, MemoryState state);
+
+    /**
+     * Maps an unmanaged host memory pointer at a given address.
+     *
+     * @param target The guest address to start the mapping at.
+     * @param memory The memory to be mapped.
+     * @param size Size of the mapping.
+     * @param state MemoryState tag to attach to the VMA.
+     */
+    ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
+
+    /**
+     * Maps a memory-mapped IO region at a given address.
+     *
+     * @param target The guest address to start the mapping at.
+     * @param paddr The physical address where the registers are present.
+     * @param size Size of the mapping.
+     * @param state MemoryState tag to attach to the VMA.
+     */
+    ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state);
+
+    /// Unmaps the given VMA.
+    void Unmap(VMAHandle vma);
+
+    /// Changes the permissions of the given VMA.
+    void Reprotect(VMAHandle vma, VMAPermission new_perms);
+
+private:
+    using VMAIter = decltype(vma_map)::iterator;
+
+    /// Converts a VMAHandle to a mutable VMAIter.
+    VMAIter StripIterConstness(const VMAHandle& iter);
+
+    /**
+     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
+     * the appropriate error checking.
+     */
+    ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);

+    /**
+     * Splits a VMA in two, at the specified offset.
+     * @returns the right side of the split, with the original iterator becoming the left side.
+     */
+    VMAIter SplitVMA(VMAIter vma, u32 offset_in_vma);
+
+    /**
+     * Checks for and merges the specified VMA with adjacent ones if possible.
+     * @returns the merged VMA or the original if no merging was possible.
+     */
+    VMAIter MergeAdjacent(VMAIter vma);
+
+    /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
+    void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+};
+
+}
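For readers new to this subsystem, the sketch below shows how a kernel client might drive the new API end to end: carve a mapping out of the free region, tighten its permissions, and unmap it again. It is illustrative only and not part of the commit; it assumes the headers added above plus `RESULT_SUCCESS` and `CASCADE_RESULT` from `core/hle/result.h`, an already-initialized `Memory` subsystem, and made-up values for `MapExampleHeap`, `heap_base`, and `heap_size`.

```cpp
// Hypothetical usage sketch -- not part of this commit.
#include <memory>
#include <vector>

#include "common/assert.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"

// Maps a 64 KiB ref-counted block at an example guest address, makes it
// read-only, then unmaps it. Any failure code is returned to the caller.
static ResultCode MapExampleHeap(Kernel::VMManager& address_space) {
    using namespace Kernel;

    const VAddr heap_base = 0x08000000; // example address, page aligned
    const u32 heap_size = 0x10000;      // example size, page aligned

    auto block = std::make_shared<std::vector<u8>>(heap_size);

    // CarveVMA (called internally) rejects unaligned or already-mapped ranges;
    // CASCADE_RESULT forwards such an error to our caller.
    CASCADE_RESULT(VMManager::VMAHandle vma,
            address_space.MapMemoryBlock(heap_base, block, 0, heap_size, MemoryState::Private));

    // New mappings start out ReadWrite; permissions are adjusted afterwards.
    address_space.Reprotect(vma, VMAPermission::Read);

    // The handle is an iterator into vma_map, so the VMA's attributes can be inspected directly.
    ASSERT(address_space.FindVMA(heap_base)->second.type == VMAType::AllocatedMemoryBlock);

    // Unmapping turns the region back into a Free VMA and merges it with any free
    // neighbours, so unmapped space stays coalesced into as few VMAs as possible.
    address_space.Unmap(address_space.FindVMA(heap_base));
    ASSERT(address_space.FindVMA(heap_base)->second.type == VMAType::Free);

    return RESULT_SUCCESS;
}
```

Note how the same handle type doubles as a read-only view into `vma_map`; that is why `Unmap` and `Reprotect` go through `StripIterConstness` internally before mutating the map.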
