From 4c2ed2706e3579ec1304907dad0d45673768e1fc Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 12:33:20 -0500 Subject: core/memory: Introduce skeleton of Memory class Currently, the main memory management code is one of the remaining places where we have global state. The next series of changes will aim to rectify this. This change simply introduces the main skeleton of the class that will contain all the necessary state. --- src/core/memory.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index fa49f3dd0..2098f13f7 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -24,6 +24,18 @@ namespace Memory { static Common::PageTable* current_page_table = nullptr; +// Implementation class used to keep the specifics of the memory subsystem hidden +// from outside classes. This also allows modification to the internals of the memory +// subsystem without needing to rebuild all files that make use of the memory interface. +struct Memory::Impl { + explicit Impl(Core::System& system_) : system{system_} {} + + Core::System& system; +}; + +Memory::Memory(Core::System& system) : impl{std::make_unique(system)} {} +Memory::~Memory() = default; + void SetCurrentPageTable(Kernel::Process& process) { current_page_table = &process.VMManager().page_table; -- cgit v1.2.3 From 323680e5ad3ca0e27f2dd1de26816741b3243bed Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 13:09:12 -0500 Subject: core/memory: Migrate over memory mapping functions to the new Memory class Migrates all of the direct mapping facilities over to the new memory class. In the process, this also obsoletes the need for memory_setup.h, so we can remove it entirely from the project. 
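For readers unfamiliar with the pattern the first patch introduces: the public Memory type only holds a std::unique_ptr to a forward-declared Impl struct, and every public member function forwards into it (the pimpl idiom). Below is a minimal, self-contained sketch of that pattern; the toy Core::System, the MapMemoryRegion signature, and the main() demo are illustrative stand-ins, not yuzu's real headers.

// Minimal, self-contained sketch of the pimpl pattern these patches apply.
// Core::System, the mapping signature, and main() are illustrative stand-ins,
// not yuzu's real headers.
#include <cstdint>
#include <iostream>
#include <memory>

namespace Core {
struct System {};  // stand-in for the real Core::System
}

class Memory {
public:
    explicit Memory(Core::System& system);
    ~Memory();  // defined out-of-line, where Impl is a complete type

    void MapMemoryRegion(std::uint64_t base, std::uint64_t size);

private:
    struct Impl;                 // only declared here; defined in the .cpp
    std::unique_ptr<Impl> impl;  // callers never see Impl's layout
};

// --- normally lives in memory.cpp ---
struct Memory::Impl {
    explicit Impl(Core::System& system_) : system{system_} {}

    void MapMemoryRegion(std::uint64_t base, std::uint64_t size) {
        std::cout << "map base=0x" << std::hex << base << " size=0x" << size << '\n';
    }

    Core::System& system;
};

Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
Memory::~Memory() = default;

void Memory::MapMemoryRegion(std::uint64_t base, std::uint64_t size) {
    impl->MapMemoryRegion(base, size);  // public API forwards to the hidden Impl
}

int main() {
    Core::System system;
    Memory memory{system};
    memory.MapMemoryRegion(0x1000, 0x2000);
}

Because Impl is only defined in the .cpp, internal changes to the memory subsystem no longer force a rebuild of every file that includes the memory interface, which is exactly the benefit the in-code comment in the patch calls out.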
--- src/core/memory.cpp | 177 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 106 insertions(+), 71 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 2098f13f7..28b65ca5e 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -17,7 +17,6 @@ #include "core/hle/kernel/process.h" #include "core/hle/kernel/vm_manager.h" #include "core/memory.h" -#include "core/memory_setup.h" #include "video_core/gpu.h" namespace Memory { @@ -30,99 +29,135 @@ static Common::PageTable* current_page_table = nullptr; struct Memory::Impl { explicit Impl(Core::System& system_) : system{system_} {} - Core::System& system; -}; + void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { + ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); + MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); + } -Memory::Memory(Core::System& system) : impl{std::make_unique(system)} {} -Memory::~Memory() = default; + void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer mmio_handler) { + ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); + MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, + Common::PageType::Special); + + const auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); + const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, + std::move(mmio_handler)}; + page_table.special_regions.add( + std::make_pair(interval, std::set{region})); + } -void SetCurrentPageTable(Kernel::Process& process) { - current_page_table = &process.VMManager().page_table; + void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { + ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); + MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, + Common::PageType::Unmapped); - const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); + const auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); + page_table.special_regions.erase(interval); + } - auto& system = Core::System::GetInstance(); - system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); - system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); - system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width); - system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); -} + void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + const auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); + const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; + page_table.special_regions.add( + std::make_pair(interval, std::set{region})); + } + + void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + const auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); + const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; + page_table.special_regions.subtract( 
+ std::make_pair(interval, std::set{region})); + } -static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, - Common::PageType type) { - LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, - (base + size) * PAGE_SIZE); - - // During boot, current_page_table might not be set yet, in which case we need not flush - if (Core::System::GetInstance().IsPoweredOn()) { - auto& gpu = Core::System::GetInstance().GPU(); - for (u64 i = 0; i < size; i++) { - const auto page = base + i; - if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) { - gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE); + /** + * Maps a region of pages as a specific type. + * + * @param page_table The page table to use to perform the mapping. + * @param base The base address to begin mapping at. + * @param size The total size of the range in bytes. + * @param memory The memory to map. + * @param type The page type to map the memory as. + */ + void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, + Common::PageType type) { + LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, + (base + size) * PAGE_SIZE); + + // During boot, current_page_table might not be set yet, in which case we need not flush + if (system.IsPoweredOn()) { + auto& gpu = system.GPU(); + for (u64 i = 0; i < size; i++) { + const auto page = base + i; + if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) { + gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE); + } } } - } - VAddr end = base + size; - ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", - base + page_table.pointers.size()); + const VAddr end = base + size; + ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", + base + page_table.pointers.size()); - std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); + std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); - if (memory == nullptr) { - std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory); - } else { - while (base != end) { - page_table.pointers[base] = memory; + if (memory == nullptr) { + std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, + memory); + } else { + while (base != end) { + page_table.pointers[base] = memory; - base += 1; - memory += PAGE_SIZE; + base += 1; + memory += PAGE_SIZE; + } } } -} -void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); -} + Core::System& system; +}; -void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer mmio_handler) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special); +Memory::Memory(Core::System& system) : impl{std::make_unique(system)} {} +Memory::~Memory() = default; - auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); - Common::SpecialRegion 
region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)}; - page_table.special_regions.add( - std::make_pair(interval, std::set{region})); +void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { + impl->MapMemoryRegion(page_table, base, size, target); } -void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped); +void Memory::MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer mmio_handler) { + impl->MapIoRegion(page_table, base, size, std::move(mmio_handler)); +} + +void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { + impl->UnmapRegion(page_table, base, size); +} - auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); - page_table.special_regions.erase(interval); +void Memory::AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + impl->AddDebugHook(page_table, base, size, std::move(hook)); } -void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer hook) { - auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); - Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - page_table.special_regions.add( - std::make_pair(interval, std::set{region})); +void Memory::RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + impl->RemoveDebugHook(page_table, base, size, std::move(hook)); } -void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer hook) { - auto interval = boost::icl::discrete_interval::closed(base, base + size - 1); - Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - page_table.special_regions.subtract( - std::make_pair(interval, std::set{region})); +void SetCurrentPageTable(Kernel::Process& process) { + current_page_table = &process.VMManager().page_table; + + const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); + + auto& system = Core::System::GetInstance(); + system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); + system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); + system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width); + system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); } /** -- cgit v1.2.3 From e58748fd802dc069e90928d12d4db9ff994a869d Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 13:46:41 -0500 Subject: core/memory: Migrate over address checking functions to the new Memory class A fairly straightforward migration. These member functions can just be mostly moved verbatim with minor changes. We already have the necessary plumbing in places that they're used. IsKernelVirtualAddress() can remain a non-member function, since it doesn't rely on class state in any form. 
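For context on what IsValidVirtualAddress() actually does, here is a rough sketch of the page-table lookup it performs: the upper bits of the virtual address index per-page metadata. The PageTable layout, PAGE_BITS, NUM_PAGES, and the demo below are simplified assumptions, not the real Common::PageTable.

// Rough sketch of the lookup behind IsValidVirtualAddress(); simplified
// stand-in types, not yuzu's Common::PageTable.
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::uint64_t PAGE_BITS = 12;    // 4 KiB pages
constexpr std::size_t NUM_PAGES = 1 << 8;  // deliberately tiny toy address space

enum class PageType { Unmapped, Memory, RasterizerCachedMemory, Special };

struct PageTable {
    std::array<std::uint8_t*, NUM_PAGES> pointers{};
    std::array<PageType, NUM_PAGES> attributes{};
};

bool IsValidVirtualAddress(const PageTable& table, std::uint64_t vaddr) {
    const std::size_t page = vaddr >> PAGE_BITS;
    if (table.pointers[page] != nullptr) {
        return true;  // directly mapped host memory
    }
    // Rasterizer-cached pages have no direct pointer but are still valid.
    return table.attributes[page] == PageType::RasterizerCachedMemory;
}

int main() {
    PageTable table{};
    std::uint8_t backing[1 << PAGE_BITS]{};
    table.pointers[1] = backing;
    table.attributes[1] = PageType::Memory;

    std::cout << IsValidVirtualAddress(table, 0x1000) << '\n';  // 1 (mapped)
    std::cout << IsValidVirtualAddress(table, 0x5000) << '\n';  // 0 (unmapped)
}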
--- src/core/memory.cpp | 51 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 20 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 28b65ca5e..4c13ea1e7 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -75,6 +75,29 @@ struct Memory::Impl { std::make_pair(interval, std::set{region})); } + bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { + const auto& page_table = process.VMManager().page_table; + + const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + return true; + } + + if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) { + return true; + } + + if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) { + return false; + } + + return false; + } + + bool IsValidVirtualAddress(VAddr vaddr) const { + return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); + } + /** * Maps a region of pages as a specific type. * @@ -148,6 +171,14 @@ void Memory::RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size impl->RemoveDebugHook(page_table, base, size, std::move(hook)); } +bool Memory::IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { + return impl->IsValidVirtualAddress(process, vaddr); +} + +bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { + return impl->IsValidVirtualAddress(vaddr); +} + void SetCurrentPageTable(Kernel::Process& process) { current_page_table = &process.VMManager().page_table; @@ -256,26 +287,6 @@ void Write(const VAddr vaddr, const T data) { } } -bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) { - const auto& page_table = process.VMManager().page_table; - - const u8* page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; - if (page_pointer) - return true; - - if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) - return true; - - if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) - return false; - - return false; -} - -bool IsValidVirtualAddress(const VAddr vaddr) { - return IsValidVirtualAddress(*Core::System::GetInstance().CurrentProcess(), vaddr); -} - bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -- cgit v1.2.3 From fc7d0a17b6ec7dfc44a56f3e4a8bd97108f1c596 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 13:58:03 -0500 Subject: core/memory: Move memory read/write implementation functions into an anonymous namespace These will eventually be migrated into the main Memory class, but for now, we put them in an anonymous namespace, so that the other functions that use them, can be migrated over separately. --- src/core/memory.cpp | 195 ++++++++++++++++++++++++++-------------------------- 1 file changed, 98 insertions(+), 97 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 4c13ea1e7..017033613 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -20,8 +20,105 @@ #include "video_core/gpu.h" namespace Memory { +namespace { +Common::PageTable* current_page_table = nullptr; -static Common::PageTable* current_page_table = nullptr; +/** + * Gets a pointer to the exact memory at the virtual address (i.e. 
not page aligned) + * using a VMA from the current process + */ +u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { + const auto& vm_manager = process.VMManager(); + + const auto it = vm_manager.FindVMA(vaddr); + DEBUG_ASSERT(vm_manager.IsValidHandle(it)); + + u8* direct_pointer = nullptr; + const auto& vma = it->second; + switch (vma.type) { + case Kernel::VMAType::AllocatedMemoryBlock: + direct_pointer = vma.backing_block->data() + vma.offset; + break; + case Kernel::VMAType::BackingMemory: + direct_pointer = vma.backing_memory; + break; + case Kernel::VMAType::Free: + return nullptr; + default: + UNREACHABLE(); + } + + return direct_pointer + (vaddr - vma.base); +} + +/** + * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) + * using a VMA from the current process. + */ +u8* GetPointerFromVMA(VAddr vaddr) { + return ::Memory::GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr); +} + +template +T Read(const VAddr vaddr) { + const u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + // NOTE: Avoid adding any extra logic to this fast-path block + T value; + std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); + return value; + } + + const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; + switch (type) { + case Common::PageType::Unmapped: + LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); + return 0; + case Common::PageType::Memory: + ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); + break; + case Common::PageType::RasterizerCachedMemory: { + const u8* const host_ptr{GetPointerFromVMA(vaddr)}; + Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); + T value; + std::memcpy(&value, host_ptr, sizeof(T)); + return value; + } + default: + UNREACHABLE(); + } + return {}; +} + +template +void Write(const VAddr vaddr, const T data) { + u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + // NOTE: Avoid adding any extra logic to this fast-path block + std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); + return; + } + + Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; + switch (type) { + case Common::PageType::Unmapped: + LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, + static_cast(data), vaddr); + return; + case Common::PageType::Memory: + ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); + break; + case Common::PageType::RasterizerCachedMemory: { + u8* const host_ptr{GetPointerFromVMA(vaddr)}; + Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); + std::memcpy(host_ptr, &data, sizeof(T)); + break; + } + default: + UNREACHABLE(); + } +} +} // Anonymous namespace // Implementation class used to keep the specifics of the memory subsystem hidden // from outside classes. This also allows modification to the internals of the memory @@ -191,102 +288,6 @@ void SetCurrentPageTable(Kernel::Process& process) { system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); } -/** - * Gets a pointer to the exact memory at the virtual address (i.e. 
not page aligned) - * using a VMA from the current process - */ -static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { - const auto& vm_manager = process.VMManager(); - - const auto it = vm_manager.FindVMA(vaddr); - DEBUG_ASSERT(vm_manager.IsValidHandle(it)); - - u8* direct_pointer = nullptr; - const auto& vma = it->second; - switch (vma.type) { - case Kernel::VMAType::AllocatedMemoryBlock: - direct_pointer = vma.backing_block->data() + vma.offset; - break; - case Kernel::VMAType::BackingMemory: - direct_pointer = vma.backing_memory; - break; - case Kernel::VMAType::Free: - return nullptr; - default: - UNREACHABLE(); - } - - return direct_pointer + (vaddr - vma.base); -} - -/** - * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) - * using a VMA from the current process. - */ -static u8* GetPointerFromVMA(VAddr vaddr) { - return GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr); -} - -template -T Read(const VAddr vaddr) { - const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer) { - // NOTE: Avoid adding any extra logic to this fast-path block - T value; - std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); - return value; - } - - Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; - switch (type) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); - return 0; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); - break; - case Common::PageType::RasterizerCachedMemory: { - auto host_ptr{GetPointerFromVMA(vaddr)}; - Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); - T value; - std::memcpy(&value, host_ptr, sizeof(T)); - return value; - } - default: - UNREACHABLE(); - } - return {}; -} - -template -void Write(const VAddr vaddr, const T data) { - u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer) { - // NOTE: Avoid adding any extra logic to this fast-path block - std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); - return; - } - - Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; - switch (type) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, - static_cast(data), vaddr); - return; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); - break; - case Common::PageType::RasterizerCachedMemory: { - auto host_ptr{GetPointerFromVMA(vaddr)}; - Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); - std::memcpy(host_ptr, &data, sizeof(T)); - break; - } - default: - UNREACHABLE(); - } -} - bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -- cgit v1.2.3 From 3f08e8d8d4ef16cf2468620fbfbdac46e43dcaef Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 15:19:15 -0500 Subject: core/memory: Migrate over GetPointer() With all of the interfaces ready for migration, it's trivial to migrate over GetPointer(). 
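A quick sketch of the translation GetPointer() performs may help: the page index selects a host pointer and the low bits give the byte offset inside that page. The PageTable layout and the demo in main() are simplified assumptions, not yuzu's actual interface.

// Sketch of the GetPointer() fast path; stand-in types only.
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::uint64_t PAGE_BITS = 12;
constexpr std::uint64_t PAGE_MASK = (1ULL << PAGE_BITS) - 1;
constexpr std::size_t NUM_PAGES = 1 << 8;

struct PageTable {
    std::array<std::uint8_t*, NUM_PAGES> pointers{};
};

std::uint8_t* GetPointer(PageTable& table, std::uint64_t vaddr) {
    std::uint8_t* const page_pointer = table.pointers[vaddr >> PAGE_BITS];
    if (page_pointer != nullptr) {
        return page_pointer + (vaddr & PAGE_MASK);  // fast path: plain host memory
    }
    // The real implementation also consults page attributes (e.g. rasterizer-
    // cached pages) before giving up; this sketch just reports "unmapped".
    return nullptr;
}

int main() {
    PageTable table{};
    std::uint8_t backing[1 << PAGE_BITS]{};
    backing[0x34] = 0x7F;
    table.pointers[0x2] = backing;  // map virtual page 2 onto the backing buffer

    if (std::uint8_t* const p = GetPointer(table, 0x2034)) {
        std::cout << static_cast<int>(*p) << '\n';  // prints 127
    }
}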
--- src/core/memory.cpp | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 017033613..93cd67e39 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -195,6 +195,21 @@ struct Memory::Impl { return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); } + u8* GetPointer(const VAddr vaddr) { + u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + return page_pointer + (vaddr & PAGE_MASK); + } + + if (current_page_table->attributes[vaddr >> PAGE_BITS] == + Common::PageType::RasterizerCachedMemory) { + return GetPointerFromVMA(vaddr); + } + + LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); + return nullptr; + } + /** * Maps a region of pages as a specific type. * @@ -276,6 +291,14 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { return impl->IsValidVirtualAddress(vaddr); } +u8* Memory::GetPointer(VAddr vaddr) { + return impl->GetPointer(vaddr); +} + +const u8* Memory::GetPointer(VAddr vaddr) const { + return impl->GetPointer(vaddr); +} + void SetCurrentPageTable(Kernel::Process& process) { current_page_table = &process.VMManager().page_table; @@ -292,21 +315,6 @@ bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -u8* GetPointer(const VAddr vaddr) { - u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer) { - return page_pointer + (vaddr & PAGE_MASK); - } - - if (current_page_table->attributes[vaddr >> PAGE_BITS] == - Common::PageType::RasterizerCachedMemory) { - return GetPointerFromVMA(vaddr); - } - - LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); - return nullptr; -} - std::string ReadCString(VAddr vaddr, std::size_t max_length) { std::string string; string.reserve(max_length); -- cgit v1.2.3 From b2165c6b353be5e8117d1f9bc677bb198fa9d8cd Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 15:48:19 -0500 Subject: core/memory: Migrate over ReadCString() to the Memory class This only had one usage spot, so this is fairly straightforward to convert over. --- src/core/memory.cpp | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 93cd67e39..fb824d710 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -210,6 +210,21 @@ struct Memory::Impl { return nullptr; } + std::string ReadCString(VAddr vaddr, std::size_t max_length) { + std::string string; + string.reserve(max_length); + for (std::size_t i = 0; i < max_length; ++i) { + const char c = Read8(vaddr); + if (c == '\0') { + break; + } + string.push_back(c); + ++vaddr; + } + string.shrink_to_fit(); + return string; + } + /** * Maps a region of pages as a specific type. 
* @@ -299,6 +314,10 @@ const u8* Memory::GetPointer(VAddr vaddr) const { return impl->GetPointer(vaddr); } +std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { + return impl->ReadCString(vaddr, max_length); +} + void SetCurrentPageTable(Kernel::Process& process) { current_page_table = &process.VMManager().page_table; @@ -315,20 +334,6 @@ bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -std::string ReadCString(VAddr vaddr, std::size_t max_length) { - std::string string; - string.reserve(max_length); - for (std::size_t i = 0; i < max_length; ++i) { - char c = Read8(vaddr); - if (c == '\0') - break; - string.push_back(c); - ++vaddr; - } - string.shrink_to_fit(); - return string; -} - void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { if (vaddr == 0) { return; -- cgit v1.2.3 From 849581075a230ad0f5419bb5d5e1f9e48e6cfd8a Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 15:56:13 -0500 Subject: core/memory: Migrate over RasterizerMarkRegionCached() to the Memory class This is only used within the accelerated rasterizer in two places, so this is also a very trivial migration. --- src/core/memory.cpp | 130 +++++++++++++++++++++++++++------------------------- 1 file changed, 67 insertions(+), 63 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index fb824d710..8c3489ed3 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -225,6 +225,69 @@ struct Memory::Impl { return string; } + void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { + if (vaddr == 0) { + return; + } + + // Iterate over a contiguous CPU address space, which corresponds to the specified GPU + // address space, marking the region as un/cached. The region is marked un/cached at a + // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size + // is different). This assumes the specified GPU address region is contiguous as well. + + u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; + for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { + Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; + + if (cached) { + // Switch page type to cached if now cached + switch (page_type) { + case Common::PageType::Unmapped: + // It is not necessary for a process to have this region mapped into its address + // space, for example, a system module need not have a VRAM mapping. + break; + case Common::PageType::Memory: + page_type = Common::PageType::RasterizerCachedMemory; + current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; + break; + case Common::PageType::RasterizerCachedMemory: + // There can be more than one GPU region mapped per CPU region, so it's common + // that this area is already marked as cached. + break; + default: + UNREACHABLE(); + } + } else { + // Switch page type to uncached if now uncached + switch (page_type) { + case Common::PageType::Unmapped: + // It is not necessary for a process to have this region mapped into its address + // space, for example, a system module need not have a VRAM mapping. + break; + case Common::PageType::Memory: + // There can be more than one GPU region mapped per CPU region, so it's common + // that this area is already unmarked as cached. 
+ break; + case Common::PageType::RasterizerCachedMemory: { + u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); + if (pointer == nullptr) { + // It's possible that this function has been called while updating the + // pagetable after unmapping a VMA. In that case the underlying VMA will no + // longer exist, and we should just leave the pagetable entry blank. + page_type = Common::PageType::Unmapped; + } else { + page_type = Common::PageType::Memory; + current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; + } + break; + } + default: + UNREACHABLE(); + } + } + } + } + /** * Maps a region of pages as a specific type. * @@ -318,6 +381,10 @@ std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { return impl->ReadCString(vaddr, max_length); } +void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { + impl->RasterizerMarkRegionCached(vaddr, size, cached); +} + void SetCurrentPageTable(Kernel::Process& process) { current_page_table = &process.VMManager().page_table; @@ -334,69 +401,6 @@ bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { - if (vaddr == 0) { - return; - } - - // Iterate over a contiguous CPU address space, which corresponds to the specified GPU address - // space, marking the region as un/cached. The region is marked un/cached at a granularity of - // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This - // assumes the specified GPU address region is contiguous as well. - - u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; - for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { - Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; - - if (cached) { - // Switch page type to cached if now cached - switch (page_type) { - case Common::PageType::Unmapped: - // It is not necessary for a process to have this region mapped into its address - // space, for example, a system module need not have a VRAM mapping. - break; - case Common::PageType::Memory: - page_type = Common::PageType::RasterizerCachedMemory; - current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; - break; - case Common::PageType::RasterizerCachedMemory: - // There can be more than one GPU region mapped per CPU region, so it's common that - // this area is already marked as cached. - break; - default: - UNREACHABLE(); - } - } else { - // Switch page type to uncached if now uncached - switch (page_type) { - case Common::PageType::Unmapped: - // It is not necessary for a process to have this region mapped into its address - // space, for example, a system module need not have a VRAM mapping. - break; - case Common::PageType::Memory: - // There can be more than one GPU region mapped per CPU region, so it's common that - // this area is already unmarked as cached. - break; - case Common::PageType::RasterizerCachedMemory: { - u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); - if (pointer == nullptr) { - // It's possible that this function has been called while updating the pagetable - // after unmapping a VMA. In that case the underlying VMA will no longer exist, - // and we should just leave the pagetable entry blank. 
- page_type = Common::PageType::Unmapped; - } else { - page_type = Common::PageType::Memory; - current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; - } - break; - } - default: - UNREACHABLE(); - } - } - } -} - u8 Read8(const VAddr addr) { return Read(addr); } -- cgit v1.2.3 From 89ef3ef5752e42d0eb0bdfa23cc72d391db74216 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 16:06:49 -0500 Subject: core/memory: Migrate over ZeroBlock() and CopyBlock() to the Memory class These currently aren't used anywhere in the codebase, so these are very trivial to move over to the Memory class. --- src/core/memory.cpp | 199 +++++++++++++++++++++++++++++----------------------- 1 file changed, 110 insertions(+), 89 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 8c3489ed3..c939e980d 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -225,6 +225,99 @@ struct Memory::Impl { return string; } + void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + std::size_t remaining_size = size; + std::size_t page_index = dest_addr >> PAGE_BITS; + std::size_t page_offset = dest_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, dest_addr, size); + break; + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + + u8* dest_ptr = page_table.pointers[page_index] + page_offset; + std::memset(dest_ptr, 0, copy_amount); + break; + } + case Common::PageType::RasterizerCachedMemory: { + u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); + std::memset(host_ptr, 0, copy_amount); + break; + } + default: + UNREACHABLE(); + } + + page_index++; + page_offset = 0; + remaining_size -= copy_amount; + } + } + + void ZeroBlock(const VAddr dest_addr, const std::size_t size) { + ZeroBlock(*system.CurrentProcess(), dest_addr, size); + } + + void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, + const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + std::size_t remaining_size = size; + std::size_t page_index = src_addr >> PAGE_BITS; + std::size_t page_offset = src_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, src_addr, size); + ZeroBlock(process, dest_addr, copy_amount); + break; + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + const u8* src_ptr = page_table.pointers[page_index] + page_offset; + WriteBlock(process, dest_addr, src_ptr, copy_amount); + break; + } + case Common::PageType::RasterizerCachedMemory: { + const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + 
system.GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); + WriteBlock(process, dest_addr, host_ptr, copy_amount); + break; + } + default: + UNREACHABLE(); + } + + page_index++; + page_offset = 0; + dest_addr += static_cast(copy_amount); + src_addr += static_cast(copy_amount); + remaining_size -= copy_amount; + } + } + + void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { + return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size); + } + void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { if (vaddr == 0) { return; @@ -381,6 +474,23 @@ std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { return impl->ReadCString(vaddr, max_length); } +void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) { + impl->ZeroBlock(process, dest_addr, size); +} + +void Memory::ZeroBlock(VAddr dest_addr, std::size_t size) { + impl->ZeroBlock(dest_addr, size); +} + +void Memory::CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, + const std::size_t size) { + impl->CopyBlock(process, dest_addr, src_addr, size); +} + +void Memory::CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { + impl->CopyBlock(dest_addr, src_addr, size); +} + void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { impl->RasterizerMarkRegionCached(vaddr, size, cached); } @@ -529,93 +639,4 @@ void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t WriteBlock(*Core::System::GetInstance().CurrentProcess(), dest_addr, src_buffer, size); } -void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - std::size_t remaining_size = size; - std::size_t page_index = dest_addr >> PAGE_BITS; - std::size_t page_offset = dest_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, dest_addr, size); - break; - } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); - - u8* dest_ptr = page_table.pointers[page_index] + page_offset; - std::memset(dest_ptr, 0, copy_amount); - break; - } - case Common::PageType::RasterizerCachedMemory: { - const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; - Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); - std::memset(host_ptr, 0, copy_amount); - break; - } - default: - UNREACHABLE(); - } - - page_index++; - page_offset = 0; - remaining_size -= copy_amount; - } -} - -void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, - const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - std::size_t remaining_size = size; - std::size_t page_index = src_addr >> PAGE_BITS; - std::size_t page_offset = src_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped 
CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, src_addr, size); - ZeroBlock(process, dest_addr, copy_amount); - break; - } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); - const u8* src_ptr = page_table.pointers[page_index] + page_offset; - WriteBlock(process, dest_addr, src_ptr, copy_amount); - break; - } - case Common::PageType::RasterizerCachedMemory: { - const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; - Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); - WriteBlock(process, dest_addr, host_ptr, copy_amount); - break; - } - default: - UNREACHABLE(); - } - - page_index++; - page_offset = 0; - dest_addr += static_cast(copy_amount); - src_addr += static_cast(copy_amount); - remaining_size -= copy_amount; - } -} - -void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { - CopyBlock(*Core::System::GetInstance().CurrentProcess(), dest_addr, src_addr, size); -} - } // namespace Memory -- cgit v1.2.3 From b05bfc603689419dc515a656b9fc711d79994f13 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 16:29:34 -0500 Subject: core/memory: Migrate over Read{8, 16, 32, 64, Block} to the Memory class With all of the trivial parts of the memory interface moved over, we can get right into moving over the bits that are used. Note that this does require the use of GetInstance from the global system instance to be used within hle_ipc.cpp and the gdbstub. This is fine for the time being, as they both already rely on the global system instance in other functions. These will be removed in a change directed at both of these respectively. For now, it's sufficient, as it still accomplishes the goal of de-globalizing the memory code. 
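The Read8/16/32/64 wrappers this patch migrates all funnel into a single templated Read helper whose fast path memcpys through a host pointer. Below is a rough, self-contained sketch of that fast path; the stand-in page table and the little-endian assumption in the demo are illustrative, not the real code.

// Rough sketch of the templated fast path behind Read8/16/32/64: memcpy
// through the host pointer so unaligned guest accesses stay well-defined
// for any trivially copyable T. Stand-in page table; little-endian demo.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <type_traits>

constexpr std::uint64_t PAGE_BITS = 12;
constexpr std::uint64_t PAGE_MASK = (1ULL << PAGE_BITS) - 1;

struct PageTable {
    std::array<std::uint8_t*, 1 << 8> pointers{};
};

template <typename T>
T Read(const PageTable& table, std::uint64_t vaddr) {
    static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
    const std::uint8_t* const page_pointer = table.pointers[vaddr >> PAGE_BITS];
    T value{};
    if (page_pointer != nullptr) {
        // NOTE: like the real fast path, keep this branch free of extra logic.
        std::memcpy(&value, page_pointer + (vaddr & PAGE_MASK), sizeof(T));
    }
    // The real implementation handles Unmapped/RasterizerCachedMemory pages;
    // this sketch simply returns zero for anything not directly mapped.
    return value;
}

int main() {
    PageTable table{};
    std::uint8_t backing[1 << PAGE_BITS]{0x78, 0x56, 0x34, 0x12};
    table.pointers[0] = backing;

    std::cout << std::hex << Read<std::uint32_t>(table, 0x0) << '\n';  // 12345678
}

Going through std::memcpy rather than a reinterpret_cast keeps the guest access free of strict-aliasing and alignment problems, which is why the trivially-copyable requirement is spelled out in the function's documentation.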
--- src/core/memory.cpp | 228 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 132 insertions(+), 96 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index c939e980d..699c48107 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -59,37 +59,6 @@ u8* GetPointerFromVMA(VAddr vaddr) { return ::Memory::GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr); } -template -T Read(const VAddr vaddr) { - const u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer != nullptr) { - // NOTE: Avoid adding any extra logic to this fast-path block - T value; - std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); - return value; - } - - const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; - switch (type) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); - return 0; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); - break; - case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr{GetPointerFromVMA(vaddr)}; - Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); - T value; - std::memcpy(&value, host_ptr, sizeof(T)); - return value; - } - default: - UNREACHABLE(); - } - return {}; -} - template void Write(const VAddr vaddr, const T data) { u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; @@ -210,6 +179,22 @@ struct Memory::Impl { return nullptr; } + u8 Read8(const VAddr addr) { + return Read(addr); + } + + u16 Read16(const VAddr addr) { + return Read(addr); + } + + u32 Read32(const VAddr addr) { + return Read(addr); + } + + u64 Read64(const VAddr addr) { + return Read(addr); + } + std::string ReadCString(VAddr vaddr, std::size_t max_length) { std::string string; string.reserve(max_length); @@ -225,6 +210,55 @@ struct Memory::Impl { return string; } + void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, + const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + + std::size_t remaining_size = size; + std::size_t page_index = src_addr >> PAGE_BITS; + std::size_t page_offset = src_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, src_addr, size); + std::memset(dest_buffer, 0, copy_amount); + break; + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + + const u8* const src_ptr = page_table.pointers[page_index] + page_offset; + std::memcpy(dest_buffer, src_ptr, copy_amount); + break; + } + case Common::PageType::RasterizerCachedMemory: { + const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + system.GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); + std::memcpy(dest_buffer, host_ptr, copy_amount); + break; + } + default: + UNREACHABLE(); + } + + page_index++; + page_offset = 0; + dest_buffer = static_cast(dest_buffer) + copy_amount; + remaining_size -= copy_amount; + } + } + + void ReadBlock(const VAddr src_addr, void* dest_buffer, const 
std::size_t size) { + ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size); + } + void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { const auto& page_table = process.VMManager().page_table; std::size_t remaining_size = size; @@ -425,6 +459,48 @@ struct Memory::Impl { } } + /** + * Reads a particular data type out of memory at the given virtual address. + * + * @param vaddr The virtual address to read the data type from. + * + * @tparam T The data type to read out of memory. This type *must* be + * trivially copyable, otherwise the behavior of this function + * is undefined. + * + * @returns The instance of T read from the specified virtual address. + */ + template + T Read(const VAddr vaddr) { + const u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + // NOTE: Avoid adding any extra logic to this fast-path block + T value; + std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); + return value; + } + + const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; + switch (type) { + case Common::PageType::Unmapped: + LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); + return 0; + case Common::PageType::Memory: + ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); + break; + case Common::PageType::RasterizerCachedMemory: { + const u8* const host_ptr = GetPointerFromVMA(vaddr); + system.GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); + T value; + std::memcpy(&value, host_ptr, sizeof(T)); + return value; + } + default: + UNREACHABLE(); + } + return {}; + } + Core::System& system; }; @@ -470,10 +546,35 @@ const u8* Memory::GetPointer(VAddr vaddr) const { return impl->GetPointer(vaddr); } +u8 Memory::Read8(const VAddr addr) { + return impl->Read8(addr); +} + +u16 Memory::Read16(const VAddr addr) { + return impl->Read16(addr); +} + +u32 Memory::Read32(const VAddr addr) { + return impl->Read32(addr); +} + +u64 Memory::Read64(const VAddr addr) { + return impl->Read64(addr); +} + std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { return impl->ReadCString(vaddr, max_length); } +void Memory::ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, + const std::size_t size) { + impl->ReadBlock(process, src_addr, dest_buffer, size); +} + +void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { + impl->ReadBlock(src_addr, dest_buffer, size); +} + void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) { impl->ZeroBlock(process, dest_addr, size); } @@ -511,71 +612,6 @@ bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -u8 Read8(const VAddr addr) { - return Read(addr); -} - -u16 Read16(const VAddr addr) { - return Read(addr); -} - -u32 Read32(const VAddr addr) { - return Read(addr); -} - -u64 Read64(const VAddr addr) { - return Read(addr); -} - -void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, - const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - - std::size_t remaining_size = size; - std::size_t page_index = src_addr >> PAGE_BITS; - std::size_t page_offset = src_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = 
static_cast((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, src_addr, size); - std::memset(dest_buffer, 0, copy_amount); - break; - } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); - - const u8* src_ptr = page_table.pointers[page_index] + page_offset; - std::memcpy(dest_buffer, src_ptr, copy_amount); - break; - } - case Common::PageType::RasterizerCachedMemory: { - const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; - Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); - std::memcpy(dest_buffer, host_ptr, copy_amount); - break; - } - default: - UNREACHABLE(); - } - - page_index++; - page_offset = 0; - dest_buffer = static_cast(dest_buffer) + copy_amount; - remaining_size -= copy_amount; - } -} - -void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { - ReadBlock(*Core::System::GetInstance().CurrentProcess(), src_addr, dest_buffer, size); -} - void Write8(const VAddr addr, const u8 data) { Write(addr, data); } -- cgit v1.2.3 From e4c381b8850db96f162cfcf2cbe28b0e7c1f76f1 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 26 Nov 2019 17:39:57 -0500 Subject: core/memory: Migrate over Write{8, 16, 32, 64, Block} to the Memory class The Write functions are used slightly less than the Read functions, which make these a bit nicer to move over. The only adjustments we really need to make here are to Dynarmic's exclusive monitor instance. We need to keep a reference to the currently active memory instance to perform exclusive read/write operations. --- src/core/memory.cpp | 220 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 128 insertions(+), 92 deletions(-) (limited to 'src/core/memory.cpp') diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 699c48107..5c940a82e 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -58,35 +58,6 @@ u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { u8* GetPointerFromVMA(VAddr vaddr) { return ::Memory::GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr); } - -template -void Write(const VAddr vaddr, const T data) { - u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer != nullptr) { - // NOTE: Avoid adding any extra logic to this fast-path block - std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); - return; - } - - Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; - switch (type) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, - static_cast(data), vaddr); - return; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); - break; - case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr{GetPointerFromVMA(vaddr)}; - Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); - std::memcpy(host_ptr, &data, sizeof(T)); - break; - } - default: - UNREACHABLE(); - } -} } // Anonymous namespace // Implementation class used to keep the specifics of the memory subsystem hidden @@ -195,6 +166,22 @@ struct Memory::Impl { return Read(addr); } + void Write8(const VAddr addr, const u8 data) { + Write(addr, data); + } + + void Write16(const VAddr addr, const u16 data) 
{ + Write(addr, data); + } + + void Write32(const VAddr addr, const u32 data) { + Write(addr, data); + } + + void Write64(const VAddr addr, const u64 data) { + Write(addr, data); + } + std::string ReadCString(VAddr vaddr, std::size_t max_length) { std::string string; string.reserve(max_length); @@ -259,6 +246,53 @@ struct Memory::Impl { ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size); } + void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, + const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + std::size_t remaining_size = size; + std::size_t page_index = dest_addr >> PAGE_BITS; + std::size_t page_offset = dest_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, dest_addr, size); + break; + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + + u8* const dest_ptr = page_table.pointers[page_index] + page_offset; + std::memcpy(dest_ptr, src_buffer, copy_amount); + break; + } + case Common::PageType::RasterizerCachedMemory: { + u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); + std::memcpy(host_ptr, src_buffer, copy_amount); + break; + } + default: + UNREACHABLE(); + } + + page_index++; + page_offset = 0; + src_buffer = static_cast(src_buffer) + copy_amount; + remaining_size -= copy_amount; + } + } + + void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { + WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size); + } + void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { const auto& page_table = process.VMManager().page_table; std::size_t remaining_size = size; @@ -501,6 +535,46 @@ struct Memory::Impl { return {}; } + /** + * Writes a particular data type to memory at the given virtual address. + * + * @param vaddr The virtual address to write the data type to. + * + * @tparam T The data type to write to memory. This type *must* be + * trivially copyable, otherwise the behavior of this function + * is undefined. + * + * @returns The instance of T write to the specified virtual address. 
+ */ + template + void Write(const VAddr vaddr, const T data) { + u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + // NOTE: Avoid adding any extra logic to this fast-path block + std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); + return; + } + + const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; + switch (type) { + case Common::PageType::Unmapped: + LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, + static_cast(data), vaddr); + return; + case Common::PageType::Memory: + ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); + break; + case Common::PageType::RasterizerCachedMemory: { + u8* const host_ptr{GetPointerFromVMA(vaddr)}; + system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); + std::memcpy(host_ptr, &data, sizeof(T)); + break; + } + default: + UNREACHABLE(); + } + } + Core::System& system; }; @@ -562,6 +636,22 @@ u64 Memory::Read64(const VAddr addr) { return impl->Read64(addr); } +void Memory::Write8(VAddr addr, u8 data) { + impl->Write8(addr, data); +} + +void Memory::Write16(VAddr addr, u16 data) { + impl->Write16(addr, data); +} + +void Memory::Write32(VAddr addr, u32 data) { + impl->Write32(addr, data); +} + +void Memory::Write64(VAddr addr, u64 data) { + impl->Write64(addr, data); +} + std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { return impl->ReadCString(vaddr, max_length); } @@ -575,6 +665,15 @@ void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_ impl->ReadBlock(src_addr, dest_buffer, size); } +void Memory::WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer, + std::size_t size) { + impl->WriteBlock(process, dest_addr, src_buffer, size); +} + +void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { + impl->WriteBlock(dest_addr, src_buffer, size); +} + void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) { impl->ZeroBlock(process, dest_addr, size); } @@ -612,67 +711,4 @@ bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -void Write8(const VAddr addr, const u8 data) { - Write(addr, data); -} - -void Write16(const VAddr addr, const u16 data) { - Write(addr, data); -} - -void Write32(const VAddr addr, const u32 data) { - Write(addr, data); -} - -void Write64(const VAddr addr, const u64 data) { - Write(addr, data); -} - -void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, - const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - std::size_t remaining_size = size; - std::size_t page_index = dest_addr >> PAGE_BITS; - std::size_t page_offset = dest_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, dest_addr, size); - break; - } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); - - u8* dest_ptr = page_table.pointers[page_index] + page_offset; - std::memcpy(dest_ptr, src_buffer, copy_amount); - break; - } 
-        case Common::PageType::RasterizerCachedMemory: {
-            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
-            Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
-            std::memcpy(host_ptr, src_buffer, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
-
-        page_index++;
-        page_offset = 0;
-        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
-}
-
-void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
-    WriteBlock(*Core::System::GetInstance().CurrentProcess(), dest_addr, src_buffer, size);
-}
-
 } // namespace Memory
-- cgit v1.2.3

From 50a518be69ef871a674afd91caebdf31cbda4485 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Tue, 26 Nov 2019 18:28:44 -0500
Subject: core/memory: Migrate over GetPointerFromVMA() to the Memory class

Now that everything else is migrated over, this is essentially just code
relocation and conversion of a global accessor into a class member variable.
All that remains is to migrate over the page table.
---
 src/core/memory.cpp | 72 ++++++++++++++++++++++++++---------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

(limited to 'src/core/memory.cpp')

diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 5c940a82e..a49e971aa 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -22,42 +22,6 @@
 namespace Memory {
 namespace {
 Common::PageTable* current_page_table = nullptr;
-
-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process
- */
-u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
-    const auto& vm_manager = process.VMManager();
-
-    const auto it = vm_manager.FindVMA(vaddr);
-    DEBUG_ASSERT(vm_manager.IsValidHandle(it));
-
-    u8* direct_pointer = nullptr;
-    const auto& vma = it->second;
-    switch (vma.type) {
-    case Kernel::VMAType::AllocatedMemoryBlock:
-        direct_pointer = vma.backing_block->data() + vma.offset;
-        break;
-    case Kernel::VMAType::BackingMemory:
-        direct_pointer = vma.backing_memory;
-        break;
-    case Kernel::VMAType::Free:
-        return nullptr;
-    default:
-        UNREACHABLE();
-    }
-
-    return direct_pointer + (vaddr - vma.base);
-}
-
-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process.
- */
-u8* GetPointerFromVMA(VAddr vaddr) {
-    return ::Memory::GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr);
-}
 } // Anonymous namespace
 
 // Implementation class used to keep the specifics of the memory subsystem hidden
@@ -135,6 +99,42 @@ struct Memory::Impl {
         return IsValidVirtualAddress(*system.CurrentProcess(), vaddr);
     }
 
+    /**
+     * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+     * using a VMA from the current process
+     */
+    u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
+        const auto& vm_manager = process.VMManager();
+
+        const auto it = vm_manager.FindVMA(vaddr);
+        DEBUG_ASSERT(vm_manager.IsValidHandle(it));
+
+        u8* direct_pointer = nullptr;
+        const auto& vma = it->second;
+        switch (vma.type) {
+        case Kernel::VMAType::AllocatedMemoryBlock:
+            direct_pointer = vma.backing_block->data() + vma.offset;
+            break;
+        case Kernel::VMAType::BackingMemory:
+            direct_pointer = vma.backing_memory;
+            break;
+        case Kernel::VMAType::Free:
+            return nullptr;
+        default:
+            UNREACHABLE();
+        }
+
+        return direct_pointer + (vaddr - vma.base);
+    }
+
+    /**
+     * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+     * using a VMA from the current process.
+     */
+    u8* GetPointerFromVMA(VAddr vaddr) {
+        return GetPointerFromVMA(*system.CurrentProcess(), vaddr);
+    }
+
     u8* GetPointer(const VAddr vaddr) {
         u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
         if (page_pointer != nullptr) {
-- cgit v1.2.3

From e7e939104bb167babec7b5f7d5d8390c85f3cbf4 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Tue, 26 Nov 2019 18:34:30 -0500
Subject: core/memory: Migrate over SetCurrentPageTable() to the Memory class

Now that literally every other API function is converted over to the Memory
class, we can just move the file-local page table into the Memory
implementation class, finally getting rid of global state within the memory
code.
---
 src/core/memory.cpp | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

(limited to 'src/core/memory.cpp')

diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index a49e971aa..91bf07a92 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -20,9 +20,6 @@
 #include "video_core/gpu.h"
 
 namespace Memory {
-namespace {
-Common::PageTable* current_page_table = nullptr;
-} // Anonymous namespace
 
 // Implementation class used to keep the specifics of the memory subsystem hidden
 // from outside classes. This also allows modification to the internals of the memory
@@ -30,6 +27,17 @@ Common::PageTable* current_page_table = nullptr;
 struct Memory::Impl {
     explicit Impl(Core::System& system_) : system{system_} {}
 
+    void SetCurrentPageTable(Kernel::Process& process) {
+        current_page_table = &process.VMManager().page_table;
+
+        const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth();
+
+        system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
+        system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
+        system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
+        system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
+    }
+
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
@@ -575,12 +583,17 @@ struct Memory::Impl {
         }
     }
 
+    Common::PageTable* current_page_table = nullptr;
     Core::System& system;
 };
 
 Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
 Memory::~Memory() = default;
 
+void Memory::SetCurrentPageTable(Kernel::Process& process) {
+    impl->SetCurrentPageTable(process);
+}
+
 void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
     impl->MapMemoryRegion(page_table, base, size, target);
 }
@@ -695,18 +708,6 @@ void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
     impl->RasterizerMarkRegionCached(vaddr, size, cached);
 }
 
-void SetCurrentPageTable(Kernel::Process& process) {
-    current_page_table = &process.VMManager().page_table;
-
-    const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth();
-
-    auto& system = Core::System::GetInstance();
-    system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
-    system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
-    system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
-    system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
-}
-
 bool IsKernelVirtualAddress(const VAddr vaddr) {
     return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
 }
-- cgit v1.2.3
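For context, a minimal sketch (not part of the patches above) of what a call site looks like once the series lands: the free functions and the file-scope page table are gone, and callers reach the same functionality through the Memory instance owned by Core::System. The Memory() accessor on Core::System and the example function below are assumptions for illustration; only Memory::SetCurrentPageTable() and Memory::Write32() come from the patches themselves.

    // Hypothetical call site; assumes the relevant core headers are included and
    // that Core::System exposes a Memory() accessor (not shown in this series).
    void ExampleWrite(Core::System& system, Kernel::Process& process, VAddr addr, u32 value) {
        auto& memory = system.Memory();      // assumed accessor on Core::System
        memory.SetCurrentPageTable(process); // member function added in the final commit above
        memory.Write32(addr, value);         // member form replacing the old free Write32()
    }

Previously the same code would have called the free Memory::SetCurrentPageTable() and Memory::Write32() functions, which depended on the file-local current_page_table global removed by the last commit.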