path: root/src/core
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.cpp          1
-rw-r--r--  src/core/hle/kernel/physical_memory.h           5
-rw-r--r--  src/core/hle/kernel/process.cpp                 4
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp             37
-rw-r--r--  src/core/hle/service/nifm/nifm.cpp             20
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp    8
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h      3
-rw-r--r--  src/core/hle/service/vi/display/vi_display.cpp 27
-rw-r--r--  src/core/hle/service/vi/display/vi_display.h    9
-rw-r--r--  src/core/hle/service/vi/vi.cpp                 14
-rw-r--r--  src/core/loader/elf.cpp                         3
-rw-r--r--  src/core/loader/kip.cpp                         5
-rw-r--r--  src/core/loader/nso.cpp                        12
-rw-r--r--  src/core/memory.cpp                            36
-rw-r--r--  src/core/memory.h                              16
15 files changed, 139 insertions, 61 deletions
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
index f8c7f0efd..e825c0526 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -141,6 +141,7 @@ std::unique_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit(Common::PageTable& pag
config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
config.page_table_address_space_bits = address_space_bits;
config.silently_mirror_page_table = false;
+ config.absolute_offset_page_table = true;
// Multi-process state
config.processor_id = core_index;
diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h
index 090565310..b689e8e8b 100644
--- a/src/core/hle/kernel/physical_memory.h
+++ b/src/core/hle/kernel/physical_memory.h
@@ -14,6 +14,9 @@ namespace Kernel {
// - Second to ensure all host backing memory used is aligned to 256 bytes due
// to strict alignment restrictions on GPU memory.
-using PhysicalMemory = std::vector<u8, Common::AlignmentAllocator<u8, 256>>;
+using PhysicalMemoryVector = std::vector<u8, Common::AlignmentAllocator<u8, 256>>;
+class PhysicalMemory final : public PhysicalMemoryVector {
+ using PhysicalMemoryVector::PhysicalMemoryVector;
+};
} // namespace Kernel
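Note: the wrapper class keeps the full std::vector interface through the inherited constructors while turning kernel-backed memory into a distinct type, which lets later code (for example the Memory::MapMemoryRegion overload added further down in this change) accept PhysicalMemory explicitly rather than a raw byte pointer. A small sketch of how existing call sites keep working, assuming the usual u8 alias from common_types.h:

    // PhysicalMemory still behaves like the aligned vector it derives from:
    Kernel::PhysicalMemory block(0x1000);   // 4 KiB, zero-filled, 256-byte aligned backing
    block.resize(0x2000);                   // the std::vector interface is inherited
    u8* const host_ptr = block.data();      // data() remains 256-byte aligned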
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 12ea4ebe3..b9035a0be 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -317,6 +317,8 @@ void Process::FreeTLSRegion(VAddr tls_address) {
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
+ code_memory_size += module_.memory.size();
+
const auto memory = std::make_shared<PhysicalMemory>(std::move(module_.memory));
const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
@@ -332,8 +334,6 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
-
- code_memory_size += module_.memory.size();
}
Process::Process(Core::System& system)
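Note: the accumulation of code_memory_size is hoisted above the std::move on the next line. Once module_.memory has been moved into the shared PhysicalMemory, querying its size() reads a moved-from vector (valid but unspecified, in practice empty), so the old placement under-counted the mapped code size. A minimal sketch of the pitfall with a plain byte vector:

    std::vector<u8> image(0x4000);
    const std::size_t correct_size = image.size();                        // sample before moving
    const auto backing = std::make_shared<std::vector<u8>>(std::move(image));
    // image.size() here is no longer 0x4000; the old code accumulated this value too late.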
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index a9a20ef76..0b3500fce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
+#include <cstring>
#include <iterator>
#include <utility>
#include "common/alignment.h"
@@ -269,18 +270,9 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
// If necessary, expand backing vector to cover new heap extents in
// the case of allocating. Otherwise, shrink the backing memory,
// if a smaller heap has been requested.
- const u64 old_heap_size = GetCurrentHeapSize();
- if (size > old_heap_size) {
- const u64 alloc_size = size - old_heap_size;
-
- heap_memory->insert(heap_memory->end(), alloc_size, 0);
- RefreshMemoryBlockMappings(heap_memory.get());
- } else if (size < old_heap_size) {
- heap_memory->resize(size);
- heap_memory->shrink_to_fit();
-
- RefreshMemoryBlockMappings(heap_memory.get());
- }
+ heap_memory->resize(size);
+ heap_memory->shrink_to_fit();
+ RefreshMemoryBlockMappings(heap_memory.get());
heap_end = heap_region_base + size;
ASSERT(GetCurrentHeapSize() == heap_memory->size());
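Note: the old grow/shrink branching is collapsed because std::vector::resize already covers both directions: it value-initializes (zero-fills) new bytes when growing and discards trailing bytes when shrinking. shrink_to_fit is a non-binding request to drop spare capacity, and since either path may reallocate the backing storage, RefreshMemoryBlockMappings is now called unconditionally. A short illustration:

    std::vector<u8> heap(0x1000);   // 4 KiB of zeros
    heap.resize(0x2000);            // grows; bytes [0x1000, 0x2000) are zero-filled
    heap.resize(0x800);             // shrinks; size is now 0x800
    heap.shrink_to_fit();           // may release capacity (and may reallocate)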
@@ -752,24 +744,20 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
// Always merge allocated memory blocks, even when they don't share the same backing block.
if (left.type == VMAType::AllocatedMemoryBlock &&
(left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
- const auto right_begin = right.backing_block->begin() + right.offset;
- const auto right_end = right_begin + right.size;
// Check if we can save work.
if (left.offset == 0 && left.size == left.backing_block->size()) {
// Fast case: left is an entire backing block.
- left.backing_block->insert(left.backing_block->end(), right_begin, right_end);
+ left.backing_block->resize(left.size + right.size);
+ std::memcpy(left.backing_block->data() + left.size,
+ right.backing_block->data() + right.offset, right.size);
} else {
// Slow case: make a new memory block for left and right.
- const auto left_begin = left.backing_block->begin() + left.offset;
- const auto left_end = left_begin + left.size;
- const auto left_size = static_cast<std::size_t>(std::distance(left_begin, left_end));
- const auto right_size = static_cast<std::size_t>(std::distance(right_begin, right_end));
-
auto new_memory = std::make_shared<PhysicalMemory>();
- new_memory->reserve(left_size + right_size);
- new_memory->insert(new_memory->end(), left_begin, left_end);
- new_memory->insert(new_memory->end(), right_begin, right_end);
+ new_memory->resize(left.size + right.size);
+ std::memcpy(new_memory->data(), left.backing_block->data() + left.offset, left.size);
+ std::memcpy(new_memory->data() + left.size, right.backing_block->data() + right.offset,
+ right.size);
left.backing_block = std::move(new_memory);
left.offset = 0;
@@ -792,8 +780,7 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
memory.UnmapRegion(page_table, vma.base, vma.size);
break;
case VMAType::AllocatedMemoryBlock:
- memory.MapMemoryRegion(page_table, vma.base, vma.size,
- vma.backing_block->data() + vma.offset);
+ memory.MapMemoryRegion(page_table, vma.base, vma.size, *vma.backing_block, vma.offset);
break;
case VMAType::BackingMemory:
memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
diff --git a/src/core/hle/service/nifm/nifm.cpp b/src/core/hle/service/nifm/nifm.cpp
index 2e53b3221..767158444 100644
--- a/src/core/hle/service/nifm/nifm.cpp
+++ b/src/core/hle/service/nifm/nifm.cpp
@@ -9,6 +9,7 @@
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nifm/nifm.h"
#include "core/hle/service/service.h"
+#include "core/settings.h"
namespace Service::NIFM {
@@ -86,7 +87,12 @@ private:
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.PushEnum(RequestState::Connected);
+
+ if (Settings::values.bcat_backend == "none") {
+ rb.PushEnum(RequestState::NotSubmitted);
+ } else {
+ rb.PushEnum(RequestState::Connected);
+ }
}
void GetResult(Kernel::HLERequestContext& ctx) {
@@ -194,14 +200,22 @@ private:
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.Push<u8>(1);
+ if (Settings::values.bcat_backend == "none") {
+ rb.Push<u8>(0);
+ } else {
+ rb.Push<u8>(1);
+ }
}
void IsAnyInternetRequestAccepted(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.Push<u8>(1);
+ if (Settings::values.bcat_backend == "none") {
+ rb.Push<u8>(0);
+ } else {
+ rb.Push<u8>(1);
+ }
}
Core::System& system;
};
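Note: all three stubs now branch on Settings::values.bcat_backend. With the backend set to "none" the service reports no connectivity (request not submitted, internet neither available nor accepted); otherwise the previous always-connected replies are kept. The repeated check amounts to one predicate; the helper below is purely illustrative and does not exist in the service:

    // Hypothetical helper equivalent to the inline checks above.
    bool ShouldReportConnected() {
        return Settings::values.bcat_backend != "none";
    }
    // e.g. rb.Push<u8>(ShouldReportConnected() ? 1 : 0);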
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 52623cf89..62752e419 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -88,6 +88,12 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
return layer_id;
}
+void NVFlinger::CloseLayer(u64 layer_id) {
+ for (auto& display : displays) {
+ display.CloseLayer(layer_id);
+ }
+}
+
std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) const {
const auto* const layer = FindLayer(display_id, layer_id);
@@ -192,7 +198,7 @@ void NVFlinger::Compose() {
const auto& igbp_buffer = buffer->get().igbp_buffer;
- const auto& gpu = system.GPU();
+ auto& gpu = system.GPU();
const auto& multi_fence = buffer->get().multi_fence;
for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
const auto& fence = multi_fence.fences[fence_id];
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index e3cc14bdc..57a21f33b 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -54,6 +54,9 @@ public:
/// If an invalid display ID is specified, then an empty optional is returned.
std::optional<u64> CreateLayer(u64 display_id);
+ /// Closes a layer on all displays for the given layer ID.
+ void CloseLayer(u64 layer_id);
+
/// Finds the buffer queue ID of the specified layer in the specified display.
///
/// If an invalid display ID or layer ID is provided, then an empty optional is returned.
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index cd18c1610..5a202ac81 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -24,11 +24,11 @@ Display::Display(u64 id, std::string name, Core::System& system) : id{id}, name{
Display::~Display() = default;
Layer& Display::GetLayer(std::size_t index) {
- return layers.at(index);
+ return *layers.at(index);
}
const Layer& Display::GetLayer(std::size_t index) const {
- return layers.at(index);
+ return *layers.at(index);
}
std::shared_ptr<Kernel::ReadableEvent> Display::GetVSyncEvent() const {
@@ -43,29 +43,38 @@ void Display::CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue) {
// TODO(Subv): Support more than 1 layer.
ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment");
- layers.emplace_back(id, buffer_queue);
+ layers.emplace_back(std::make_shared<Layer>(id, buffer_queue));
+}
+
+void Display::CloseLayer(u64 id) {
+ layers.erase(
+ std::remove_if(layers.begin(), layers.end(),
+ [id](const std::shared_ptr<Layer>& layer) { return layer->GetID() == id; }),
+ layers.end());
}
Layer* Display::FindLayer(u64 id) {
- const auto itr = std::find_if(layers.begin(), layers.end(),
- [id](const VI::Layer& layer) { return layer.GetID() == id; });
+ const auto itr =
+ std::find_if(layers.begin(), layers.end(),
+ [id](const std::shared_ptr<Layer>& layer) { return layer->GetID() == id; });
if (itr == layers.end()) {
return nullptr;
}
- return &*itr;
+ return itr->get();
}
const Layer* Display::FindLayer(u64 id) const {
- const auto itr = std::find_if(layers.begin(), layers.end(),
- [id](const VI::Layer& layer) { return layer.GetID() == id; });
+ const auto itr =
+ std::find_if(layers.begin(), layers.end(),
+ [id](const std::shared_ptr<Layer>& layer) { return layer->GetID() == id; });
if (itr == layers.end()) {
return nullptr;
}
- return &*itr;
+ return itr->get();
}
} // namespace Service::VI
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 8bb966a85..a3855d8cd 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -4,6 +4,7 @@
#pragma once
+#include <memory>
#include <string>
#include <vector>
@@ -69,6 +70,12 @@ public:
///
void CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue);
+ /// Closes and removes a layer from this display with the given ID.
+ ///
+ /// @param id The ID assigned to the layer to close.
+ ///
+ void CloseLayer(u64 id);
+
/// Attempts to find a layer with the given ID.
///
/// @param id The layer ID.
@@ -91,7 +98,7 @@ private:
u64 id;
std::string name;
- std::vector<Layer> layers;
+ std::vector<std::shared_ptr<Layer>> layers;
Kernel::EventPair vsync_event;
};
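Note: storing layers as std::shared_ptr<Layer> gives each Layer a stable address that survives reallocation of the layers vector, and lets CloseLayer drop an entry with the erase-remove idiom without moving or copying Layer objects themselves. A small sketch of the pointer-stability property, assuming a NVFlinger::BufferQueue& named buffer_queue is in scope:

    std::vector<std::shared_ptr<Layer>> layers;
    layers.emplace_back(std::make_shared<Layer>(1, buffer_queue));
    Layer* const raw = layers.front().get();
    layers.reserve(layers.capacity() + 16);  // forces the vector to reallocate
    // raw still points at the same Layer; with std::vector<Layer> it would now dangle.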
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 651c89dc0..519da74e0 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -1066,6 +1066,18 @@ private:
rb.Push<u64>(ctx.WriteBuffer(native_window.Serialize()));
}
+ void CloseLayer(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto layer_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_VI, "called. layer_id=0x{:016X}", layer_id);
+
+ nv_flinger->CloseLayer(layer_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+ }
+
void CreateStrayLayer(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const u32 flags = rp.Pop<u32>();
@@ -1178,7 +1190,7 @@ IApplicationDisplayService::IApplicationDisplayService(
{1101, &IApplicationDisplayService::SetDisplayEnabled, "SetDisplayEnabled"},
{1102, &IApplicationDisplayService::GetDisplayResolution, "GetDisplayResolution"},
{2020, &IApplicationDisplayService::OpenLayer, "OpenLayer"},
- {2021, nullptr, "CloseLayer"},
+ {2021, &IApplicationDisplayService::CloseLayer, "CloseLayer"},
{2030, &IApplicationDisplayService::CreateStrayLayer, "CreateStrayLayer"},
{2031, &IApplicationDisplayService::DestroyStrayLayer, "DestroyStrayLayer"},
{2101, &IApplicationDisplayService::SetLayerScalingMode, "SetLayerScalingMode"},
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index f1795fdd6..8908e5328 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -335,7 +335,8 @@ Kernel::CodeSet ElfReader::LoadInto(VAddr vaddr) {
codeset_segment->addr = segment_addr;
codeset_segment->size = aligned_size;
- memcpy(&program_image[current_image_position], GetSegmentPtr(i), p->p_filesz);
+ std::memcpy(program_image.data() + current_image_position, GetSegmentPtr(i),
+ p->p_filesz);
current_image_position += aligned_size;
}
}
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index 474b55cb1..092103abe 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <cstring>
#include "core/file_sys/kernel_executable.h"
#include "core/file_sys/program_metadata.h"
#include "core/gdbstub/gdbstub.h"
@@ -76,8 +77,8 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::Process& process) {
segment.addr = offset;
segment.offset = offset;
segment.size = PageAlignSize(static_cast<u32>(data.size()));
- program_image.resize(offset);
- program_image.insert(program_image.end(), data.begin(), data.end());
+ program_image.resize(offset + data.size());
+ std::memcpy(program_image.data() + offset, data.data(), data.size());
};
load_segment(codeset.CodeSegment(), kip->GetTextSection(), kip->GetTextOffset());
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index f629892ae..515c5accb 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <cinttypes>
+#include <cstring>
#include <vector>
#include "common/common_funcs.h"
@@ -96,8 +97,9 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
if (nso_header.IsSegmentCompressed(i)) {
data = DecompressSegment(data, nso_header.segments[i]);
}
- program_image.resize(nso_header.segments[i].location);
- program_image.insert(program_image.end(), data.begin(), data.end());
+ program_image.resize(nso_header.segments[i].location + data.size());
+ std::memcpy(program_image.data() + nso_header.segments[i].location, data.data(),
+ data.size());
codeset.segments[i].addr = nso_header.segments[i].location;
codeset.segments[i].offset = nso_header.segments[i].location;
codeset.segments[i].size = PageAlignSize(static_cast<u32>(data.size()));
@@ -139,12 +141,12 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
std::vector<u8> pi_header;
pi_header.insert(pi_header.begin(), reinterpret_cast<u8*>(&nso_header),
reinterpret_cast<u8*>(&nso_header) + sizeof(NSOHeader));
- pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.begin(),
- program_image.end());
+ pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.data(),
+ program_image.data() + program_image.size());
pi_header = pm->PatchNSO(pi_header, file.GetName());
- std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.begin());
+ std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.data());
}
// Apply cheats if they exist and the program has a valid title ID
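Note: both loaders (kip.cpp above and this nso.cpp change) replace the resize-to-offset-then-append pattern with resizing to the segment's end and copying into place. The old sequence only behaves when segments arrive in increasing offset order, and resize(offset) silently truncates anything already written beyond that offset; sizing to offset + data.size() and memcpy-ing at the offset preserves earlier data regardless of ordering. A guarded variant of the new pattern as a sketch, assuming <vector> and <cstring> are included (the grow-only check is added for illustration and is not in the loaders):

    void PlaceSegment(std::vector<u8>& image, std::size_t offset, const std::vector<u8>& data) {
        if (image.size() < offset + data.size()) {
            image.resize(offset + data.size());            // grow only, never truncate
        }
        std::memcpy(image.data() + offset, data.data(), data.size());
    }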
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 91bf07a92..f0888327f 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -14,6 +14,7 @@
#include "common/swap.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
+#include "core/hle/kernel/physical_memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
@@ -38,6 +39,11 @@ struct Memory::Impl {
system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
}
+ void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
+ Kernel::PhysicalMemory& memory, VAddr offset) {
+ MapMemoryRegion(page_table, base, size, memory.data() + offset);
+ }
+
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
@@ -146,7 +152,7 @@ struct Memory::Impl {
u8* GetPointer(const VAddr vaddr) {
u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
if (page_pointer != nullptr) {
- return page_pointer + (vaddr & PAGE_MASK);
+ return page_pointer + vaddr;
}
if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
@@ -229,7 +235,8 @@ struct Memory::Impl {
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
- const u8* const src_ptr = page_table.pointers[page_index] + page_offset;
+ const u8* const src_ptr =
+ page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
std::memcpy(dest_buffer, src_ptr, copy_amount);
break;
}
@@ -276,7 +283,8 @@ struct Memory::Impl {
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
- u8* const dest_ptr = page_table.pointers[page_index] + page_offset;
+ u8* const dest_ptr =
+ page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
std::memcpy(dest_ptr, src_buffer, copy_amount);
break;
}
@@ -322,7 +330,8 @@ struct Memory::Impl {
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
- u8* dest_ptr = page_table.pointers[page_index] + page_offset;
+ u8* dest_ptr =
+ page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
std::memset(dest_ptr, 0, copy_amount);
break;
}
@@ -368,7 +377,8 @@ struct Memory::Impl {
}
case Common::PageType::Memory: {
DEBUG_ASSERT(page_table.pointers[page_index]);
- const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+ const u8* src_ptr =
+ page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
WriteBlock(process, dest_addr, src_ptr, copy_amount);
break;
}
@@ -446,7 +456,8 @@ struct Memory::Impl {
page_type = Common::PageType::Unmapped;
} else {
page_type = Common::PageType::Memory;
- current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
+ current_page_table->pointers[vaddr >> PAGE_BITS] =
+ pointer - (vaddr & ~PAGE_MASK);
}
break;
}
@@ -493,7 +504,9 @@ struct Memory::Impl {
memory);
} else {
while (base != end) {
- page_table.pointers[base] = memory;
+ page_table.pointers[base] = memory - (base << PAGE_BITS);
+ ASSERT_MSG(page_table.pointers[base],
+ "memory mapping base yield a nullptr within the table");
base += 1;
memory += PAGE_SIZE;
@@ -518,7 +531,7 @@ struct Memory::Impl {
if (page_pointer != nullptr) {
// NOTE: Avoid adding any extra logic to this fast-path block
T value;
- std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+ std::memcpy(&value, &page_pointer[vaddr], sizeof(T));
return value;
}
@@ -559,7 +572,7 @@ struct Memory::Impl {
u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
if (page_pointer != nullptr) {
// NOTE: Avoid adding any extra logic to this fast-path block
- std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+ std::memcpy(&page_pointer[vaddr], &data, sizeof(T));
return;
}
@@ -594,6 +607,11 @@ void Memory::SetCurrentPageTable(Kernel::Process& process) {
impl->SetCurrentPageTable(process);
}
+void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
+ Kernel::PhysicalMemory& memory, VAddr offset) {
+ impl->MapMemoryRegion(page_table, base, size, memory, offset);
+}
+
void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
impl->MapMemoryRegion(page_table, base, size, target);
}
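Note: the page table now stores biased pointers. On map, each entry is the host pointer minus the page's guest base (pointer - (vaddr & ~PAGE_MASK), or memory - (base << PAGE_BITS) in the batch path), and every consumer adds the full guest address back; page_offset + (page_index << PAGE_BITS) is that same value. This is the layout that absolute_offset_page_table asks dynarmic to expect in arm_dynarmic.cpp above. A compressed sketch of both halves, with names simplified from Memory::Impl and the usual 4 KiB pages and u8/u64/VAddr aliases assumed:

    constexpr unsigned PAGE_BITS = 12;
    constexpr u64 PAGE_MASK = (u64{1} << PAGE_BITS) - 1;

    void MapPage(u8** pointers, VAddr vaddr, u8* host_page) {
        pointers[vaddr >> PAGE_BITS] = host_page - (vaddr & ~PAGE_MASK); // store biased pointer
    }

    u8* Translate(u8* const* pointers, VAddr vaddr) {
        u8* const entry = pointers[vaddr >> PAGE_BITS];
        return entry ? entry + vaddr : nullptr;                          // bias cancels, one add
    }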
diff --git a/src/core/memory.h b/src/core/memory.h
index 1428a6d60..8913a9da4 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -19,8 +19,9 @@ class System;
}
namespace Kernel {
+class PhysicalMemory;
class Process;
-}
+} // namespace Kernel
namespace Memory {
@@ -66,6 +67,19 @@ public:
void SetCurrentPageTable(Kernel::Process& process);
/**
+ * Maps a physical buffer onto a region of the emulated process address space.
+ *
+ * @param page_table The page table of the emulated process.
+ * @param base The address to start mapping at. Must be page-aligned.
+ * @param size The amount of bytes to map. Must be page-aligned.
+ * @param memory Physical buffer with the memory backing the mapping. Must be of length
+ * at least `size + offset`.
+ * @param offset The offset within the physical memory. Must be page-aligned.
+ */
+ void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size,
+ Kernel::PhysicalMemory& memory, VAddr offset);
+
+ /**
* Maps an allocated buffer onto a region of the emulated process address space.
*
* @param page_table The page table of the emulated process.