From 50a5d09d32e62bef5162d47df7b856864ac5eb21 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Thu, 13 Sep 2018 17:08:00 -0400 Subject: kernel/errors: Amend error code for ERR_NOT_FOUND This is the value returned by the kernel for svcConnectToNamedPort() if the named port cannot be found. --- src/core/hle/kernel/errors.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index ad39c8271..6c9fc9bfe 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -29,6 +29,7 @@ enum { SynchronizationCanceled = 118, TooLarge = 119, InvalidEnumValue = 120, + NoSuchEntry = 121, InvalidState = 125, ResourceLimitExceeded = 132, }; @@ -63,7 +64,7 @@ constexpr ResultCode ERR_INVALID_OBJECT_ADDR(-1); constexpr ResultCode ERR_NOT_AUTHORIZED(-1); /// Alternate code returned instead of ERR_INVALID_HANDLE in some code paths. constexpr ResultCode ERR_INVALID_HANDLE_OS(-1); -constexpr ResultCode ERR_NOT_FOUND(-1); +constexpr ResultCode ERR_NOT_FOUND(ErrorModule::Kernel, ErrCodes::NoSuchEntry); constexpr ResultCode RESULT_TIMEOUT(ErrorModule::Kernel, ErrCodes::Timeout); /// Returned when Accept() is called on a port with no sessions to be accepted. constexpr ResultCode ERR_NO_PENDING_SESSIONS(-1); -- cgit v1.2.3 From 7bd2faad9a41a04d81e5b33d454ca01d9eb650e0 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Thu, 13 Sep 2018 19:09:04 -0400 Subject: kernel/svc: Sanitize heap sizes within svcSetHeapSize() The kernel checks if the given size is a multiple of 2MB and <= to 4GB before going ahead and attempting to allocate that much memory. --- src/core/hle/kernel/errors.h | 2 ++ src/core/hle/kernel/svc.cpp | 6 ++++++ 2 files changed, 8 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index ad39c8271..2be2fad82 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -17,6 +17,7 @@ enum { // Confirmed Switch OS error codes MaxConnectionsReached = 7, + InvalidSize = 101, InvalidAddress = 102, HandleTableFull = 105, InvalidMemoryState = 106, @@ -55,6 +56,7 @@ constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS(ErrorModule::Kernel, ErrCodes::InvalidMemoryPermissions); constexpr ResultCode ERR_INVALID_HANDLE(ErrorModule::Kernel, ErrCodes::InvalidHandle); constexpr ResultCode ERR_INVALID_PROCESSOR_ID(ErrorModule::Kernel, ErrCodes::InvalidProcessorId); +constexpr ResultCode ERR_INVALID_SIZE(ErrorModule::Kernel, ErrCodes::InvalidSize); constexpr ResultCode ERR_INVALID_STATE(ErrorModule::Kernel, ErrCodes::InvalidState); constexpr ResultCode ERR_INVALID_THREAD_PRIORITY(ErrorModule::Kernel, ErrCodes::InvalidThreadPriority); diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index f500fd2e7..a3d169e46 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -39,6 +39,12 @@ namespace Kernel { /// Set the process heap to a given Size. It can both extend and shrink the heap. static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) { LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size); + + // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 4GB. 
+ if ((heap_size & 0xFFFFFFFE001FFFFF) != 0) { + return ERR_INVALID_SIZE; + } + auto& process = *Core::CurrentProcess(); CASCADE_RESULT(*heap_addr, process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite)); -- cgit v1.2.3 From 496c67fd730cd27ed1a6ce087d224bd2b736ad4b Mon Sep 17 00:00:00 2001 From: Lioncash Date: Thu, 13 Sep 2018 19:14:50 -0400 Subject: kernel/svc: Sanitize addresses and sizes within svcMapMemory() and svcUnmapMemory() The kernel checks if the addresses and given size is 4KB aligned before continuing onwards to map the memory. --- src/core/hle/kernel/svc.cpp | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index a3d169e46..3eb77812e 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -35,6 +35,11 @@ #include "core/hle/service/service.h" namespace Kernel { +namespace { +constexpr bool Is4KBAligned(VAddr address) { + return (address & 0xFFF) == 0; +} +} // Anonymous namespace /// Set the process heap to a given Size. It can both extend and shrink the heap. static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) { @@ -62,6 +67,15 @@ static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) { LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); + + if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) { + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Is4KBAligned(size)) { + return ERR_INVALID_SIZE; + } + return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size); } @@ -69,6 +83,15 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) { static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) { LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); + + if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) { + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Is4KBAligned(size)) { + return ERR_INVALID_SIZE; + } + return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size); } -- cgit v1.2.3 From accd1f17e451dfe23350055b5c1377846f3dce77 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Thu, 13 Sep 2018 20:16:43 -0400 Subject: kernel/svc: Sanitize addresses, permissions, and sizes within svcMapSharedMemory() and svcUnmapSharedMemory() Part of the checking done by the kernel is to check if the given address and size are 4KB aligned, as well as checking if the size isn't zero. It also only allows mapping shared memory as readable or read/write, but nothing else, and so we shouldn't allow mapping as anything else either. 
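For reference, the validation described above boils down to the following order of checks. This is only an illustrative sketch (the standalone helper, its name, and the numeric permission values are assumptions for the example; in the patch itself the checks are written inline at the top of MapSharedMemory(), reusing Is4KBAligned() and the ERR_* constants):

    #include <cstdint>

    // Assumed flag values for the sketch: Read = bit 0, Write = bit 1 (ReadWrite = 3).
    enum class MemoryPermission : uint32_t { Read = 1, Write = 2, ReadWrite = 3 };

    // Hypothetical helper; returns true only for argument combinations the
    // kernel accepts for svcMapSharedMemory().
    bool IsValidSharedMemoryMapping(uint64_t addr, uint64_t size, uint32_t permissions) {
        if ((addr & 0xFFF) != 0)                  // address must sit on a 4KB page boundary
            return false;                         // -> ERR_INVALID_ADDRESS
        if (size == 0 || (size & 0xFFF) != 0)     // size must be non-zero and 4KB aligned
            return false;                         // -> ERR_INVALID_SIZE
        const auto perms = static_cast<MemoryPermission>(permissions);
        if (perms != MemoryPermission::Read && perms != MemoryPermission::ReadWrite)
            return false;                         // -> ERR_INVALID_MEMORY_PERMISSIONS
        return true;                              // only then is the handle looked up and mapped
    }
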
--- src/core/hle/kernel/svc.cpp | 42 +++++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 17 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 3eb77812e..d19182639 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -444,35 +444,43 @@ static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 s "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}", shared_memory_handle, addr, size, permissions); + if (!Is4KBAligned(addr)) { + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Is4KBAligned(size)) { + return ERR_INVALID_SIZE; + } + + const auto permissions_type = static_cast(permissions); + if (permissions_type != MemoryPermission::Read && + permissions_type != MemoryPermission::ReadWrite) { + LOG_ERROR(Kernel_SVC, "Invalid permissions=0x{:08X}", permissions); + return ERR_INVALID_MEMORY_PERMISSIONS; + } + auto& kernel = Core::System::GetInstance().Kernel(); auto shared_memory = kernel.HandleTable().Get(shared_memory_handle); if (!shared_memory) { return ERR_INVALID_HANDLE; } - MemoryPermission permissions_type = static_cast(permissions); - switch (permissions_type) { - case MemoryPermission::Read: - case MemoryPermission::Write: - case MemoryPermission::ReadWrite: - case MemoryPermission::Execute: - case MemoryPermission::ReadExecute: - case MemoryPermission::WriteExecute: - case MemoryPermission::ReadWriteExecute: - case MemoryPermission::DontCare: - return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type, - MemoryPermission::DontCare); - default: - LOG_ERROR(Kernel_SVC, "unknown permissions=0x{:08X}", permissions); - } - - return RESULT_SUCCESS; + return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type, + MemoryPermission::DontCare); } static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) { LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}", shared_memory_handle, addr, size); + if (!Is4KBAligned(addr)) { + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Is4KBAligned(size)) { + return ERR_INVALID_SIZE; + } + auto& kernel = Core::System::GetInstance().Kernel(); auto shared_memory = kernel.HandleTable().Get(shared_memory_handle); -- cgit v1.2.3 From 4f8756edd06d76af33208047a5ed9b776132d97a Mon Sep 17 00:00:00 2001 From: Lioncash Date: Thu, 13 Sep 2018 21:04:43 -0400 Subject: kernel/svc: Sanitize creation of shared memory via svcCreateSharedMemory() The kernel caps the size limit of shared memory to 8589930496 bytes (or (1GB - 512 bytes) * 8), so approximately 8GB, where every GB has a 512 byte sector taken off of it. It also ensures the shared memory is created with either read or read/write permissions for both permission types passed in, allowing the remote permissions to also be set as "don't care". 
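The cap quoted above falls straight out of the bitmask used in the diff below: 0xFFFFFFFE00000FFF keeps bits 0-11 (the 4KB alignment bits) and bits 33-63 (everything above the cap), so a size passes only when both groups are clear, i.e. it is a multiple of 4KB no larger than 2^33 - 2^12 = 0x1FFFFF000 = 8,589,930,496 bytes = (1GB - 512B) * 8. A small sketch of the arithmetic (the constant and function names are only for illustration):

    #include <cstdint>

    constexpr uint64_t kSharedMemorySizeMask = 0xFFFFFFFE00000FFF; // bits 0-11 and 33-63

    constexpr bool IsValidSharedMemorySize(uint64_t size) {
        return size != 0 && (size & kSharedMemorySizeMask) == 0;
    }

    static_assert((1ULL << 33) - (1ULL << 12) == 8589930496ULL);     // = (1GB - 512B) * 8
    static_assert(IsValidSharedMemorySize(8589930496ULL));           // largest accepted size
    static_assert(!IsValidSharedMemorySize(8589930496ULL + 0x1000)); // one page past the cap
    static_assert(!IsValidSharedMemorySize(0x1234));                 // not a multiple of 4KB
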
--- src/core/hle/kernel/svc.cpp | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index d19182639..4529002ba 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -935,12 +935,28 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size, local_permissions, remote_permissions); + // Size must be a multiple of 4KB and be less than or equal to + // approx. 8 GB (actually (1GB - 512B) * 8) + if (size == 0 || (size & 0xFFFFFFFE00000FFF) != 0) { + return ERR_INVALID_SIZE; + } + + const auto local_perms = static_cast(local_permissions); + if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) { + return ERR_INVALID_MEMORY_PERMISSIONS; + } + + const auto remote_perms = static_cast(remote_permissions); + if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite && + remote_perms != MemoryPermission::DontCare) { + return ERR_INVALID_MEMORY_PERMISSIONS; + } + auto& kernel = Core::System::GetInstance().Kernel(); auto& handle_table = kernel.HandleTable(); auto shared_mem_handle = SharedMemory::Create(kernel, handle_table.Get(KernelHandle::CurrentProcess), size, - static_cast(local_permissions), - static_cast(remote_permissions)); + local_perms, remote_perms); CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); return RESULT_SUCCESS; -- cgit v1.2.3 From 63c2e32e207d31ecadd9022e1d7cd705c9febac8 Mon Sep 17 00:00:00 2001 From: fearlessTobi Date: Sat, 15 Sep 2018 15:21:06 +0200 Subject: Port #4182 from Citra: "Prefix all size_t with std::" --- src/core/hle/kernel/address_arbiter.cpp | 25 +++++++++++++------------ src/core/hle/kernel/handle_table.cpp | 2 +- src/core/hle/kernel/handle_table.h | 2 +- src/core/hle/kernel/hle_ipc.cpp | 27 ++++++++++++++------------- src/core/hle/kernel/hle_ipc.h | 20 ++++++++++---------- src/core/hle/kernel/process.cpp | 6 +++--- src/core/hle/kernel/process.h | 4 ++-- src/core/hle/kernel/shared_memory.h | 2 +- src/core/hle/kernel/svc.cpp | 27 ++++++++++++++------------- src/core/hle/kernel/thread.cpp | 2 +- src/core/hle/kernel/thread.h | 2 +- src/core/hle/kernel/vm_manager.cpp | 2 +- src/core/hle/kernel/vm_manager.h | 4 ++-- src/core/hle/kernel/wait_object.cpp | 2 +- 14 files changed, 65 insertions(+), 62 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 6657accd5..93577591f 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) { // Gets the threads waiting on an address. 
static std::vector> GetThreadsWaitingOnAddress(VAddr address) { - const auto RetrieveWaitingThreads = - [](size_t core_index, std::vector>& waiting_threads, VAddr arb_addr) { - const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); - auto& thread_list = scheduler->GetThreadList(); - - for (auto& thread : thread_list) { - if (thread->arb_wait_address == arb_addr) - waiting_threads.push_back(thread); - } - }; + const auto RetrieveWaitingThreads = [](std::size_t core_index, + std::vector>& waiting_threads, + VAddr arb_addr) { + const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); + auto& thread_list = scheduler->GetThreadList(); + + for (auto& thread : thread_list) { + if (thread->arb_wait_address == arb_addr) + waiting_threads.push_back(thread); + } + }; // Retrieve all threads that are waiting for this address. std::vector> threads; @@ -66,12 +67,12 @@ static std::vector> GetThreadsWaitingOnAddress(VAddr address) static void WakeThreads(std::vector>& waiting_threads, s32 num_to_wake) { // Only process up to 'target' threads, unless 'target' is <= 0, in which case process // them all. - size_t last = waiting_threads.size(); + std::size_t last = waiting_threads.size(); if (num_to_wake > 0) last = num_to_wake; // Signal the waiting threads. - for (size_t i = 0; i < last; i++) { + for (std::size_t i = 0; i < last; i++) { ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb); waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS); waiting_threads[i]->arb_wait_address = 0; diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index 3a079b9a9..5ee5c05e3 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) { } bool HandleTable::IsValid(Handle handle) const { - size_t slot = GetSlot(handle); + std::size_t slot = GetSlot(handle); u16 generation = GetGeneration(handle); return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation; diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h index cac928adb..9e2f33e8a 100644 --- a/src/core/hle/kernel/handle_table.h +++ b/src/core/hle/kernel/handle_table.h @@ -93,7 +93,7 @@ private: * This is the maximum limit of handles allowed per process in CTR-OS. It can be further * reduced by ExHeader values, but this is not emulated here. */ - static const size_t MAX_COUNT = 4096; + static const std::size_t MAX_COUNT = 4096; static u16 GetSlot(Handle handle) { return handle >> 15; diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 7264be906..72fb9d250 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -42,9 +42,9 @@ SharedPtr HLERequestContext::SleepClientThread(SharedPtr thread, Kernel::SharedPtr event) { // Put the client thread to sleep until the wait event is signaled or the timeout expires. 
- thread->wakeup_callback = - [context = *this, callback](ThreadWakeupReason reason, SharedPtr thread, - SharedPtr object, size_t index) mutable -> bool { + thread->wakeup_callback = [context = *this, callback]( + ThreadWakeupReason reason, SharedPtr thread, + SharedPtr object, std::size_t index) mutable -> bool { ASSERT(thread->status == ThreadStatus::WaitHLEEvent); callback(thread, context, reason); context.WriteToOutgoingCommandBuffer(*thread); @@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb } // The data_size already includes the payload header, the padding and the domain header. - size_t size = data_payload_offset + command_header->data_size - - sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; + std::size_t size = data_payload_offset + command_header->data_size - + sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; if (domain_message_header) size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32); std::copy_n(src_cmdbuf, size, cmd_buf.begin()); @@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) ParseCommandBuffer(cmd_buf.data(), false); // The data_size already includes the payload header, the padding and the domain header. - size_t size = data_payload_offset + command_header->data_size - - sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; + std::size_t size = data_payload_offset + command_header->data_size - + sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; if (domain_message_header) size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32); @@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) "Handle descriptor bit set but no handles to translate"); // We write the translated handles at a specific offset in the command buffer, this space // was already reserved when writing the header. - size_t current_offset = + std::size_t current_offset = (sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32); ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented"); @@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) ASSERT(domain_message_header->num_objects == domain_objects.size()); // Write the domain objects to the command buffer, these go after the raw untranslated data. // TODO(Subv): This completely ignores C buffers. 
- size_t domain_offset = size - domain_message_header->num_objects; + std::size_t domain_offset = size - domain_message_header->num_objects; auto& request_handlers = server_session->domain_request_handlers; for (auto& object : domain_objects) { @@ -291,14 +291,15 @@ std::vector HLERequestContext::ReadBuffer(int buffer_index) const { return buffer; } -size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const { +std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, + int buffer_index) const { if (size == 0) { LOG_WARNING(Core, "skip empty buffer write"); return 0; } const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()}; - const size_t buffer_size{GetWriteBufferSize(buffer_index)}; + const std::size_t buffer_size{GetWriteBufferSize(buffer_index)}; if (size > buffer_size) { LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size, buffer_size); @@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe return size; } -size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { +std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()}; return is_buffer_a ? BufferDescriptorA()[buffer_index].Size() : BufferDescriptorX()[buffer_index].Size(); } -size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const { +std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const { const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()}; return is_buffer_b ? BufferDescriptorB()[buffer_index].Size() : BufferDescriptorC()[buffer_index].Size(); diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index f0d07f1b6..894479ee0 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -170,7 +170,7 @@ public: std::vector ReadBuffer(int buffer_index = 0) const; /// Helper function to write a buffer using the appropriate buffer descriptor - size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const; + std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const; /* Helper function to write a buffer using the appropriate buffer descriptor * @@ -182,7 +182,7 @@ public: */ template >> - size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const { + std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const { using ContiguousType = typename ContiguousContainer::value_type; static_assert(std::is_trivially_copyable_v, @@ -193,19 +193,19 @@ public: } /// Helper function to get the size of the input buffer - size_t GetReadBufferSize(int buffer_index = 0) const; + std::size_t GetReadBufferSize(int buffer_index = 0) const; /// Helper function to get the size of the output buffer - size_t GetWriteBufferSize(int buffer_index = 0) const; + std::size_t GetWriteBufferSize(int buffer_index = 0) const; template - SharedPtr GetCopyObject(size_t index) { + SharedPtr GetCopyObject(std::size_t index) { ASSERT(index < copy_objects.size()); return DynamicObjectCast(copy_objects[index]); } template - SharedPtr GetMoveObject(size_t index) { + SharedPtr GetMoveObject(std::size_t index) { ASSERT(index < move_objects.size()); return DynamicObjectCast(move_objects[index]); } @@ -223,7 +223,7 @@ public: } template - std::shared_ptr 
GetDomainRequestHandler(size_t index) const { + std::shared_ptr GetDomainRequestHandler(std::size_t index) const { return std::static_pointer_cast(domain_request_handlers[index]); } @@ -240,15 +240,15 @@ public: domain_objects.clear(); } - size_t NumMoveObjects() const { + std::size_t NumMoveObjects() const { return move_objects.size(); } - size_t NumCopyObjects() const { + std::size_t NumCopyObjects() const { return copy_objects.size(); } - size_t NumDomainObjects() const { + std::size_t NumDomainObjects() const { return domain_objects.size(); } diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index b025e323f..7a272d031 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -40,8 +40,8 @@ SharedPtr Process::Create(KernelCore& kernel, std::string&& name) { return process; } -void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) { - for (size_t i = 0; i < len; ++i) { +void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) { + for (std::size_t i = 0; i < len; ++i) { u32 descriptor = kernel_caps[i]; u32 type = descriptor >> 20; @@ -211,7 +211,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) { "Shared memory exceeds bounds of mapped block"); const std::shared_ptr>& backing_block = vma->second.backing_block; - size_t backing_block_offset = vma->second.offset + vma_offset; + std::size_t backing_block_offset = vma->second.offset + vma_offset; CASCADE_RESULT(auto new_vma, vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size, diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 1587d40c1..81538f70c 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -59,7 +59,7 @@ class ResourceLimit; struct CodeSet final : public Object { struct Segment { - size_t offset = 0; + std::size_t offset = 0; VAddr addr = 0; u32 size = 0; }; @@ -164,7 +164,7 @@ public: * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them * to this process. */ - void ParseKernelCaps(const u32* kernel_caps, size_t len); + void ParseKernelCaps(const u32* kernel_caps, std::size_t len); /** * Applies address space changes and launches the process main thread. diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index 2c729afe3..2c06bb7ce 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -119,7 +119,7 @@ public: /// Backing memory for this shared memory block. std::shared_ptr> backing_block; /// Offset into the backing block for this shared memory. - size_t backing_block_offset; + std::size_t backing_block_offset; /// Size of the memory block. Page-aligned. u64 size; /// Permission restrictions applied to the process which created the block. 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index f500fd2e7..a5aaa089d 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -146,7 +146,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) { /// Default thread wakeup callback for WaitSynchronization static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr thread, - SharedPtr object, size_t index) { + SharedPtr object, std::size_t index) { ASSERT(thread->status == ThreadStatus::WaitSynchAny); if (reason == ThreadWakeupReason::Timeout) { @@ -647,16 +647,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", condition_variable_addr, target); - auto RetrieveWaitingThreads = - [](size_t core_index, std::vector>& waiting_threads, VAddr condvar_addr) { - const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); - auto& thread_list = scheduler->GetThreadList(); + auto RetrieveWaitingThreads = [](std::size_t core_index, + std::vector>& waiting_threads, + VAddr condvar_addr) { + const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); + auto& thread_list = scheduler->GetThreadList(); - for (auto& thread : thread_list) { - if (thread->condvar_wait_address == condvar_addr) - waiting_threads.push_back(thread); - } - }; + for (auto& thread : thread_list) { + if (thread->condvar_wait_address == condvar_addr) + waiting_threads.push_back(thread); + } + }; // Retrieve a list of all threads that are waiting for this condition variable. std::vector> waiting_threads; @@ -672,7 +673,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target // Only process up to 'target' threads, unless 'target' is -1, in which case process // them all. - size_t last = waiting_threads.size(); + std::size_t last = waiting_threads.size(); if (target != -1) last = target; @@ -680,12 +681,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target if (last > waiting_threads.size()) return RESULT_SUCCESS; - for (size_t index = 0; index < last; ++index) { + for (std::size_t index = 0; index < last; ++index) { auto& thread = waiting_threads[index]; ASSERT(thread->condvar_wait_address == condition_variable_addr); - size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); + std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); auto& monitor = Core::System::GetInstance().Monitor(); diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 3f12a84dc..89cd5f401 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -275,7 +275,7 @@ ResultVal> Thread::Create(KernelCore& kernel, std::string name available_slot = 0; // Use the first slot in the new page // Allocate some memory from the end of the linear heap for this region. 
- const size_t offset = thread->tls_memory->size(); + const std::size_t offset = thread->tls_memory->size(); thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0); auto& vm_manager = owner_process->vm_manager; diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index cb57ee78a..df4748942 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -254,7 +254,7 @@ public: Handle callback_handle; using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr thread, - SharedPtr object, size_t index); + SharedPtr object, std::size_t index); // Callback that will be invoked when the thread is resumed from a waiting state. If the thread // was waiting via WaitSynchronizationN then the object will be the last object that became // available. In case of a timeout, the object will be nullptr. diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 479cacb62..608cbd57b 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -86,7 +86,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const { ResultVal VMManager::MapMemoryBlock(VAddr target, std::shared_ptr> block, - size_t offset, u64 size, + std::size_t offset, u64 size, MemoryState state) { ASSERT(block != nullptr); ASSERT(offset + size <= block->size()); diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 98bd04bea..de75036c0 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -81,7 +81,7 @@ struct VirtualMemoryArea { /// Memory block backing this VMA. std::shared_ptr> backing_block = nullptr; /// Offset into the backing_memory the mapping starts from. - size_t offset = 0; + std::size_t offset = 0; // Settings for type = BackingMemory /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed. @@ -147,7 +147,7 @@ public: * @param state MemoryState tag to attach to the VMA. */ ResultVal MapMemoryBlock(VAddr target, std::shared_ptr> block, - size_t offset, u64 size, MemoryState state); + std::size_t offset, u64 size, MemoryState state); /** * Maps an unmanaged host memory pointer at a given address. 
diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp index eef00b729..b190ceb98 100644 --- a/src/core/hle/kernel/wait_object.cpp +++ b/src/core/hle/kernel/wait_object.cpp @@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr thread) { } } - size_t index = thread->GetWaitObjectIndex(this); + std::size_t index = thread->GetWaitObjectIndex(this); for (auto& object : thread->wait_objects) object->RemoveWaitingThread(thread.get()); -- cgit v1.2.3 From 54ddb37b3ccf070e474e1530ba16d8fa830630d8 Mon Sep 17 00:00:00 2001 From: Valentin Vanelslande Date: Sat, 8 Sep 2018 07:40:24 -0500 Subject: Port # #4192 from Citra: "svc: change unknown to thread in CreateThread" --- src/core/hle/kernel/svc.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index f500fd2e7..e60fecf11 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -524,7 +524,7 @@ static void ExitProcess() { /// Creates a new thread static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top, u32 priority, s32 processor_id) { - std::string name = fmt::format("unknown-{:X}", entry_point); + std::string name = fmt::format("thread-{:X}", entry_point); if (priority > THREADPRIO_LOWEST) { return ERR_INVALID_THREAD_PRIORITY; -- cgit v1.2.3 From b6867602ca5accf84b5e1f9d4895b232c9448816 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Mon, 17 Sep 2018 18:49:51 -0400 Subject: kernel/svc: Handle error cases for svcArbitrateLock() and svcArbitrateUnlock() The kernel does the equivalent of the following check before proceeding: if (address + 0x8000000000 < 0x7FFFE00000) { return ERR_INVALID_MEMORY_STATE; } which is essentially what our IsKernelVirtualAddress() function does. So we should also be checking for this. The kernel also checks if the given input addresses are 4-byte aligned, however our Mutex::TryAcquire() and Mutex::Release() functions already handle this, so we don't need to add code for this case. 
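The overflow trick in the check quoted above is worth spelling out: 0x8000000000 is 2^39, so adding it to a 64-bit address wraps any address in the kernel virtual region [2^64 - 2^39, 2^64 - 2^39 + 0x7FFFE00000) (the range IsKernelVirtualAddress() tests against) down into [0, 0x7FFFE00000), while every userland address lands at 0x8000000000 or above. The single unsigned comparison is therefore exactly the range test. A sketch of the equivalence (the constant and function names below are illustrative, not taken from the codebase):

    #include <cstdint>

    // Constants derived from the check above: 2^64 - 2^39 and 0x7FFFE00000.
    constexpr uint64_t kKernelRegionBase = 0xFFFFFF8000000000ULL;
    constexpr uint64_t kKernelRegionSize = 0x7FFFE00000ULL;

    constexpr bool OverflowForm(uint64_t address) { // the form used by the kernel
        return address + 0x8000000000ULL < 0x7FFFE00000ULL;
    }

    constexpr bool RangeForm(uint64_t address) { // what IsKernelVirtualAddress() expresses
        return address >= kKernelRegionBase && address < kKernelRegionBase + kKernelRegionSize;
    }

    static_assert(OverflowForm(kKernelRegionBase) && RangeForm(kKernelRegionBase));
    static_assert(!OverflowForm(kKernelRegionBase - 1) && !RangeForm(kKernelRegionBase - 1));
    static_assert(OverflowForm(kKernelRegionBase + kKernelRegionSize - 1));
    static_assert(!OverflowForm(kKernelRegionBase + kKernelRegionSize));
    static_assert(!OverflowForm(0) && !RangeForm(0)); // ordinary userland address
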
--- src/core/hle/kernel/svc.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index c5c1697ee..371fc439e 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -280,6 +280,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr, "requesting_current_thread_handle=0x{:08X}", holding_thread_handle, mutex_addr, requesting_thread_handle); + if (Memory::IsKernelVirtualAddress(mutex_addr)) { + return ERR_INVALID_ADDRESS_STATE; + } + auto& handle_table = Core::System::GetInstance().Kernel().HandleTable(); return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle, requesting_thread_handle); @@ -289,6 +293,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr, static ResultCode ArbitrateUnlock(VAddr mutex_addr) { LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); + if (Memory::IsKernelVirtualAddress(mutex_addr)) { + return ERR_INVALID_ADDRESS_STATE; + } + return Mutex::Release(mutex_addr); } -- cgit v1.2.3 From 71b48cb00fed9ff70b99415cdf41c2f41c69ad1e Mon Sep 17 00:00:00 2001 From: Lioncash Date: Mon, 17 Sep 2018 23:22:02 -0400 Subject: kernel/mutex: Replace ResultCode construction for invalid addresses with the named variant We already have a ResultCode constant for the case of an invalid address, so we can just use it instead of re-rolling that ResultCode type. --- src/core/hle/kernel/mutex.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 36bf0b677..51f4544be 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -62,7 +62,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho Handle requesting_thread_handle) { // The mutex address must be 4-byte aligned if ((address % sizeof(u32)) != 0) { - return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress); + return ERR_INVALID_ADDRESS; } SharedPtr holding_thread = handle_table.Get(holding_thread_handle); @@ -100,7 +100,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho ResultCode Mutex::Release(VAddr address) { // The mutex address must be 4-byte aligned if ((address % sizeof(u32)) != 0) { - return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress); + return ERR_INVALID_ADDRESS; } auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address); -- cgit v1.2.3 From b51e7e028839a5d1cb92024e327b7f8b8fb62f40 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 18 Sep 2018 02:49:40 -0400 Subject: arm_interface: Remove ARM11-isms from the CPU interface This modifies the CPU interface to more accurately match an AArch64-supporting CPU as opposed to an ARM11 one. Two of the methods don't even make sense to keep around for this interface, as Adv Simd is used, rather than the VFP in the primary execution state. This is essentially a modernization change that should have occurred from the get-go. 
--- src/core/hle/kernel/thread.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 89cd5f401..d4183d6e3 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -217,8 +217,8 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd context.cpu_registers[0] = arg; context.pc = entry_point; context.sp = stack_top; - context.cpsr = 0; - context.fpscr = 0; + context.pstate = 0; + context.fpcr = 0; } ResultVal> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point, -- cgit v1.2.3 From f85ab0a12356e733ae496d34b6767a527d10be93 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Tue, 18 Sep 2018 04:25:35 -0400 Subject: svc_wrap: Convert the PARAM macro into a function This can just be a regular function, getting rid of the need to also explicitly undef the define at the end of the file. Given FuncReturn() was already converted into a function, it's #undef can also be removed. --- src/core/hle/kernel/svc_wrap.h | 73 +++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 37 deletions(-) (limited to 'src/core/hle/kernel') diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 1eda5f879..fea9ba5ea 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -13,7 +13,9 @@ namespace Kernel { -#define PARAM(n) Core::CurrentArmInterface().GetReg(n) +static inline u64 Param(int n) { + return Core::CurrentArmInterface().GetReg(n); +} /** * HLE a function return from the current ARM userland process @@ -28,23 +30,23 @@ static inline void FuncReturn(u64 res) { template void SvcWrap() { - FuncReturn(func(PARAM(0)).raw); + FuncReturn(func(Param(0)).raw); } template void SvcWrap() { - FuncReturn(func((u32)PARAM(0)).raw); + FuncReturn(func((u32)Param(0)).raw); } template void SvcWrap() { - FuncReturn(func((u32)PARAM(0), (u32)PARAM(1)).raw); + FuncReturn(func((u32)Param(0), (u32)Param(1)).raw); } template void SvcWrap() { u32 param_1 = 0; - u32 retval = func(¶m_1, (u32)PARAM(1)).raw; + u32 retval = func(¶m_1, (u32)Param(1)).raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); } @@ -52,39 +54,39 @@ void SvcWrap() { template void SvcWrap() { u32 param_1 = 0; - u32 retval = func(¶m_1, PARAM(1)).raw; + u32 retval = func(¶m_1, Param(1)).raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); } template void SvcWrap() { - FuncReturn(func(PARAM(0), (s32)PARAM(1)).raw); + FuncReturn(func(Param(0), (s32)Param(1)).raw); } template void SvcWrap() { u64 param_1 = 0; - u32 retval = func(¶m_1, PARAM(1)).raw; + u32 retval = func(¶m_1, Param(1)).raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); } template void SvcWrap() { - FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), PARAM(1)).raw); + FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), Param(1)).raw); } template void SvcWrap() { - FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), (u32)(PARAM(1) & 0xFFFFFFFF), PARAM(2)).raw); + FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), (u32)(Param(1) & 0xFFFFFFFF), Param(2)).raw); } template void SvcWrap() { u32 param_1 = 0; u64 param_2 = 0; - ResultCode retval = func((u32)(PARAM(2) & 0xFFFFFFFF), ¶m_1, ¶m_2); + ResultCode retval = func((u32)(Param(2) & 0xFFFFFFFF), ¶m_1, ¶m_2); Core::CurrentArmInterface().SetReg(1, param_1); Core::CurrentArmInterface().SetReg(2, param_2); FuncReturn(retval.raw); @@ -93,46 +95,46 @@ void SvcWrap() 
{ template void SvcWrap() { FuncReturn( - func(PARAM(0), PARAM(1), (u32)(PARAM(3) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw); + func(Param(0), Param(1), (u32)(Param(3) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw); } template void SvcWrap() { - FuncReturn(func((u32)PARAM(0), PARAM(1), (u32)PARAM(2)).raw); + FuncReturn(func((u32)Param(0), Param(1), (u32)Param(2)).raw); } template void SvcWrap() { - FuncReturn(func(PARAM(0), PARAM(1), PARAM(2)).raw); + FuncReturn(func(Param(0), Param(1), Param(2)).raw); } template void SvcWrap() { - FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2), (u32)PARAM(3)).raw); + FuncReturn(func((u32)Param(0), Param(1), Param(2), (u32)Param(3)).raw); } template void SvcWrap() { - FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2)).raw); + FuncReturn(func((u32)Param(0), Param(1), Param(2)).raw); } template void SvcWrap() { u32 param_1 = 0; - ResultCode retval = func(¶m_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3)); + ResultCode retval = func(¶m_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (s64)Param(3)); Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval.raw); } template void SvcWrap() { - FuncReturn(func(PARAM(0), PARAM(1), (u32)PARAM(2), (s64)PARAM(3)).raw); + FuncReturn(func(Param(0), Param(1), (u32)Param(2), (s64)Param(3)).raw); } template void SvcWrap() { u64 param_1 = 0; - u32 retval = func(¶m_1, PARAM(1), PARAM(2), PARAM(3)).raw; + u32 retval = func(¶m_1, Param(1), Param(2), Param(3)).raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); } @@ -141,7 +143,7 @@ template void SvcWrap() { u32 param_1 = 0; u32 retval = - func(¶m_1, PARAM(1), PARAM(2), PARAM(3), (u32)PARAM(4), (s32)(PARAM(5) & 0xFFFFFFFF)) + func(¶m_1, Param(1), Param(2), Param(3), (u32)Param(4), (s32)(Param(5) & 0xFFFFFFFF)) .raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); @@ -151,13 +153,13 @@ template void SvcWrap() { MemoryInfo memory_info = {}; PageInfo page_info = {}; - u32 retval = func(&memory_info, &page_info, PARAM(2)).raw; + u32 retval = func(&memory_info, &page_info, Param(2)).raw; - Memory::Write64(PARAM(0), memory_info.base_address); - Memory::Write64(PARAM(0) + 8, memory_info.size); - Memory::Write32(PARAM(0) + 16, memory_info.type); - Memory::Write32(PARAM(0) + 20, memory_info.attributes); - Memory::Write32(PARAM(0) + 24, memory_info.permission); + Memory::Write64(Param(0), memory_info.base_address); + Memory::Write64(Param(0) + 8, memory_info.size); + Memory::Write32(Param(0) + 16, memory_info.type); + Memory::Write32(Param(0) + 20, memory_info.attributes); + Memory::Write32(Param(0) + 24, memory_info.permission); FuncReturn(retval); } @@ -165,7 +167,7 @@ void SvcWrap() { template void SvcWrap() { u32 param_1 = 0; - u32 retval = func(¶m_1, PARAM(1), PARAM(2), (u32)(PARAM(3) & 0xFFFFFFFF)).raw; + u32 retval = func(¶m_1, Param(1), Param(2), (u32)(Param(3) & 0xFFFFFFFF)).raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); } @@ -174,7 +176,7 @@ template void SvcWrap() { u32 param_1 = 0; u32 retval = - func(¶m_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw; + func(¶m_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw; Core::CurrentArmInterface().SetReg(1, param_1); FuncReturn(retval); } @@ -182,14 +184,14 @@ void SvcWrap() { template void SvcWrap() { FuncReturn( - func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3)) + func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF), 
(s64)Param(3)) .raw); } template void SvcWrap() { - FuncReturn(func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF), - (s32)(PARAM(3) & 0xFFFFFFFF)) + FuncReturn(func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF), + (s32)(Param(3) & 0xFFFFFFFF)) .raw); } @@ -219,20 +221,17 @@ void SvcWrap() { template void SvcWrap() { - func((s64)PARAM(0)); + func((s64)Param(0)); } template void SvcWrap() { - func(PARAM(0), PARAM(1)); + func(Param(0), Param(1)); } template void SvcWrap() { - func(PARAM(0), PARAM(1), PARAM(2)); + func(Param(0), Param(1), Param(2)); } -#undef PARAM -#undef FuncReturn - } // namespace Kernel -- cgit v1.2.3