From 04780a2b9409f008b146fd4fee2e33ddbbf24b55 Mon Sep 17 00:00:00 2001 From: pineappleEA Date: Fri, 3 Nov 2023 16:31:05 +0100 Subject: [PATCH] early-access version 3962 --- README.md | 2 +- .../java/org/yuzu/yuzu_emu/NativeLibrary.kt | 4 +- .../yuzu_emu/activities/EmulationActivity.kt | 2 + .../fragments/HomeSettingsFragment.kt | 26 +- .../org/yuzu/yuzu_emu/utils/InputHandler.kt | 2 +- .../main/java/org/yuzu/yuzu_emu/utils/Log.kt | 35 +- src/android/app/src/main/jni/CMakeLists.txt | 1 + src/android/app/src/main/jni/native.cpp | 9 +- src/android/app/src/main/jni/native_log.cpp | 31 + src/common/page_table.cpp | 30 +- src/common/page_table.h | 17 +- src/core/CMakeLists.txt | 7 +- src/core/debugger/gdbstub.cpp | 98 +- src/core/hid/emulated_controller.cpp | 14 +- src/core/hid/hid_types.h | 15 +- .../board/nintendo/nx/k_system_control.cpp | 13 +- .../board/nintendo/nx/k_system_control.h | 7 +- src/core/hle/kernel/k_capabilities.cpp | 36 +- src/core/hle/kernel/k_capabilities.h | 17 +- .../hle/kernel/k_device_address_space.cpp | 4 +- src/core/hle/kernel/k_device_address_space.h | 10 +- src/core/hle/kernel/k_memory_layout.h | 8 + src/core/hle/kernel/k_memory_manager.cpp | 12 +- src/core/hle/kernel/k_page_table.h | 542 +- src/core/hle/kernel/k_page_table_base.cpp | 5718 +++++++++++++++++ src/core/hle/kernel/k_page_table_base.h | 759 +++ src/core/hle/kernel/k_process.cpp | 18 +- src/core/hle/kernel/k_process.h | 14 +- src/core/hle/kernel/k_process_page_table.h | 480 ++ src/core/hle/kernel/k_server_session.cpp | 2 +- src/core/hle/kernel/k_system_resource.cpp | 2 +- src/core/hle/kernel/k_thread_local_page.cpp | 4 +- src/core/hle/kernel/svc/svc_memory.cpp | 6 +- .../hle/kernel/svc/svc_physical_memory.cpp | 9 +- .../hle/kernel/svc/svc_process_memory.cpp | 3 +- src/core/hle/kernel/svc/svc_query_memory.cpp | 8 +- src/core/hle/result.h | 31 + src/core/hle/service/hid/controllers/npad.cpp | 6 +- src/core/hle/service/hid/ring_lifo.h | 6 +- src/core/hle/service/ldr/ldr.cpp | 45 +- src/core/hle/service/nvnflinger/buffer_item.h | 2 +- .../nvnflinger/buffer_queue_consumer.cpp | 27 +- .../nvnflinger/buffer_queue_consumer.h | 9 +- .../service/nvnflinger/buffer_queue_core.cpp | 12 - .../service/nvnflinger/buffer_queue_core.h | 3 - .../nvnflinger/buffer_queue_producer.cpp | 19 +- .../nvnflinger/buffer_queue_producer.h | 3 +- src/core/hle/service/nvnflinger/buffer_slot.h | 2 +- .../hle/service/nvnflinger/consumer_base.cpp | 20 + .../hle/service/nvnflinger/consumer_base.h | 2 + .../nvnflinger/fb_share_buffer_manager.cpp | 2 +- .../hle/service/nvnflinger/nvnflinger.cpp | 22 +- src/core/hle/service/nvnflinger/nvnflinger.h | 2 + src/core/hle/service/nvnflinger/status.h | 2 +- .../service/nvnflinger/ui/graphic_buffer.cpp | 34 + .../service/nvnflinger/ui/graphic_buffer.h | 25 +- .../hle/service/vi/display/vi_display.cpp | 2 +- src/core/memory.cpp | 6 +- .../renderer_vulkan/vk_rasterizer.cpp | 6 +- 59 files changed, 7431 insertions(+), 822 deletions(-) create mode 100755 src/android/app/src/main/jni/native_log.cpp create mode 100755 src/core/hle/kernel/k_page_table_base.cpp create mode 100755 src/core/hle/kernel/k_page_table_base.h create mode 100755 src/core/hle/kernel/k_process_page_table.h create mode 100755 src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp diff --git a/README.md b/README.md index e57f7c409..36f08a850 100755 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ yuzu emulator early access ============= -This is the source code for early-access 3961. +This is the source code for early-access 3962. 
## Legal Notice diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/NativeLibrary.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/NativeLibrary.kt index 07f1b4842..ed8fe6c3f 100755 --- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/NativeLibrary.kt +++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/NativeLibrary.kt @@ -462,12 +462,12 @@ object NativeLibrary { } fun setEmulationActivity(emulationActivity: EmulationActivity?) { - Log.verbose("[NativeLibrary] Registering EmulationActivity.") + Log.debug("[NativeLibrary] Registering EmulationActivity.") sEmulationActivity = WeakReference(emulationActivity) } fun clearEmulationActivity() { - Log.verbose("[NativeLibrary] Unregistering EmulationActivity.") + Log.debug("[NativeLibrary] Unregistering EmulationActivity.") sEmulationActivity.clear() } diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/activities/EmulationActivity.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/activities/EmulationActivity.kt index f37875ffe..da98d4ef5 100755 --- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/activities/EmulationActivity.kt +++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/activities/EmulationActivity.kt @@ -47,6 +47,7 @@ import org.yuzu.yuzu_emu.model.EmulationViewModel import org.yuzu.yuzu_emu.model.Game import org.yuzu.yuzu_emu.utils.ForegroundService import org.yuzu.yuzu_emu.utils.InputHandler +import org.yuzu.yuzu_emu.utils.Log import org.yuzu.yuzu_emu.utils.MemoryUtil import org.yuzu.yuzu_emu.utils.NfcReader import org.yuzu.yuzu_emu.utils.ThemeHelper @@ -80,6 +81,7 @@ class EmulationActivity : AppCompatActivity(), SensorEventListener { } override fun onCreate(savedInstanceState: Bundle?) { + Log.gameLaunched = true ThemeHelper.setTheme(this) super.onCreate(savedInstanceState) diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/HomeSettingsFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/HomeSettingsFragment.kt index ed2a5cb55..4720daec4 100755 --- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/HomeSettingsFragment.kt +++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/HomeSettingsFragment.kt @@ -42,6 +42,7 @@ import org.yuzu.yuzu_emu.model.HomeViewModel import org.yuzu.yuzu_emu.ui.main.MainActivity import org.yuzu.yuzu_emu.utils.FileUtil import org.yuzu.yuzu_emu.utils.GpuDriverHelper +import org.yuzu.yuzu_emu.utils.Log class HomeSettingsFragment : Fragment() { private var _binding: FragmentHomeSettingsBinding? = null @@ -312,19 +313,32 @@ class HomeSettingsFragment : Fragment() { } } + // Share the current log if we just returned from a game but share the old log + // if we just started the app and the old log exists. private fun shareLog() { - val file = DocumentFile.fromSingleUri( + val currentLog = DocumentFile.fromSingleUri( mainActivity, DocumentsContract.buildDocumentUri( DocumentProvider.AUTHORITY, "${DocumentProvider.ROOT_ID}/log/yuzu_log.txt" ) )!! - if (file.exists()) { - val intent = Intent(Intent.ACTION_SEND) - .setDataAndType(file.uri, FileUtil.TEXT_PLAIN) - .addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION) - .putExtra(Intent.EXTRA_STREAM, file.uri) + val oldLog = DocumentFile.fromSingleUri( + mainActivity, + DocumentsContract.buildDocumentUri( + DocumentProvider.AUTHORITY, + "${DocumentProvider.ROOT_ID}/log/yuzu_log.txt.old.txt" + ) + )!! 
+ + val intent = Intent(Intent.ACTION_SEND) + .setDataAndType(currentLog.uri, FileUtil.TEXT_PLAIN) + .addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION) + if (!Log.gameLaunched && oldLog.exists()) { + intent.putExtra(Intent.EXTRA_STREAM, oldLog.uri) + startActivity(Intent.createChooser(intent, getText(R.string.share_log))) + } else if (currentLog.exists()) { + intent.putExtra(Intent.EXTRA_STREAM, currentLog.uri) startActivity(Intent.createChooser(intent, getText(R.string.share_log))) } else { Toast.makeText( diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/InputHandler.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/InputHandler.kt index fc6a8b5cb..47bde5081 100755 --- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/InputHandler.kt +++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/InputHandler.kt @@ -68,7 +68,7 @@ object InputHandler { private fun getPlayerNumber(index: Int, deviceId: Int = -1): Int { var deviceIndex = index if (deviceId != -1) { - deviceIndex = controllerIds[deviceId]!! + deviceIndex = controllerIds[deviceId] ?: 0 } // TODO: Joycons are handled as different controllers. Find a way to merge them. diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/Log.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/Log.kt index a193e82a4..fb682c344 100755 --- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/Log.kt +++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/utils/Log.kt @@ -3,38 +3,17 @@ package org.yuzu.yuzu_emu.utils -import android.util.Log -import org.yuzu.yuzu_emu.BuildConfig - -/** - * Contains methods that call through to [android.util.Log], but - * with the same TAG automatically provided. Also no-ops VERBOSE and DEBUG log - * levels in release builds. - */ object Log { - private const val TAG = "Yuzu Frontend" + // Tracks whether we should share the old log or the current log + var gameLaunched = false - fun verbose(message: String) { - if (BuildConfig.DEBUG) { - Log.v(TAG, message) - } - } + external fun debug(message: String) - fun debug(message: String) { - if (BuildConfig.DEBUG) { - Log.d(TAG, message) - } - } + external fun warning(message: String) - fun info(message: String) { - Log.i(TAG, message) - } + external fun info(message: String) - fun warning(message: String) { - Log.w(TAG, message) - } + external fun error(message: String) - fun error(message: String) { - Log.e(TAG, message) - } + external fun critical(message: String) } diff --git a/src/android/app/src/main/jni/CMakeLists.txt b/src/android/app/src/main/jni/CMakeLists.txt index 1c36661f5..88a570f68 100755 --- a/src/android/app/src/main/jni/CMakeLists.txt +++ b/src/android/app/src/main/jni/CMakeLists.txt @@ -18,6 +18,7 @@ add_library(yuzu-android SHARED native_config.cpp uisettings.cpp game_metadata.cpp + native_log.cpp ) set_property(TARGET yuzu-android PROPERTY IMPORTED_LOCATION ${FFmpeg_LIBRARY_DIR}) diff --git a/src/android/app/src/main/jni/native.cpp b/src/android/app/src/main/jni/native.cpp index 0e458df38..294e41045 100755 --- a/src/android/app/src/main/jni/native.cpp +++ b/src/android/app/src/main/jni/native.cpp @@ -248,6 +248,11 @@ void EmulationSession::ConfigureFilesystemProvider(const std::string& filepath) } void EmulationSession::InitializeSystem() { + // Initialize logging system + Common::Log::Initialize(); + Common::Log::SetColorConsoleBackendEnabled(true); + Common::Log::Start(); + // Initialize filesystem. 
m_system.SetFilesystem(m_vfs); m_system.GetUserChannel().clear(); @@ -462,10 +467,6 @@ void EmulationSession::OnEmulationStopped(Core::SystemResultStatus result) { } static Core::SystemResultStatus RunEmulation(const std::string& filepath) { - Common::Log::Initialize(); - Common::Log::SetColorConsoleBackendEnabled(true); - Common::Log::Start(); - MicroProfileOnThreadCreate("EmuThread"); SCOPE_EXIT({ MicroProfileShutdown(); }); diff --git a/src/android/app/src/main/jni/native_log.cpp b/src/android/app/src/main/jni/native_log.cpp new file mode 100755 index 000000000..33d691dc8 --- /dev/null +++ b/src/android/app/src/main/jni/native_log.cpp @@ -0,0 +1,31 @@ +// SPDX-FileCopyrightText: 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include + +#include "android_common/android_common.h" + +extern "C" { + +void Java_org_yuzu_yuzu_1emu_utils_Log_debug(JNIEnv* env, jobject obj, jstring jmessage) { + LOG_DEBUG(Frontend, "{}", GetJString(env, jmessage)); +} + +void Java_org_yuzu_yuzu_1emu_utils_Log_warning(JNIEnv* env, jobject obj, jstring jmessage) { + LOG_WARNING(Frontend, "{}", GetJString(env, jmessage)); +} + +void Java_org_yuzu_yuzu_1emu_utils_Log_info(JNIEnv* env, jobject obj, jstring jmessage) { + LOG_INFO(Frontend, "{}", GetJString(env, jmessage)); +} + +void Java_org_yuzu_yuzu_1emu_utils_Log_error(JNIEnv* env, jobject obj, jstring jmessage) { + LOG_ERROR(Frontend, "{}", GetJString(env, jmessage)); +} + +void Java_org_yuzu_yuzu_1emu_utils_Log_critical(JNIEnv* env, jobject obj, jstring jmessage) { + LOG_CRITICAL(Frontend, "{}", GetJString(env, jmessage)); +} + +} // extern "C" diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp index 01fcdc5c0..b86c78c38 100755 --- a/src/common/page_table.cpp +++ b/src/common/page_table.cpp @@ -9,12 +9,12 @@ PageTable::PageTable() = default; PageTable::~PageTable() noexcept = default; -bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context, - u64 address) const { +bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, + Common::ProcessAddress address) const { // Setup invalid defaults. - out_entry.phys_addr = 0; - out_entry.block_size = page_size; - out_context.next_page = 0; + out_entry->phys_addr = 0; + out_entry->block_size = page_size; + out_context->next_page = 0; // Validate that we can read the actual entry. const auto page = address / page_size; @@ -29,20 +29,20 @@ bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_ } // Populate the results. - out_entry.phys_addr = phys_addr + address; - out_context.next_page = page + 1; - out_context.next_offset = address + page_size; + out_entry->phys_addr = phys_addr + GetInteger(address); + out_context->next_page = page + 1; + out_context->next_offset = GetInteger(address) + page_size; return true; } -bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const { +bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const { // Setup invalid defaults. - out_entry.phys_addr = 0; - out_entry.block_size = page_size; + out_entry->phys_addr = 0; + out_entry->block_size = page_size; // Validate that we can read the actual entry. - const auto page = context.next_page; + const auto page = context->next_page; if (page >= backing_addr.size()) { return false; } @@ -54,9 +54,9 @@ bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& c } // Populate the results. 
- out_entry.phys_addr = phys_addr + context.next_offset; - context.next_page = page + 1; - context.next_offset += page_size; + out_entry->phys_addr = phys_addr + context->next_offset; + context->next_page = page + 1; + context->next_offset += page_size; return true; } diff --git a/src/common/page_table.h b/src/common/page_table.h index 714ed2ab5..9f97b9a8d 100755 --- a/src/common/page_table.h +++ b/src/common/page_table.h @@ -6,6 +6,7 @@ #include #include "common/common_types.h" +#include "common/typed_address.h" #include "common/virtual_buffer.h" namespace Common { @@ -100,9 +101,9 @@ struct PageTable { PageTable(PageTable&&) noexcept = default; PageTable& operator=(PageTable&&) noexcept = default; - bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context, - u64 address) const; - bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const; + bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, + Common::ProcessAddress address) const; + bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const; /** * Resizes the page table to be able to accommodate enough pages within @@ -117,6 +118,16 @@ struct PageTable { return current_address_space_width_in_bits; } + bool GetPhysicalAddress(Common::PhysicalAddress* out_phys_addr, + Common::ProcessAddress virt_addr) const { + if (virt_addr > (1ULL << this->GetAddressSpaceBits())) { + return false; + } + + *out_phys_addr = backing_addr[virt_addr / page_size] + GetInteger(virt_addr); + return true; + } + /** * Vector of memory pointers backing each page. An entry can only be non-null if the * corresponding attribute element is of type `Memory`. diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 96f9c6126..b60c550af 100755 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -271,8 +271,9 @@ add_library(core STATIC hle/kernel/k_page_heap.h hle/kernel/k_page_group.cpp hle/kernel/k_page_group.h - hle/kernel/k_page_table.cpp hle/kernel/k_page_table.h + hle/kernel/k_page_table_base.cpp + hle/kernel/k_page_table_base.h hle/kernel/k_page_table_manager.h hle/kernel/k_page_table_slab_heap.h hle/kernel/k_port.cpp @@ -280,6 +281,7 @@ add_library(core STATIC hle/kernel/k_priority_queue.h hle/kernel/k_process.cpp hle/kernel/k_process.h + hle/kernel/k_process_page_table.h hle/kernel/k_readable_event.cpp hle/kernel/k_readable_event.h hle/kernel/k_resource_limit.cpp @@ -330,8 +332,6 @@ add_library(core STATIC hle/kernel/physical_core.cpp hle/kernel/physical_core.h hle/kernel/physical_memory.h - hle/kernel/process_capability.cpp - hle/kernel/process_capability.h hle/kernel/slab_helpers.h hle/kernel/svc.cpp hle/kernel/svc.h @@ -715,6 +715,7 @@ add_library(core STATIC hle/service/nvnflinger/producer_listener.h hle/service/nvnflinger/status.h hle/service/nvnflinger/ui/fence.h + hle/service/nvnflinger/ui/graphic_buffer.cpp hle/service/nvnflinger/ui/graphic_buffer.h hle/service/nvnflinger/window.h hle/service/olsc/olsc.cpp diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp index 7cb880c3d..93e7d7a44 100755 --- a/src/core/debugger/gdbstub.cpp +++ b/src/core/debugger/gdbstub.cpp @@ -727,29 +727,34 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory } } -static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) { - Kernel::Svc::MemoryInfo mem_info; +static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) { + Kernel::KMemoryInfo mem_info; + Kernel::Svc::MemoryInfo 
svc_mem_info; + Kernel::Svc::PageInfo page_info; VAddr cur_addr{base}; // Expect: r-x Code (.text) - mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); - cur_addr = mem_info.base_address + mem_info.size; - if (mem_info.state != Kernel::Svc::MemoryState::Code || - mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) { + R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr)); + svc_mem_info = mem_info.GetSvcMemoryInfo(); + cur_addr = svc_mem_info.base_address + svc_mem_info.size; + if (svc_mem_info.state != Kernel::Svc::MemoryState::Code || + svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) { return cur_addr - 1; } // Expect: r-- Code (.rodata) - mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); - cur_addr = mem_info.base_address + mem_info.size; - if (mem_info.state != Kernel::Svc::MemoryState::Code || - mem_info.permission != Kernel::Svc::MemoryPermission::Read) { + R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr)); + svc_mem_info = mem_info.GetSvcMemoryInfo(); + cur_addr = svc_mem_info.base_address + svc_mem_info.size; + if (svc_mem_info.state != Kernel::Svc::MemoryState::Code || + svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) { return cur_addr - 1; } // Expect: rw- CodeData (.data) - mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); - cur_addr = mem_info.base_address + mem_info.size; + R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr)); + svc_mem_info = mem_info.GetSvcMemoryInfo(); + cur_addr = svc_mem_info.base_address + svc_mem_info.size; return cur_addr - 1; } @@ -767,7 +772,7 @@ void GDBStub::HandleRcmd(const std::vector& command) { if (command_str == "get fastmem") { if (Settings::IsFastmemEnabled()) { - const auto& impl = page_table.PageTableImpl(); + const auto& impl = page_table.GetImpl(); const auto region = reinterpret_cast(impl.fastmem_arena); const auto region_bits = impl.current_address_space_width_in_bits; const auto region_size = 1ULL << region_bits; @@ -785,20 +790,22 @@ void GDBStub::HandleRcmd(const std::vector& command) { reply = fmt::format("Process: {:#x} ({})\n" "Program Id: {:#018x}\n", process->GetProcessId(), process->GetName(), process->GetProgramId()); - reply += fmt::format("Layout:\n" - " Alias: {:#012x} - {:#012x}\n" - " Heap: {:#012x} - {:#012x}\n" - " Aslr: {:#012x} - {:#012x}\n" - " Stack: {:#012x} - {:#012x}\n" - "Modules:\n", - GetInteger(page_table.GetAliasRegionStart()), - GetInteger(page_table.GetAliasRegionEnd()), - GetInteger(page_table.GetHeapRegionStart()), - GetInteger(page_table.GetHeapRegionEnd()), - GetInteger(page_table.GetAliasCodeRegionStart()), - GetInteger(page_table.GetAliasCodeRegionEnd()), - GetInteger(page_table.GetStackRegionStart()), - GetInteger(page_table.GetStackRegionEnd())); + reply += fmt::format( + "Layout:\n" + " Alias: {:#012x} - {:#012x}\n" + " Heap: {:#012x} - {:#012x}\n" + " Aslr: {:#012x} - {:#012x}\n" + " Stack: {:#012x} - {:#012x}\n" + "Modules:\n", + GetInteger(page_table.GetAliasRegionStart()), + GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1, + GetInteger(page_table.GetHeapRegionStart()), + GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1, + GetInteger(page_table.GetAliasCodeRegionStart()), + GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() - + 1, + GetInteger(page_table.GetStackRegionStart()), + 
GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1); for (const auto& [vaddr, name] : modules) { reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr, @@ -811,27 +818,34 @@ void GDBStub::HandleRcmd(const std::vector& command) { while (true) { using MemoryAttribute = Kernel::Svc::MemoryAttribute; - auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); + Kernel::KMemoryInfo mem_info{}; + Kernel::Svc::PageInfo page_info{}; + R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), + cur_addr)); + auto svc_mem_info = mem_info.GetSvcMemoryInfo(); - if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible || - mem_info.base_address + mem_info.size - 1 != std::numeric_limits::max()) { - const char* state = GetMemoryStateName(mem_info.state); - const char* perm = GetMemoryPermissionString(mem_info); + if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible || + svc_mem_info.base_address + svc_mem_info.size - 1 != + std::numeric_limits::max()) { + const char* state = GetMemoryStateName(svc_mem_info.state); + const char* perm = GetMemoryPermissionString(svc_mem_info); - const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-'; - const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-'; - const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-'; - const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-'; + const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-'; + const char i = + True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-'; + const char d = + True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-'; + const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-'; const char p = - True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-'; + True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 
'P' : '-'; - reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", - mem_info.base_address, - mem_info.base_address + mem_info.size - 1, perm, state, l, i, - d, u, p, mem_info.ipc_count, mem_info.device_count); + reply += fmt::format( + " {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address, + svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p, + svc_mem_info.ipc_count, svc_mem_info.device_count); } - const uintptr_t next_address = mem_info.base_address + mem_info.size; + const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size; if (next_address <= cur_addr) { break; } diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp index 31a2fe617..dfa254d15 100755 --- a/src/core/hid/emulated_controller.cpp +++ b/src/core/hid/emulated_controller.cpp @@ -1091,30 +1091,30 @@ void EmulatedController::SetBattery(const Common::Input::CallbackStatus& callbac bool is_charging = false; bool is_powered = false; - NpadBatteryLevel battery_level = 0; + NpadBatteryLevel battery_level = NpadBatteryLevel::Empty; switch (controller.battery_values[index]) { case Common::Input::BatteryLevel::Charging: is_charging = true; is_powered = true; - battery_level = 6; + battery_level = NpadBatteryLevel::Full; break; case Common::Input::BatteryLevel::Medium: - battery_level = 6; + battery_level = NpadBatteryLevel::High; break; case Common::Input::BatteryLevel::Low: - battery_level = 4; + battery_level = NpadBatteryLevel::Low; break; case Common::Input::BatteryLevel::Critical: - battery_level = 2; + battery_level = NpadBatteryLevel::Critical; break; case Common::Input::BatteryLevel::Empty: - battery_level = 0; + battery_level = NpadBatteryLevel::Empty; break; case Common::Input::BatteryLevel::None: case Common::Input::BatteryLevel::Full: default: is_powered = true; - battery_level = 8; + battery_level = NpadBatteryLevel::Full; break; } diff --git a/src/core/hid/hid_types.h b/src/core/hid/hid_types.h index e0916d9e6..881bbfd9b 100755 --- a/src/core/hid/hid_types.h +++ b/src/core/hid/hid_types.h @@ -302,6 +302,15 @@ enum class TouchScreenModeForNx : u8 { Heat2, }; +// This is nn::hid::system::NpadBatteryLevel +enum class NpadBatteryLevel : u32 { + Empty, + Critical, + Low, + High, + Full, +}; + // This is nn::hid::NpadStyleTag struct NpadStyleTag { union { @@ -385,16 +394,12 @@ struct NpadGcTriggerState { }; static_assert(sizeof(NpadGcTriggerState) == 0x10, "NpadGcTriggerState is an invalid size"); -// This is nn::hid::system::NpadBatteryLevel -using NpadBatteryLevel = u32; -static_assert(sizeof(NpadBatteryLevel) == 0x4, "NpadBatteryLevel is an invalid size"); - // This is nn::hid::system::NpadPowerInfo struct NpadPowerInfo { bool is_powered{}; bool is_charging{}; INSERT_PADDING_BYTES(0x6); - NpadBatteryLevel battery_level{8}; + NpadBatteryLevel battery_level{NpadBatteryLevel::Full}; }; static_assert(sizeof(NpadPowerInfo) == 0xC, "NpadPowerInfo is an invalid size"); diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp index df762e8b0..5c3940cd8 100755 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp @@ -222,7 +222,7 @@ Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* }; // We succeeded. 
- *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr); + *out = KPageTable::GetHeapVirtualAddress(kernel, paddr); R_SUCCEED(); } @@ -238,8 +238,17 @@ void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress addres ASSERT(Common::IsAligned(size, alignment)); // Close the secure region's pages. - kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address), + kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address), size / PageSize); } +// Insecure Memory. +KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) { + return kernel.GetSystemResourceLimit(); +} + +u32 KSystemControl::GetInsecureMemoryPool() { + return static_cast(KMemoryManager::Pool::SystemNonSecure); +} + } // namespace Kernel::Board::Nintendo::Nx diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h index 116c34930..5fe3bac21 100755 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h @@ -8,7 +8,8 @@ namespace Kernel { class KernelCore; -} +class KResourceLimit; +} // namespace Kernel namespace Kernel::Board::Nintendo::Nx { @@ -40,6 +41,10 @@ public: u32 pool); static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size, u32 pool); + + // Insecure Memory. + static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel); + static u32 GetInsecureMemoryPool(); }; } // namespace Kernel::Board::Nintendo::Nx diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp index e7da7a21d..fb890f978 100755 --- a/src/core/hle/kernel/k_capabilities.cpp +++ b/src/core/hle/kernel/k_capabilities.cpp @@ -4,14 +4,15 @@ #include "core/hardware_properties.h" #include "core/hle/kernel/k_capabilities.h" #include "core/hle/kernel/k_memory_layout.h" -#include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process_page_table.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_version.h" namespace Kernel { -Result KCapabilities::InitializeForKip(std::span kern_caps, KPageTable* page_table) { +Result KCapabilities::InitializeForKip(std::span kern_caps, + KProcessPageTable* page_table) { // We're initializing an initial process. m_svc_access_flags.reset(); m_irq_access_flags.reset(); @@ -41,7 +42,8 @@ Result KCapabilities::InitializeForKip(std::span kern_caps, KPageTabl R_RETURN(this->SetCapabilities(kern_caps, page_table)); } -Result KCapabilities::InitializeForUser(std::span user_caps, KPageTable* page_table) { +Result KCapabilities::InitializeForUser(std::span user_caps, + KProcessPageTable* page_table) { // We're initializing a user process. m_svc_access_flags.reset(); m_irq_access_flags.reset(); @@ -121,7 +123,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) { R_SUCCEED(); } -Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) { +Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) { const auto range_pack = MapRange{cap}; const auto size_pack = MapRangeSize{size_cap}; @@ -142,16 +144,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p ? 
KMemoryPermission::UserRead : KMemoryPermission::UserReadWrite; if (MapRangeSize{size_cap}.normal) { - // R_RETURN(page_table->MapStatic(phys_addr, size, perm)); + R_RETURN(page_table->MapStatic(phys_addr, size, perm)); } else { - // R_RETURN(page_table->MapIo(phys_addr, size, perm)); + R_RETURN(page_table->MapIo(phys_addr, size, perm)); } - - UNIMPLEMENTED(); - R_SUCCEED(); } -Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) { +Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) { // Get/validate address/size const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize; const size_t num_pages = 1; @@ -160,10 +159,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) { R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress); // Do the mapping. - // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite)); - - UNIMPLEMENTED(); - R_SUCCEED(); + R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite)); } template @@ -200,13 +196,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) { R_SUCCEED(); } -Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) { +Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) { // Map each region into the process's page table. return ProcessMapRegionCapability( - cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result { - // R_RETURN(page_table->MapRegion(region_type, perm)); - UNIMPLEMENTED(); - R_SUCCEED(); + cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result { + R_RETURN(page_table->MapRegion(region_type, perm)); }); } @@ -280,7 +274,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) { } Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc, - KPageTable* page_table) { + KProcessPageTable* page_table) { // Validate this is a capability we can act on. 
const auto type = GetCapabilityType(cap); R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument); @@ -318,7 +312,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc, } } -Result KCapabilities::SetCapabilities(std::span caps, KPageTable* page_table) { +Result KCapabilities::SetCapabilities(std::span caps, KProcessPageTable* page_table) { u32 set_flags = 0, set_svc = 0; for (size_t i = 0; i < caps.size(); i++) { diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h index ebd4eedb1..013d952ad 100755 --- a/src/core/hle/kernel/k_capabilities.h +++ b/src/core/hle/kernel/k_capabilities.h @@ -15,15 +15,15 @@ namespace Kernel { -class KPageTable; +class KProcessPageTable; class KernelCore; class KCapabilities { public: constexpr explicit KCapabilities() = default; - Result InitializeForKip(std::span kern_caps, KPageTable* page_table); - Result InitializeForUser(std::span user_caps, KPageTable* page_table); + Result InitializeForKip(std::span kern_caps, KProcessPageTable* page_table); + Result InitializeForUser(std::span user_caps, KProcessPageTable* page_table); static Result CheckCapabilities(KernelCore& kernel, std::span user_caps); @@ -264,9 +264,9 @@ private: Result SetCorePriorityCapability(const u32 cap); Result SetSyscallMaskCapability(const u32 cap, u32& set_svc); - Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table); - Result MapIoPage_(const u32 cap, KPageTable* page_table); - Result MapRegion_(const u32 cap, KPageTable* page_table); + Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table); + Result MapIoPage_(const u32 cap, KProcessPageTable* page_table); + Result MapRegion_(const u32 cap, KProcessPageTable* page_table); Result SetInterruptPairCapability(const u32 cap); Result SetProgramTypeCapability(const u32 cap); Result SetKernelVersionCapability(const u32 cap); @@ -277,8 +277,9 @@ private: static Result ProcessMapRegionCapability(const u32 cap, F f); static Result CheckMapRegion(KernelCore& kernel, const u32 cap); - Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table); - Result SetCapabilities(std::span caps, KPageTable* page_table); + Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, + KProcessPageTable* page_table); + Result SetCapabilities(std::span caps, KProcessPageTable* page_table); private: Svc::SvcAccessFlagSet m_svc_access_flags{}; diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp index f48896715..f0703f795 100755 --- a/src/core/hle/kernel/k_device_address_space.cpp +++ b/src/core/hle/kernel/k_device_address_space.cpp @@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) { R_SUCCEED(); } -Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address, +Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, u64 device_address, u32 option, bool is_aligned) { // Check that the address falls within the space. 
R_UNLESS((m_space_address <= device_address && @@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_ R_SUCCEED(); } -Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address, +Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, u64 device_address) { // Check that the address falls within the space. R_UNLESS((m_space_address <= device_address && diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h index 18556e3cc..ff0ec8152 100755 --- a/src/core/hle/kernel/k_device_address_space.h +++ b/src/core/hle/kernel/k_device_address_space.h @@ -5,7 +5,7 @@ #include -#include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process_page_table.h" #include "core/hle/kernel/k_typed_address.h" #include "core/hle/kernel/slab_helpers.h" #include "core/hle/result.h" @@ -31,23 +31,23 @@ public: Result Attach(Svc::DeviceName device_name); Result Detach(Svc::DeviceName device_name); - Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size, + Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, u64 device_address, u32 option) { R_RETURN(this->Map(page_table, process_address, size, device_address, option, false)); } - Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size, + Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, u64 device_address, u32 option) { R_RETURN(this->Map(page_table, process_address, size, device_address, option, true)); } - Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size, + Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, u64 device_address); static void Initialize(); private: - Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size, + Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, u64 device_address, u32 option, bool is_aligned); private: diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 1c432a433..faea02e9a 100755 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -394,6 +394,14 @@ private: return region.GetEndAddress(); } +public: + static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) { + return Find(address, layout.GetVirtualMemoryRegionTree()); + } + static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) { + return Find(address, layout.GetPhysicalMemoryRegionTree()); + } + private: u64 m_linear_phys_to_virt_diff{}; u64 m_linear_virt_to_phys_diff{}; diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 1c1cd4c3b..546840c47 100755 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -456,8 +456,7 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size, } void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) { - auto optimize_pa = - KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); + auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); auto* optimize_map = kernel.System().DeviceMemory().GetPointer(optimize_pa); std::memset(optimize_map, 0, 
CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); @@ -465,8 +464,7 @@ void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) { void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages) { - auto optimize_pa = - KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); + auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); auto* optimize_map = kernel.System().DeviceMemory().GetPointer(optimize_pa); // Get the range we're tracking. @@ -485,8 +483,7 @@ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysi void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages) { - auto optimize_pa = - KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); + auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); auto* optimize_map = kernel.System().DeviceMemory().GetPointer(optimize_pa); // Get the range we're tracking. @@ -506,8 +503,7 @@ void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysica bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages, u8 fill_pattern) { auto& device_memory = kernel.System().DeviceMemory(); - auto optimize_pa = - KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); + auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); auto* optimize_map = device_memory.GetPointer(optimize_pa); // We want to return whether any pages were newly allocated. diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index fa8360b88..a29c9a751 100755 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -3,548 +3,14 @@ #pragma once -#include - -#include "common/common_funcs.h" -#include "common/page_table.h" -#include "core/file_sys/program_metadata.h" -#include "core/hle/kernel/k_dynamic_resource_manager.h" -#include "core/hle/kernel/k_light_lock.h" -#include "core/hle/kernel/k_memory_block.h" -#include "core/hle/kernel/k_memory_block_manager.h" -#include "core/hle/kernel/k_memory_layout.h" -#include "core/hle/kernel/k_memory_manager.h" -#include "core/hle/kernel/k_typed_address.h" -#include "core/hle/result.h" -#include "core/memory.h" - -namespace Core { -class System; -} +#include "core/hle/kernel/k_page_table_base.h" namespace Kernel { -enum class DisableMergeAttribute : u8 { - None = (0U << 0), - DisableHead = (1U << 0), - DisableHeadAndBody = (1U << 1), - EnableHeadAndBody = (1U << 2), - DisableTail = (1U << 3), - EnableTail = (1U << 4), - EnableAndMergeHeadBodyTail = (1U << 5), - EnableHeadBodyTail = EnableHeadAndBody | EnableTail, - DisableHeadBodyTail = DisableHeadAndBody | DisableTail, -}; - -struct KPageProperties { - KMemoryPermission perm; - bool io; - bool uncached; - DisableMergeAttribute disable_merge_attributes; -}; -static_assert(std::is_trivial_v); -static_assert(sizeof(KPageProperties) == sizeof(u32)); - -class KBlockInfoManager; -class KMemoryBlockManager; -class KResourceLimit; -class KSystemResource; - -class KPageTable final { -protected: - struct PageLinkedList; - +class KPageTable final : public KPageTableBase { public: - enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll }; - - YUZU_NON_COPYABLE(KPageTable); - YUZU_NON_MOVEABLE(KPageTable); - - explicit KPageTable(Core::System& 
system_); - ~KPageTable(); - - Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, - bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, - KProcessAddress code_addr, size_t code_size, - KSystemResource* system_resource, KResourceLimit* resource_limit, - Core::Memory::Memory& memory); - - void Finalize(); - - Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state, - KMemoryPermission perm); - Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); - Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, - ICacheInvalidationStrategy icache_invalidation_strategy); - Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table, - KProcessAddress src_addr); - Result MapPhysicalMemory(KProcessAddress addr, size_t size); - Result UnmapPhysicalMemory(KProcessAddress addr, size_t size); - Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size); - Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size); - Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, - Svc::MemoryPermission svc_perm); - KMemoryInfo QueryInfo(KProcessAddress addr); - Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm); - Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr); - Result SetMaxHeapSize(size_t size); - Result SetHeapSize(u64* out, size_t size); - Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, - KMemoryPermission perm, bool is_aligned, bool check_heap); - Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap); - - Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size); - - Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size); - Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size); - - Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, - KPageTable& src_page_table, KMemoryPermission test_perm, - KMemoryState dst_state, bool send); - Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state); - Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state); - - Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, - KMemoryPermission perm); - Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg); - Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size); - Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg); - Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, - KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr); - - Common::PageTable& PageTableImpl() { - return *m_page_table_impl; - } - - const Common::PageTable& PageTableImpl() const { - return *m_page_table_impl; - } - - KBlockInfoManager* GetBlockInfoManager() { - return m_block_info_manager; - } - - Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, - KPhysicalAddress phys_addr, KProcessAddress region_start, - size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { - R_RETURN(this->MapPages(out_addr, num_pages, alignment, 
phys_addr, true, region_start, - region_num_pages, state, perm)); - } - - Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, - KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { - R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, - this->GetRegionAddress(state), - this->GetRegionSize(state) / PageSize, state, perm)); - } - - Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, - KMemoryPermission perm) { - R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false, - this->GetRegionAddress(state), - this->GetRegionSize(state) / PageSize, state, perm)); - } - - Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, - KMemoryPermission perm); - Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); - - Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, - KProcessAddress region_start, size_t region_num_pages, KMemoryState state, - KMemoryPermission perm); - Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state, - KMemoryPermission perm); - Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state); - void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, - const KPageGroup& pg); - - KProcessAddress GetRegionAddress(Svc::MemoryState state) const; - size_t GetRegionSize(Svc::MemoryState state) const; - bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const; - - KProcessAddress GetRegionAddress(KMemoryState state) const { - return this->GetRegionAddress(static_cast(state & KMemoryState::Mask)); - } - size_t GetRegionSize(KMemoryState state) const { - return this->GetRegionSize(static_cast(state & KMemoryState::Mask)); - } - bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { - return this->CanContain(addr, size, - static_cast(state & KMemoryState::Mask)); - } - -protected: - struct PageLinkedList { - private: - struct Node { - Node* m_next; - std::array m_buffer; - }; - - public: - constexpr PageLinkedList() = default; - - void Push(Node* n) { - ASSERT(Common::IsAligned(reinterpret_cast(n), PageSize)); - n->m_next = m_root; - m_root = n; - } - - void Push(Core::Memory::Memory& memory, KVirtualAddress addr) { - this->Push(memory.GetPointer(GetInteger(addr))); - } - - Node* Peek() const { - return m_root; - } - - Node* Pop() { - Node* const r = m_root; - - m_root = r->m_next; - r->m_next = nullptr; - - return r; - } - - private: - Node* m_root{}; - }; - static_assert(std::is_trivially_destructible::value); - -private: - enum class OperationType : u32 { - Map = 0, - MapGroup = 1, - MapFirstGroup = 2, - Unmap = 3, - ChangePermissions = 4, - ChangePermissionsAndRefresh = 5, - ChangePermissionsAndRefreshAndFlush = 6, - Separate = 7, - }; - - static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = - KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; - - Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, - KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, - size_t region_num_pages, KMemoryState state, KMemoryPermission perm); - bool IsRegionContiguous(KProcessAddress addr, u64 size) const; - void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list); - KMemoryInfo QueryInfoImpl(KProcessAddress addr); - KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages, 
- u64 needed_num_pages, size_t align); - Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group, - OperationType operation); - Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, - OperationType operation, KPhysicalAddress map_addr = 0); - void FinalizeUpdate(PageLinkedList* page_list); - - KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, - size_t num_pages, size_t alignment, size_t offset, - size_t guard_pages); - - Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size, - KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr) const; - Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, - KMemoryPermission perm, KMemoryAttribute attr_mask, - KMemoryAttribute attr) const { - R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, - perm, attr_mask, attr)); - } - - Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr) const; - Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, size_t* out_blocks_needed, - KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, - KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr, - KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; - Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, size_t* out_blocks_needed, - KProcessAddress addr, size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr, - KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; - Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size, - KMemoryState state_mask, KMemoryState state, - KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr, - KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, - state_mask, state, perm_mask, perm, attr_mask, attr, - ignore_attr)); - } - Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, - KMemoryAttribute attr_mask, KMemoryAttribute attr, - KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, - attr_mask, attr, ignore_attr)); - } - - Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress, - KProcessAddress addr, size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, - KMemoryPermission perm, KMemoryAttribute attr_mask, - KMemoryAttribute attr, KMemoryPermission new_perm, - KMemoryAttribute lock_attr); - Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, - 
KMemoryAttribute attr_mask, KMemoryAttribute attr, - KMemoryPermission new_perm, KMemoryAttribute lock_attr, - const KPageGroup* pg); - - Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages); - bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages); - - bool IsLockedByCurrentThread() const { - return m_general_lock.IsLockedByCurrentThread(); - } - - bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) { - ASSERT(this->IsLockedByCurrentThread()); - - return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); - } - - bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const { - ASSERT(this->IsLockedByCurrentThread()); - - *out = GetPhysicalAddr(virt_addr); - - return *out != 0; - } - - Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, - KProcessAddress address, size_t size, KMemoryPermission test_perm, - KMemoryState dst_state); - Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr, - KMemoryPermission test_perm, KMemoryState dst_state, - KPageTable& src_page_table, bool send); - void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address, - size_t size, KMemoryPermission prot_perm); - - Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, - size_t num_pages, KMemoryPermission perm); - Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, - const KPageGroup& pg, const KPageProperties properties, bool reuse_ll); - - mutable KLightLock m_general_lock; - mutable KLightLock m_map_physical_memory_lock; - -public: - constexpr KProcessAddress GetAddressSpaceStart() const { - return m_address_space_start; - } - constexpr KProcessAddress GetAddressSpaceEnd() const { - return m_address_space_end; - } - constexpr size_t GetAddressSpaceSize() const { - return m_address_space_end - m_address_space_start; - } - constexpr KProcessAddress GetHeapRegionStart() const { - return m_heap_region_start; - } - constexpr KProcessAddress GetHeapRegionEnd() const { - return m_heap_region_end; - } - constexpr size_t GetHeapRegionSize() const { - return m_heap_region_end - m_heap_region_start; - } - constexpr KProcessAddress GetAliasRegionStart() const { - return m_alias_region_start; - } - constexpr KProcessAddress GetAliasRegionEnd() const { - return m_alias_region_end; - } - constexpr size_t GetAliasRegionSize() const { - return m_alias_region_end - m_alias_region_start; - } - constexpr KProcessAddress GetStackRegionStart() const { - return m_stack_region_start; - } - constexpr KProcessAddress GetStackRegionEnd() const { - return m_stack_region_end; - } - constexpr size_t GetStackRegionSize() const { - return m_stack_region_end - m_stack_region_start; - } - constexpr KProcessAddress GetKernelMapRegionStart() const { - return m_kernel_map_region_start; - } - constexpr KProcessAddress GetKernelMapRegionEnd() const { - return m_kernel_map_region_end; - } - constexpr KProcessAddress GetCodeRegionStart() const { - return m_code_region_start; - } - constexpr KProcessAddress GetCodeRegionEnd() const { - return m_code_region_end; - } - constexpr KProcessAddress GetAliasCodeRegionStart() const { - return m_alias_code_region_start; - } - constexpr KProcessAddress GetAliasCodeRegionEnd() const { - return m_alias_code_region_end; - } - constexpr size_t GetAliasCodeRegionSize() const { - return m_alias_code_region_end - m_alias_code_region_start; - 
} - size_t GetNormalMemorySize() const { - KScopedLightLock lk(m_general_lock); - return GetHeapSize() + m_mapped_physical_memory_size; - } - constexpr size_t GetAddressSpaceWidth() const { - return m_address_space_width; - } - constexpr size_t GetHeapSize() const { - return m_current_heap_end - m_heap_region_start; - } - constexpr size_t GetNumGuardPages() const { - return IsKernel() ? 1 : 4; - } - KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const { - const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits]; - ASSERT(backing_addr); - return backing_addr + GetInteger(addr); - } - constexpr bool Contains(KProcessAddress addr) const { - return m_address_space_start <= addr && addr <= m_address_space_end - 1; - } - constexpr bool Contains(KProcessAddress addr, size_t size) const { - return m_address_space_start <= addr && addr < addr + size && - addr + size - 1 <= m_address_space_end - 1; - } - constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const { - return this->Contains(addr, size) && m_alias_region_start <= addr && - addr + size - 1 <= m_alias_region_end - 1; - } - constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const { - return this->Contains(addr, size) && m_heap_region_start <= addr && - addr + size - 1 <= m_heap_region_end - 1; - } - -public: - static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout, - KPhysicalAddress addr) { - return layout.GetLinearVirtualAddress(addr); - } - - static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, - KVirtualAddress addr) { - return layout.GetLinearPhysicalAddress(addr); - } - - static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout, - KPhysicalAddress addr) { - return GetLinearMappedVirtualAddress(layout, addr); - } - - static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout, - KVirtualAddress addr) { - return GetLinearMappedPhysicalAddress(layout, addr); - } - - static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout, - KPhysicalAddress addr) { - return GetLinearMappedVirtualAddress(layout, addr); - } - - static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout, - KVirtualAddress addr) { - return GetLinearMappedPhysicalAddress(layout, addr); - } - -private: - constexpr bool IsKernel() const { - return m_is_kernel; - } - constexpr bool IsAslrEnabled() const { - return m_enable_aslr; - } - - constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const { - return (m_address_space_start <= addr) && - (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && - (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); - } - -private: - class KScopedPageTableUpdater { - private: - KPageTable* m_pt{}; - PageLinkedList m_ll; - - public: - explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} - explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} - ~KScopedPageTableUpdater() { - m_pt->FinalizeUpdate(this->GetPageList()); - } - - PageLinkedList* GetPageList() { - return std::addressof(m_ll); - } - }; - -private: - KProcessAddress m_address_space_start{}; - KProcessAddress m_address_space_end{}; - KProcessAddress m_heap_region_start{}; - KProcessAddress m_heap_region_end{}; - KProcessAddress m_current_heap_end{}; - KProcessAddress m_alias_region_start{}; - KProcessAddress m_alias_region_end{}; - KProcessAddress m_stack_region_start{}; - KProcessAddress m_stack_region_end{}; - 
KProcessAddress m_kernel_map_region_start{}; - KProcessAddress m_kernel_map_region_end{}; - KProcessAddress m_code_region_start{}; - KProcessAddress m_code_region_end{}; - KProcessAddress m_alias_code_region_start{}; - KProcessAddress m_alias_code_region_end{}; - - size_t m_max_heap_size{}; - size_t m_mapped_physical_memory_size{}; - size_t m_mapped_unsafe_physical_memory{}; - size_t m_mapped_insecure_memory{}; - size_t m_mapped_ipc_server_memory{}; - size_t m_address_space_width{}; - - KMemoryBlockManager m_memory_block_manager; - u32 m_allocate_option{}; - - bool m_is_kernel{}; - bool m_enable_aslr{}; - bool m_enable_device_address_space_merge{}; - - KMemoryBlockSlabManager* m_memory_block_slab_manager{}; - KBlockInfoManager* m_block_info_manager{}; - KResourceLimit* m_resource_limit{}; - - u32 m_heap_fill_value{}; - u32 m_ipc_fill_value{}; - u32 m_stack_fill_value{}; - const KMemoryRegion* m_cached_physical_heap_region{}; - - KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; - KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; - - std::unique_ptr m_page_table_impl; - - Core::System& m_system; - KernelCore& m_kernel; - Core::Memory::Memory* m_memory{}; + explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {} + ~KPageTable() = default; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp new file mode 100755 index 000000000..1cc019c06 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_base.cpp @@ -0,0 +1,5718 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "common/scope_exit.h" +#include "common/settings.h" +#include "core/core.h" +#include "core/hle/kernel/k_address_space_info.h" +#include "core/hle/kernel/k_page_table_base.h" +#include "core/hle/kernel/k_scoped_resource_reservation.h" +#include "core/hle/kernel/k_system_resource.h" + +namespace Kernel { + +namespace { + +class KScopedLightLockPair { + YUZU_NON_COPYABLE(KScopedLightLockPair); + YUZU_NON_MOVEABLE(KScopedLightLockPair); + +private: + KLightLock* m_lower; + KLightLock* m_upper; + +public: + KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { + // Ensure our locks are in a consistent order. + if (std::addressof(lhs) <= std::addressof(rhs)) { + m_lower = std::addressof(lhs); + m_upper = std::addressof(rhs); + } else { + m_lower = std::addressof(rhs); + m_upper = std::addressof(lhs); + } + + // Acquire both locks. + m_lower->Lock(); + if (m_lower != m_upper) { + m_upper->Lock(); + } + } + + ~KScopedLightLockPair() { + // Unlock the upper lock. + if (m_upper != nullptr && m_upper != m_lower) { + m_upper->Unlock(); + } + + // Unlock the lower lock. + if (m_lower != nullptr) { + m_lower->Unlock(); + } + } + +public: + // Utility. + void TryUnlockHalf(KLightLock& lock) { + // Only allow unlocking if the lock is half the pair. + if (m_lower != m_upper) { + // We want to be sure the lock is one we own. 
+ if (m_lower == std::addressof(lock)) { + lock.Unlock(); + m_lower = nullptr; + } else if (m_upper == std::addressof(lock)) { + lock.Unlock(); + m_upper = nullptr; + } + } + } +}; + +void InvalidateEntireInstructionCache(Core::System& system) { + system.InvalidateCpuInstructionCaches(); +} + +template +Result InvalidateDataCache(AddressType addr, u64 size) { + R_SUCCEED(); +} + +template +Result StoreDataCache(AddressType addr, u64 size) { + R_SUCCEED(); +} + +template +Result FlushDataCache(AddressType addr, u64 size) { + R_SUCCEED(); +} + +} // namespace + +void KPageTableBase::MemoryRange::Open() { + // If the range contains heap pages, open them. + if (this->IsHeap()) { + m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize); + } +} + +void KPageTableBase::MemoryRange::Close() { + // If the range contains heap pages, close them. + if (this->IsHeap()) { + m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize); + } +} + +KPageTableBase::KPageTableBase(KernelCore& kernel) + : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel), + m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {} +KPageTableBase::~KPageTableBase() = default; + +Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start, + KVirtualAddress end, Core::Memory::Memory& memory) { + // Initialize our members. + m_address_space_width = + static_cast(is_64_bit ? Common::BitSize() : Common::BitSize()); + m_address_space_start = KProcessAddress(GetInteger(start)); + m_address_space_end = KProcessAddress(GetInteger(end)); + m_is_kernel = true; + m_enable_aslr = true; + m_enable_device_address_space_merge = false; + + m_heap_region_start = 0; + m_heap_region_end = 0; + m_current_heap_end = 0; + m_alias_region_start = 0; + m_alias_region_end = 0; + m_stack_region_start = 0; + m_stack_region_end = 0; + m_kernel_map_region_start = 0; + m_kernel_map_region_end = 0; + m_alias_code_region_start = 0; + m_alias_code_region_end = 0; + m_code_region_start = 0; + m_code_region_end = 0; + m_max_heap_size = 0; + m_mapped_physical_memory_size = 0; + m_mapped_unsafe_physical_memory = 0; + m_mapped_insecure_memory = 0; + m_mapped_ipc_server_memory = 0; + + m_memory_block_slab_manager = + m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer(); + m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer(); + m_resource_limit = m_kernel.GetSystemResourceLimit(); + + m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System, + KMemoryManager::Direction::FromFront); + m_heap_fill_value = MemoryFillValue_Zero; + m_ipc_fill_value = MemoryFillValue_Zero; + m_stack_fill_value = MemoryFillValue_Zero; + + m_cached_physical_linear_region = nullptr; + m_cached_physical_heap_region = nullptr; + + // Initialize our implementation. + m_impl = std::make_unique(); + m_impl->Resize(m_address_space_width, PageBits); + + // Set the tracking memory. + m_memory = std::addressof(memory); + + // Initialize our memory block manager. + R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, + m_memory_block_slab_manager)); +} + +Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, + bool enable_das_merge, bool from_back, + KMemoryManager::Pool pool, KProcessAddress code_address, + size_t code_size, KSystemResource* system_resource, + KResourceLimit* resource_limit, + Core::Memory::Memory& memory) { + // Calculate region extents. 
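A quick illustration of the extent calculation that follows: everything is derived from the address-space width selected by the process creation flags, with the usable space spanning [0, 1 << width). This is a minimal sketch with made-up stand-in names, not part of the patch.

#include <cstdint>

// Hypothetical stand-in for the width chosen from Svc::CreateProcessFlag.
enum class AddressSpaceWidth : int { Bits32 = 32, Bits36 = 36, Bits39 = 39 };

// The process address space spans [0, 1 << width); every region below is
// carved out of this range.
constexpr uint64_t AddressSpaceEnd(AddressSpaceWidth width) {
    return uint64_t{1} << static_cast<int>(width);
}

static_assert(AddressSpaceEnd(AddressSpaceWidth::Bits39) == 0x8000000000ULL);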
+ const size_t as_width = GetAddressSpaceWidth(as_type); + const KProcessAddress start = 0; + const KProcessAddress end = (1ULL << as_width); + + // Validate the region. + ASSERT(start <= code_address); + ASSERT(code_address < code_address + code_size); + ASSERT(code_address + code_size - 1 <= end - 1); + + // Define helpers. + auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) { + return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); + }; + auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) { + return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); + }; + + // Set our bit width and heap/alias sizes. + m_address_space_width = static_cast(GetAddressSpaceWidth(as_type)); + size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); + size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); + + // Adjust heap/alias size if we don't have an alias region. + if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) == + Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) { + heap_region_size += alias_region_size; + alias_region_size = 0; + } + + // Set code regions and determine remaining sizes. + KProcessAddress process_code_start; + KProcessAddress process_code_end; + size_t stack_region_size; + size_t kernel_map_region_size; + if (m_address_space_width == 39) { + alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); + heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); + stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); + kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = m_code_region_end; + process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment); + process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment); + } else { + stack_region_size = 0; + kernel_map_region_size = 0; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + m_stack_region_start = m_code_region_start; + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + + GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); + m_stack_region_end = m_code_region_end; + m_kernel_map_region_start = m_code_region_start; + m_kernel_map_region_end = m_code_region_end; + process_code_start = m_code_region_start; + process_code_end = m_code_region_end; + } + + // Set other basic fields. + m_enable_aslr = enable_aslr; + m_enable_device_address_space_merge = enable_das_merge; + m_address_space_start = start; + m_address_space_end = end; + m_is_kernel = false; + m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); + m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); + m_resource_limit = resource_limit; + + // Determine the region we can place our undetermineds in. 
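The decision made just below reduces to picking whichever gap around the aligned process code region is larger and using it as the area the remaining regions are allocated from. A compact way to express that choice (illustrative helper, not the patch's own code):

#include <cstdint>
#include <utility>

// Returns {start, size} of the larger gap: either the space between the start
// of the code region and the process code, or the space after the process code
// up to the end of the address space.
std::pair<uint64_t, uint64_t> PickAllocRegion(uint64_t code_region_start,
                                              uint64_t process_code_start,
                                              uint64_t process_code_end,
                                              uint64_t address_space_end) {
    const uint64_t below = process_code_start - code_region_start;
    const uint64_t above = address_space_end - process_code_end;
    return (below >= above) ? std::pair{code_region_start, below}
                            : std::pair{process_code_end, above};
}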
+ KProcessAddress alloc_start; + size_t alloc_size; + if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >= + (GetInteger(end) - GetInteger(process_code_end))) { + alloc_start = m_code_region_start; + alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start); + } else { + alloc_start = process_code_end; + alloc_size = GetInteger(end) - GetInteger(process_code_end); + } + const size_t needed_size = + (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); + R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); + + const size_t remaining_size = alloc_size - needed_size; + + // Determine random placements for each region. + size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0; + if (enable_aslr) { + alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * + RegionAlignment; + } + + // Setup heap and alias regions. + m_alias_region_start = alloc_start + alias_rnd; + m_alias_region_end = m_alias_region_start + alias_region_size; + m_heap_region_start = alloc_start + heap_rnd; + m_heap_region_end = m_heap_region_start + heap_region_size; + + if (alias_rnd <= heap_rnd) { + m_heap_region_start += alias_region_size; + m_heap_region_end += alias_region_size; + } else { + m_alias_region_start += heap_region_size; + m_alias_region_end += heap_region_size; + } + + // Setup stack region. + if (stack_region_size) { + m_stack_region_start = alloc_start + stack_rnd; + m_stack_region_end = m_stack_region_start + stack_region_size; + + if (alias_rnd < stack_rnd) { + m_stack_region_start += alias_region_size; + m_stack_region_end += alias_region_size; + } else { + m_alias_region_start += stack_region_size; + m_alias_region_end += stack_region_size; + } + + if (heap_rnd < stack_rnd) { + m_stack_region_start += heap_region_size; + m_stack_region_end += heap_region_size; + } else { + m_heap_region_start += stack_region_size; + m_heap_region_end += stack_region_size; + } + } + + // Setup kernel map region. + if (kernel_map_region_size) { + m_kernel_map_region_start = alloc_start + kmap_rnd; + m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; + + if (alias_rnd < kmap_rnd) { + m_kernel_map_region_start += alias_region_size; + m_kernel_map_region_end += alias_region_size; + } else { + m_alias_region_start += kernel_map_region_size; + m_alias_region_end += kernel_map_region_size; + } + + if (heap_rnd < kmap_rnd) { + m_kernel_map_region_start += heap_region_size; + m_kernel_map_region_end += heap_region_size; + } else { + m_heap_region_start += kernel_map_region_size; + m_heap_region_end += kernel_map_region_size; + } + + if (stack_region_size) { + if (stack_rnd < kmap_rnd) { + m_kernel_map_region_start += stack_region_size; + m_kernel_map_region_end += stack_region_size; + } else { + m_stack_region_start += kernel_map_region_size; + m_stack_region_end += kernel_map_region_size; + } + } + } + + // Set heap and fill members. 
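Taken together, the pairwise shifts above just lay the four regions out in order of their random offsets, which is what keeps them from overlapping. A toy version of that net effect (ties between equal offsets are resolved by a fixed order in the real code; this sketch ignores them):

#include <array>
#include <cstdint>

struct Region {
    uint64_t offset; // random offset drawn for this region
    uint64_t size;
    uint64_t start;  // final placement
};

// Each region starts at alloc_start + its own offset, pushed up by the sizes
// of all regions that drew a smaller offset.
void Place(uint64_t alloc_start, std::array<Region, 4>& regions) {
    for (auto& r : regions) {
        r.start = alloc_start + r.offset;
        for (const auto& other : regions) {
            if (&other != &r && other.offset < r.offset) {
                r.start += other.size;
            }
        }
    }
}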
+ m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_mapped_physical_memory_size = 0; + m_mapped_unsafe_physical_memory = 0; + m_mapped_insecure_memory = 0; + m_mapped_ipc_server_memory = 0; + + // const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled(); + const bool fill_memory = false; + m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero; + m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero; + m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero; + + // Set allocation option. + m_allocate_option = + KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack + : KMemoryManager::Direction::FromFront); + + // Ensure that we regions inside our address space. + auto IsInAddressSpace = [&](KProcessAddress addr) { + return m_address_space_start <= addr && addr <= m_address_space_end; + }; + ASSERT(IsInAddressSpace(m_alias_region_start)); + ASSERT(IsInAddressSpace(m_alias_region_end)); + ASSERT(IsInAddressSpace(m_heap_region_start)); + ASSERT(IsInAddressSpace(m_heap_region_end)); + ASSERT(IsInAddressSpace(m_stack_region_start)); + ASSERT(IsInAddressSpace(m_stack_region_end)); + ASSERT(IsInAddressSpace(m_kernel_map_region_start)); + ASSERT(IsInAddressSpace(m_kernel_map_region_end)); + + // Ensure that we selected regions that don't overlap. + const KProcessAddress alias_start = m_alias_region_start; + const KProcessAddress alias_last = m_alias_region_end - 1; + const KProcessAddress heap_start = m_heap_region_start; + const KProcessAddress heap_last = m_heap_region_end - 1; + const KProcessAddress stack_start = m_stack_region_start; + const KProcessAddress stack_last = m_stack_region_end - 1; + const KProcessAddress kmap_start = m_kernel_map_region_start; + const KProcessAddress kmap_last = m_kernel_map_region_end - 1; + ASSERT(alias_last < heap_start || heap_last < alias_start); + ASSERT(alias_last < stack_start || stack_last < alias_start); + ASSERT(alias_last < kmap_start || kmap_last < alias_start); + ASSERT(heap_last < stack_start || stack_last < heap_start); + ASSERT(heap_last < kmap_start || kmap_last < heap_start); + + // Initialize our implementation. + m_impl = std::make_unique(); + m_impl->Resize(m_address_space_width, PageBits); + + // Set the tracking memory. + m_memory = std::addressof(memory); + + // Initialize our memory block manager. + R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, + m_memory_block_slab_manager)); +} + +void KPageTableBase::Finalize() { + auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { + if (Settings::IsFastmemEnabled()) { + m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); + } + }; + + // Finalize memory blocks. + m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback)); + + // Free any unsafe mapped memory. + if (m_mapped_unsafe_physical_memory) { + UNIMPLEMENTED(); + } + + // Release any insecure mapped memory. + if (m_mapped_insecure_memory) { + if (auto* const insecure_resource_limit = + KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); + insecure_resource_limit != nullptr) { + insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, + m_mapped_insecure_memory); + } + } + + // Release any ipc server memory. + if (m_mapped_ipc_server_memory) { + m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, + m_mapped_ipc_server_memory); + } + + // Invalidate the entire instruction cache. 
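For reference, the overlap assertions near the end of InitializeForProcess above use the file's inclusive last-address convention; each one is equivalent to asserting !RegionsOverlap for a pair of regions, as in this small illustrative helper:

#include <cstdint>

// Two regions, given as [start, last] with last inclusive, overlap unless one
// ends strictly before the other begins.
constexpr bool RegionsOverlap(uint64_t a_start, uint64_t a_last,
                              uint64_t b_start, uint64_t b_last) {
    return !(a_last < b_start || b_last < a_start);
}

static_assert(!RegionsOverlap(0x1000, 0x1FFF, 0x2000, 0x2FFF));
static_assert(RegionsOverlap(0x1000, 0x2000, 0x2000, 0x2FFF));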
+ InvalidateEntireInstructionCache(m_system); + + // Close the backing page table, as the destructor is not called for guest objects. + m_impl.reset(); +} + +KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const { + switch (state) { + case Svc::MemoryState::Free: + case Svc::MemoryState::Kernel: + return m_address_space_start; + case Svc::MemoryState::Normal: + return m_heap_region_start; + case Svc::MemoryState::Ipc: + case Svc::MemoryState::NonSecureIpc: + case Svc::MemoryState::NonDeviceIpc: + return m_alias_region_start; + case Svc::MemoryState::Stack: + return m_stack_region_start; + case Svc::MemoryState::Static: + case Svc::MemoryState::ThreadLocal: + return m_kernel_map_region_start; + case Svc::MemoryState::Io: + case Svc::MemoryState::Shared: + case Svc::MemoryState::AliasCode: + case Svc::MemoryState::AliasCodeData: + case Svc::MemoryState::Transfered: + case Svc::MemoryState::SharedTransfered: + case Svc::MemoryState::SharedCode: + case Svc::MemoryState::GeneratedCode: + case Svc::MemoryState::CodeOut: + case Svc::MemoryState::Coverage: + case Svc::MemoryState::Insecure: + return m_alias_code_region_start; + case Svc::MemoryState::Code: + case Svc::MemoryState::CodeData: + return m_code_region_start; + default: + UNREACHABLE(); + } +} + +size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const { + switch (state) { + case Svc::MemoryState::Free: + case Svc::MemoryState::Kernel: + return m_address_space_end - m_address_space_start; + case Svc::MemoryState::Normal: + return m_heap_region_end - m_heap_region_start; + case Svc::MemoryState::Ipc: + case Svc::MemoryState::NonSecureIpc: + case Svc::MemoryState::NonDeviceIpc: + return m_alias_region_end - m_alias_region_start; + case Svc::MemoryState::Stack: + return m_stack_region_end - m_stack_region_start; + case Svc::MemoryState::Static: + case Svc::MemoryState::ThreadLocal: + return m_kernel_map_region_end - m_kernel_map_region_start; + case Svc::MemoryState::Io: + case Svc::MemoryState::Shared: + case Svc::MemoryState::AliasCode: + case Svc::MemoryState::AliasCodeData: + case Svc::MemoryState::Transfered: + case Svc::MemoryState::SharedTransfered: + case Svc::MemoryState::SharedCode: + case Svc::MemoryState::GeneratedCode: + case Svc::MemoryState::CodeOut: + case Svc::MemoryState::Coverage: + case Svc::MemoryState::Insecure: + return m_alias_code_region_end - m_alias_code_region_start; + case Svc::MemoryState::Code: + case Svc::MemoryState::CodeData: + return m_code_region_end - m_code_region_start; + default: + UNREACHABLE(); + } +} + +bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const { + const KProcessAddress end = addr + size; + const KProcessAddress last = end - 1; + + const KProcessAddress region_start = this->GetRegionAddress(state); + const size_t region_size = this->GetRegionSize(state); + + const bool is_in_region = + region_start <= addr && addr < end && last <= region_start + region_size - 1; + const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || + m_heap_region_start == m_heap_region_end); + const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || + m_alias_region_start == m_alias_region_end); + switch (state) { + case Svc::MemoryState::Free: + case Svc::MemoryState::Kernel: + return is_in_region; + case Svc::MemoryState::Io: + case Svc::MemoryState::Static: + case Svc::MemoryState::Code: + case Svc::MemoryState::CodeData: + case Svc::MemoryState::Shared: + case 
Svc::MemoryState::AliasCode: + case Svc::MemoryState::AliasCodeData: + case Svc::MemoryState::Stack: + case Svc::MemoryState::ThreadLocal: + case Svc::MemoryState::Transfered: + case Svc::MemoryState::SharedTransfered: + case Svc::MemoryState::SharedCode: + case Svc::MemoryState::GeneratedCode: + case Svc::MemoryState::CodeOut: + case Svc::MemoryState::Coverage: + case Svc::MemoryState::Insecure: + return is_in_region && !is_in_heap && !is_in_alias; + case Svc::MemoryState::Normal: + ASSERT(is_in_heap); + return is_in_region && !is_in_alias; + case Svc::MemoryState::Ipc: + case Svc::MemoryState::NonSecureIpc: + case Svc::MemoryState::NonDeviceIpc: + ASSERT(is_in_alias); + return is_in_region && !is_in_heap; + default: + return false; + } +} + +Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, + KMemoryAttribute attr) const { + // Validate the states match expectation. + R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); + R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); + + R_SUCCEED(); +} + +Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, + size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, + KMemoryAttribute attr_mask, + KMemoryAttribute attr) const { + ASSERT(this->IsLockedByCurrentThread()); + + // Get information about the first block. + const KProcessAddress last_addr = addr + size - 1; + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); + KMemoryInfo info = it->GetMemoryInfo(); + + // If the start address isn't aligned, we need a block. + const size_t blocks_for_start_align = + (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; + + while (true) { + // Validate against the provided masks. + R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + + // Break once we're done. + if (last_addr <= info.GetLastAddress()) { + break; + } + + // Advance our iterator. + it++; + ASSERT(it != m_memory_block_manager.cend()); + info = it->GetMemoryInfo(); + } + + // If the end address isn't aligned, we need a block. + const size_t blocks_for_end_align = + (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0; + + if (out_blocks_needed != nullptr) { + *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; + } + + R_SUCCEED(); +} + +Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, + KMemoryBlockManager::const_iterator it, + KProcessAddress last_addr, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, + KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { + ASSERT(this->IsLockedByCurrentThread()); + + // Get information about the first block. + KMemoryInfo info = it->GetMemoryInfo(); + + // Validate all blocks in the range have correct state. + const KMemoryState first_state = info.m_state; + const KMemoryPermission first_perm = info.m_permission; + const KMemoryAttribute first_attr = info.m_attribute; + while (true) { + // Validate the current block. 
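Each of these checks is the same masked-compare idiom used by the three-argument CheckMemoryState overload above: the caller picks which bits matter and what value those bits must hold. A generic sketch of the idiom (illustrative, using plain integers rather than the kernel's enum classes):

#include <cstdint>

// Only the bits selected by `mask` must equal `expected`; everything else is
// ignored. "State must have FlagCanAlias set" is MatchesMasked(state,
// FlagCanAlias, FlagCanAlias); "attribute must be exactly None" is
// MatchesMasked(attr, AllBits, 0).
constexpr bool MatchesMasked(uint32_t value, uint32_t mask, uint32_t expected) {
    return (value & mask) == expected;
}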
+ R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); + R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), + ResultInvalidCurrentMemory); + + // Validate against the provided masks. + R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + + // Break once we're done. + if (last_addr <= info.GetLastAddress()) { + break; + } + + // Advance our iterator. + it++; + ASSERT(it != m_memory_block_manager.cend()); + info = it->GetMemoryInfo(); + } + + // Write output state. + if (out_state != nullptr) { + *out_state = first_state; + } + if (out_perm != nullptr) { + *out_perm = first_perm; + } + if (out_attr != nullptr) { + *out_attr = first_attr & ~ignore_attr; + } + + // If the end address isn't aligned, we need a block. + if (out_blocks_needed != nullptr) { + const size_t blocks_for_end_align = + (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) + ? 1 + : 0; + *out_blocks_needed = blocks_for_end_align; + } + + R_SUCCEED(); +} + +Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, + KProcessAddress addr, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, + KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { + ASSERT(this->IsLockedByCurrentThread()); + + // Check memory state. + const KProcessAddress last_addr = addr + size - 1; + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); + R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, + state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr)); + + // If the start address isn't aligned, we need a block. + if (out_blocks_needed != nullptr && + Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) { + ++(*out_blocks_needed); + } + + R_SUCCEED(); +} + +Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, + KProcessAddress addr, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, + KMemoryAttribute attr, KMemoryPermission new_perm, + KMemoryAttribute lock_attr) { + // Validate basic preconditions. + ASSERT(False(lock_attr & attr)); + ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); + + // Validate the lock request. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check that the output page group is empty, if it exists. + if (out_pg) { + ASSERT(out_pg->GetNumPages() == 0); + } + + // Check the state. + KMemoryState old_state; + KMemoryPermission old_perm; + KMemoryAttribute old_attr; + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), + std::addressof(old_attr), std::addressof(num_allocator_blocks), + addr, size, state_mask | KMemoryState::FlagReferenceCounted, + state | KMemoryState::FlagReferenceCounted, perm_mask, perm, + attr_mask, attr)); + + // Get the physical address, if we're supposed to. 
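Worth noting for the code below: LockMemoryAndOpen and UnlockMemory are deliberately symmetric, with the lock path ORing the requested lock attribute into the block attributes (and optionally tightening permissions) and the unlock path clearing exactly those bits. A toy round trip with made-up attribute values:

#include <cassert>
#include <cstdint>

int main() {
    // Hypothetical attribute bits standing in for KMemoryAttribute values.
    constexpr uint32_t Attr_Uncached = 1u << 3;
    constexpr uint32_t Attr_Locked = 1u << 4;

    uint32_t attr = Attr_Uncached;     // attribute before locking
    const uint32_t lock_attr = Attr_Locked;

    attr |= lock_attr;                 // lock: new_attr = old_attr | lock_attr
    assert(attr == (Attr_Uncached | Attr_Locked));

    attr &= ~lock_attr;                // unlock: new_attr = old_attr & ~lock_attr
    assert(attr == Attr_Uncached);
    return 0;
}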
+ if (out_paddr != nullptr) { + ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr)); + } + + // Make the page group, if we're supposed to. + if (out_pg != nullptr) { + R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); + } + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Decide on new perm and attr. + new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; + KMemoryAttribute new_attr = old_attr | static_cast(lock_attr); + + // Update permission, if we need to. + if (new_perm != old_perm) { + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + const KPageProperties properties = {new_perm, false, + True(old_attr & KMemoryAttribute::Uncached), + DisableMergeAttribute::DisableHeadBodyTail}; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, + OperationType::ChangePermissions, false)); + } + + // Apply the memory block updates. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + + // If we have an output group, open. + if (out_pg) { + out_pg->Open(); + } + + R_SUCCEED(); +} + +Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, + KMemoryAttribute attr, KMemoryPermission new_perm, + KMemoryAttribute lock_attr, const KPageGroup* pg) { + // Validate basic preconditions. + ASSERT((attr_mask & lock_attr) == lock_attr); + ASSERT((attr & lock_attr) == lock_attr); + + // Validate the unlock request. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the state. + KMemoryState old_state; + KMemoryPermission old_perm; + KMemoryAttribute old_attr; + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), + std::addressof(old_attr), std::addressof(num_allocator_blocks), + addr, size, state_mask | KMemoryState::FlagReferenceCounted, + state | KMemoryState::FlagReferenceCounted, perm_mask, perm, + attr_mask, attr)); + + // Check the page group. + if (pg != nullptr) { + R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion); + } + + // Decide on new perm and attr. + new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; + KMemoryAttribute new_attr = old_attr & ~static_cast(lock_attr); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update permission, if we need to. + if (new_perm != old_perm) { + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + const KPageProperties properties = {new_perm, false, + True(old_attr & KMemoryAttribute::Uncached), + DisableMergeAttribute::EnableAndMergeHeadBodyTail}; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, + OperationType::ChangePermissions, false)); + } + + // Apply the memory block updates. 
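The allocator/Update pairing here (and in every other operation in this file) is a reserve-then-commit pattern: CheckMemoryState reports the worst-case number of new memory blocks, the update allocator acquires them up front, and the block-manager Update below then runs without any possibility of allocation failure. A simplified schematic of the idea (not the real KMemoryBlockManagerUpdateAllocator API):

#include <cstddef>
#include <memory>
#include <vector>

struct BlockNode {
    std::size_t address{};
    std::size_t num_pages{};
};

class UpdateAllocator {
public:
    // Reservation happens before any state is mutated; in the kernel it draws
    // from a slab manager and reports failure through a Result out-parameter.
    void Reserve(std::size_t count) {
        for (std::size_t i = 0; i < count; ++i) {
            nodes_.push_back(std::make_unique<BlockNode>());
        }
    }

    // The commit phase only hands out pre-reserved nodes and never allocates,
    // so it cannot fail halfway through an update.
    std::unique_ptr<BlockNode> Take() {
        auto node = std::move(nodes_.back());
        nodes_.pop_back();
        return node;
    }

private:
    std::vector<std::unique_ptr<BlockNode>> nodes_;
};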
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); + + R_SUCCEED(); +} + +Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page, + KProcessAddress address) const { + ASSERT(this->IsLockedByCurrentThread()); + ASSERT(out_info != nullptr); + ASSERT(out_page != nullptr); + + const KMemoryBlock* block = m_memory_block_manager.FindBlock(address); + R_UNLESS(block != nullptr, ResultInvalidCurrentMemory); + + *out_info = block->GetMemoryInfo(); + out_page->flags = 0; + R_SUCCEED(); +} + +Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size, + Svc::MemoryState state) const { + ASSERT(!this->IsLockedByCurrentThread()); + ASSERT(out != nullptr); + + const KProcessAddress region_start = this->GetRegionAddress(state); + const size_t region_size = this->GetRegionSize(state); + + // Check that the address/size are potentially valid. + R_UNLESS((address < address + size), ResultNotFound); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + auto& impl = this->GetImpl(); + + // Begin traversal. + TraversalContext context; + TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; + bool cur_valid = false; + TraversalEntry next_entry; + bool next_valid; + size_t tot_size = 0; + + next_valid = + impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start); + next_entry.block_size = + (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1))); + + // Iterate, looking for entry. + while (true) { + if ((!next_valid && !cur_valid) || + (next_valid && cur_valid && + next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { + cur_entry.block_size += next_entry.block_size; + } else { + if (cur_valid && cur_entry.phys_addr <= address && + address + size <= cur_entry.phys_addr + cur_entry.block_size) { + // Check if this region is valid. + const KProcessAddress mapped_address = + (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr); + if (R_SUCCEEDED(this->CheckMemoryState( + mapped_address, size, KMemoryState::Mask, static_cast(state), + KMemoryPermission::UserRead, KMemoryPermission::UserRead, + KMemoryAttribute::None, KMemoryAttribute::None))) { + // It is! + *out = mapped_address; + R_SUCCEED(); + } + } + + // Update tracking variables. + tot_size += cur_entry.block_size; + cur_entry = next_entry; + cur_valid = next_valid; + } + + if (cur_entry.block_size + tot_size >= region_size) { + break; + } + + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + } + + // Check the last entry. + R_UNLESS(cur_valid, ResultNotFound); + R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound); + R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound); + + // Check if the last region is valid. + const KProcessAddress mapped_address = + (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr); + R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::All, + static_cast(state), + KMemoryPermission::UserRead, KMemoryPermission::UserRead, + KMemoryAttribute::None, KMemoryAttribute::None)) { + R_CONVERT_ALL(ResultNotFound); + } + R_END_TRY_CATCH; + + // We found the region. 
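The traversal above folds physically contiguous page-table entries into a single run before testing whether the requested physical range lands inside it. The merging step, pulled out on its own over a flat list of (physical address, size) entries (illustrative types):

#include <cstdint>
#include <vector>

struct Run {
    uint64_t phys_addr;
    uint64_t size;
};

// Merge adjacent entries whose physical ranges touch, mirroring how cur_entry
// grows while next_entry stays contiguous with it.
std::vector<Run> Coalesce(const std::vector<Run>& entries) {
    std::vector<Run> merged;
    for (const Run& e : entries) {
        if (!merged.empty() &&
            merged.back().phys_addr + merged.back().size == e.phys_addr) {
            merged.back().size += e.size;
        } else {
            merged.push_back(e);
        }
    }
    return merged;
}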
+ *out = mapped_address; + R_SUCCEED(); +} + +Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate that the source address's state is valid. + KMemoryState src_state; + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, + std::addressof(num_src_allocator_blocks), src_address, size, + KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, + KMemoryPermission::All, KMemoryPermission::UserReadWrite, + KMemoryAttribute::All, KMemoryAttribute::None)); + + // Validate that the dst address's state is valid. + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, + KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + + // Create an update allocator for the source. + Result src_allocator_result; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + + // Map the memory. + { + // Determine the number of pages being operated on. + const size_t num_pages = size / PageSize; + + // Create page groups for the memory being unmapped. + KPageGroup pg(m_kernel, m_block_info_manager); + + // Create the page group representing the source. + R_TRY(this->MakePageGroup(pg, src_address, num_pages)); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Reprotect the source as kernel-read/not mapped. + const KMemoryPermission new_src_perm = static_cast( + KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); + const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; + const KPageProperties src_properties = {new_src_perm, false, false, + DisableMergeAttribute::DisableHeadBodyTail}; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, + OperationType::ChangePermissions, false)); + + // Ensure that we unprotect the source pages on failure. + ON_RESULT_FAILURE { + const KPageProperties unprotect_properties = { + KMemoryPermission::UserReadWrite, false, false, + DisableMergeAttribute::EnableHeadBodyTail}; + R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, + unprotect_properties, OperationType::ChangePermissions, true)); + }; + + // Map the alias pages. + const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false, + DisableMergeAttribute::DisableHead}; + R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, + false)); + + // Apply the memory block updates. 
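The two block-manager updates that follow record the map-memory contract: the source keeps its state but is left kernel-readable/not-mapped and Locked, while the destination becomes user read/write Stack memory. Condensed from the calls below (symbolic strings standing in for the enum values):

struct BlockUpdate {
    const char* range;
    const char* permission;
    const char* attribute_or_state;
};

constexpr BlockUpdate kMapMemoryUpdates[] = {
    {"source", "KernelRead | NotMapped", "attribute: Locked (state unchanged)"},
    {"destination", "UserReadWrite", "state: Stack"},
};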
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, + src_state, new_src_perm, new_src_attr, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update( + std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); + } + + R_SUCCEED(); +} + +Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate that the source address's state is valid. + KMemoryState src_state; + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState( + std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), + src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, + KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, + KMemoryAttribute::All, KMemoryAttribute::Locked)); + + // Validate that the dst address's state is valid. + KMemoryPermission dst_perm; + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryState( + nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), + dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + + // Create an update allocator for the source. + Result src_allocator_result; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + + // Unmap the memory. + { + // Determine the number of pages being operated on. + const size_t num_pages = size / PageSize; + + // Create page groups for the memory being unmapped. + KPageGroup pg(m_kernel, m_block_info_manager); + + // Create the page group representing the destination. + R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); + + // Ensure the page group is the valid for the source. + R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Unmap the aliased copy of the pages. + const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, + dst_unmap_properties, OperationType::Unmap, false)); + + // Ensure that we re-map the aliased pages on failure. + ON_RESULT_FAILURE { + this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); + }; + + // Try to set the permissions for the source pages back to what they should be. + const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, + DisableMergeAttribute::EnableAndMergeHeadBodyTail}; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, + OperationType::ChangePermissions, false)); + + // Apply the memory block updates. 
+ m_memory_block_manager.Update( + std::addressof(src_allocator), src_address, num_pages, src_state, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); + m_memory_block_manager.Update( + std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); + } + + R_SUCCEED(); +} + +Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size) { + // Validate the mapping request. + R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), + ResultInvalidMemoryRegion); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Verify that the source memory is normal heap. + KMemoryState src_state; + KMemoryPermission src_perm; + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr, + std::addressof(num_src_allocator_blocks), src_address, size, + KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, + KMemoryPermission::UserReadWrite, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Verify that the destination memory is unmapped. + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, + KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + + // Create an update allocator for the source. + Result src_allocator_result; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + + // Map the code memory. + { + // Determine the number of pages being operated on. + const size_t num_pages = size / PageSize; + + // Create page groups for the memory being unmapped. + KPageGroup pg(m_kernel, m_block_info_manager); + + // Create the page group representing the source. + R_TRY(this->MakePageGroup(pg, src_address, num_pages)); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Reprotect the source as kernel-read/not mapped. + const KMemoryPermission new_perm = static_cast( + KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); + const KPageProperties src_properties = {new_perm, false, false, + DisableMergeAttribute::DisableHeadBodyTail}; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, + OperationType::ChangePermissions, false)); + + // Ensure that we unprotect the source pages on failure. + ON_RESULT_FAILURE { + const KPageProperties unprotect_properties = { + src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail}; + R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, + unprotect_properties, OperationType::ChangePermissions, true)); + }; + + // Map the alias pages. 
+ const KPageProperties dst_properties = {new_perm, false, false, + DisableMergeAttribute::DisableHead}; + R_TRY( + this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false)); + + // Apply the memory block updates. + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, + src_state, new_perm, KMemoryAttribute::Locked, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + } + + R_SUCCEED(); +} + +Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size) { + // Validate the mapping request. + R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), + ResultInvalidMemoryRegion); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Verify that the source memory is locked normal heap. + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, + KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::Locked)); + + // Verify that the destination memory is aliasable code. + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, + KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None)); + + // Determine whether any pages being unmapped are code. + bool any_code_pages = false; + { + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); + while (true) { + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // Check if the memory has code flag. + if (True(info.GetState() & KMemoryState::FlagCode)) { + any_code_pages = true; + break; + } + + // Check if we're done. + if (dst_address + size - 1 <= info.GetLastAddress()) { + break; + } + + // Advance. + ++it; + } + } + + // Ensure that we maintain the instruction cache. + bool reprotected_pages = false; + SCOPE_EXIT({ + if (reprotected_pages && any_code_pages) { + InvalidateEntireInstructionCache(m_system); + } + }); + + // Unmap. + { + // Determine the number of pages being operated on. + const size_t num_pages = size / PageSize; + + // Create page groups for the memory being unmapped. + KPageGroup pg(m_kernel, m_block_info_manager); + + // Create the page group representing the destination. + R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); + + // Verify that the page group contains the same pages as the source. + R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); + + // Create an update allocator for the source. + Result src_allocator_result; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. 
+ Result dst_allocator_result; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Unmap the aliased copy of the pages. + const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, + dst_unmap_properties, OperationType::Unmap, false)); + + // Ensure that we re-map the aliased pages on failure. + ON_RESULT_FAILURE { + this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); + }; + + // Try to set the permissions for the source pages back to what they should be. + const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, + DisableMergeAttribute::EnableAndMergeHeadBodyTail}; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, + OperationType::ChangePermissions, false)); + + // Apply the memory block updates. + m_memory_block_manager.Update( + std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update( + std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); + + // Note that we reprotected pages. + reprotected_pages = true; + } + + R_SUCCEED(); +} + +Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) { + // Get the insecure memory resource limit and pool. + auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); + const auto insecure_pool = + static_cast(KSystemControl::GetInsecureMemoryPool()); + + // Reserve the insecure memory. + // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached. + KScopedResourceReservation memory_reservation(insecure_resource_limit, + Svc::LimitableResource::PhysicalMemoryMax, size); + R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory); + + // Allocate pages for the insecure memory. + KPageGroup pg(m_kernel, m_block_info_manager); + R_TRY(m_kernel.MemoryManager().AllocateAndOpen( + std::addressof(pg), size / PageSize, + KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront))); + + // Close the opened pages when we're done with them. + // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed + // automatically. + SCOPE_EXIT({ pg.Close(); }); + + // Clear all the newly allocated pages. + for (const auto& it : pg) { + std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), + static_cast(m_heap_fill_value), it.GetSize()); + } + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate that the address's state is valid. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + + // Create an update allocator. 
+ Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Map the pages. + const size_t num_pages = size / PageSize; + const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false, + DisableMergeAttribute::DisableHead}; + R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, + OperationType::MapGroup, false)); + + // Apply the memory block update. + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, + KMemoryState::Insecure, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + // Update our mapped insecure size. + m_mapped_insecure_memory += size; + + // Commit the memory reservation. + memory_reservation.Commit(); + + // We succeeded. + R_SUCCEED(); +} + +Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All, + KMemoryPermission::UserReadWrite, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Unmap the memory. + const size_t num_pages = size / PageSize; + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties, + OperationType::Unmap, false)); + + // Apply the memory block update. + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + + // Update our mapped insecure size. + m_mapped_insecure_memory -= size; + + // Release the insecure memory from the insecure limit. + if (auto* const insecure_resource_limit = + KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); + insecure_resource_limit != nullptr) { + insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size); + } + + R_SUCCEED(); +} + +KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, + size_t num_pages, size_t alignment, size_t offset, + size_t guard_pages) const { + KProcessAddress address = 0; + + if (num_pages <= region_num_pages) { + if (this->IsAslrEnabled()) { + // Try to directly find a free area up to 8 times. 
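The strategy in the loop that follows is: probe a handful of random, suitably aligned candidates and take the first whose surrounding block is free and large enough (guard pages included), then fall back to a deterministic first-fit scan. A compact sketch over a toy free-range predicate (alignment and guard pages omitted for brevity):

#include <cstdint>
#include <functional>
#include <random>

// `is_free(addr, pages)` stands in for the block-manager query.
uint64_t FindFreePages(uint64_t region_start, uint64_t region_pages, uint64_t num_pages,
                       const std::function<bool(uint64_t, uint64_t)>& is_free) {
    constexpr uint64_t kPageSize = 0x1000;
    if (num_pages > region_pages) {
        return 0;
    }

    std::mt19937_64 rng{std::random_device{}()};
    std::uniform_int_distribution<uint64_t> dist(0, region_pages - num_pages);

    // 1. A few random probes.
    for (int i = 0; i < 8; ++i) {
        const uint64_t candidate = region_start + dist(rng) * kPageSize;
        if (is_free(candidate, num_pages)) {
            return candidate;
        }
    }

    // 2. Deterministic fallback: first fit from the start of the region.
    for (uint64_t page = 0; page + num_pages <= region_pages; ++page) {
        const uint64_t candidate = region_start + page * kPageSize;
        if (is_free(candidate, num_pages)) {
            return candidate;
        }
    }
    return 0; // same "nothing found" convention as the code above
}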
+ for (size_t i = 0; i < 8; i++) { + const size_t random_offset = + KSystemControl::GenerateRandomRange( + 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * + alignment; + const KProcessAddress candidate = + Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset; + + KMemoryInfo info; + Svc::PageInfo page_info; + R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), + candidate)); + + if (info.m_state != KMemoryState::Free) { + continue; + } + if (!(region_start <= candidate)) { + continue; + } + if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { + continue; + } + if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= + info.GetLastAddress())) { + continue; + } + if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= + region_start + region_num_pages * PageSize - 1)) { + continue; + } + + address = candidate; + break; + } + // Fall back to finding the first free area with a random offset. + if (address == 0) { + // NOTE: Nintendo does not account for guard pages here. + // This may theoretically cause an offset to be chosen that cannot be mapped. + // We will account for guard pages. + const size_t offset_pages = KSystemControl::GenerateRandomRange( + 0, region_num_pages - num_pages - guard_pages); + address = m_memory_block_manager.FindFreeArea( + region_start + offset_pages * PageSize, region_num_pages - offset_pages, + num_pages, alignment, offset, guard_pages); + } + } + // Find the first free area. + if (address == 0) { + address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, + alignment, offset, guard_pages); + } + } + + return address; +} + +size_t KPageTableBase::GetSize(KMemoryState state) const { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Iterate, counting blocks with the desired state. + size_t total_size = 0; + for (KMemoryBlockManager::const_iterator it = + m_memory_block_manager.FindIterator(m_address_space_start); + it != m_memory_block_manager.end(); ++it) { + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + if (info.GetState() == state) { + total_size += info.GetSize(); + } + } + + return total_size; +} + +size_t KPageTableBase::GetCodeSize() const { + return this->GetSize(KMemoryState::Code); +} + +size_t KPageTableBase::GetCodeDataSize() const { + return this->GetSize(KMemoryState::CodeData); +} + +size_t KPageTableBase::GetAliasCodeSize() const { + return this->GetSize(KMemoryState::AliasCode); +} + +size_t KPageTableBase::GetAliasCodeDataSize() const { + return this->GetSize(KMemoryState::AliasCodeData); +} + +Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, + size_t num_pages, KMemoryPermission perm) { + ASSERT(this->IsLockedByCurrentThread()); + + // Create a page group to hold the pages we allocate. + KPageGroup pg(m_kernel, m_block_info_manager); + + // Allocate the pages. + R_TRY( + m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option)); + + // Ensure that the page group is closed when we're done working with it. + SCOPE_EXIT({ pg.Close(); }); + + // Clear all pages. + for (const auto& it : pg) { + std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), + static_cast(m_heap_fill_value), it.GetSize()); + } + + // Map the pages. 
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None}; + R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup, + false)); +} + +Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, + const KPageGroup& pg, const KPageProperties properties, + bool reuse_ll) { + ASSERT(this->IsLockedByCurrentThread()); + + // Note the current address, so that we can iterate. + const KProcessAddress start_address = address; + KProcessAddress cur_address = address; + + // Ensure that we clean up on failure. + ON_RESULT_FAILURE { + ASSERT(!reuse_ll); + if (cur_address != start_address) { + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_ASSERT(this->Operate(page_list, start_address, + (cur_address - start_address) / PageSize, 0, false, + unmap_properties, OperationType::Unmap, true)); + } + }; + + // Iterate, mapping all pages in the group. + for (const auto& block : pg) { + // Map and advance. + const KPageProperties cur_properties = + (cur_address == start_address) + ? properties + : KPageProperties{properties.perm, properties.io, properties.uncached, + DisableMergeAttribute::None}; + R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true, + cur_properties, OperationType::Map, reuse_ll)); + cur_address += block.GetSize(); + } + + // We succeeded! + R_SUCCEED(); +} + +void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, + const KPageGroup& pg) { + ASSERT(this->IsLockedByCurrentThread()); + + // Note the current address, so that we can iterate. + const KProcessAddress start_address = address; + const KProcessAddress last_address = start_address + size - 1; + const KProcessAddress end_address = last_address + 1; + + // Iterate over the memory. + auto pg_it = pg.begin(); + ASSERT(pg_it != pg.end()); + + KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); + size_t pg_pages = pg_it->GetNumPages(); + + auto it = m_memory_block_manager.FindIterator(start_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != m_memory_block_manager.end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // Determine the range to map. + KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address)); + const KProcessAddress map_end_address = + std::min(info.GetEndAddress(), GetInteger(end_address)); + ASSERT(map_end_address != map_address); + + // Determine if we should disable head merge. + const bool disable_head_merge = + info.GetAddress() >= GetInteger(start_address) && + True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal); + const KPageProperties map_properties = { + info.GetPermission(), false, false, + disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None}; + + // While we have pages to map, map them. + size_t map_pages = (map_end_address - map_address) / PageSize; + while (map_pages > 0) { + // Check if we're at the end of the physical block. + if (pg_pages == 0) { + // Ensure there are more pages to map. + ASSERT(pg_it != pg.end()); + + // Advance our physical block. + ++pg_it; + pg_phys_addr = pg_it->GetAddress(); + pg_pages = pg_it->GetNumPages(); + } + + // Map whatever we can. 
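+            // Only map up to the end of the current physical block; any remainder of this
+            // virtual range is handled on the next iteration, after advancing to the next block
+            // in the page group.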
+            const size_t cur_pages = std::min(pg_pages, map_pages);
+            R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true,
+                                   map_properties, OperationType::Map, true));
+
+            // Advance.
+            map_address += cur_pages * PageSize;
+            map_pages -= cur_pages;
+
+            pg_phys_addr += cur_pages * PageSize;
+            pg_pages -= cur_pages;
+        }
+
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        ++it;
+    }
+
+    // Check that we re-mapped precisely the page group.
+    ASSERT((++pg_it) == pg.end());
+}
+
+Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+
+    // We're making a new group, not adding to an existing one.
+    R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+
+    auto& impl = this->GetImpl();
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr),
+             ResultInvalidCurrentMemory);
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, adding to group as we go.
+    while (tot_size < size) {
+        R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)),
+                 ResultInvalidCurrentMemory);
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            const size_t cur_pages = cur_size / PageSize;
+
+            R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+            R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we add the right amount for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    // Add the last block.
+    const size_t cur_pages = cur_size / PageSize;
+    R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+    R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+    R_SUCCEED();
+}
+
+bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr,
+                                      size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+
+    // Empty groups are necessarily invalid.
+    if (pg.empty()) {
+        return false;
+    }
+
+    auto& impl = this->GetImpl();
+
+    // We're going to validate that the group we'd expect is the group we see.
+    auto cur_it = pg.begin();
+    KPhysicalAddress cur_block_address = cur_it->GetAddress();
+    size_t cur_block_pages = cur_it->GetNumPages();
+
+    auto UpdateCurrentIterator = [&]() {
+        if (cur_block_pages == 0) {
+            if ((++cur_it) == pg.end()) {
+                return false;
+            }
+
+            cur_block_address = cur_it->GetAddress();
+            cur_block_pages = cur_it->GetNumPages();
+        }
+        return true;
+    };
+
+    // Begin traversal.
+    TraversalContext context;
+    TraversalEntry next_entry;
+    if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
+        return false;
+    }
+
+    // Prepare tracking variables.
+    KPhysicalAddress cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, comparing expected to actual.
+ while (tot_size < size) { + if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) { + return false; + } + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + const size_t cur_pages = cur_size / PageSize; + + if (!IsHeapPhysicalAddress(cur_addr)) { + return false; + } + + if (!UpdateCurrentIterator()) { + return false; + } + + if (cur_block_address != cur_addr || cur_block_pages < cur_pages) { + return false; + } + + cur_block_address += cur_size; + cur_block_pages -= cur_pages; + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + // Ensure we compare the right amount for the last block. + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + if (!IsHeapPhysicalAddress(cur_addr)) { + return false; + } + + if (!UpdateCurrentIterator()) { + return false; + } + + return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); +} + +Result KPageTableBase::GetContiguousMemoryRangeWithState( + MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) { + ASSERT(this->IsLockedByCurrentThread()); + + auto& impl = this->GetImpl(); + + // Begin a traversal. + TraversalContext context; + TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; + R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), + ResultInvalidCurrentMemory); + + // Traverse until we have enough size or we aren't contiguous any more. + const KPhysicalAddress phys_address = cur_entry.phys_addr; + size_t contig_size; + for (contig_size = + cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); + contig_size < size; contig_size += cur_entry.block_size) { + if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) { + break; + } + if (cur_entry.phys_addr != phys_address + contig_size) { + break; + } + } + + // Take the minimum size for our region. + size = std::min(size, contig_size); + + // Check that the memory is contiguous (modulo the reference count bit). + const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted; + const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous( + address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm, + attr_mask, attr)); + if (!is_heap) { + R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask, + perm, attr_mask, attr)); + } + + // The memory is contiguous, so set the output range. + out->Set(phys_address, size, is_heap); + R_SUCCEED(); +} + +Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, + Svc::MemoryPermission svc_perm) { + const size_t num_pages = size / PageSize; + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Verify we can change the memory permission. + KMemoryState old_state; + KMemoryPermission old_perm; + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, + std::addressof(num_allocator_blocks), addr, size, + KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::All, KMemoryAttribute::None)); + + // Determine new perm. 
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); + R_SUCCEED_IF(old_perm == new_perm); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform mapping operation. + const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, + OperationType::ChangePermissions, false)); + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); + + R_SUCCEED(); +} + +Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, + Svc::MemoryPermission svc_perm) { + const size_t num_pages = size / PageSize; + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Verify we can change the memory permission. + KMemoryState old_state; + KMemoryPermission old_perm; + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, + std::addressof(num_allocator_blocks), addr, size, + KMemoryState::FlagCode, KMemoryState::FlagCode, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::All, KMemoryAttribute::None)); + + // Make a new page group for the region. + KPageGroup pg(m_kernel, m_block_info_manager); + + // Determine new perm/state. + const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); + KMemoryState new_state = old_state; + const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite; + const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; + const bool was_x = + (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; + ASSERT(!(is_w && is_x)); + + if (is_w) { + switch (old_state) { + case KMemoryState::Code: + new_state = KMemoryState::CodeData; + break; + case KMemoryState::AliasCode: + new_state = KMemoryState::AliasCodeData; + break; + default: + UNREACHABLE(); + } + } + + // Create a page group, if we're setting execute permissions. + if (is_x) { + R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages)); + } + + // Succeed if there's nothing to do. + R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform mapping operation. + const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None}; + const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush + : OperationType::ChangePermissions; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation, + false)); + + // Update the blocks. 
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // Ensure cache coherency, if we're setting pages as executable.
+    if (is_x) {
+        for (const auto& block : pg) {
+            StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize());
+        }
+        InvalidateEntireInstructionCache(m_system);
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+                                          KMemoryAttribute attr) {
+    const size_t num_pages = size / PageSize;
+    ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Verify we can change the memory attribute.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    KMemoryAttribute old_attr;
+    size_t num_allocator_blocks;
+    constexpr KMemoryAttribute AttributeTestMask =
+        ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
+    const KMemoryState state_test_mask =
+        (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute
+                                                 : KMemoryState::None) |
+        (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock
+                                                         : KMemoryState::None);
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_test_mask, state_test_mask,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+
+    // If we need to, perform a change attribute operation.
+    if (True(mask & KMemoryAttribute::Uncached)) {
+        // Determine the new attribute.
+        const KMemoryAttribute new_attr =
+            static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
+
+        // Perform operation.
+        const KPageProperties properties = {old_perm, false,
+                                            True(new_attr & KMemoryAttribute::Uncached),
+                                            DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+                            OperationType::ChangePermissionsAndRefreshAndFlush, false));
+    }
+
+    // Update the blocks.
+    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
+    // Lock the physical memory mutex.
+    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+
+    // Try to perform a reduction in heap, instead of an extension.
+    KProcessAddress cur_address;
+    size_t allocation_size;
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Validate that setting heap size is possible at all.
+        R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
+                 ResultOutOfMemory);
+        R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
+
+        if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+            // The size being requested is less than the current size, so we need to free the end of
+            // the heap.
+
+            // Validate memory state.
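+            // The pages being released must currently be Normal heap memory, mapped
+            // user-read/write, with no attributes set.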
+            size_t num_allocator_blocks;
+            R_TRY(this->CheckMemoryState(
+                std::addressof(num_allocator_blocks), m_heap_region_start + size,
+                (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All,
+                KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+                KMemoryAttribute::All, KMemoryAttribute::None));
+
+            // Create an update allocator.
+            Result allocator_result;
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                         m_memory_block_slab_manager,
+                                                         num_allocator_blocks);
+            R_TRY(allocator_result);
+
+            // We're going to perform an update, so create a helper.
+            KScopedPageTableUpdater updater(this);
+
+            // Unmap the end of the heap.
+            const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0,
+                                false, unmap_properties, OperationType::Unmap, false));
+
+            // Release the memory from the resource limit.
+            m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+                                      num_pages * PageSize);
+
+            // Apply the memory block update.
+            m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+                                          num_pages, KMemoryState::Free, KMemoryPermission::None,
+                                          KMemoryAttribute::None,
+                                          KMemoryBlockDisableMergeAttribute::None,
+                                          size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+                                                    : KMemoryBlockDisableMergeAttribute::None);
+
+            // Update the current heap end.
+            m_current_heap_end = m_heap_region_start + size;
+
+            // Set the output.
+            *out = m_heap_region_start;
+            R_SUCCEED();
+        } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+            // The size requested is exactly the current size.
+            *out = m_heap_region_start;
+            R_SUCCEED();
+        } else {
+            // We have to allocate memory. Determine how much to allocate and where while the table
+            // is locked.
+            cur_address = m_current_heap_end;
+            allocation_size = size - (m_current_heap_end - m_heap_region_start);
+        }
+    }
+
+    // Reserve memory for the heap extension.
+    KScopedResourceReservation memory_reservation(
+        m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size);
+    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+    // Allocate pages for the heap extension.
+    KPageGroup pg(m_kernel, m_block_info_manager);
+    R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize,
+                                                   m_allocate_option));
+
+    // Close the opened pages when we're done with them.
+    // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
+    // automatically.
+    SCOPE_EXIT({ pg.Close(); });
+
+    // Clear all the newly allocated pages.
+    for (const auto& it : pg) {
+        std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value,
+                    it.GetSize());
+    }
+
+    // Map the pages.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Ensure that the heap hasn't changed since we began executing.
+        ASSERT(cur_address == m_current_heap_end);
+
+        // Check the memory state.
+        size_t num_allocator_blocks;
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
+                                     allocation_size, KMemoryState::All, KMemoryState::Free,
+                                     KMemoryPermission::None, KMemoryPermission::None,
+                                     KMemoryAttribute::None, KMemoryAttribute::None));
+
+        // Create an update allocator.
+        Result allocator_result;
+        KMemoryBlockManagerUpdateAllocator allocator(
+            std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+        R_TRY(allocator_result);
+
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+
+        // Map the pages.
+        const size_t num_pages = allocation_size / PageSize;
+        const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                (m_current_heap_end == m_heap_region_start)
+                                                    ? DisableMergeAttribute::DisableHead
+                                                    : DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg,
+                            map_properties, OperationType::MapGroup, false));
+
+        // We succeeded, so commit our memory reservation.
+        memory_reservation.Commit();
+
+        // Apply the memory block update.
+        m_memory_block_manager.Update(
+            std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+                                                      : KMemoryBlockDisableMergeAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None);
+
+        // Update the current heap end.
+        m_current_heap_end = m_heap_region_start + size;
+
+        // Set the output.
+        *out = m_heap_region_start;
+        R_SUCCEED();
+    }
+}
+
+Result KPageTableBase::SetMaxHeapSize(size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Only process page tables are allowed to set heap size.
+    ASSERT(!this->IsKernel());
+
+    m_max_heap_size = size;
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+                                 KProcessAddress addr) const {
+    // If the address is invalid, create a fake block.
+    if (!this->Contains(addr, 1)) {
+        *out_info = {
+            .m_address = GetInteger(m_address_space_end),
+            .m_size = 0 - GetInteger(m_address_space_end),
+            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+            .m_device_disable_merge_left_count = 0,
+            .m_device_disable_merge_right_count = 0,
+            .m_ipc_lock_count = 0,
+            .m_device_use_count = 0,
+            .m_ipc_disable_merge_count = 0,
+            .m_permission = KMemoryPermission::None,
+            .m_attribute = KMemoryAttribute::None,
+            .m_original_permission = KMemoryPermission::None,
+            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+        };
+        out_page_info->flags = 0;
+
+        R_SUCCEED();
+    }
+
+    // Otherwise, lock the table and query.
+    KScopedLightLock lk(m_general_lock);
+    R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
+}
+
+Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out,
+                                            KProcessAddress address) const {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Align the address down to page size.
+    address = Common::AlignDown(GetInteger(address), PageSize);
+
+    // Verify that we can query the address.
+    KMemoryInfo info;
+    Svc::PageInfo page_info;
+    R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
+
+    // Check the memory state.
+    R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical,
+                                 KMemoryState::FlagCanQueryPhysical,
+                                 KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead,
+                                 KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Prepare to traverse.
+    KPhysicalAddress phys_addr;
+    size_t phys_size;
+
+    KProcessAddress virt_addr = info.GetAddress();
+    KProcessAddress end_addr = info.GetEndAddress();
+
+    // Perform traversal.
+    {
+        // Begin traversal.
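+        // Walk the page table, coalescing physically contiguous blocks, until we reach the block
+        // that contains the queried address (or the end of the containing memory region).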
+ TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = + m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); + R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); + + // Set tracking variables. + phys_addr = next_entry.phys_addr; + phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); + + // Iterate. + while (true) { + // Continue the traversal. + traverse_valid = + m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + if (!traverse_valid) { + break; + } + + if (next_entry.phys_addr != (phys_addr + phys_size)) { + // Check if we're done. + if (virt_addr <= address && address <= virt_addr + phys_size - 1) { + break; + } + + // Advance. + phys_addr = next_entry.phys_addr; + virt_addr += next_entry.block_size; + phys_size = + next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); + } else { + phys_size += next_entry.block_size; + } + + // Check if we're done. + if (end_addr < virt_addr + phys_size) { + break; + } + } + ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1); + + // Ensure we use the right size. + if (end_addr < virt_addr + phys_size) { + phys_size = end_addr - virt_addr; + } + } + + // Set the output. + out->physical_address = GetInteger(phys_addr); + out->virtual_address = GetInteger(virt_addr); + out->size = phys_size; + R_SUCCEED(); +} + +Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, + KPhysicalAddress phys_addr, size_t size, KMemoryState state, + KMemoryPermission perm) { + // Check pre-conditions. + ASSERT(this->IsLockedByCurrentThread()); + ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize)); + ASSERT(Common::IsAligned(size, PageSize)); + ASSERT(size > 0); + + R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress); + const size_t num_pages = size / PageSize; + const KPhysicalAddress last = phys_addr + size - 1; + + // Get region extents. + const KProcessAddress region_start = m_kernel_map_region_start; + const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start; + const size_t region_num_pages = region_size / PageSize; + + ASSERT(this->CanContain(region_start, region_size, state)); + + // Locate the memory region. + const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr); + R_UNLESS(region != nullptr, ResultInvalidAddress); + + ASSERT(region->Contains(GetInteger(phys_addr))); + + // Ensure that the region is mappable. + const bool is_rw = perm == KMemoryPermission::UserReadWrite; + while (true) { + // Check that the region exists. + R_UNLESS(region != nullptr, ResultInvalidAddress); + + // Check the region attributes. + R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress); + R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, + ResultInvalidAddress); + R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress); + + // Check if we're done. + if (GetInteger(last) <= region->GetLastAddress()) { + break; + } + + // Advance. + region = region->GetNext(); + }; + + // Select an address to map at. 
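+    // The checks below guard against arithmetic wraparound and require that the physical range
+    // span at least one fully aligned block; anything smaller cannot be mapped at this alignment.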
+ KProcessAddress addr = 0; + { + const size_t alignment = 4_KiB; + const KPhysicalAddress aligned_phys = + Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1; + R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress); + + const KPhysicalAddress last_aligned_paddr = + Common::AlignDown(GetInteger(last) + 1, alignment) - 1; + R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr), + ResultInvalidAddress); + + addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, + this->GetNumGuardPages()); + R_UNLESS(addr != 0, ResultOutOfMemory); + } + + // Check that we can map IO here. + ASSERT(this->CanContain(addr, size, state)); + R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryAttribute::None)); + + // Perform mapping operation. + const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false, + DisableMergeAttribute::DisableHead}; + R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map, + false)); + + // Set the output address. + *out = addr; + + R_SUCCEED(); +} + +Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Map the io memory. + KProcessAddress addr; + R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size, + KMemoryState::IoRegister, perm)); + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, + KMemoryState::IoRegister, perm, KMemoryAttribute::Locked, + KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + // We successfully mapped the pages. + R_SUCCEED(); +} + +Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, + size_t size, Svc::MemoryMapping mapping, + Svc::MemoryPermission svc_perm) { + const size_t num_pages = size / PageSize; + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, + KMemoryState::All, KMemoryState::None, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform mapping operation. + const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm); + const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister, + mapping == Svc::MemoryMapping::Uncached, + DisableMergeAttribute::DisableHead}; + R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties, + OperationType::Map, false)); + + // Update the blocks. 
+ const auto state = + mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister; + m_memory_block_manager.Update( + std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked, + KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); + + // We successfully mapped the pages. + R_SUCCEED(); +} + +Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, + size_t size, Svc::MemoryMapping mapping) { + const size_t num_pages = size / PageSize; + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate the memory state. + KMemoryState old_state; + KMemoryPermission old_perm; + KMemoryAttribute old_attr; + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState( + std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), + std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All, + mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister, + KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::Locked)); + + // Validate that the region being unmapped corresponds to the physical range described. + { + // Get the impl. + auto& impl = this->GetImpl(); + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + ASSERT( + impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address)); + + // Check that the physical region matches. + R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion); + + // Iterate. + for (size_t checked_size = + next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); + checked_size < size; checked_size += next_entry.block_size) { + // Continue the traversal. + ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))); + + // Check that the physical region matches. + R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion); + } + } + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // If the region being unmapped is Memory, synchronize. + if (mapping == Svc::MemoryMapping::Memory) { + // Change the region to be uncached. + const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None}; + R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties, + OperationType::ChangePermissionsAndRefresh, false)); + + // Temporarily unlock ourselves, so that other operations can occur while we flush the + // region. + m_general_lock.Unlock(); + SCOPE_EXIT({ m_general_lock.Lock(); }); + + // Flush the region. + R_ASSERT(FlushDataCache(dst_address, size)); + } + + // Perform the unmap. + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, + unmap_properties, OperationType::Unmap, false)); + + // Update the blocks. 
+ m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + + R_SUCCEED(); +} + +Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { + ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize)); + ASSERT(Common::IsAligned(size, PageSize)); + ASSERT(size > 0); + R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress); + const size_t num_pages = size / PageSize; + const KPhysicalAddress last = phys_addr + size - 1; + + // Get region extents. + const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static); + const size_t region_size = this->GetRegionSize(KMemoryState::Static); + const size_t region_num_pages = region_size / PageSize; + + // Locate the memory region. + const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr); + R_UNLESS(region != nullptr, ResultInvalidAddress); + + ASSERT(region->Contains(GetInteger(phys_addr))); + R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress); + + // Check the region attributes. + const bool is_rw = perm == KMemoryPermission::UserReadWrite; + R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress); + R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress); + R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, + ResultInvalidAddress); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Select an address to map at. + KProcessAddress addr = 0; + { + const size_t alignment = 4_KiB; + const KPhysicalAddress aligned_phys = + Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1; + R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress); + + const KPhysicalAddress last_aligned_paddr = + Common::AlignDown(GetInteger(last) + 1, alignment) - 1; + R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr), + ResultInvalidAddress); + + addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, + this->GetNumGuardPages()); + R_UNLESS(addr != 0, ResultOutOfMemory); + } + + // Check that we can map static here. + ASSERT(this->CanContain(addr, size, KMemoryState::Static)); + R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform mapping operation. + const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, + OperationType::Map, false)); + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static, + perm, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + // We successfully mapped the pages. 
+ R_SUCCEED(); +} + +Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { + // Get the memory region. + const KMemoryRegion* region = + m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type); + R_UNLESS(region != nullptr, ResultOutOfRange); + + // Check that the region is valid. + ASSERT(region->GetEndAddress() != 0); + + // Map the region. + R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)){ + R_CONVERT(ResultInvalidAddress, ResultOutOfRange)} R_END_TRY_CATCH; + + R_SUCCEED(); +} + +Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, + KPhysicalAddress phys_addr, bool is_pa_valid, + KProcessAddress region_start, size_t region_num_pages, + KMemoryState state, KMemoryPermission perm) { + ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); + + // Ensure this is a valid map request. + R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), + ResultInvalidCurrentMemory); + R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Find a random address to map at. + KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, + 0, this->GetNumGuardPages()); + R_UNLESS(addr != 0, ResultOutOfMemory); + ASSERT(Common::IsAligned(GetInteger(addr), alignment)); + ASSERT(this->CanContain(addr, num_pages * PageSize, state)); + R_ASSERT(this->CheckMemoryState( + addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform mapping operation. + if (is_pa_valid) { + const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, + OperationType::Map, false)); + } else { + R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm)); + } + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + // We successfully mapped the pages. + *out_addr = addr; + R_SUCCEED(); +} + +Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, + KMemoryPermission perm) { + // Check that the map is in range. + const size_t size = num_pages * PageSize; + R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + + // Create an update allocator. 
+ Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Map the pages. + R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm)); + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + R_SUCCEED(); +} + +Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { + // Check that the unmap is in range. + const size_t size = num_pages * PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, state, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform the unmap. + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties, + OperationType::Unmap, false)); + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + + R_SUCCEED(); +} + +Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, + KProcessAddress region_start, size_t region_num_pages, + KMemoryState state, KMemoryPermission perm) { + ASSERT(!this->IsLockedByCurrentThread()); + + // Ensure this is a valid map request. + const size_t num_pages = pg.GetNumPages(); + R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), + ResultInvalidCurrentMemory); + R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Find a random address to map at. + KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, + 0, this->GetNumGuardPages()); + R_UNLESS(addr != 0, ResultOutOfMemory); + ASSERT(this->CanContain(addr, num_pages * PageSize, state)); + R_ASSERT(this->CheckMemoryState( + addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform mapping operation. 
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; + R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + // We successfully mapped the pages. + *out_addr = addr; + R_SUCCEED(); +} + +Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, + KMemoryPermission perm) { + ASSERT(!this->IsLockedByCurrentThread()); + + // Ensure this is a valid map request. + const size_t num_pages = pg.GetNumPages(); + const size_t size = num_pages * PageSize; + R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check if state allows us to map. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, + KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform mapping operation. + const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; + R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); + + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + // We successfully mapped the pages. + R_SUCCEED(); +} + +Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, + KMemoryState state) { + ASSERT(!this->IsLockedByCurrentThread()); + + // Ensure this is a valid unmap request. + const size_t num_pages = pg.GetNumPages(); + const size_t size = num_pages * PageSize; + R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check if state allows us to unmap. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, state, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Check that the page group is valid. + R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Perform unmapping operation. + const KPageProperties properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties, + OperationType::Unmap, false)); + + // Update the blocks. 
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + + R_SUCCEED(); +} + +Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, + size_t num_pages, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, + KMemoryAttribute attr) { + // Ensure that the page group isn't null. + ASSERT(out != nullptr); + + // Make sure that the region we're mapping is valid for the table. + const size_t size = num_pages * PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check if state allows us to create the group. + R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, + state | KMemoryState::FlagReferenceCounted, perm_mask, perm, + attr_mask, attr)); + + // Create a new page group for the region. + R_TRY(this->MakePageGroup(*out, address, num_pages)); + + // Open a new reference to the pages in the group. + out->Open(); + + R_SUCCEED(); +} + +Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) { + // Check that the region is in range. + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + R_TRY(this->CheckMemoryStateContiguous( + address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, + KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite, + KMemoryAttribute::Uncached, KMemoryAttribute::None)); + + // Get the impl. + auto& impl = this->GetImpl(); + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = + impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address); + R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); + + // Prepare tracking variables. + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + // Iterate. + while (tot_size < size) { + // Continue the traversal. + traverse_valid = + impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + // Check that the pages are linearly mapped. + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); + + // Invalidate the block. + if (cur_size > 0) { + // NOTE: Nintendo does not check the result of invalidation. + InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); + } + + // Advance. + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + // Ensure we use the right size for the last block. + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + // Check that the last block is linearly mapped. + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); + + // Invalidate the last block. + if (cur_size > 0) { + // NOTE: Nintendo does not check the result of invalidation. 
+ InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); + } + + R_SUCCEED(); +} + +Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) { + // Check pre-condition: this is being called on the current process. + ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable())); + + // Check that the region is in range. + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + R_TRY(this->CheckMemoryStateContiguous( + address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, + KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite, + KMemoryAttribute::Uncached, KMemoryAttribute::None)); + + // Invalidate the data cache. + R_RETURN(InvalidateDataCache(address, size)); +} + +Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size) { + // Lightly validate the region is in range. + R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Require that the memory either be user readable or debuggable. + const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous( + src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead, + KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None)); + if (!can_read) { + const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous( + src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug, + KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + R_UNLESS(can_debug, ResultInvalidCurrentMemory); + } + + // Get the impl. + auto& impl = this->GetImpl(); + auto& dst_memory = GetCurrentMemory(m_system.Kernel()); + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = + impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address); + R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); + + // Prepare tracking variables. + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&]() -> Result { + // Ensure the address is linear mapped. + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); + + // Copy as much aligned data as we can. + if (cur_size >= sizeof(u32)) { + const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); + const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr); + FlushDataCache(copy_src, copy_size); + R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer); + + dst_address += copy_size; + cur_addr += copy_size; + cur_size -= copy_size; + } + + // Copy remaining data. + if (cur_size > 0) { + const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr); + FlushDataCache(copy_src, cur_size); + R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer); + } + + R_SUCCEED(); + }; + + // Iterate. + while (tot_size < size) { + // Continue the traversal. 
+ traverse_valid = + impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + // Perform copy. + R_TRY(PerformCopy()); + + // Advance. + dst_address += cur_size; + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + // Ensure we use the right size for the last block. + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + // Perform copy for the last block. + R_TRY(PerformCopy()); + + R_SUCCEED(); +} + +Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size) { + // Lightly validate the region is in range. + R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Require that the memory either be user writable or debuggable. + const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous( + dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None)); + if (!can_read) { + const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous( + dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug, + KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + R_UNLESS(can_debug, ResultInvalidCurrentMemory); + } + + // Get the impl. + auto& impl = this->GetImpl(); + auto& src_memory = GetCurrentMemory(m_system.Kernel()); + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = + impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address); + R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); + + // Prepare tracking variables. + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&]() -> Result { + // Ensure the address is linear mapped. + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); + + // Copy as much aligned data as we can. + if (cur_size >= sizeof(u32)) { + const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); + void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr); + R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size), + ResultInvalidCurrentMemory); + + StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size); + + src_address += copy_size; + cur_addr += copy_size; + cur_size -= copy_size; + } + + // Copy remaining data. + if (cur_size > 0) { + void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr); + R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size), + ResultInvalidCurrentMemory); + + StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); + } + + R_SUCCEED(); + }; + + // Iterate. + while (tot_size < size) { + // Continue the traversal. + traverse_valid = + impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + // Perform copy. + R_TRY(PerformCopy()); + + // Advance. 
+ src_address += cur_size; + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + // Ensure we use the right size for the last block. + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + // Perform copy for the last block. + R_TRY(PerformCopy()); + + // Invalidate the entire instruction cache, as this svc allows modifying executable pages. + InvalidateEntireInstructionCache(m_system); + + R_SUCCEED(); +} + +Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, + size_t size, KMemoryState state) { + // Check pre-conditions. + ASSERT(this->IsLockedByCurrentThread()); + + // Determine the mapping extents. + const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize); + const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize); + const size_t map_size = map_end - map_start; + + // Get the memory reference to write into. + auto& dst_memory = GetCurrentMemory(m_kernel); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Temporarily map the io memory. + KProcessAddress io_addr; + R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, + state, KMemoryPermission::UserRead)); + + // Ensure we unmap the io memory when we're done with it. + const KPageProperties unmap_properties = + KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None}; + SCOPE_EXIT({ + R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false, + unmap_properties, OperationType::Unmap, true)); + }); + + // Read the memory. + const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1)); + dst_memory.CopyBlock(dst_addr, read_addr, size); + + R_SUCCEED(); +} + +Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, + size_t size, KMemoryState state) { + // Check pre-conditions. + ASSERT(this->IsLockedByCurrentThread()); + + // Determine the mapping extents. + const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize); + const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize); + const size_t map_size = map_end - map_start; + + // Get the memory reference to read from. + auto& src_memory = GetCurrentMemory(m_kernel); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Temporarily map the io memory. + KProcessAddress io_addr; + R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, + state, KMemoryPermission::UserReadWrite)); + + // Ensure we unmap the io memory when we're done with it. + const KPageProperties unmap_properties = + KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None}; + SCOPE_EXIT({ + R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false, + unmap_properties, OperationType::Unmap, true)); + }); + + // Write the memory. 
+ const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1)); + R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer); + + R_SUCCEED(); +} + +Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size, KMemoryState state) { + // Lightly validate the range before doing anything else. + R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory); + + // We need to lock both this table, and the current process's table, so set up some aliases. + KPageTableBase& src_page_table = *this; + KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable(); + + // Acquire the table locks. + KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); + + // Check that the desired range is readable io memory. + R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state, + KMemoryPermission::UserRead, KMemoryPermission::UserRead, + KMemoryAttribute::None, KMemoryAttribute::None)); + + // Read the memory. + KProcessAddress dst = dst_address; + const KProcessAddress last_address = src_address + size - 1; + while (src_address <= last_address) { + // Get the current physical address. + KPhysicalAddress phys_addr; + ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address)); + + // Determine the current read size. + const size_t cur_size = + std::min(last_address - src_address + 1, + Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) - + GetInteger(src_address)); + + // Read. + R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state)); + + // Advance. + src_address += cur_size; + dst += cur_size; + } + + R_SUCCEED(); +} + +Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, + size_t size, KMemoryState state) { + // Lightly validate the range before doing anything else. + R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory); + + // We need to lock both this table, and the current process's table, so set up some aliases. + KPageTableBase& src_page_table = *this; + KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable(); + + // Acquire the table locks. + KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); + + // Check that the desired range is writable io memory. + R_TRY(this->CheckMemoryStateContiguous( + dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None)); + + // Read the memory. + KProcessAddress src = src_address; + const KProcessAddress last_address = dst_address + size - 1; + while (dst_address <= last_address) { + // Get the current physical address. + KPhysicalAddress phys_addr; + ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address)); + + // Determine the current read size. + const size_t cur_size = + std::min(last_address - dst_address + 1, + Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) - + GetInteger(dst_address)); + + // Read. + R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state)); + + // Advance. 
+        dst_address += cur_size;
+        src += cur_size;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
+                                                    size_t size, KMemoryPermission perm,
+                                                    bool is_aligned, bool check_heap) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    const KMemoryState test_state =
+        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
+        (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+    size_t num_allocator_blocks;
+    KMemoryState old_state;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
+                                 std::addressof(num_allocator_blocks), address, size, test_state,
+                                 test_state, perm, perm,
+                                 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+                                 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+                                      &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+
+    // Set whether the locked memory was io.
+    *out_is_io =
+        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
+                                                      bool check_heap) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+    const KMemoryState test_state =
+        KMemoryState::FlagCanDeviceMap |
+        (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_allocator_blocks), address, size, test_state, test_state,
+        KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Update the memory blocks.
+    const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+        m_enable_device_address_space_merge
+            ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+            : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+                                      KMemoryPermission::None);
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+    // Lightly validate the range before doing anything else.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Check the memory state.
+ size_t num_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, + &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); + + R_SUCCEED(); +} + +Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check memory state. + size_t allocator_num_blocks = 0; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // Create an update allocator for the region. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, allocator_num_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. + m_memory_block_manager.UpdateLock( + std::addressof(allocator), address, num_pages, + m_enable_device_address_space_merge + ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare + : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, + KMemoryPermission::None); + + R_SUCCEED(); +} + +Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, + KProcessAddress address, size_t size, + KMemoryPermission perm, + bool is_aligned) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Get the range. + const KMemoryState test_state = + (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); + R_TRY(this->GetContiguousMemoryRangeWithState( + out, address, size, test_state, test_state, perm, perm, + KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None)); + + // We got the range, so open it. + out->Open(); + + R_SUCCEED(); +} + +Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, + KProcessAddress address, + size_t size) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Get the range. + R_TRY(this->GetContiguousMemoryRangeWithState( + out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // We got the range, so open it. 
+    out->Open();
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
+                                            size_t size) {
+    R_RETURN(this->LockMemoryAndOpen(
+        nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
+        KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
+        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+                                       KMemoryPermission::KernelReadWrite),
+        KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
+                                KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
+                                KMemoryPermission::None, KMemoryAttribute::All,
+                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                                KMemoryAttribute::Locked, nullptr));
+}
+
+Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+                                             KMemoryPermission perm) {
+    R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
+                                     KMemoryState::FlagCanTransfer, KMemoryPermission::All,
+                                     KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+                                     KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size,
+                                               const KPageGroup& pg) {
+    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
+                                KMemoryState::FlagCanTransfer, KMemoryPermission::None,
+                                KMemoryPermission::None, KMemoryAttribute::All,
+                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                                KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+    R_RETURN(this->LockMemoryAndOpen(
+        out, nullptr, address, size, KMemoryState::FlagCanCodeMemory,
+        KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+        KMemoryAttribute::All, KMemoryAttribute::None,
+        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+                                       KMemoryPermission::KernelReadWrite),
+        KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size,
+                                           const KPageGroup& pg) {
+    R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory,
+                                KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+                                KMemoryPermission::None, KMemoryAttribute::All,
+                                KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                                KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out,
+                                                               KProcessAddress address,
+                                                               size_t size) {
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+
+    // Get the range.
+    R_TRY(this->GetContiguousMemoryRangeWithState(
+        out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+        KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached,
+        KMemoryAttribute::None));
+
+    // We got the range, so open it.
+    out->Open();
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToUser(
+    KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+    KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+    KMemoryAttribute src_attr) {
+    // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory); + + // Get the destination memory reference. + auto& dst_memory = GetCurrentMemory(m_kernel); + + // Copy the memory. + { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check memory state. + R_TRY(this->CheckMemoryStateContiguous( + src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, + src_attr_mask | KMemoryAttribute::Uncached, src_attr)); + + auto& impl = this->GetImpl(); + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = + impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); + ASSERT(traverse_valid); + + // Prepare tracking variables. + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = + next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&]() -> Result { + // Ensure the address is linear mapped. + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); + + // Copy as much aligned data as we can. + if (cur_size >= sizeof(u32)) { + const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); + R_UNLESS(dst_memory.WriteBlock(dst_addr, + GetLinearMappedVirtualPointer(m_kernel, cur_addr), + copy_size), + ResultInvalidCurrentMemory); + + dst_addr += copy_size; + cur_addr += copy_size; + cur_size -= copy_size; + } + + // Copy remaining data. + if (cur_size > 0) { + R_UNLESS(dst_memory.WriteBlock( + dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size), + ResultInvalidCurrentMemory); + } + + R_SUCCEED(); + }; + + // Iterate. + while (tot_size < size) { + // Continue the traversal. + traverse_valid = + impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + // Perform copy. + R_TRY(PerformCopy()); + + // Advance. + dst_addr += cur_size; + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + // Ensure we use the right size for the last block. + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + // Perform copy for the last block. + R_TRY(PerformCopy()); + } + + R_SUCCEED(); +} + +Result KPageTableBase::CopyMemoryFromLinearToKernel( + void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask, + KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask, + KMemoryAttribute src_attr) { + // Lightly validate the range before doing anything else. + R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory); + + // Copy the memory. + { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check memory state. + R_TRY(this->CheckMemoryStateContiguous( + src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, + src_attr_mask | KMemoryAttribute::Uncached, src_attr)); + + auto& impl = this->GetImpl(); + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = + impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); + ASSERT(traverse_valid); + + // Prepare tracking variables. 
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size =
+            next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        auto PerformCopy = [&]() -> Result {
+            // Ensure the address is linear mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Copy the data.
+            std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+
+            R_SUCCEED();
+        };
+
+        // Iterate.
+        while (tot_size < size) {
+            // Continue the traversal.
+            traverse_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                // Perform copy.
+                R_TRY(PerformCopy());
+
+                // Advance.
+                buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        // Ensure we use the right size for the last block.
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        // Perform copy for the last block.
+        R_TRY(PerformCopy());
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromUserToLinear(
+    KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state,
+    KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+    KProcessAddress src_addr) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+    // Get the source memory reference.
+    auto& src_memory = GetCurrentMemory(m_kernel);
+
+    // Copy the memory.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Check memory state.
+        R_TRY(this->CheckMemoryStateContiguous(
+            dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+            dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+        auto& impl = this->GetImpl();
+
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        bool traverse_valid =
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size =
+            next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        auto PerformCopy = [&]() -> Result {
+            // Ensure the address is linear mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Copy as much aligned data as we can.
+            if (cur_size >= sizeof(u32)) {
+                const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+                R_UNLESS(src_memory.ReadBlock(src_addr,
+                                              GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+                                              copy_size),
+                         ResultInvalidCurrentMemory);
+                src_addr += copy_size;
+                cur_addr += copy_size;
+                cur_size -= copy_size;
+            }
+
+            // Copy remaining data.
+            if (cur_size > 0) {
+                R_UNLESS(src_memory.ReadBlock(
+                             src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+                         ResultInvalidCurrentMemory);
+            }
+
+            R_SUCCEED();
+        };
+
+        // Iterate.
+        while (tot_size < size) {
+            // Continue the traversal.
+            traverse_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                // Perform copy.
+                R_TRY(PerformCopy());
+
+                // Advance.
+                src_addr += cur_size;
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        // Ensure we use the right size for the last block.
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        // Perform copy for the last block.
+        R_TRY(PerformCopy());
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+                                                    KMemoryState dst_state_mask,
+                                                    KMemoryState dst_state,
+                                                    KMemoryPermission dst_test_perm,
+                                                    KMemoryAttribute dst_attr_mask,
+                                                    KMemoryAttribute dst_attr, void* buffer) {
+    // Lightly validate the range before doing anything else.
+    R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+    // Copy the memory.
+    {
+        // Lock the table.
+        KScopedLightLock lk(m_general_lock);
+
+        // Check memory state.
+        R_TRY(this->CheckMemoryStateContiguous(
+            dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+            dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+        auto& impl = this->GetImpl();
+
+        // Begin traversal.
+        TraversalContext context;
+        TraversalEntry next_entry;
+        bool traverse_valid =
+            impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+        ASSERT(traverse_valid);
+
+        // Prepare tracking variables.
+        KPhysicalAddress cur_addr = next_entry.phys_addr;
+        size_t cur_size =
+            next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+        size_t tot_size = cur_size;
+
+        auto PerformCopy = [&]() -> Result {
+            // Ensure the address is linear mapped.
+            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+            // Copy the data.
+            std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size);
+
+            R_SUCCEED();
+        };
+
+        // Iterate.
+        while (tot_size < size) {
+            // Continue the traversal.
+            traverse_valid =
+                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            if (next_entry.phys_addr != (cur_addr + cur_size)) {
+                // Perform copy.
+                R_TRY(PerformCopy());
+
+                // Advance.
+                buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+                cur_addr = next_entry.phys_addr;
+                cur_size = next_entry.block_size;
+            } else {
+                cur_size += next_entry.block_size;
+            }
+
+            tot_size += next_entry.block_size;
+        }
+
+        // Ensure we use the right size for the last block.
+        if (tot_size > size) {
+            cur_size -= (tot_size - size);
+        }
+
+        // Perform copy for the last block.
+        R_TRY(PerformCopy());
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeap(
+    KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+    KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+    KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+    KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+    KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+    // For convenience, alias this.
+    KPageTableBase& src_page_table = *this;
+
+    // Lightly validate the ranges before doing anything else.
+    R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+    R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+    // Copy the memory.
+    {
+        // Acquire the table locks.
+        KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+        // Check memory state.
+ R_TRY(src_page_table.CheckMemoryStateContiguous( + src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, + src_attr_mask | KMemoryAttribute::Uncached, src_attr)); + R_TRY(dst_page_table.CheckMemoryStateContiguous( + dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, + dst_attr_mask | KMemoryAttribute::Uncached, dst_attr)); + + // Get implementations. + auto& src_impl = src_page_table.GetImpl(); + auto& dst_impl = dst_page_table.GetImpl(); + + // Prepare for traversal. + TraversalContext src_context; + TraversalContext dst_context; + TraversalEntry src_next_entry; + TraversalEntry dst_next_entry; + bool traverse_valid; + + // Begin traversal. + traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), + std::addressof(src_context), src_addr); + ASSERT(traverse_valid); + traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), + std::addressof(dst_context), dst_addr); + ASSERT(traverse_valid); + + // Prepare tracking variables. + KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; + KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; + size_t cur_src_size = src_next_entry.block_size - + (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); + size_t cur_dst_size = dst_next_entry.block_size - + (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); + + // Adjust the initial block sizes. + src_next_entry.block_size = cur_src_size; + dst_next_entry.block_size = cur_dst_size; + + // Before we get any crazier, succeed if there's nothing to do. + R_SUCCEED_IF(size == 0); + + // We're going to manage dual traversal via an offset against the total size. + KPhysicalAddress cur_src_addr = cur_src_block_addr; + KPhysicalAddress cur_dst_addr = cur_dst_block_addr; + size_t cur_min_size = std::min(cur_src_size, cur_dst_size); + + // Iterate. + size_t ofs = 0; + while (ofs < size) { + // Determine how much we can copy this iteration. + const size_t cur_copy_size = std::min(cur_min_size, size - ofs); + + // If we need to advance the traversals, do so. + bool updated_src = false, updated_dst = false, skip_copy = false; + if (ofs + cur_copy_size != size) { + if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) { + // Continue the src traversal. + traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), + std::addressof(src_context)); + ASSERT(traverse_valid); + + // Update source. + updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr; + } + + if (cur_dst_addr + cur_min_size == + dst_next_entry.phys_addr + dst_next_entry.block_size) { + // Continue the dst traversal. + traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), + std::addressof(dst_context)); + ASSERT(traverse_valid); + + // Update destination. + updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr; + } + + // If we didn't update either of source/destination, skip the copy this iteration. + if (!updated_src && !updated_dst) { + skip_copy = true; + + // Update the source block address. + cur_src_block_addr = src_next_entry.phys_addr; + } + } + + // Do the copy, unless we're skipping it. + if (!skip_copy) { + // We need both ends of the copy to be heap blocks. + R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory); + R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory); + + // Copy the data. 
+ std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr), + GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size); + + // Update. + cur_src_block_addr = src_next_entry.phys_addr; + cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size; + cur_dst_block_addr = dst_next_entry.phys_addr; + cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size; + + // Advance offset. + ofs += cur_copy_size; + } + + // Update min size. + cur_src_size = src_next_entry.block_size; + cur_dst_size = dst_next_entry.block_size; + cur_min_size = std::min(cur_src_block_addr - cur_src_addr + cur_src_size, + cur_dst_block_addr - cur_dst_addr + cur_dst_size); + } + } + + R_SUCCEED(); +} + +Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination( + KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, + KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, + KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { + // For convenience, alias this. + KPageTableBase& src_page_table = *this; + + // Lightly validate the ranges before doing anything else. + R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory); + R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory); + + // Copy the memory. + { + // Acquire the table locks. + KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); + + // Check memory state for source. + R_TRY(src_page_table.CheckMemoryStateContiguous( + src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, + src_attr_mask | KMemoryAttribute::Uncached, src_attr)); + + // Destination state is intentionally unchecked. + + // Get implementations. + auto& src_impl = src_page_table.GetImpl(); + auto& dst_impl = dst_page_table.GetImpl(); + + // Prepare for traversal. + TraversalContext src_context; + TraversalContext dst_context; + TraversalEntry src_next_entry; + TraversalEntry dst_next_entry; + bool traverse_valid; + + // Begin traversal. + traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), + std::addressof(src_context), src_addr); + ASSERT(traverse_valid); + traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), + std::addressof(dst_context), dst_addr); + ASSERT(traverse_valid); + + // Prepare tracking variables. + KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; + KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; + size_t cur_src_size = src_next_entry.block_size - + (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); + size_t cur_dst_size = dst_next_entry.block_size - + (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); + + // Adjust the initial block sizes. + src_next_entry.block_size = cur_src_size; + dst_next_entry.block_size = cur_dst_size; + + // Before we get any crazier, succeed if there's nothing to do. + R_SUCCEED_IF(size == 0); + + // We're going to manage dual traversal via an offset against the total size. + KPhysicalAddress cur_src_addr = cur_src_block_addr; + KPhysicalAddress cur_dst_addr = cur_dst_block_addr; + size_t cur_min_size = std::min(cur_src_size, cur_dst_size); + + // Iterate. + size_t ofs = 0; + while (ofs < size) { + // Determine how much we can copy this iteration. 
+            const size_t cur_copy_size = std::min(cur_min_size, size - ofs);
+
+            // If we need to advance the traversals, do so.
+            bool updated_src = false, updated_dst = false, skip_copy = false;
+            if (ofs + cur_copy_size != size) {
+                if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+                    // Continue the src traversal.
+                    traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+                                                                std::addressof(src_context));
+                    ASSERT(traverse_valid);
+
+                    // Update source.
+                    updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+                }
+
+                if (cur_dst_addr + cur_min_size ==
+                    dst_next_entry.phys_addr + dst_next_entry.block_size) {
+                    // Continue the dst traversal.
+                    traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+                                                                std::addressof(dst_context));
+                    ASSERT(traverse_valid);
+
+                    // Update destination.
+                    updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+                }
+
+                // If we didn't update either of source/destination, skip the copy this iteration.
+                if (!updated_src && !updated_dst) {
+                    skip_copy = true;
+
+                    // Update the source block address.
+                    cur_src_block_addr = src_next_entry.phys_addr;
+                }
+            }
+
+            // Do the copy, unless we're skipping it.
+            if (!skip_copy) {
+                // We need both ends of the copy to be heap blocks.
+                R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+                R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+                // Copy the data.
+                std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+                            GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+                // Update.
+                cur_src_block_addr = src_next_entry.phys_addr;
+                cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+                cur_dst_block_addr = dst_next_entry.phys_addr;
+                cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+                // Advance offset.
+                ofs += cur_copy_size;
+            }
+
+            // Update min size.
+            cur_src_size = src_next_entry.block_size;
+            cur_dst_size = dst_next_entry.block_size;
+            cur_min_size = std::min(cur_src_block_addr - cur_src_addr + cur_src_size,
+                                    cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+        }
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+                                         KProcessAddress address, size_t size,
+                                         KMemoryPermission test_perm, KMemoryState dst_state) {
+    // Validate pre-conditions.
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
+           test_perm == KMemoryPermission::UserRead);
+
+    // Check that the address is in range.
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Get the source permission.
+    const auto src_perm = static_cast<KMemoryPermission>(
+        (test_perm == KMemoryPermission::UserReadWrite)
+            ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+            : KMemoryPermission::UserRead);
+
+    // Get aligned extents.
+    const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
+    const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+    const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
+    const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+
+    const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
+    const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
+
+    // Get the test state and attribute mask.
+    KMemoryState test_state;
+    KMemoryAttribute test_attr_mask;
+    switch (dst_state) {
+    case KMemoryState::Ipc:
+        test_state = KMemoryState::FlagCanUseIpc;
+        test_attr_mask =
+            KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonSecureIpc:
+        test_state = KMemoryState::FlagCanUseNonSecureIpc;
+        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    case KMemoryState::NonDeviceIpc:
+        test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+        test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+        break;
+    default:
+        R_THROW(ResultInvalidCombination);
+    }
+
+    // Ensure that on failure, we roll back appropriately.
+    size_t mapped_size = 0;
+    ON_RESULT_FAILURE {
+        if (mapped_size > 0) {
+            this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
+                                                          src_perm);
+        }
+    };
+
+    size_t blocks_needed = 0;
+
+    // Iterate, mapping as needed.
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
+    while (true) {
+        const KMemoryInfo info = it->GetMemoryInfo();
+
+        // Validate the current block.
+        R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
+                                     test_attr_mask, KMemoryAttribute::None));
+
+        if (mapping_src_start < mapping_src_end &&
+            GetInteger(mapping_src_start) < info.GetEndAddress() &&
+            info.GetAddress() < GetInteger(mapping_src_end)) {
+            const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
+                                       ? info.GetAddress()
+                                       : GetInteger(mapping_src_start);
+            const auto cur_end = mapping_src_last >= info.GetLastAddress()
+                                     ? info.GetEndAddress()
+                                     : GetInteger(mapping_src_end);
+            const size_t cur_size = cur_end - cur_start;
+
+            if (info.GetAddress() < GetInteger(mapping_src_start)) {
+                ++blocks_needed;
+            }
+            if (mapping_src_last < info.GetLastAddress()) {
+                ++blocks_needed;
+            }
+
+            // Set the permissions on the block, if we need to.
+            if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
+                const DisableMergeAttribute head_body_attr =
+                    (GetInteger(mapping_src_start) >= info.GetAddress())
+                        ? DisableMergeAttribute::DisableHeadAndBody
+                        : DisableMergeAttribute::None;
+                const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end))
+                                                            ? DisableMergeAttribute::DisableTail
+                                                            : DisableMergeAttribute::None;
+                const KPageProperties properties = {
+                    src_perm, false, false,
+                    static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+                R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties,
+                                    OperationType::ChangePermissions, false));
+            }
+
+            // Note that we mapped this part.
+            mapped_size += cur_size;
+        }
+
+        // If the block is at the end, we're done.
+        if (aligned_src_last <= info.GetLastAddress()) {
+            break;
+        }
+
+        // Advance.
+        ++it;
+        ASSERT(it != m_memory_block_manager.end());
+    }
+
+    if (out_blocks_needed != nullptr) {
+        ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+        *out_blocks_needed = blocks_needed;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
+                                         KProcessAddress src_addr, KMemoryPermission test_perm,
+                                         KMemoryState dst_state, KPageTableBase& src_page_table,
+                                         bool send) {
+    ASSERT(this->IsLockedByCurrentThread());
+    ASSERT(src_page_table.IsLockedByCurrentThread());
+
+    // Check that we can theoretically map.
+ const KProcessAddress region_start = m_alias_region_start; + const size_t region_size = m_alias_region_end - m_alias_region_start; + R_UNLESS(size < region_size, ResultOutOfAddressSpace); + + // Get aligned source extents. + const KProcessAddress src_start = src_addr; + const KProcessAddress src_end = src_addr + size; + const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize); + const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize); + const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize); + const KProcessAddress mapping_src_end = + Common::AlignDown(GetInteger(src_start) + size, PageSize); + const size_t aligned_src_size = aligned_src_end - aligned_src_start; + const size_t mapping_src_size = + (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; + + // Select a random address to map at. + KProcessAddress dst_addr = 0; + { + const size_t alignment = 4_KiB; + const size_t offset = GetInteger(aligned_src_start) & (alignment - 1); + + dst_addr = + this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, + alignment, offset, this->GetNumGuardPages()); + R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); + } + + // Check that we can perform the operation we're about to perform. + ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Reserve space for any partial pages we allocate. + const size_t unmapped_size = aligned_src_size - mapping_src_size; + KScopedResourceReservation memory_reservation( + m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size); + R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); + + // Ensure that we manage page references correctly. + KPhysicalAddress start_partial_page = 0; + KPhysicalAddress end_partial_page = 0; + KProcessAddress cur_mapped_addr = dst_addr; + + // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll + // free on scope exit. + SCOPE_EXIT({ + if (start_partial_page != 0) { + m_kernel.MemoryManager().Close(start_partial_page, 1); + } + if (end_partial_page != 0) { + m_kernel.MemoryManager().Close(end_partial_page, 1); + } + }); + + ON_RESULT_FAILURE { + if (cur_mapped_addr != dst_addr) { + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_ASSERT(this->Operate(updater.GetPageList(), dst_addr, + (cur_mapped_addr - dst_addr) / PageSize, 0, false, + unmap_properties, OperationType::Unmap, true)); + } + }; + + // Allocate the start page as needed. + if (aligned_src_start < mapping_src_start) { + start_partial_page = + m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); + R_UNLESS(start_partial_page != 0, ResultOutOfMemory); + } + + // Allocate the end page as needed. + if (mapping_src_end < aligned_src_end && + (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { + end_partial_page = + m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); + R_UNLESS(end_partial_page != 0, ResultOutOfMemory); + } + + // Get the implementation. 
+ auto& src_impl = src_page_table.GetImpl(); + + // Get the fill value for partial pages. + const auto fill_val = m_ipc_fill_value; + + // Begin traversal. + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), + std::addressof(context), aligned_src_start); + ASSERT(traverse_valid); + + // Prepare tracking variables. + KPhysicalAddress cur_block_addr = next_entry.phys_addr; + size_t cur_block_size = + next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1)); + size_t tot_block_size = cur_block_size; + + // Map the start page, if we have one. + if (start_partial_page != 0) { + // Ensure the page holds correct data. + u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page); + if (send) { + const size_t partial_offset = src_start - aligned_src_start; + size_t copy_size, clear_size; + if (src_end < mapping_src_start) { + copy_size = size; + clear_size = mapping_src_start - src_end; + } else { + copy_size = mapping_src_start - src_start; + clear_size = 0; + } + + std::memset(start_partial_virt, fill_val, partial_offset); + std::memcpy(start_partial_virt + partial_offset, + GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset, + copy_size); + if (clear_size > 0) { + std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size); + } + } else { + std::memset(start_partial_virt, fill_val, PageSize); + } + + // Map the page. + const KPageProperties start_map_properties = {test_perm, false, false, + DisableMergeAttribute::DisableHead}; + R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true, + start_map_properties, OperationType::Map, false)); + + // Update tracking extents. + cur_mapped_addr += PageSize; + cur_block_addr += PageSize; + cur_block_size -= PageSize; + + // If the block's size was one page, we may need to continue traversal. + if (cur_block_size == 0 && aligned_src_size > PageSize) { + traverse_valid = + src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + ASSERT(traverse_valid); + + cur_block_addr = next_entry.phys_addr; + cur_block_size = next_entry.block_size; + tot_block_size += next_entry.block_size; + } + } + + // Map the remaining pages. + while (aligned_src_start + tot_block_size < mapping_src_end) { + // Continue the traversal. + traverse_valid = + src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + ASSERT(traverse_valid); + + // Process the block. + if (next_entry.phys_addr != cur_block_addr + cur_block_size) { + // Map the block we've been processing so far. + const KPageProperties map_properties = {test_perm, false, false, + (cur_mapped_addr == dst_addr) + ? DisableMergeAttribute::DisableHead + : DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize, + cur_block_addr, true, map_properties, OperationType::Map, false)); + + // Update tracking extents. + cur_mapped_addr += cur_block_size; + cur_block_addr = next_entry.phys_addr; + cur_block_size = next_entry.block_size; + } else { + cur_block_size += next_entry.block_size; + } + tot_block_size += next_entry.block_size; + } + + // Handle the last direct-mapped page. + if (const KProcessAddress mapped_block_end = + aligned_src_start + tot_block_size - cur_block_size; + mapped_block_end < mapping_src_end) { + const size_t last_block_size = mapping_src_end - mapped_block_end; + + // Map the last block. 
+        const KPageProperties map_properties = {test_perm, false, false,
+                                                (cur_mapped_addr == dst_addr)
+                                                    ? DisableMergeAttribute::DisableHead
+                                                    : DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize,
+                            cur_block_addr, true, map_properties, OperationType::Map, false));
+
+        // Update tracking extents.
+        cur_mapped_addr += last_block_size;
+        cur_block_addr += last_block_size;
+        if (mapped_block_end + cur_block_size < aligned_src_end &&
+            cur_block_size == last_block_size) {
+            traverse_valid =
+                src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            ASSERT(traverse_valid);
+
+            cur_block_addr = next_entry.phys_addr;
+        }
+    }
+
+    // Map the end page, if we have one.
+    if (end_partial_page != 0) {
+        // Ensure the page holds correct data.
+        u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page);
+        if (send) {
+            const size_t copy_size = src_end - mapping_src_end;
+            std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr),
+                        copy_size);
+            std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size);
+        } else {
+            std::memset(end_partial_virt, fill_val, PageSize);
+        }
+
+        // Map the page.
+        const KPageProperties map_properties = {test_perm, false, false,
+                                                (cur_mapped_addr == dst_addr)
+                                                    ? DisableMergeAttribute::DisableHead
+                                                    : DisableMergeAttribute::None};
+        R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true,
+                            map_properties, OperationType::Map, false));
+    }
+
+    // Update memory blocks to reflect our changes
+    m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
+                                  dst_state, test_perm, KMemoryAttribute::None,
+                                  KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+
+    // Set the output address.
+    *out_addr = dst_addr + (src_start - aligned_src_start);
+
+    // We succeeded.
+    memory_reservation.Commit();
+    R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size,
+                                   KProcessAddress src_addr, KPageTableBase& src_page_table,
+                                   KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
+    // For convenience, alias this.
+    KPageTableBase& dst_page_table = *this;
+
+    // Acquire the table locks.
+    KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(std::addressof(src_page_table));
+
+    // Perform client setup.
+    size_t num_allocator_blocks;
+    R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
+                                           std::addressof(num_allocator_blocks), src_addr, size,
+                                           test_perm, dst_state));
+
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 src_page_table.m_memory_block_slab_manager,
+                                                 num_allocator_blocks);
+    R_TRY(allocator_result);
+
+    // Get the mapped extents.
+    const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
+    const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
+    const size_t src_map_size = src_map_end - src_map_start;
+
+    // Ensure that we clean up appropriately if we fail after this.
+    const auto src_perm = static_cast<KMemoryPermission>(
+        (test_perm == KMemoryPermission::UserReadWrite)
+            ?
KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped + : KMemoryPermission::UserRead); + ON_RESULT_FAILURE { + if (src_map_end > src_map_start) { + src_page_table.CleanupForIpcClientOnServerSetupFailure( + updater.GetPageList(), src_map_start, src_map_size, src_perm); + } + }; + + // Perform server setup. + R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, + src_page_table, send)); + + // If anything was mapped, ipc-lock the pages. + if (src_map_start < src_map_end) { + // Get the source permission. + src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, + (src_map_end - src_map_start) / PageSize, + &KMemoryBlock::LockForIpc, src_perm); + } + + R_SUCCEED(); +} + +Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, + KMemoryState dst_state) { + // Validate the address. + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, dst_state, KMemoryPermission::UserRead, + KMemoryPermission::UserRead, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Get aligned extents. + const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize); + const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize); + const size_t aligned_size = aligned_end - aligned_start; + const size_t aligned_num_pages = aligned_size / PageSize; + + // Unmap the pages. + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false, + unmap_properties, OperationType::Unmap, false)); + + // Update memory blocks. + m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, + KMemoryState::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + + // Release from the resource limit as relevant. + const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); + const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); + const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; + m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, + aligned_size - mapping_size); + + R_SUCCEED(); +} + +Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, + KMemoryState dst_state) { + // Validate the address. + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Get aligned source extents. + const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); + const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); + const KProcessAddress mapping_last = mapping_end - 1; + const size_t mapping_size = (mapping_start < mapping_end) ? 
(mapping_end - mapping_start) : 0; + + // If nothing was mapped, we're actually done immediately. + R_SUCCEED_IF(mapping_size == 0); + + // Get the test state and attribute mask. + KMemoryState test_state; + KMemoryAttribute test_attr_mask; + switch (dst_state) { + case KMemoryState::Ipc: + test_state = KMemoryState::FlagCanUseIpc; + test_attr_mask = + KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; + break; + case KMemoryState::NonSecureIpc: + test_state = KMemoryState::FlagCanUseNonSecureIpc; + test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; + break; + case KMemoryState::NonDeviceIpc: + test_state = KMemoryState::FlagCanUseNonDeviceIpc; + test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; + break; + default: + R_THROW(ResultInvalidCombination); + } + + // Lock the table. + // NOTE: Nintendo does this *after* creating the updater below, but this does not follow + // convention elsewhere in KPageTableBase. + KScopedLightLock lk(m_general_lock); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Ensure that on failure, we roll back appropriately. + size_t mapped_size = 0; + ON_RESULT_FAILURE { + if (mapped_size > 0) { + // Determine where the mapping ends. + const auto mapped_end = GetInteger(mapping_start) + mapped_size; + const auto mapped_last = mapped_end - 1; + + // Get current and next iterators. + KMemoryBlockManager::const_iterator start_it = + m_memory_block_manager.FindIterator(mapping_start); + KMemoryBlockManager::const_iterator next_it = start_it; + ++next_it; + + // Get the current block info. + KMemoryInfo cur_info = start_it->GetMemoryInfo(); + + // Create tracking variables. + KProcessAddress cur_address = cur_info.GetAddress(); + size_t cur_size = cur_info.GetSize(); + bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); + bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; + bool first = cur_info.GetIpcDisableMergeCount() == 1 && + False(cur_info.GetDisableMergeAttribute() & + KMemoryBlockDisableMergeAttribute::Locked); + + while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) { + // Check that we have a next block. + ASSERT(next_it != m_memory_block_manager.end()); + + // Get the next info. + const KMemoryInfo next_info = next_it->GetMemoryInfo(); + + // Check if we can consolidate the next block's permission set with the current one. + const bool next_perm_eq = + next_info.GetPermission() == next_info.GetOriginalPermission(); + const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; + if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && + cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { + // We can consolidate the reprotection for the current and next block into a + // single call. + cur_size += next_info.GetSize(); + } else { + // We have to operate on the current block. + if ((cur_needs_set_perm || first) && !cur_perm_eq) { + const KPageProperties properties = { + cur_info.GetPermission(), false, false, + first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail + : DisableMergeAttribute::None}; + R_ASSERT(this->Operate(updater.GetPageList(), cur_address, + cur_size / PageSize, 0, false, properties, + OperationType::ChangePermissions, true)); + } + + // Advance. + cur_address = next_info.GetAddress(); + cur_size = next_info.GetSize(); + first = false; + } + + // Advance. 
+ cur_info = next_info; + cur_perm_eq = next_perm_eq; + cur_needs_set_perm = next_needs_set_perm; + ++next_it; + } + + // Process the last block. + if ((first || cur_needs_set_perm) && !cur_perm_eq) { + const KPageProperties properties = { + cur_info.GetPermission(), false, false, + first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail + : DisableMergeAttribute::None}; + R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, + false, properties, OperationType::ChangePermissions, true)); + } + } + }; + + // Iterate, reprotecting as needed. + { + // Get current and next iterators. + KMemoryBlockManager::const_iterator start_it = + m_memory_block_manager.FindIterator(mapping_start); + KMemoryBlockManager::const_iterator next_it = start_it; + ++next_it; + + // Validate the current block. + KMemoryInfo cur_info = start_it->GetMemoryInfo(); + R_ASSERT(this->CheckMemoryState( + cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None, + test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked)); + + // Create tracking variables. + KProcessAddress cur_address = cur_info.GetAddress(); + size_t cur_size = cur_info.GetSize(); + bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); + bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; + bool first = + cur_info.GetIpcDisableMergeCount() == 1 && + False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked); + + while ((cur_address + cur_size - 1) < mapping_last) { + // Check that we have a next block. + ASSERT(next_it != m_memory_block_manager.end()); + + // Get the next info. + const KMemoryInfo next_info = next_it->GetMemoryInfo(); + + // Validate the next block. + R_ASSERT(this->CheckMemoryState( + next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None, + test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked)); + + // Check if we can consolidate the next block's permission set with the current one. + const bool next_perm_eq = + next_info.GetPermission() == next_info.GetOriginalPermission(); + const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; + if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && + cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { + // We can consolidate the reprotection for the current and next block into a single + // call. + cur_size += next_info.GetSize(); + } else { + // We have to operate on the current block. + if ((cur_needs_set_perm || first) && !cur_perm_eq) { + const KPageProperties properties = { + cur_needs_set_perm ? cur_info.GetOriginalPermission() + : cur_info.GetPermission(), + false, false, + first ? DisableMergeAttribute::EnableHeadAndBody + : DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, + false, properties, OperationType::ChangePermissions, + false)); + } + + // Mark that we mapped the block. + mapped_size += cur_size; + + // Advance. + cur_address = next_info.GetAddress(); + cur_size = next_info.GetSize(); + first = false; + } + + // Advance. + cur_info = next_info; + cur_perm_eq = next_perm_eq; + cur_needs_set_perm = next_needs_set_perm; + ++next_it; + } + + // Process the last block. + const auto lock_count = + cur_info.GetIpcLockCount() + + (next_it != m_memory_block_manager.end() + ? 
(next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) + : 0); + if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { + const DisableMergeAttribute head_body_attr = + first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None; + const DisableMergeAttribute tail_attr = + lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None; + const KPageProperties properties = { + cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), + false, false, static_cast(head_body_attr | tail_attr)}; + R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false, + properties, OperationType::ChangePermissions, false)); + } + } + + // Create an update allocator. + // NOTE: Guaranteed zero blocks needed here. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, 0); + R_TRY(allocator_result); + + // Unlock the pages. + m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, + mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, + KMemoryPermission::None); + + R_SUCCEED(); +} + +void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, + KProcessAddress address, size_t size, + KMemoryPermission prot_perm) { + ASSERT(this->IsLockedByCurrentThread()); + ASSERT(Common::IsAligned(GetInteger(address), PageSize)); + ASSERT(Common::IsAligned(size, PageSize)); + + // Get the mapped extents. + const KProcessAddress src_map_start = address; + const KProcessAddress src_map_end = address + size; + const KProcessAddress src_map_last = src_map_end - 1; + + // This function is only invoked when there's something to do. + ASSERT(src_map_end > src_map_start); + + // Iterate over blocks, fixing permissions. + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); + while (true) { + const KMemoryInfo info = it->GetMemoryInfo(); + + const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) + ? info.GetAddress() + : GetInteger(src_map_start); + const auto cur_end = + src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); + + // If we can, fix the protections on the block. + if ((info.GetIpcLockCount() == 0 && + (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || + (info.GetIpcLockCount() != 0 && + (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { + // Check if we actually need to fix the protections on the block. + if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || + (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { + const bool start_nc = (info.GetAddress() == GetInteger(src_map_start)) + ? (False(info.GetDisableMergeAttribute() & + (KMemoryBlockDisableMergeAttribute::Locked | + KMemoryBlockDisableMergeAttribute::IpcLeft))) + : info.GetAddress() <= GetInteger(src_map_start); + + const DisableMergeAttribute head_body_attr = + start_nc ? DisableMergeAttribute::EnableHeadAndBody + : DisableMergeAttribute::None; + DisableMergeAttribute tail_attr; + if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) { + auto next_it = it; + ++next_it; + + const auto lock_count = + info.GetIpcLockCount() + + (next_it != m_memory_block_manager.end() + ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) + : 0); + tail_attr = lock_count == 0 ? 
DisableMergeAttribute::EnableTail + : DisableMergeAttribute::None; + } else { + tail_attr = DisableMergeAttribute::None; + } + + const KPageProperties properties = { + info.GetPermission(), false, false, + static_cast(head_body_attr | tail_attr)}; + R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0, + false, properties, OperationType::ChangePermissions, true)); + } + } + + // If we're past the end of the region, we're done. + if (src_map_last <= info.GetLastAddress()) { + break; + } + + // Advance. + ++it; + ASSERT(it != m_memory_block_manager.end()); + } +} + +Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) { + // Lock the physical memory lock. + KScopedLightLock phys_lk(m_map_physical_memory_lock); + + // Calculate the last address for convenience. + const KProcessAddress last_address = address + size - 1; + + // Define iteration variables. + KProcessAddress cur_address; + size_t mapped_size; + + // The entire mapping process can be retried. + while (true) { + // Check if the memory is already mapped. + { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Iterate over the memory. + cur_address = address; + mapped_size = 0; + + auto it = m_memory_block_manager.FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != m_memory_block_manager.end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + if (info.GetState() != KMemoryState::Free) { + mapped_size += (last_address + 1 - cur_address); + } + break; + } + + // Track the memory if it's mapped. + if (info.GetState() != KMemoryState::Free) { + mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + + // If the size mapped is the size requested, we've nothing to do. + R_SUCCEED_IF(size == mapped_size); + } + + // Allocate and map the memory. + { + // Reserve the memory from the process resource limit. + KScopedResourceReservation memory_reservation( + m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size); + R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); + + // Allocate pages for the new memory. + KPageGroup pg(m_kernel, m_block_info_manager); + R_TRY(m_kernel.MemoryManager().AllocateForProcess( + std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option, + GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value)); + + // If we fail in the next bit (or retry), we need to cleanup the pages. + auto pg_guard = SCOPE_GUARD({ + pg.OpenFirst(); + pg.Close(); + }); + + // Map the memory. + { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + size_t num_allocator_blocks = 0; + + // Verify that nobody has mapped memory since we first checked. + { + // Iterate over the memory. + size_t checked_mapped_size = 0; + cur_address = address; + + auto it = m_memory_block_manager.FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != m_memory_block_manager.end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + const bool is_free = info.GetState() == KMemoryState::Free; + if (is_free) { + if (info.GetAddress() < GetInteger(address)) { + ++num_allocator_blocks; + } + if (last_address < info.GetLastAddress()) { + ++num_allocator_blocks; + } + } + + // Check if we're done. 
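+                        // (GetLastAddress() is inclusive, so once the current block reaches
+                        // last_address the whole requested range has been re-scanned.)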
+ if (last_address <= info.GetLastAddress()) { + if (!is_free) { + checked_mapped_size += (last_address + 1 - cur_address); + } + break; + } + + // Track the memory if it's mapped. + if (!is_free) { + checked_mapped_size += + KProcessAddress(info.GetEndAddress()) - cur_address; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + + // If the size now isn't what it was before, somebody mapped or unmapped + // concurrently. If this happened, retry. + if (mapped_size != checked_mapped_size) { + continue; + } + } + + // Create an update allocator. + ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, + num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Prepare to iterate over the memory. + auto pg_it = pg.begin(); + KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); + size_t pg_pages = pg_it->GetNumPages(); + + // Reset the current tracking address, and make sure we clean up on failure. + pg_guard.Cancel(); + cur_address = address; + ON_RESULT_FAILURE { + if (cur_address > address) { + const KProcessAddress last_unmap_address = cur_address - 1; + + // Iterate, unmapping the pages. + cur_address = address; + + auto it = m_memory_block_manager.FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != m_memory_block_manager.end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // If the memory state is free, we mapped it and need to unmap it. + if (info.GetState() == KMemoryState::Free) { + // Determine the range to unmap. + const KPageProperties unmap_properties = { + KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + const size_t cur_pages = + std::min(KProcessAddress(info.GetEndAddress()) - cur_address, + last_unmap_address + 1 - cur_address) / + PageSize; + + // Unmap. + R_ASSERT(this->Operate(updater.GetPageList(), cur_address, + cur_pages, 0, false, unmap_properties, + OperationType::Unmap, true)); + } + + // Check if we're done. + if (last_unmap_address <= info.GetLastAddress()) { + break; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + } + + // Release any remaining unmapped memory. + m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages); + m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages); + for (++pg_it; pg_it != pg.end(); ++pg_it) { + m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(), + pg_it->GetNumPages()); + m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages()); + } + }; + + auto it = m_memory_block_manager.FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != m_memory_block_manager.end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // If it's unmapped, we need to map it. + if (info.GetState() == KMemoryState::Free) { + // Determine the range to map. + const KPageProperties map_properties = { + KMemoryPermission::UserReadWrite, false, false, + cur_address == this->GetAliasRegionStart() + ? DisableMergeAttribute::DisableHead + : DisableMergeAttribute::None}; + size_t map_pages = + std::min(KProcessAddress(info.GetEndAddress()) - cur_address, + last_address + 1 - cur_address) / + PageSize; + + // While we have pages to map, map them. 
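+                        // (Each free range consumes the next physical blocks from 'pg': they
+                        // are gathered into the local group 'cur_pg' and then mapped with a
+                        // single MapFirstGroup operation below.)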
+ { + // Create a page group for the current mapping range. + KPageGroup cur_pg(m_kernel, m_block_info_manager); + { + ON_RESULT_FAILURE_2 { + cur_pg.OpenFirst(); + cur_pg.Close(); + }; + + size_t remain_pages = map_pages; + while (remain_pages > 0) { + // Check if we're at the end of the physical block. + if (pg_pages == 0) { + // Ensure there are more pages to map. + ASSERT(pg_it != pg.end()); + + // Advance our physical block. + ++pg_it; + pg_phys_addr = pg_it->GetAddress(); + pg_pages = pg_it->GetNumPages(); + } + + // Add whatever we can to the current block. + const size_t cur_pages = std::min(pg_pages, remain_pages); + R_TRY(cur_pg.AddBlock(pg_phys_addr + + ((pg_pages - cur_pages) * PageSize), + cur_pages)); + + // Advance. + remain_pages -= cur_pages; + pg_pages -= cur_pages; + } + } + + // Map the papges. + R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, + cur_pg, map_properties, + OperationType::MapFirstGroup, false)); + } + } + + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + break; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + + // We succeeded, so commit the memory reservation. + memory_reservation.Commit(); + + // Increase our tracked mapped size. + m_mapped_physical_memory_size += (size - mapped_size); + + // Update the relevant memory blocks. + m_memory_block_manager.UpdateIfMatch( + std::addressof(allocator), address, size / PageSize, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + address == this->GetAliasRegionStart() + ? KMemoryBlockDisableMergeAttribute::Normal + : KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); + + R_SUCCEED(); + } + } + } +} + +Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) { + // Lock the physical memory lock. + KScopedLightLock phys_lk(m_map_physical_memory_lock); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Calculate the last address for convenience. + const KProcessAddress last_address = address + size - 1; + + // Define iteration variables. + KProcessAddress map_start_address = 0; + KProcessAddress map_last_address = 0; + + KProcessAddress cur_address; + size_t mapped_size; + size_t num_allocator_blocks = 0; + + // Check if the memory is mapped. + { + // Iterate over the memory. + cur_address = address; + mapped_size = 0; + + auto it = m_memory_block_manager.FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != m_memory_block_manager.end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // Verify the memory's state. + const bool is_normal = info.GetState() == KMemoryState::Normal && + info.GetAttribute() == KMemoryAttribute::None; + const bool is_free = info.GetState() == KMemoryState::Free; + R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory); + + if (is_normal) { + R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); + + if (map_start_address == 0) { + map_start_address = cur_address; + } + map_last_address = + (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; + + if (info.GetAddress() < GetInteger(address)) { + ++num_allocator_blocks; + } + if (last_address < info.GetLastAddress()) { + ++num_allocator_blocks; + } + + mapped_size += (map_last_address + 1 - cur_address); + } + + // Check if we're done. 
+ if (last_address <= info.GetLastAddress()) { + break; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + + // If there's nothing mapped, we've nothing to do. + R_SUCCEED_IF(mapped_size == 0); + } + + // Create an update allocator. + ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Separate the mapping. + const KPageProperties sep_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), map_start_address, + (map_last_address + 1 - map_start_address) / PageSize, 0, false, + sep_properties, OperationType::Separate, false)); + + // Reset the current tracking address, and make sure we clean up on failure. + cur_address = address; + + // Iterate over the memory, unmapping as we go. + auto it = m_memory_block_manager.FindIterator(cur_address); + + const auto clear_merge_attr = + (it->GetState() == KMemoryState::Normal && + it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) + ? KMemoryBlockDisableMergeAttribute::Normal + : KMemoryBlockDisableMergeAttribute::None; + + while (true) { + // Check that the iterator is valid. + ASSERT(it != m_memory_block_manager.end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // If the memory state is normal, we need to unmap it. + if (info.GetState() == KMemoryState::Normal) { + // Determine the range to unmap. + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, + last_address + 1 - cur_address) / + PageSize; + + // Unmap. + R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false, + unmap_properties, OperationType::Unmap, false)); + } + + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + break; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + + // Release the memory resource. + m_mapped_physical_memory_size -= mapped_size; + m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size); + + // Update memory blocks. + m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + clear_merge_attr); + + // We succeeded. + R_SUCCEED(); +} + +Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + UNIMPLEMENTED(); + R_THROW(ResultNotImplemented); +} + +Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + UNIMPLEMENTED(); + R_THROW(ResultNotImplemented); +} + +Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, + KPageTableBase& src_page_table, + KProcessAddress src_address) { + // We need to lock both this table, and the current process's table, so set up an alias. + KPageTableBase& dst_page_table = *this; + + // Acquire the table locks. + KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); + + // Check that the memory is mapped in the destination process. 
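+    // (The destination must be SharedCode memory with user read/write permission, i.e. the
+    // kind of mapping that MapProcessMemory creates.)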
+ size_t num_allocator_blocks; + R_TRY(dst_page_table.CheckMemoryState( + std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All, + KMemoryState::SharedCode, KMemoryPermission::UserReadWrite, + KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); + + // Check that the memory is mapped in the source process. + R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess, + KMemoryState::FlagCanMapProcess, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Validate that the memory ranges are compatible. + { + // Define a helper type. + struct ContiguousRangeInfo { + public: + KPageTableBase& m_pt; + TraversalContext m_context; + TraversalEntry m_entry; + KPhysicalAddress m_phys_addr; + size_t m_cur_size; + size_t m_remaining_size; + + public: + ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size) + : m_pt(pt), m_remaining_size(size) { + // Begin a traversal. + ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry), + std::addressof(m_context), address)); + + // Setup tracking fields. + m_phys_addr = m_entry.phys_addr; + m_cur_size = std::min( + m_remaining_size, + m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1))); + + // Consume the whole contiguous block. + this->DetermineContiguousBlockExtents(); + } + + void ContinueTraversal() { + // Update our remaining size. + m_remaining_size = m_remaining_size - m_cur_size; + + // Update our tracking fields. + if (m_remaining_size > 0) { + m_phys_addr = m_entry.phys_addr; + m_cur_size = std::min(m_remaining_size, m_entry.block_size); + + // Consume the whole contiguous block. + this->DetermineContiguousBlockExtents(); + } + } + + private: + void DetermineContiguousBlockExtents() { + // Continue traversing until we're not contiguous, or we have enough. + while (m_cur_size < m_remaining_size) { + ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry), + std::addressof(m_context))); + + // If we're not contiguous, we're done. + if (m_entry.phys_addr != m_phys_addr + m_cur_size) { + break; + } + + // Update our current size. + m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size); + } + } + }; + + // Create ranges for both tables. + ContiguousRangeInfo src_range(src_page_table, src_address, size); + ContiguousRangeInfo dst_range(dst_page_table, dst_address, size); + + // Validate the ranges. + while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) { + R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion); + R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion); + + src_range.ContinueTraversal(); + dst_range.ContinueTraversal(); + } + } + + // We no longer need to hold our lock on the source page table. + lk.TryUnlockHalf(src_page_table.m_general_lock); + + // Create an update allocator. + Result allocator_result; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // We're going to perform an update, so create a helper. + KScopedPageTableUpdater updater(this); + + // Unmap the memory. 
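+    // (Only the destination mapping is removed here; the source process's mapping of these
+    // pages is left untouched.)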
+ const size_t num_pages = size / PageSize; + const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, + DisableMergeAttribute::None}; + R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties, + OperationType::Unmap, false)); + + // Apply the memory block update. + m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + + R_SUCCEED(); +} + +Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr, + size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, + const KPageProperties properties, OperationType operation, + bool reuse_ll) { + ASSERT(this->IsLockedByCurrentThread()); + ASSERT(num_pages > 0); + ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); + ASSERT(this->ContainsPages(virt_addr, num_pages)); + + // As we don't allocate page entries in guest memory, we don't need to allocate them from + // or free them to the page list, and so it goes unused (along with page properties). + + switch (operation) { + case OperationType::Unmap: { + // Ensure that any pages we track are closed on exit. + KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager()); + SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); + + // Make a page group representing the region to unmap. + this->MakePageGroup(pages_to_close, virt_addr, num_pages); + + // Unmap. + m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize); + + R_SUCCEED(); + } + case OperationType::Map: { + ASSERT(virt_addr != 0); + ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); + m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr); + + // Open references to pages, if we should. + if (this->IsHeapPhysicalAddress(phys_addr)) { + m_kernel.MemoryManager().Open(phys_addr, num_pages); + } + + R_SUCCEED(); + } + case OperationType::Separate: { + // TODO: Unimplemented. + R_SUCCEED(); + } + case OperationType::ChangePermissions: + case OperationType::ChangePermissionsAndRefresh: + case OperationType::ChangePermissionsAndRefreshAndFlush: + R_SUCCEED(); + default: + UNREACHABLE(); + } +} + +Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr, + size_t num_pages, const KPageGroup& page_group, + const KPageProperties properties, OperationType operation, + bool reuse_ll) { + ASSERT(this->IsLockedByCurrentThread()); + ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); + ASSERT(num_pages > 0); + ASSERT(num_pages == page_group.GetNumPages()); + + // As we don't allocate page entries in guest memory, we don't need to allocate them from + // the page list, and so it goes unused (along with page properties). + + switch (operation) { + case OperationType::MapGroup: + case OperationType::MapFirstGroup: { + // We want to maintain a new reference to every page in the group. + KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); + + for (const auto& node : page_group) { + const size_t size{node.GetNumPages() * PageSize}; + + // Map the pages. + m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress()); + + virt_addr += size; + } + + // We succeeded! We want to persist the reference to the pages. 
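+        // (KScopedPageGroup would drop its references when it goes out of scope; cancelling
+        // the close keeps them open for as long as the pages remain mapped.)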
+            spg.CancelClose();
+
+            R_SUCCEED();
+        }
+    default:
+        UNREACHABLE();
+    }
+}
+
+void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) {
+    while (page_list->Peek()) {
+        [[maybe_unused]] auto page = page_list->Pop();
+
+        // TODO: Free page entries once they are allocated in guest memory.
+        // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
+        // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
+        // this->GetPageTableManager().Free(page);
+    }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
new file mode 100755
index 000000000..ee2c41e67
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -0,0 +1,759 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_funcs.h"
+#include "common/page_table.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+enum class DisableMergeAttribute : u8 {
+    None = (0U << 0),
+
+    DisableHead = (1U << 0),
+    DisableHeadAndBody = (1U << 1),
+    EnableHeadAndBody = (1U << 2),
+    DisableTail = (1U << 3),
+    EnableTail = (1U << 4),
+    EnableAndMergeHeadBodyTail = (1U << 5),
+
+    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute);
+
+struct KPageProperties {
+    KMemoryPermission perm;
+    bool io;
+    bool uncached;
+    DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
+class KResourceLimit;
+class KSystemResource;
+
+class KPageTableBase {
+    YUZU_NON_COPYABLE(KPageTableBase);
+    YUZU_NON_MOVEABLE(KPageTableBase);
+
+public:
+    using TraversalEntry = Common::PageTable::TraversalEntry;
+    using TraversalContext = Common::PageTable::TraversalContext;
+
+    class MemoryRange {
+    private:
+        KernelCore& m_kernel;
+        KPhysicalAddress m_address;
+        size_t m_size;
+        bool m_heap;
+
+    public:
+        explicit MemoryRange(KernelCore& kernel)
+            : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {}
+
+        void Set(KPhysicalAddress address, size_t size, bool heap) {
+            m_address = address;
+            m_size = size;
+            m_heap = heap;
+        }
+
+        KPhysicalAddress GetAddress() const {
+            return m_address;
+        }
+        size_t GetSize() const {
+            return m_size;
+        }
+        bool IsHeap() const {
+            return m_heap;
+        }
+
+        void Open();
+        void Close();
+    };
+
+protected:
+    enum MemoryFillValue : u8 {
+        MemoryFillValue_Zero = 0,
+        MemoryFillValue_Stack = 'X',
+        MemoryFillValue_Ipc = 'Y',
+        MemoryFillValue_Heap = 'Z',
+    };
+
+    enum class OperationType {
+        Map = 0,
+        MapGroup = 1,
+        MapFirstGroup = 2,
+        Unmap = 3,
+        ChangePermissions = 4,
+        ChangePermissionsAndRefresh = 5,
+        ChangePermissionsAndRefreshAndFlush = 6,
+        Separate = 7,
+    };
+
+    static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
+    static constexpr size_t RegionAlignment = 2_MiB;
+    static_assert(RegionAlignment == KernelAslrAlignment);
+
+    struct PageLinkedList {
+    private:
+        struct Node {
+            Node* m_next;
+            std::array<u8, PageSize - sizeof(Node*)> m_buffer;
+        };
+        static_assert(std::is_trivial_v<Node>);
+
+    private:
+        Node* m_root{};
+
+    public:
+        constexpr PageLinkedList() : m_root(nullptr) {}
+
+        void Push(Node* n) {
+            ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+            n->m_next = m_root;
+            m_root = n;
+        }
+
+        Node* Peek() const {
+            return m_root;
+        }
+
+        Node* Pop() {
+            Node* const r = m_root;
+
+            m_root = r->m_next;
+            r->m_next = nullptr;
+
+            return r;
+        }
+    };
+    static_assert(std::is_trivially_destructible_v<PageLinkedList>);
+
+    static constexpr auto DefaultMemoryIgnoreAttr =
+        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+
+    static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) {
+        switch (static_cast<Svc::CreateProcessFlag>(as_type &
+                                                    Svc::CreateProcessFlag::AddressSpaceMask)) {
+        case Svc::CreateProcessFlag::AddressSpace64Bit:
+            return 39;
+        case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+            return 36;
+        case Svc::CreateProcessFlag::AddressSpace32Bit:
+        case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+            return 32;
+        default:
+            UNREACHABLE();
+        }
+    }
+
+private:
+    class KScopedPageTableUpdater {
+    private:
+        KPageTableBase* m_pt;
+        PageLinkedList m_ll;
+
+    public:
+        explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {}
+        explicit KScopedPageTableUpdater(KPageTableBase& pt)
+            : KScopedPageTableUpdater(std::addressof(pt)) {}
+        ~KScopedPageTableUpdater() {
+            m_pt->FinalizeUpdate(this->GetPageList());
+        }
+
+        PageLinkedList* GetPageList() {
+            return std::addressof(m_ll);
+        }
+    };
+
+private:
+    KernelCore& m_kernel;
+    Core::System& m_system;
+    KProcessAddress m_address_space_start{};
+    KProcessAddress m_address_space_end{};
+    KProcessAddress m_heap_region_start{};
+    KProcessAddress m_heap_region_end{};
+    KProcessAddress m_current_heap_end{};
+    KProcessAddress m_alias_region_start{};
+    KProcessAddress m_alias_region_end{};
+    KProcessAddress m_stack_region_start{};
+    KProcessAddress m_stack_region_end{};
+    KProcessAddress m_kernel_map_region_start{};
+    KProcessAddress m_kernel_map_region_end{};
+    KProcessAddress m_alias_code_region_start{};
+    KProcessAddress m_alias_code_region_end{};
+    KProcessAddress m_code_region_start{};
+    KProcessAddress m_code_region_end{};
+    size_t m_max_heap_size{};
+    size_t m_mapped_physical_memory_size{};
+    size_t m_mapped_unsafe_physical_memory{};
+    size_t m_mapped_insecure_memory{};
+    size_t m_mapped_ipc_server_memory{};
+    mutable KLightLock m_general_lock;
+    mutable KLightLock m_map_physical_memory_lock;
+    KLightLock m_device_map_lock;
+    std::unique_ptr<Common::PageTable> m_impl{};
+    Core::Memory::Memory* m_memory{};
+    KMemoryBlockManager m_memory_block_manager{};
+    u32 m_allocate_option{};
+    u32 m_address_space_width{};
+    bool m_is_kernel{};
+    bool m_enable_aslr{};
+    bool m_enable_device_address_space_merge{};
+    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+    KBlockInfoManager* m_block_info_manager{};
+    KResourceLimit* m_resource_limit{};
+    const KMemoryRegion* m_cached_physical_linear_region{};
+    const KMemoryRegion* m_cached_physical_heap_region{};
+    MemoryFillValue m_heap_fill_value{};
+    MemoryFillValue m_ipc_fill_value{};
+    MemoryFillValue m_stack_fill_value{};
+
+public:
+    explicit KPageTableBase(KernelCore& kernel);
+    ~KPageTableBase();
+
+    Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end,
+                               Core::Memory::Memory& memory);
+    Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+                                bool enable_device_address_space_merge, bool from_back,
+                                KMemoryManager::Pool pool, KProcessAddress code_address,
+                                size_t
code_size, KSystemResource* system_resource, + KResourceLimit* resource_limit, Core::Memory::Memory& memory); + + void Finalize(); + + bool IsKernel() const { + return m_is_kernel; + } + bool IsAslrEnabled() const { + return m_enable_aslr; + } + + bool Contains(KProcessAddress addr) const { + return m_address_space_start <= addr && addr <= m_address_space_end - 1; + } + + bool Contains(KProcessAddress addr, size_t size) const { + return m_address_space_start <= addr && addr < addr + size && + addr + size - 1 <= m_address_space_end - 1; + } + + bool IsInAliasRegion(KProcessAddress addr, size_t size) const { + return this->Contains(addr, size) && m_alias_region_start <= addr && + addr + size - 1 <= m_alias_region_end - 1; + } + + bool IsInHeapRegion(KProcessAddress addr, size_t size) const { + return this->Contains(addr, size) && m_heap_region_start <= addr && + addr + size - 1 <= m_heap_region_end - 1; + } + + bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { + // Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the + // alias code region. + return this->CanContain(addr, size, Svc::MemoryState::AliasCode); + } + + KScopedLightLock AcquireDeviceMapLock() { + return KScopedLightLock(m_device_map_lock); + } + + KProcessAddress GetRegionAddress(Svc::MemoryState state) const; + size_t GetRegionSize(Svc::MemoryState state) const; + bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const; + + KProcessAddress GetRegionAddress(KMemoryState state) const { + return this->GetRegionAddress(static_cast(state & KMemoryState::Mask)); + } + size_t GetRegionSize(KMemoryState state) const { + return this->GetRegionSize(static_cast(state & KMemoryState::Mask)); + } + bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { + return this->CanContain(addr, size, + static_cast(state & KMemoryState::Mask)); + } + +public: + Core::Memory::Memory& GetMemory() { + return *m_memory; + } + + Core::Memory::Memory& GetMemory() const { + return *m_memory; + } + + Common::PageTable& GetImpl() { + return *m_impl; + } + + Common::PageTable& GetImpl() const { + return *m_impl; + } + + size_t GetNumGuardPages() const { + return this->IsKernel() ? 1 : 4; + } + +protected: + // NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions + // in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived + // class, and this avoids unnecessary virtual function calls. 
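+    // NOTE: The page_list parameter mirrors the real kernel's tracking of page-table pages.
+    // Since the emulator does not allocate page entries in guest memory, Operate and
+    // FinalizeUpdate currently leave it (and the page properties) unused.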
+ Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages, + KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, + OperationType operation, bool reuse_ll); + Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages, + const KPageGroup& page_group, const KPageProperties properties, + OperationType operation, bool reuse_ll); + void FinalizeUpdate(PageLinkedList* page_list); + + bool IsLockedByCurrentThread() const { + return m_general_lock.IsLockedByCurrentThread(); + } + + bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) { + ASSERT(this->IsLockedByCurrentThread()); + + return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress( + m_cached_physical_linear_region, phys_addr); + } + + bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { + ASSERT(this->IsLockedByCurrentThread()); + + return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress( + m_cached_physical_linear_region, phys_addr, size); + } + + bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) { + ASSERT(this->IsLockedByCurrentThread()); + + return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, + phys_addr); + } + + bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { + ASSERT(this->IsLockedByCurrentThread()); + + return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, + phys_addr, size); + } + + bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) { + ASSERT(!this->IsLockedByCurrentThread()); + + return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, + phys_addr); + } + + bool ContainsPages(KProcessAddress addr, size_t num_pages) const { + return (m_address_space_start <= addr) && + (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && + (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); + } + +private: + KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, + size_t num_pages, size_t alignment, size_t offset, + size_t guard_pages) const; + + Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) const; + Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, + KMemoryPermission perm, KMemoryAttribute attr_mask, + KMemoryAttribute attr) const { + R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, + perm, attr_mask, attr)); + } + + Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) const; + Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, + KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr, + KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; + Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, + KProcessAddress 
addr, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr, + KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; + Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr, + KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { + R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, + state_mask, state, perm_mask, perm, attr_mask, attr, + ignore_attr)); + } + Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr, + KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { + R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, + attr_mask, attr, ignore_attr)); + } + + Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr, + size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr, + KMemoryPermission new_perm, KMemoryAttribute lock_attr); + Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, + KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr, + KMemoryPermission new_perm, KMemoryAttribute lock_attr, + const KPageGroup* pg); + + Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page, + KProcessAddress address) const; + + Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size, + Svc::MemoryState state) const; + + Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, + size_t num_pages, KMemoryPermission perm); + Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, + const KPageGroup& pg, const KPageProperties properties, bool reuse_ll); + + void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, + const KPageGroup& pg); + + Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages); + bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages); + + Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr); + + Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, + KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, + size_t region_num_pages, KMemoryState state, KMemoryPermission perm); + + Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr, + size_t size, KMemoryState state, KMemoryPermission perm); + Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size, + KMemoryState state); + Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size, + KMemoryState state); + + Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, + KProcessAddress address, size_t size, 
KMemoryPermission test_perm, + KMemoryState dst_state); + Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr, + KMemoryPermission test_perm, KMemoryState dst_state, + KPageTableBase& src_page_table, bool send); + void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address, + size_t size, KMemoryPermission prot_perm); + + size_t GetSize(KMemoryState state) const; + + bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const { + // Validate pre-conditions. + ASSERT(this->IsLockedByCurrentThread()); + + return this->GetImpl().GetPhysicalAddress(out, virt_addr); + } + +public: + bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const { + // Validate pre-conditions. + ASSERT(!this->IsLockedByCurrentThread()); + + // Acquire exclusive access to the table while doing address translation. + KScopedLightLock lk(m_general_lock); + + return this->GetPhysicalAddressLocked(out, virt_addr); + } + + KBlockInfoManager* GetBlockInfoManager() const { + return m_block_info_manager; + } + + Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm); + Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, + Svc::MemoryPermission perm); + Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, + KMemoryAttribute attr); + Result SetHeapSize(KProcessAddress* out, size_t size); + Result SetMaxHeapSize(size_t size); + Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, + KProcessAddress addr) const; + Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const; + Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const { + R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static)); + } + Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const { + R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io)); + } + Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); + Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, + Svc::MemoryMapping mapping, Svc::MemoryPermission perm); + Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, + Svc::MemoryMapping mapping); + Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); + Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm); + Result MapInsecureMemory(KProcessAddress address, size_t size); + Result UnmapInsecureMemory(KProcessAddress address, size_t size); + + Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, + KPhysicalAddress phys_addr, KProcessAddress region_start, + size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { + R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, + region_num_pages, state, perm)); + } + + Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, + KPhysicalAddress phys_addr, 
KMemoryState state, KMemoryPermission perm) { + R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, + this->GetRegionAddress(state), + this->GetRegionSize(state) / PageSize, state, perm)); + } + + Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, + KMemoryPermission perm) { + R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false, + this->GetRegionAddress(state), + this->GetRegionSize(state) / PageSize, state, perm)); + } + + Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, + KMemoryPermission perm); + Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); + + Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, + KProcessAddress region_start, size_t region_num_pages, KMemoryState state, + KMemoryPermission perm); + Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state, + KMemoryPermission perm); + Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state); + + Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr); + + Result InvalidateProcessDataCache(KProcessAddress address, size_t size); + Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size); + + Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, + KMemoryState state); + + Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, + KMemoryState state); + + Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, + KMemoryPermission perm, bool is_aligned, bool check_heap); + Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap); + + Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size); + Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size); + + Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, + KProcessAddress address, size_t size, + KMemoryPermission perm, bool is_aligned); + Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address, + size_t size); + + Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size); + Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size); + + Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, + KMemoryPermission perm); + Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg); + Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size); + Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg); + + Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address, + size_t size); + + Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, + KProcessAddress src_addr, KMemoryState src_state_mask, + KMemoryState src_state, KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); + Result 
CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr, + KMemoryState src_state_mask, KMemoryState src_state, + KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); + Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, + KMemoryState dst_state_mask, KMemoryState dst_state, + KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, + KProcessAddress src_addr); + Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, + KMemoryState dst_state_mask, KMemoryState dst_state, + KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, + void* buffer); + Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr, + size_t size, KMemoryState dst_state_mask, + KMemoryState dst_state, KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, + KProcessAddress src_addr, KMemoryState src_state_mask, + KMemoryState src_state, KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); + Result CopyMemoryFromHeapToHeapWithoutCheckDestination( + KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, + KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, + KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); + + Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, + KPageTableBase& src_page_table, KMemoryPermission test_perm, + KMemoryState dst_state, bool send); + Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state); + Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state); + + Result MapPhysicalMemory(KProcessAddress address, size_t size); + Result UnmapPhysicalMemory(KProcessAddress address, size_t size); + + Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); + Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); + + Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt, + KProcessAddress src_address); + +public: + KProcessAddress GetAddressSpaceStart() const { + return m_address_space_start; + } + KProcessAddress GetHeapRegionStart() const { + return m_heap_region_start; + } + KProcessAddress GetAliasRegionStart() const { + return m_alias_region_start; + } + KProcessAddress GetStackRegionStart() const { + return m_stack_region_start; + } + KProcessAddress GetKernelMapRegionStart() const { + return m_kernel_map_region_start; + } + KProcessAddress GetCodeRegionStart() const { + return m_code_region_start; + } + KProcessAddress GetAliasCodeRegionStart() const { + return m_alias_code_region_start; + } + + size_t GetAddressSpaceSize() const { + return m_address_space_end - m_address_space_start; + } + size_t GetHeapRegionSize() const { + return m_heap_region_end - m_heap_region_start; + } + size_t GetAliasRegionSize() const { + return m_alias_region_end - m_alias_region_start; + } + size_t GetStackRegionSize() const { + return m_stack_region_end - m_stack_region_start; + } + size_t GetKernelMapRegionSize() const { + return m_kernel_map_region_end - m_kernel_map_region_start; + } + size_t GetCodeRegionSize() const { + return 
m_code_region_end - m_code_region_start; + } + size_t GetAliasCodeRegionSize() const { + return m_alias_code_region_end - m_alias_code_region_start; + } + + size_t GetNormalMemorySize() const { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size; + } + + size_t GetCodeSize() const; + size_t GetCodeDataSize() const; + size_t GetAliasCodeSize() const; + size_t GetAliasCodeDataSize() const; + + u32 GetAllocateOption() const { + return m_allocate_option; + } + + u32 GetAddressSpaceWidth() const { + return m_address_space_width; + } + +public: + // Linear mapped + static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) { + return kernel.System().DeviceMemory().GetPointer(addr); + } + + static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel, + KVirtualAddress addr) { + return kernel.MemoryLayout().GetLinearPhysicalAddress(addr); + } + + static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel, + KPhysicalAddress addr) { + return kernel.MemoryLayout().GetLinearVirtualAddress(addr); + } + + // Heap + static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) { + return kernel.System().DeviceMemory().GetPointer(addr); + } + + static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) { + return GetLinearMappedPhysicalAddress(kernel, addr); + } + + static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) { + return GetLinearMappedVirtualAddress(kernel, addr); + } + + // Member heap + u8* GetHeapVirtualPointer(KPhysicalAddress addr) { + return GetHeapVirtualPointer(m_kernel, addr); + } + + KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) { + return GetHeapPhysicalAddress(m_kernel, addr); + } + + KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) { + return GetHeapVirtualAddress(m_kernel, addr); + } + + // TODO: GetPageTableVirtualAddress + // TODO: GetPageTablePhysicalAddress +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 36d209a11..4f8d0756a 100755 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -298,9 +298,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); const bool enable_das_merge = False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); - R_TRY(m_page_table.InitializeForProcess( - as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, - params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory())); + R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, + params.code_address, params.code_num_pages * PageSize, + m_system_resource, res_limit, this->GetMemory())); } ON_RESULT_FAILURE_2 { m_page_table.Finalize(); @@ -391,9 +391,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); const bool enable_das_merge = False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); - R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, - !enable_aslr, pool, params.code_address, code_size, - m_system_resource, res_limit, this->GetMemory())); + R_TRY(m_page_table.Initialize(as_type, 
enable_aslr, enable_das_merge, !enable_aslr, pool, + params.code_address, code_size, m_system_resource, res_limit, + this->GetMemory())); } ON_RESULT_FAILURE_2 { m_page_table.Finalize(); @@ -1122,9 +1122,9 @@ Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {} KProcess::KProcess(KernelCore& kernel) - : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()}, - m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()}, - m_address_arbiter{kernel.System()}, m_handle_table{kernel} {} + : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel}, + m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()}, + m_handle_table{kernel} {} KProcess::~KProcess() = default; Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index f5e9a32b7..8040aef34 100755 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -5,13 +5,14 @@ #include +#include "core/file_sys/program_metadata.h" #include "core/hle/kernel/code_set.h" #include "core/hle/kernel/k_address_arbiter.h" #include "core/hle/kernel/k_capabilities.h" #include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_handle_table.h" -#include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_page_table_manager.h" +#include "core/hle/kernel/k_process_page_table.h" #include "core/hle/kernel/k_system_resource.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread_local_page.h" @@ -65,7 +66,7 @@ private: using TLPIterator = TLPTree::iterator; private: - KPageTable m_page_table; + KProcessPageTable m_page_table; std::atomic m_used_kernel_memory_size{}; TLPTree m_fully_used_tlp_tree{}; TLPTree m_partially_used_tlp_tree{}; @@ -254,9 +255,8 @@ public: return m_is_hbl; } - Kernel::KMemoryManager::Direction GetAllocateOption() const { - // TODO: property of the KPageTableBase - return KMemoryManager::Direction::FromFront; + u32 GetAllocateOption() const { + return m_page_table.GetAllocateOption(); } ThreadList& GetThreadList() { @@ -295,10 +295,10 @@ public: return m_list_lock; } - KPageTable& GetPageTable() { + KProcessPageTable& GetPageTable() { return m_page_table; } - const KPageTable& GetPageTable() const { + const KProcessPageTable& GetPageTable() const { return m_page_table; } diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h new file mode 100755 index 000000000..b7ae5abd0 --- /dev/null +++ b/src/core/hle/kernel/k_process_page_table.h @@ -0,0 +1,480 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_scoped_lock.h" +#include "core/hle/kernel/svc_types.h" + +namespace Core { +class ARM_Interface; +} + +namespace Kernel { + +class KProcessPageTable { +private: + KPageTable m_page_table; + +public: + KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {} + + Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, + bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, + size_t code_size, KSystemResource* system_resource, + KResourceLimit* resource_limit, Core::Memory::Memory& memory) { + 
R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, + from_back, pool, code_address, code_size, + system_resource, resource_limit, memory)); + } + + void Finalize() { + m_page_table.Finalize(); + } + + Core::Memory::Memory& GetMemory() { + return m_page_table.GetMemory(); + } + + Core::Memory::Memory& GetMemory() const { + return m_page_table.GetMemory(); + } + + Common::PageTable& GetImpl() { + return m_page_table.GetImpl(); + } + + Common::PageTable& GetImpl() const { + return m_page_table.GetImpl(); + } + + size_t GetNumGuardPages() const { + return m_page_table.GetNumGuardPages(); + } + + KScopedLightLock AcquireDeviceMapLock() { + return m_page_table.AcquireDeviceMapLock(); + } + + Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) { + R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm)); + } + + Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, + Svc::MemoryPermission perm) { + R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm)); + } + + Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, + KMemoryAttribute attr) { + R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr)); + } + + Result SetHeapSize(KProcessAddress* out, size_t size) { + R_RETURN(m_page_table.SetHeapSize(out, size)); + } + + Result SetMaxHeapSize(size_t size) { + R_RETURN(m_page_table.SetMaxHeapSize(size)); + } + + Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, + KProcessAddress addr) const { + R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr)); + } + + Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) { + R_RETURN(m_page_table.QueryPhysicalAddress(out, address)); + } + + Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) { + R_RETURN(m_page_table.QueryStaticMapping(out, address, size)); + } + + Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) { + R_RETURN(m_page_table.QueryIoMapping(out, address, size)); + } + + Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + R_RETURN(m_page_table.MapMemory(dst_address, src_address, size)); + } + + Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size)); + } + + Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size)); + } + + Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size)); + } + + Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { + R_RETURN(m_page_table.MapIo(phys_addr, size, perm)); + } + + Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, + Svc::MemoryMapping mapping, Svc::MemoryPermission perm) { + R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm)); + } + + Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, + Svc::MemoryMapping mapping) { + R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping)); + } + + Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { + R_RETURN(m_page_table.MapStatic(phys_addr, size, perm)); + } + + 
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { + R_RETURN(m_page_table.MapRegion(region_type, perm)); + } + + Result MapInsecureMemory(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.MapInsecureMemory(address, size)); + } + + Result UnmapInsecureMemory(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.UnmapInsecureMemory(address, size)); + } + + Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, + KMemoryPermission perm) { + R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm)); + } + + Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) { + R_RETURN(m_page_table.UnmapPageGroup(address, pg, state)); + } + + Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, + KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { + R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm)); + } + + Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, + KMemoryPermission perm) { + R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm)); + } + + Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, + KMemoryPermission perm) { + R_RETURN(m_page_table.MapPages(address, num_pages, state, perm)); + } + + Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) { + R_RETURN(m_page_table.UnmapPages(addr, num_pages, state)); + } + + Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) { + R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, + perm_mask, perm, attr_mask, attr)); + } + + Result InvalidateProcessDataCache(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.InvalidateProcessDataCache(address, size)); + } + + Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size)); + } + + Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, + KMemoryState state) { + R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state)); + } + + Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size)); + } + + Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, + KMemoryState state) { + R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state)); + } + + Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, + KMemoryPermission perm, bool is_aligned, bool check_heap) { + R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm, + is_aligned, check_heap)); + } + + Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) { + R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap)); + } + + Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size)); + } + + Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) { + 
R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size)); + } + + Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, + KProcessAddress address, size_t size, + KMemoryPermission perm, bool is_aligned) { + R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm, + is_aligned)); + } + + Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out, + KProcessAddress address, size_t size) { + R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size)); + } + + Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) { + R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size)); + } + + Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size)); + } + + Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, + KMemoryPermission perm) { + R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm)); + } + + Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) { + R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg)); + } + + Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) { + R_RETURN(m_page_table.LockForCodeMemory(out, address, size)); + } + + Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) { + R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg)); + } + + Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out, + KProcessAddress address, size_t size) { + R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size)); + } + + Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, + KProcessAddress src_addr, KMemoryState src_state_mask, + KMemoryState src_state, KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { + R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, + src_state, src_test_perm, src_attr_mask, + src_attr)); + } + + Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr, + KMemoryState src_state_mask, KMemoryState src_state, + KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { + R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, + src_state, src_test_perm, src_attr_mask, + src_attr)); + } + + Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, + KMemoryState dst_state_mask, KMemoryState dst_state, + KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, + KProcessAddress src_addr) { + R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, + dst_test_perm, dst_attr_mask, dst_attr, + src_addr)); + } + + Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, + KMemoryState dst_state_mask, KMemoryState dst_state, + KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, + void* src_addr) { + R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, + dst_state, dst_test_perm, dst_attr_mask, + dst_attr, src_addr)); + } + + Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr, + size_t size, KMemoryState 
dst_state_mask, + KMemoryState dst_state, KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, + KProcessAddress src_addr, KMemoryState src_state_mask, + KMemoryState src_state, KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { + R_RETURN(m_page_table.CopyMemoryFromHeapToHeap( + dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, + dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, + src_attr_mask, src_attr)); + } + + Result CopyMemoryFromHeapToHeapWithoutCheckDestination( + KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size, + KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, + KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, + KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, + KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { + R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination( + dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, + dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, + src_attr_mask, src_attr)); + } + + Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, + KProcessPageTable& src_page_table, KMemoryPermission test_perm, + KMemoryState dst_state, bool send) { + R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, + test_perm, dst_state, send)); + } + + Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) { + R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state)); + } + + Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) { + R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state)); + } + + Result MapPhysicalMemory(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.MapPhysicalMemory(address, size)); + } + + Result UnmapPhysicalMemory(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.UnmapPhysicalMemory(address, size)); + } + + Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size)); + } + + Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size)); + } + + Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, + KProcessPageTable& src_page_table, KProcessAddress src_address) { + R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table, + src_address)); + } + + bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) { + return m_page_table.GetPhysicalAddress(out, address); + } + + bool Contains(KProcessAddress addr, size_t size) const { + return m_page_table.Contains(addr, size); + } + + bool IsInAliasRegion(KProcessAddress addr, size_t size) const { + return m_page_table.IsInAliasRegion(addr, size); + } + bool IsInHeapRegion(KProcessAddress addr, size_t size) const { + return m_page_table.IsInHeapRegion(addr, size); + } + bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { + return m_page_table.IsInUnsafeAliasRegion(addr, size); + } + + bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { + return m_page_table.CanContain(addr, size, state); + } + + 
KProcessAddress GetAddressSpaceStart() const { + return m_page_table.GetAddressSpaceStart(); + } + KProcessAddress GetHeapRegionStart() const { + return m_page_table.GetHeapRegionStart(); + } + KProcessAddress GetAliasRegionStart() const { + return m_page_table.GetAliasRegionStart(); + } + KProcessAddress GetStackRegionStart() const { + return m_page_table.GetStackRegionStart(); + } + KProcessAddress GetKernelMapRegionStart() const { + return m_page_table.GetKernelMapRegionStart(); + } + KProcessAddress GetCodeRegionStart() const { + return m_page_table.GetCodeRegionStart(); + } + KProcessAddress GetAliasCodeRegionStart() const { + return m_page_table.GetAliasCodeRegionStart(); + } + + size_t GetAddressSpaceSize() const { + return m_page_table.GetAddressSpaceSize(); + } + size_t GetHeapRegionSize() const { + return m_page_table.GetHeapRegionSize(); + } + size_t GetAliasRegionSize() const { + return m_page_table.GetAliasRegionSize(); + } + size_t GetStackRegionSize() const { + return m_page_table.GetStackRegionSize(); + } + size_t GetKernelMapRegionSize() const { + return m_page_table.GetKernelMapRegionSize(); + } + size_t GetCodeRegionSize() const { + return m_page_table.GetCodeRegionSize(); + } + size_t GetAliasCodeRegionSize() const { + return m_page_table.GetAliasCodeRegionSize(); + } + + size_t GetNormalMemorySize() const { + return m_page_table.GetNormalMemorySize(); + } + + size_t GetCodeSize() const { + return m_page_table.GetCodeSize(); + } + size_t GetCodeDataSize() const { + return m_page_table.GetCodeDataSize(); + } + + size_t GetAliasCodeSize() const { + return m_page_table.GetAliasCodeSize(); + } + size_t GetAliasCodeDataSize() const { + return m_page_table.GetAliasCodeDataSize(); + } + + u32 GetAllocateOption() const { + return m_page_table.GetAllocateOption(); + } + + u32 GetAddressSpaceWidth() const { + return m_page_table.GetAddressSpaceWidth(); + } + + KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) { + return m_page_table.GetHeapPhysicalAddress(address); + } + + u8* GetHeapVirtualPointer(KPhysicalAddress address) { + return m_page_table.GetHeapVirtualPointer(address); + } + + KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) { + return m_page_table.GetHeapVirtualAddress(address); + } + + KBlockInfoManager* GetBlockInfoManager() { + return m_page_table.GetBlockInfoManager(); + } + + KPageTable& GetBasePageTable() { + return m_page_table; + } + + const KPageTable& GetBasePageTable() const { + return m_page_table; + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 309aa0157..f8299047d 100755 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) { if (event != nullptr) { // // Get the client process/page table. // KProcess *client_process = client_thread->GetOwnerProcess(); - // KPageTable *client_page_table = std::addressof(client_process->PageTable()); + // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable()); // // If we need to, reply with an async error. 
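The KProcessPageTable facade defined above means process-level callers no longer reach into KPageTable directly, and queries follow the Result/out-parameter convention used throughout these hunks. A minimal illustrative caller, not part of the patch — the function name and the `process` pointer are hypothetical, while GetPageTable(), QueryInfo(), R_TRY and R_SUCCEED come from the changes above:

// Sketch only: query a region through the new KProcessPageTable facade.
Result QueryRegionExample(Kernel::KProcess* process, Kernel::KProcessAddress address) {
    // GetPageTable() now returns a KProcessPageTable& (see the k_process.h change above).
    auto& page_table = process->GetPageTable();

    Kernel::KMemoryInfo mem_info;
    Kernel::Svc::PageInfo page_info;
    // QueryInfo fills both the memory info and the page info in one call.
    R_TRY(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), address));

    // Region accessors are plain delegations to the wrapped KPageTable,
    // e.g. page_table.GetHeapRegionStart(), page_table.GetAddressSpaceSize().
    R_SUCCEED();
}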
// if (R_FAILED(client_result)) { diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp index 07e92aa80..b51941faf 100755 --- a/src/core/hle/kernel/k_system_resource.cpp +++ b/src/core/hle/kernel/k_system_resource.cpp @@ -40,7 +40,7 @@ Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_l // Get resource pointer. KPhysicalAddress resource_paddr = - KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address); + KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address); auto* resource = m_kernel.System().DeviceMemory().GetPointer(resource_paddr); diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp index cfaae2b14..cebdacf09 100755 --- a/src/core/hle/kernel/k_thread_local_page.cpp +++ b/src/core/hle/kernel/k_thread_local_page.cpp @@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) { Result KThreadLocalPage::Finalize() { // Get the physical address of the page. - const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr); - ASSERT(phys_addr); + KPhysicalAddress phys_addr{}; + ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr)); // Unmap the page. R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp index 97f1210de..4ca62860d 100755 --- a/src/core/hle/kernel/svc/svc_memory.cpp +++ b/src/core/hle/kernel/svc/svc_memory.cpp @@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) { // Helper function that performs the common sanity checks for svcMapMemory // and svcUnmapMemory. This is doable, as both functions perform their sanitizing // in the same order. -Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) { +Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr, + u64 size) { if (!Common::Is4KBAligned(dst_addr)) { LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); R_THROW(ResultInvalidAddress); @@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); // Set the memory attribute. - R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr)); + R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast(mask), + static_cast(attr))); } /// Maps a memory range into a different range. diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp index 99330d02a..793e9f8d0 100755 --- a/src/core/hle/kernel/svc/svc_physical_memory.cpp +++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp @@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) { R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); // Set the heap size. - R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size)); + KProcessAddress address{}; + R_TRY(GetCurrentProcess(system.Kernel()) + .GetPageTable() + .SetHeapSize(std::addressof(address), size)); + + // We succeeded. 
+ *out_address = GetInteger(address); + R_SUCCEED(); } /// Maps memory at a desired address diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp index 07cd48175..e1427947b 100755 --- a/src/core/hle/kernel/svc/svc_process_memory.cpp +++ b/src/core/hle/kernel/svc/svc_process_memory.cpp @@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d R_THROW(ResultInvalidCurrentMemory); } - R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size, - KPageTable::ICacheInvalidationStrategy::InvalidateAll)); + R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size)); } Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp index 51af06e97..816dcb8d0 100755 --- a/src/core/hle/kernel/svc/svc_query_memory.cpp +++ b/src/core/hle/kernel/svc/svc_query_memory.cpp @@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn } auto& current_memory{GetCurrentMemory(system.Kernel())}; - const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()}; - current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info)); + KMemoryInfo mem_info; + R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address)); - //! This is supposed to be part of the QueryInfo call. - *out_page_info = {}; + const auto svc_mem_info = mem_info.GetSvcMemoryInfo(); + current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info)); R_SUCCEED(); } diff --git a/src/core/hle/result.h b/src/core/hle/result.h index c2ecb5658..cfde4137f 100755 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h @@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess; /// Evaluates a boolean expression, and succeeds if that expression is true. 
#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) + +#define R_TRY_CATCH(res_expr) \ + { \ + const auto R_CURRENT_RESULT = (res_expr); \ + if (R_FAILED(R_CURRENT_RESULT)) { \ + if (false) + +#define R_END_TRY_CATCH \ + else if (R_FAILED(R_CURRENT_RESULT)) { \ + R_THROW(R_CURRENT_RESULT); \ + } \ + } \ + } + +#define R_CATCH_ALL() \ + } \ + else if (R_FAILED(R_CURRENT_RESULT)) { \ + if (true) + +#define R_CATCH(res_expr) \ + } \ + else if ((res_expr) == (R_CURRENT_RESULT)) { \ + if (true) + +#define R_CONVERT(catch_type, convert_type) \ + R_CATCH(catch_type) { R_THROW(static_cast(convert_type)); } + +#define R_CONVERT_ALL(convert_type) \ + R_CATCH_ALL() { R_THROW(static_cast(convert_type)); } + +#define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr)) diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index f1562069e..1416709cf 100755 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp @@ -1108,9 +1108,9 @@ Result Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) { shared_memory->sixaxis_dual_right_properties.raw = 0; shared_memory->sixaxis_left_properties.raw = 0; shared_memory->sixaxis_right_properties.raw = 0; - shared_memory->battery_level_dual = 0; - shared_memory->battery_level_left = 0; - shared_memory->battery_level_right = 0; + shared_memory->battery_level_dual = Core::HID::NpadBatteryLevel::Empty; + shared_memory->battery_level_left = Core::HID::NpadBatteryLevel::Empty; + shared_memory->battery_level_right = Core::HID::NpadBatteryLevel::Empty; shared_memory->fullkey_color = { .attribute = ColorAttribute::NoController, .fullkey = {}, diff --git a/src/core/hle/service/hid/ring_lifo.h b/src/core/hle/service/hid/ring_lifo.h index bea347ea5..525d95914 100755 --- a/src/core/hle/service/hid/ring_lifo.h +++ b/src/core/hle/service/hid/ring_lifo.h @@ -32,15 +32,15 @@ struct Lifo { } std::size_t GetPreviousEntryIndex() const { - return static_cast((buffer_tail + total_buffer_count - 1) % total_buffer_count); + return static_cast((buffer_tail + max_buffer_size - 1) % max_buffer_size); } std::size_t GetNextEntryIndex() const { - return static_cast((buffer_tail + 1) % total_buffer_count); + return static_cast((buffer_tail + 1) % max_buffer_size); } void WriteNextEntry(const State& new_state) { - if (buffer_count < total_buffer_count - 1) { + if (buffer_count < static_cast(max_buffer_size) - 1) { buffer_count++; } buffer_tail = GetNextEntryIndex(); diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index a5facf802..2b419572d 100755 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp @@ -286,9 +286,14 @@ public: rb.Push(ResultSuccess); } - bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const { + bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start, + std::size_t size) const { const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; - const auto start_info{page_table.QueryInfo(start - 1)}; + + Kernel::KMemoryInfo start_info; + Kernel::Svc::PageInfo page_info; + R_ASSERT( + page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1)); if (start_info.GetState() != Kernel::KMemoryState::Free) { return {}; @@ -298,7 +303,9 @@ public: return {}; } - const auto end_info{page_table.QueryInfo(start + size)}; + Kernel::KMemoryInfo end_info; + R_ASSERT(page_table.QueryInfo(std::addressof(end_info), 
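The R_TRY_CATCH family added to result.h above relies on the dangling-else construct familiar from Atmosphère-style result handling: R_TRY_CATCH captures the result and opens an `if (false)` arm, each R_CATCH/R_CATCH_ALL chains on as an `else if` against the captured result, and R_END_TRY_CATCH rethrows anything left unhandled before closing the scopes. A minimal usage sketch, illustrative only — DoSomething() is a placeholder, and the result codes are ones already referenced elsewhere in this patch:

// Sketch only: handling specific failure codes with the new macros.
Result Example() {
    R_TRY_CATCH(DoSomething()) {
        // Swallow this particular failure and fall through to the success path.
        R_CATCH(ResultInvalidAddress) { /* recover */ }
        // Remap every other failure to a single error code.
        R_CONVERT_ALL(ResultInvalidCurrentMemory)
    } R_END_TRY_CATCH;

    R_SUCCEED();
}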
std::addressof(page_info), + start + size)); if (end_info.GetState() != Kernel::KMemoryState::Free) { return {}; @@ -307,7 +314,7 @@ public: return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); } - Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) { + Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) { size = Common::AlignUp(size, Kernel::PageSize); size += page_table.GetNumGuardPages() * Kernel::PageSize * 4; @@ -391,12 +398,8 @@ public: if (bss_size) { auto block_guard = detail::ScopeExit([&] { - page_table.UnmapCodeMemory( - addr + nro_size, bss_addr, bss_size, - Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange); - page_table.UnmapCodeMemory( - addr, nro_addr, nro_size, - Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange); + page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size); + page_table.UnmapCodeMemory(addr, nro_addr, nro_size); }); const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)}; @@ -578,21 +581,17 @@ public: auto& page_table{system.ApplicationProcess()->GetPageTable()}; if (info.bss_size != 0) { - R_TRY(page_table.UnmapCodeMemory( - info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address, - info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); + R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size + + info.data_size, + info.bss_address, info.bss_size)); } - R_TRY(page_table.UnmapCodeMemory( - info.nro_address + info.text_size + info.ro_size, - info.src_addr + info.text_size + info.ro_size, info.data_size, - Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); - R_TRY(page_table.UnmapCodeMemory( - info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size, - Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); - R_TRY(page_table.UnmapCodeMemory( - info.nro_address, info.src_addr, info.text_size, - Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); + R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size, + info.src_addr + info.text_size + info.ro_size, + info.data_size)); + R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size, + info.src_addr + info.text_size, info.ro_size)); + R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size)); return ResultSuccess; } diff --git a/src/core/hle/service/nvnflinger/buffer_item.h b/src/core/hle/service/nvnflinger/buffer_item.h index 3da8cc3aa..7fd808f54 100755 --- a/src/core/hle/service/nvnflinger/buffer_item.h +++ b/src/core/hle/service/nvnflinger/buffer_item.h @@ -15,7 +15,7 @@ namespace Service::android { -struct GraphicBuffer; +class GraphicBuffer; class BufferItem final { public: diff --git a/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp index 51291539d..d91886bed 100755 --- a/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp +++ b/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp @@ -5,7 +5,6 @@ // https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp #include "common/logging/log.h" -#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvnflinger/buffer_item.h" #include "core/hle/service/nvnflinger/buffer_queue_consumer.h" #include 
"core/hle/service/nvnflinger/buffer_queue_core.h" @@ -14,9 +13,8 @@ namespace Service::android { -BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr core_, - Service::Nvidia::NvCore::NvMap& nvmap_) - : core{std::move(core_)}, slots{core->slots}, nvmap(nvmap_) {} +BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr core_) + : core{std::move(core_)}, slots{core->slots} {} BufferQueueConsumer::~BufferQueueConsumer() = default; @@ -136,8 +134,6 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc slots[slot].buffer_state = BufferState::Free; - nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), true); - listener = core->connected_producer_listener; LOG_DEBUG(Service_Nvnflinger, "releasing slot {}", slot); @@ -175,6 +171,25 @@ Status BufferQueueConsumer::Connect(std::shared_ptr consumer_ return Status::NoError; } +Status BufferQueueConsumer::Disconnect() { + LOG_DEBUG(Service_Nvnflinger, "called"); + + std::scoped_lock lock{core->mutex}; + + if (core->consumer_listener == nullptr) { + LOG_ERROR(Service_Nvnflinger, "no consumer is connected"); + return Status::BadValue; + } + + core->is_abandoned = true; + core->consumer_listener = nullptr; + core->queue.clear(); + core->FreeAllBuffersLocked(); + core->SignalDequeueCondition(); + + return Status::NoError; +} + Status BufferQueueConsumer::GetReleasedBuffers(u64* out_slot_mask) { if (out_slot_mask == nullptr) { LOG_ERROR(Service_Nvnflinger, "out_slot_mask may not be nullptr"); diff --git a/src/core/hle/service/nvnflinger/buffer_queue_consumer.h b/src/core/hle/service/nvnflinger/buffer_queue_consumer.h index 50ed0bb5f..0a61e8dbd 100755 --- a/src/core/hle/service/nvnflinger/buffer_queue_consumer.h +++ b/src/core/hle/service/nvnflinger/buffer_queue_consumer.h @@ -13,10 +13,6 @@ #include "core/hle/service/nvnflinger/buffer_queue_defs.h" #include "core/hle/service/nvnflinger/status.h" -namespace Service::Nvidia::NvCore { -class NvMap; -} // namespace Service::Nvidia::NvCore - namespace Service::android { class BufferItem; @@ -25,19 +21,18 @@ class IConsumerListener; class BufferQueueConsumer final { public: - explicit BufferQueueConsumer(std::shared_ptr core_, - Service::Nvidia::NvCore::NvMap& nvmap_); + explicit BufferQueueConsumer(std::shared_ptr core_); ~BufferQueueConsumer(); Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present); Status ReleaseBuffer(s32 slot, u64 frame_number, const Fence& release_fence); Status Connect(std::shared_ptr consumer_listener, bool controlled_by_app); + Status Disconnect(); Status GetReleasedBuffers(u64* out_slot_mask); private: std::shared_ptr core; BufferQueueDefs::SlotsType& slots; - Service::Nvidia::NvCore::NvMap& nvmap; }; } // namespace Service::android diff --git a/src/core/hle/service/nvnflinger/buffer_queue_core.cpp b/src/core/hle/service/nvnflinger/buffer_queue_core.cpp index ed66f6f5b..4ed5e5978 100755 --- a/src/core/hle/service/nvnflinger/buffer_queue_core.cpp +++ b/src/core/hle/service/nvnflinger/buffer_queue_core.cpp @@ -14,24 +14,12 @@ BufferQueueCore::BufferQueueCore() = default; BufferQueueCore::~BufferQueueCore() = default; -void BufferQueueCore::NotifyShutdown() { - std::scoped_lock lock{mutex}; - - is_shutting_down = true; - - SignalDequeueCondition(); -} - void BufferQueueCore::SignalDequeueCondition() { dequeue_possible.store(true); dequeue_condition.notify_all(); } bool BufferQueueCore::WaitForDequeueCondition(std::unique_lock& lk) { - if (is_shutting_down) { - return false; - } - dequeue_condition.wait(lk, [&] { 
return dequeue_possible.load(); }); dequeue_possible.store(false); diff --git a/src/core/hle/service/nvnflinger/buffer_queue_core.h b/src/core/hle/service/nvnflinger/buffer_queue_core.h index 9164f08a0..e513d183b 100755 --- a/src/core/hle/service/nvnflinger/buffer_queue_core.h +++ b/src/core/hle/service/nvnflinger/buffer_queue_core.h @@ -34,8 +34,6 @@ public: BufferQueueCore(); ~BufferQueueCore(); - void NotifyShutdown(); - private: void SignalDequeueCondition(); bool WaitForDequeueCondition(std::unique_lock& lk); @@ -74,7 +72,6 @@ private: u32 transform_hint{}; bool is_allocating{}; mutable std::condition_variable_any is_allocating_condition; - bool is_shutting_down{}; }; } // namespace Service::android diff --git a/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp index 6e7a49658..5d8762d25 100755 --- a/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp +++ b/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp @@ -13,7 +13,6 @@ #include "core/hle/kernel/kernel.h" #include "core/hle/service/hle_ipc.h" #include "core/hle/service/kernel_helpers.h" -#include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvnflinger/buffer_queue_core.h" #include "core/hle/service/nvnflinger/buffer_queue_producer.h" #include "core/hle/service/nvnflinger/consumer_listener.h" @@ -533,8 +532,6 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input, item.is_droppable = core->dequeue_buffer_cannot_block || async; item.swap_interval = swap_interval; - nvmap.DuplicateHandle(item.graphic_buffer->BufferId(), true); - sticky_transform = sticky_transform_; if (core->queue.empty()) { @@ -744,19 +741,13 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) { return Status::NoError; } - // HACK: We are not Android. Remove handle for items in queue, and clear queue. - // Allows synchronous destruction of nvmap handles. - for (auto& item : core->queue) { - nvmap.FreeHandle(item.graphic_buffer->BufferId(), true); - } - core->queue.clear(); - switch (api) { case NativeWindowApi::Egl: case NativeWindowApi::Cpu: case NativeWindowApi::Media: case NativeWindowApi::Camera: if (core->connected_api == api) { + core->queue.clear(); core->FreeAllBuffersLocked(); core->connected_producer_listener = nullptr; core->connected_api = NativeWindowApi::NoConnectedApi; @@ -785,7 +776,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) { } Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot, - const std::shared_ptr& buffer) { + const std::shared_ptr& buffer) { LOG_DEBUG(Service_Nvnflinger, "slot {}", slot); if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) { @@ -796,7 +787,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot, slots[slot] = {}; slots[slot].fence = Fence::NoFence(); - slots[slot].graphic_buffer = buffer; + slots[slot].graphic_buffer = std::make_shared(nvmap, buffer); slots[slot].frame_number = 0; // Most games preallocate a buffer and pass a valid buffer here. 
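With the explicit nvmap.DuplicateHandle()/FreeHandle() calls removed from the consumer and producer above, the reference to the underlying nvmap handle is now owned by the GraphicBuffer object that SetPreallocatedBuffer constructs; the real constructor and destructor are added later in this patch (graphic_buffer.cpp). A rough sketch of that ownership pattern — the class and member names below are hypothetical illustrations, and only DuplicateHandle/FreeHandle and their arguments are taken from the patch:

// Sketch only: RAII-style nvmap handle ownership, mirroring what GraphicBuffer now does.
class HandleRef {
public:
    HandleRef(Service::Nvidia::NvCore::NvMap& nvmap, u32 buffer_id)
        : m_nvmap{nvmap}, m_id{buffer_id} {
        if (m_id > 0) {
            // Take a reference for the lifetime of this wrapper.
            m_nvmap.DuplicateHandle(m_id, true);
        }
    }
    ~HandleRef() {
        if (m_id > 0) {
            // Drop the reference when the wrapper goes away.
            m_nvmap.FreeHandle(m_id, true);
        }
    }
private:
    Service::Nvidia::NvCore::NvMap& m_nvmap;
    u32 m_id;
};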
However, it is possible for @@ -839,7 +830,7 @@ void BufferQueueProducer::Transact(HLERequestContext& ctx, TransactionId code, u } case TransactionId::SetPreallocatedBuffer: { const auto slot = parcel_in.Read(); - const auto buffer = parcel_in.ReadObject(); + const auto buffer = parcel_in.ReadObject(); status = SetPreallocatedBuffer(slot, buffer); break; @@ -867,7 +858,7 @@ void BufferQueueProducer::Transact(HLERequestContext& ctx, TransactionId code, u status = RequestBuffer(slot, &buf); - parcel_out.WriteFlattenedObject(buf); + parcel_out.WriteFlattenedObject(buf.get()); break; } case TransactionId::QueueBuffer: { diff --git a/src/core/hle/service/nvnflinger/buffer_queue_producer.h b/src/core/hle/service/nvnflinger/buffer_queue_producer.h index d4201c104..64c17d56c 100755 --- a/src/core/hle/service/nvnflinger/buffer_queue_producer.h +++ b/src/core/hle/service/nvnflinger/buffer_queue_producer.h @@ -38,6 +38,7 @@ namespace Service::android { class BufferQueueCore; class IProducerListener; +struct NvGraphicBuffer; class BufferQueueProducer final : public IBinder { public: @@ -65,7 +66,7 @@ public: bool producer_controlled_by_app, QueueBufferOutput* output); Status Disconnect(NativeWindowApi api); - Status SetPreallocatedBuffer(s32 slot, const std::shared_ptr& buffer); + Status SetPreallocatedBuffer(s32 slot, const std::shared_ptr& buffer); private: BufferQueueProducer(const BufferQueueProducer&) = delete; diff --git a/src/core/hle/service/nvnflinger/buffer_slot.h b/src/core/hle/service/nvnflinger/buffer_slot.h index d8c9dec3b..d25bca049 100755 --- a/src/core/hle/service/nvnflinger/buffer_slot.h +++ b/src/core/hle/service/nvnflinger/buffer_slot.h @@ -13,7 +13,7 @@ namespace Service::android { -struct GraphicBuffer; +class GraphicBuffer; enum class BufferState : u32 { Free = 0, diff --git a/src/core/hle/service/nvnflinger/consumer_base.cpp b/src/core/hle/service/nvnflinger/consumer_base.cpp index 4dcda8dac..1059e72bf 100755 --- a/src/core/hle/service/nvnflinger/consumer_base.cpp +++ b/src/core/hle/service/nvnflinger/consumer_base.cpp @@ -27,6 +27,26 @@ void ConsumerBase::Connect(bool controlled_by_app) { consumer->Connect(shared_from_this(), controlled_by_app); } +void ConsumerBase::Abandon() { + LOG_DEBUG(Service_Nvnflinger, "called"); + + std::scoped_lock lock{mutex}; + + if (!is_abandoned) { + this->AbandonLocked(); + is_abandoned = true; + } +} + +void ConsumerBase::AbandonLocked() { + for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; i++) { + this->FreeBufferLocked(i); + } + // disconnect from the BufferQueue + consumer->Disconnect(); + consumer = nullptr; +} + void ConsumerBase::FreeBufferLocked(s32 slot_index) { LOG_DEBUG(Service_Nvnflinger, "slot_index={}", slot_index); diff --git a/src/core/hle/service/nvnflinger/consumer_base.h b/src/core/hle/service/nvnflinger/consumer_base.h index 264829414..ea3e9e97a 100755 --- a/src/core/hle/service/nvnflinger/consumer_base.h +++ b/src/core/hle/service/nvnflinger/consumer_base.h @@ -24,6 +24,7 @@ class BufferQueueConsumer; class ConsumerBase : public IConsumerListener, public std::enable_shared_from_this { public: void Connect(bool controlled_by_app); + void Abandon(); protected: explicit ConsumerBase(std::unique_ptr consumer_); @@ -34,6 +35,7 @@ protected: void OnBuffersReleased() override; void OnSidebandStreamChanged() override; + void AbandonLocked(); void FreeBufferLocked(s32 slot_index); Status AcquireBufferLocked(BufferItem* item, std::chrono::nanoseconds present_when); Status ReleaseBufferLocked(s32 slot, const std::shared_ptr& 
graphic_buffer); diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp index 6dc327b8b..d7db24f42 100755 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp @@ -166,7 +166,7 @@ constexpr SharedMemoryPoolLayout SharedBufferPoolLayout = [] { }(); void MakeGraphicBuffer(android::BufferQueueProducer& producer, u32 slot, u32 handle) { - auto buffer = std::make_shared(); + auto buffer = std::make_shared(); buffer->width = SharedBufferWidth; buffer->height = SharedBufferHeight; buffer->stride = SharedBufferBlockLinearStride; diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp index bebb45eae..0745434c5 100755 --- a/src/core/hle/service/nvnflinger/nvnflinger.cpp +++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp @@ -47,7 +47,10 @@ void Nvnflinger::SplitVSync(std::stop_token stop_token) { vsync_signal.Wait(); const auto lock_guard = Lock(); - Compose(); + + if (!is_abandoned) { + Compose(); + } } } @@ -98,7 +101,6 @@ Nvnflinger::~Nvnflinger() { } ShutdownLayers(); - vsync_thread = {}; if (nvdrv) { nvdrv->Close(disp_fd); @@ -106,12 +108,20 @@ Nvnflinger::~Nvnflinger() { } void Nvnflinger::ShutdownLayers() { - const auto lock_guard = Lock(); - for (auto& display : displays) { - for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) { - display.GetLayer(layer).Core().NotifyShutdown(); + // Abandon consumers. + { + const auto lock_guard = Lock(); + for (auto& display : displays) { + for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) { + display.GetLayer(layer).GetConsumer().Abandon(); + } } + + is_abandoned = true; } + + // Join the vsync thread, if it exists. + vsync_thread = {}; } void Nvnflinger::SetNVDrvInstance(std::shared_ptr instance) { diff --git a/src/core/hle/service/nvnflinger/nvnflinger.h b/src/core/hle/service/nvnflinger/nvnflinger.h index 959d8b46b..f5d73acdb 100755 --- a/src/core/hle/service/nvnflinger/nvnflinger.h +++ b/src/core/hle/service/nvnflinger/nvnflinger.h @@ -140,6 +140,8 @@ private: s32 swap_interval = 1; + bool is_abandoned = false; + /// Event that handles screen composition. 
std::shared_ptr multi_composition_event; std::shared_ptr single_composition_event; diff --git a/src/core/hle/service/nvnflinger/status.h b/src/core/hle/service/nvnflinger/status.h index 7af166c40..3fa0fe15b 100755 --- a/src/core/hle/service/nvnflinger/status.h +++ b/src/core/hle/service/nvnflinger/status.h @@ -19,7 +19,7 @@ enum class Status : s32 { Busy = -16, NoInit = -19, BadValue = -22, - InvalidOperation = -37, + InvalidOperation = -38, BufferNeedsReallocation = 1, ReleaseAllBuffers = 2, }; diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp new file mode 100755 index 000000000..ce70946ec --- /dev/null +++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "core/hle/service/nvdrv/core/nvmap.h" +#include "core/hle/service/nvnflinger/ui/graphic_buffer.h" + +namespace Service::android { + +static NvGraphicBuffer GetBuffer(std::shared_ptr& buffer) { + if (buffer) { + return *buffer; + } else { + return {}; + } +} + +GraphicBuffer::GraphicBuffer(u32 width_, u32 height_, PixelFormat format_, u32 usage_) + : NvGraphicBuffer(width_, height_, format_, usage_), m_nvmap(nullptr) {} + +GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap, + std::shared_ptr buffer) + : NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) { + if (this->BufferId() > 0) { + m_nvmap->DuplicateHandle(this->BufferId(), true); + } +} + +GraphicBuffer::~GraphicBuffer() { + if (m_nvmap != nullptr && this->BufferId() > 0) { + m_nvmap->FreeHandle(this->BufferId(), true); + } +} + +} // namespace Service::android diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.h b/src/core/hle/service/nvnflinger/ui/graphic_buffer.h index 3eac5cedd..da430aa75 100755 --- a/src/core/hle/service/nvnflinger/ui/graphic_buffer.h +++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.h @@ -6,16 +6,22 @@ #pragma once +#include + #include "common/common_funcs.h" #include "common/common_types.h" #include "core/hle/service/nvnflinger/pixel_format.h" +namespace Service::Nvidia::NvCore { +class NvMap; +} // namespace Service::Nvidia::NvCore + namespace Service::android { -struct GraphicBuffer final { - constexpr GraphicBuffer() = default; +struct NvGraphicBuffer { + constexpr NvGraphicBuffer() = default; - constexpr GraphicBuffer(u32 width_, u32 height_, PixelFormat format_, u32 usage_) + constexpr NvGraphicBuffer(u32 width_, u32 height_, PixelFormat format_, u32 usage_) : width{static_cast(width_)}, height{static_cast(height_)}, format{format_}, usage{static_cast(usage_)} {} @@ -93,6 +99,17 @@ struct GraphicBuffer final { u32 offset{}; INSERT_PADDING_WORDS(60); }; -static_assert(sizeof(GraphicBuffer) == 0x16C, "GraphicBuffer has wrong size"); +static_assert(sizeof(NvGraphicBuffer) == 0x16C, "NvGraphicBuffer has wrong size"); + +class GraphicBuffer final : public NvGraphicBuffer { +public: + explicit GraphicBuffer(u32 width, u32 height, PixelFormat format, u32 usage); + explicit GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap, + std::shared_ptr buffer); + ~GraphicBuffer(); + +private: + Service::Nvidia::NvCore::NvMap* m_nvmap{}; +}; } // namespace Service::android diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp index 83aa68fc0..54f738d2b 100755 --- a/src/core/hle/service/vi/display/vi_display.cpp +++ 
b/src/core/hle/service/vi/display/vi_display.cpp @@ -35,7 +35,7 @@ static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_cont return { buffer_queue_core, std::make_unique(service_context, buffer_queue_core, nvmap), - std::make_unique(buffer_queue_core, nvmap)}; + std::make_unique(buffer_queue_core)}; } Display::Display(u64 id, std::string name_, diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 28eb4cd0e..bfdb1694a 100755 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -41,7 +41,7 @@ struct Memory::Impl { explicit Impl(Core::System& system_) : system{system_} {} void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) { - current_page_table = &process.GetPageTable().PageTableImpl(); + current_page_table = &process.GetPageTable().GetImpl(); current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer(); const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth(); @@ -195,7 +195,7 @@ struct Memory::Impl { bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) { - const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl(); + const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl(); std::size_t remaining_size = size; std::size_t page_index = addr >> YUZU_PAGEBITS; std::size_t page_offset = addr & YUZU_PAGEMASK; @@ -826,7 +826,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { const Kernel::KProcess& process = *system.ApplicationProcess(); - const auto& page_table = process.GetPageTable().PageTableImpl(); + const auto& page_table = process.GetPageTable().GetImpl(); const size_t page = vaddr >> YUZU_PAGEBITS; if (page >= page_table.pointers.size()) { return false; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 6a4da6528..73bbf7972 100755 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -924,9 +924,13 @@ void RasterizerVulkan::UpdateDynamicStates() { } void RasterizerVulkan::HandleTransformFeedback() { + static std::once_flag warn_unsupported; + const auto& regs = maxwell3d->regs; if (!device.IsExtTransformFeedbackSupported()) { - LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported"); + std::call_once(warn_unsupported, [&] { + LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported"); + }); return; } query_cache.CounterEnable(VideoCommon::QueryType::StreamingByteCount,