diff --git a/README.md b/README.md
index c85b2a90f..3d7f550e1 100755
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2526.
+This is the source code for early-access 2527.
 
 ## Legal Notice
 
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index 28949fe5e..c465cfc14 100755
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -327,8 +327,8 @@ private:
     bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
         const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
         if (it != placeholders.end() && it->lower() == virtual_offset + length) {
-            const bool is_root = it == placeholders.begin() && virtual_offset == 0;
-            return is_root || std::prev(it)->upper() == virtual_offset;
+            return it == placeholders.begin() ? virtual_offset == 0
+                                              : std::prev(it)->upper() == virtual_offset;
         }
         return false;
     }
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index c51c05b28..f1c9ed6c4 100755
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -218,19 +218,17 @@ private:
     Impl(const std::filesystem::path& file_backend_filename, const Filter& filter_)
         : filter{filter_}, file_backend{file_backend_filename} {}
 
-    ~Impl() {
-        StopBackendThread();
-    }
+    ~Impl() = default;
 
     void StartBackendThread() {
-        backend_thread = std::thread([this] {
+        backend_thread = std::jthread([this](std::stop_token stop_token) {
             Common::SetCurrentThreadName("yuzu:Log");
             Entry entry;
             const auto write_logs = [this, &entry]() {
                 ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
             };
-            while (!stop.stop_requested()) {
-                entry = message_queue.PopWait(stop.get_token());
+            while (!stop_token.stop_requested()) {
+                entry = message_queue.PopWait(stop_token);
                 if (entry.filename != nullptr) {
                     write_logs();
                 }
@@ -244,11 +242,6 @@ private:
         });
     }
 
-    void StopBackendThread() {
-        stop.request_stop();
-        backend_thread.join();
-    }
-
     Entry CreateEntry(Class log_class, Level log_level, const char* filename, unsigned int line_nr,
                       const char* function, std::string&& message) const {
         using std::chrono::duration_cast;
@@ -283,8 +276,7 @@ private:
     ColorConsoleBackend color_console_backend{};
     FileBackend file_backend;
 
-    std::stop_source stop;
-    std::thread backend_thread;
+    std::jthread backend_thread;
     MPSCQueue<Entry> message_queue{};
     std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
 };
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index bafe4c974..998fe4773 100755
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -285,40 +285,65 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
     return ResultSuccess;
 }
 
-ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+    // Validate the mapping request.
+    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+             ResultInvalidMemoryRegion);
+
+    // Lock the table.
     KScopedLightLock lk(general_lock);
 
-    const std::size_t num_pages{size / PageSize};
+    // Verify that the source memory is normal heap.
+    KMemoryState src_state{};
+    KMemoryPermission src_perm{};
+    std::size_t num_src_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr,
+                                 std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
+                                 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
 
-    KMemoryState state{};
-    KMemoryPermission perm{};
-    CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, nullptr, src_addr, size,
-                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
-                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-
-    if (IsRegionMapped(dst_addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    KPageLinkedList page_linked_list;
-    AddRegionToPages(src_addr, num_pages, page_linked_list);
+    // Verify that the destination memory is unmapped.
+    std::size_t num_dst_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
 
+    // Map the code memory.
     {
-        auto block_guard = detail::ScopeExit(
-            [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });
+        // Determine the number of pages being operated on.
+        const std::size_t num_pages = size / PageSize;
 
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
-                             OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None));
+        // Create page groups for the memory being mapped.
+        KPageLinkedList pg;
+        AddRegionToPages(src_address, num_pages, pg);
 
-        block_guard.Cancel();
+        // Reprotect the source as kernel-read/not mapped.
+        const KMemoryPermission new_perm = static_cast<KMemoryPermission>(
+            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+        R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
+
+        // Ensure that we unprotect the source pages on failure.
+        auto unprot_guard = SCOPE_GUARD({
+            ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
+                       .IsSuccess());
+        });
+
+        // Map the alias pages.
+        R_TRY(MapPages(dst_address, pg, new_perm));
+
+        // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
+        // failure.
+        unprot_guard.Cancel();
+
+        // Apply the memory block updates.
+        block_manager->Update(src_address, num_pages, src_state, new_perm,
+                              KMemoryAttribute::Locked);
+        block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
+                              KMemoryAttribute::None);
     }
 
-    block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None,
-                          KMemoryAttribute::Locked);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode);
-
     return ResultSuccess;
 }
 
@@ -330,12 +355,6 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
     // Lock the table.
     KScopedLightLock lk(general_lock);
 
-    if (!size) {
-        return ResultSuccess;
-    }
-
-    const std::size_t num_pages{size / PageSize};
-
     // Verify that the source memory is locked normal heap.
     std::size_t num_src_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
@@ -384,9 +403,15 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
 
     // Unmap.
     {
-        // TODO(bunnei): We free the virtual address space, but do not nullptr the pointers in the
-        // backing page table. This is a workaround because of an issue where CPU emulation may have
-        // not quite finished running code when NROs are unloaded.
+        // Determine the number of pages being operated on.
+        const std::size_t num_pages = size / PageSize;
+
+        // Unmap the aliased copy of the pages.
+        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+
+        // Try to set the permissions for the source pages back to what they should be.
+        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
+                      OperationType::ChangePermissions));
 
         // Apply the memory block updates.
         block_manager->Update(dst_address, num_pages, KMemoryState::None);
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 3cff6648b..e99abe36a 100755
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -36,8 +36,8 @@ public:
                                   KMemoryManager::Pool pool);
     ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
                               KMemoryPermission perm);
-    ResultCode MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
     ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                                   VAddr src_addr);
     ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
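
Below is a minimal, self-contained sketch (not code from this patch) of the std::jthread pattern the logging-backend hunk above switches to: the worker lambda receives a std::stop_token supplied by the jthread, and ~jthread() requests stop and joins automatically, which is what lets the patch drop the explicit std::stop_source member, StopBackendThread(), and the custom ~Impl(). The Worker class, its Push() method, and the condition-variable queue here are invented for illustration only; yuzu's real backend uses its own MPSCQueue::PopWait and Common::SetCurrentThreadName instead.

```cpp
// Hypothetical illustration of the std::jthread + std::stop_token pattern (C++20).
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <stop_token>
#include <string>
#include <thread>

class Worker {
public:
    Worker() {
        // The std::stop_token parameter is supplied by std::jthread itself.
        thread = std::jthread([this](std::stop_token stop_token) {
            while (!stop_token.stop_requested()) {
                std::unique_lock lock{mutex};
                // A stop-token-aware wait returns when the predicate holds or stop is requested.
                cv.wait(lock, stop_token, [this] { return !queue.empty(); });
                while (!queue.empty()) {
                    std::cout << queue.front() << '\n';
                    queue.pop();
                }
            }
        });
    }

    // No explicit destructor needed: ~jthread() calls request_stop() and then join().

    void Push(std::string message) {
        {
            std::scoped_lock lock{mutex};
            queue.push(std::move(message));
        }
        cv.notify_one();
    }

private:
    std::mutex mutex;
    std::condition_variable_any cv;
    std::queue<std::string> queue;
    std::jthread thread; // Declared last: stopped and joined before the members it uses are destroyed.
};

int main() {
    Worker worker;
    worker.Push("hello");
    worker.Push("world");
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
} // Worker's jthread stops and joins here.
```

In this sketch the jthread is deliberately the last data member so that, on destruction, the thread is stopped and joined before the mutex, condition variable, and queue it references go away; the same request-stop-then-join behavior is what the patch relies on when it replaces the manual stop/join logic with a defaulted destructor.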