early-access version 2389

pineappleEA 2022-01-12 07:07:47 +01:00
parent 3a8b87bfc0
commit ec43f604e1
12 changed files with 143 additions and 95 deletions

View file

@@ -35,6 +35,8 @@ option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)
 option(YUZU_USE_BUNDLED_OPUS "Compile bundled opus" ON)
+option(YUZU_TESTS "Compile tests" ON)
 # Default to a Release build
 get_property(IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
 if (NOT IS_MULTI_CONFIG AND NOT CMAKE_BUILD_TYPE)
@@ -168,7 +170,6 @@ macro(yuzu_find_packages)
     # Capitalization matters here. We need the naming to match the generated paths from Conan
     set(REQUIRED_LIBS
     # Cmake Pkg Prefix  Version  Conan Pkg
-        "Catch2 2.13.7 catch2/2.13.7"
         "fmt 8.0.1 fmt/8.1.1"
         "lz4 1.8 lz4/1.9.2"
         "nlohmann_json 3.8 nlohmann_json/3.8.0"
@@ -177,6 +178,11 @@ macro(yuzu_find_packages)
     # can't use opus until AVX check is fixed: https://github.com/yuzu-emu/yuzu/pull/4068
     #"opus 1.3 opus/1.3.1"
     )
+    if (YUZU_TESTS)
+        list(APPEND REQUIRED_LIBS
+            "Catch2 2.13.7 catch2/2.13.7"
+        )
+    endif()
     foreach(PACKAGE ${REQUIRED_LIBS})
         string(REGEX REPLACE "[ \t\r\n]+" ";" PACKAGE_SPLIT ${PACKAGE})

View file

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 2388.
+This is the source code for early-access 2389.
 ## Legal Notice

View file

@@ -13,10 +13,6 @@ if (ARCHITECTURE_x86 OR ARCHITECTURE_x86_64)
     target_compile_definitions(xbyak INTERFACE XBYAK_NO_OP_NAMES)
 endif()
-# Catch
-add_library(catch-single-include INTERFACE)
-target_include_directories(catch-single-include INTERFACE catch/single_include)
 # Dynarmic
 if (ARCHITECTURE_x86_64)
     set(DYNARMIC_TESTS OFF)

View file

@@ -153,7 +153,10 @@ add_subdirectory(audio_core)
 add_subdirectory(video_core)
 add_subdirectory(input_common)
 add_subdirectory(shader_recompiler)
-add_subdirectory(tests)
+if (YUZU_TESTS)
+    add_subdirectory(tests)
+endif()
 if (ENABLE_SDL2)
     add_subdirectory(yuzu_cmd)

View file

@@ -298,7 +298,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
     return ResultSuccess;
 }
-ResultCode KPageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     std::lock_guard lock{page_table_lock};
     const std::size_t num_pages{size / PageSize};
@@ -307,7 +307,7 @@ ResultCode KPageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std:
     KMemoryPermission perm{};
     CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, nullptr, src_addr, size,
                                   KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
-                                  KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask,
+                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask,
                                   KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
     if (IsRegionMapped(dst_addr, size)) {
@@ -335,7 +335,7 @@ ResultCode KPageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std:
     return ResultSuccess;
 }
-ResultCode KPageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     std::lock_guard lock{page_table_lock};
     if (!size) {
@@ -361,7 +361,7 @@ ResultCode KPageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, st
     block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
     block_manager->Update(src_addr, num_pages, KMemoryState::Normal,
-                          KMemoryPermission::ReadAndWrite);
+                          KMemoryPermission::UserReadWrite);
     system.InvalidateCpuInstructionCacheRange(dst_addr, size);
@@ -416,7 +416,7 @@ void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr star
     }
     const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
-    Operate(dst_addr, num_pages, KMemoryPermission::ReadAndWrite, OperationType::Map,
+    Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
             map_addr);
     dst_addr += num_pages * PageSize;
@@ -470,7 +470,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     const std::size_t num_pages{size / PageSize};
     block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
                           KMemoryAttribute::None, KMemoryState::Normal,
-                          KMemoryPermission::ReadAndWrite, KMemoryAttribute::None);
+                          KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
     return ResultSuccess;
 }
@@ -554,7 +554,7 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
         &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
+        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
         KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
     if (IsRegionMapped(dst_addr, size)) {
@@ -568,13 +568,13 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     {
         auto block_guard = detail::ScopeExit([&] {
-            Operate(src_addr, num_pages, KMemoryPermission::ReadAndWrite,
+            Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
                     OperationType::ChangePermissions);
         });
         CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
                              OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::ReadAndWrite));
+        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite));
         block_guard.Cancel();
     }
@@ -582,7 +582,7 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None,
                           KMemoryAttribute::Locked);
     block_manager->Update(dst_addr, num_pages, KMemoryState::Stack,
-                          KMemoryPermission::ReadAndWrite);
+                          KMemoryPermission::UserReadWrite);
     return ResultSuccess;
 }
@@ -617,13 +617,13 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });
         CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::ReadAndWrite,
+        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite,
                              OperationType::ChangePermissions));
         block_guard.Cancel();
     }
-    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::ReadAndWrite);
+    block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite);
     block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
     return ResultSuccess;
@@ -713,50 +713,61 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
 }
 ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
-                                                  KMemoryPermission perm) {
+                                                  Svc::MemoryPermission svc_perm) {
+    const size_t num_pages = size / PageSize;
+    // Lock the table.
     std::lock_guard lock{page_table_lock};
-    KMemoryState prev_state{};
-    KMemoryPermission prev_perm{};
-    CASCADE_CODE(CheckMemoryState(
-        &prev_state, &prev_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCode,
-        KMemoryState::FlagCode, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-    KMemoryState state{prev_state};
-    // Ensure state is mutable if permission allows write
-    if ((perm & KMemoryPermission::Write) != KMemoryPermission::None) {
-        if (prev_state == KMemoryState::Code) {
-            state = KMemoryState::CodeData;
-        } else if (prev_state == KMemoryState::AliasCode) {
-            state = KMemoryState::AliasCodeData;
-        } else {
-            UNREACHABLE();
-        }
-    }
-    // Return early if there is nothing to change
-    if (state == prev_state && perm == prev_perm) {
-        return ResultSuccess;
-    }
-    if ((prev_perm & KMemoryPermission::Execute) != (perm & KMemoryPermission::Execute)) {
+    // Verify we can change the memory permission.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+                                 std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::FlagCode, KMemoryState::FlagCode,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 KMemoryAttribute::All, KMemoryAttribute::None));
+    // Determine new perm/state.
+    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+    KMemoryState new_state = old_state;
+    const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
+    const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+    const bool was_x =
+        (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+    ASSERT(!(is_w && is_x));
+    if (is_w) {
+        switch (old_state) {
+        case KMemoryState::Code:
+            new_state = KMemoryState::CodeData;
+            break;
+        case KMemoryState::AliasCode:
+            new_state = KMemoryState::AliasCodeData;
+            break;
+        default:
+            UNREACHABLE();
+        }
+    }
+    // Succeed if there's nothing to do.
+    R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
+    // Perform mapping operation.
+    const auto operation =
+        was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
+    R_TRY(Operate(addr, num_pages, new_perm, operation));
+    // Update the blocks.
+    block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None);
+    // Ensure cache coherency, if we're setting pages as executable.
+    if (is_x) {
         // Memory execution state is changing, invalidate CPU cache range
         system.InvalidateCpuInstructionCacheRange(addr, size);
     }
-    const std::size_t num_pages{size / PageSize};
-    const OperationType operation{(perm & KMemoryPermission::Execute) != KMemoryPermission::None
-                                      ? OperationType::ChangePermissionsAndRefresh
-                                      : OperationType::ChangePermissions};
-    CASCADE_CODE(Operate(addr, num_pages, perm, operation));
-    block_manager->Update(addr, num_pages, state, perm);
     return ResultSuccess;
 }
@@ -785,7 +796,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
         &state, nullptr, &attribute, nullptr, addr, size,
         KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
         KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::All,
-        KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
         KMemoryAttribute::IpcAndDeviceMapped));
     block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
@@ -805,7 +816,7 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
         KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
         KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-    block_manager->Update(addr, size / PageSize, state, KMemoryPermission::ReadAndWrite);
+    block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
     return ResultSuccess;
 }
@@ -906,7 +917,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
         R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
                                      heap_region_start + size, GetHeapSize() - size,
                                      KMemoryState::All, KMemoryState::Normal,
-                                     KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
+                                     KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                      KMemoryAttribute::All, KMemoryAttribute::None));
         // Unmap the end of the heap.
@@ -981,7 +992,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
     // Apply the memory block update.
     block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal,
-                          KMemoryPermission::ReadAndWrite, KMemoryAttribute::None);
+                          KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
     // Update the current heap end.
     current_heap_end = heap_region_start + size;
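Note: the SetProcessMemoryPermission rewrite above now takes an Svc::MemoryPermission, promotes code regions to their writable *Data states when write access is requested, and only refreshes the instruction cache when execute permission is involved. Below is a minimal, self-contained sketch of that derivation; the enums and PlanPermissionChange helper are simplified stand-ins for illustration, not yuzu's real kernel types.

    // Illustrative model only: simplified stand-in enums, not the actual yuzu types.
    #include <cassert>
    #include <cstdint>

    enum class SvcMemoryPermission : std::uint32_t {
        None = 0, Read = 1, Write = 2, Execute = 4, ReadWrite = 3, ReadExecute = 5,
    };
    enum class MemoryState { Code, CodeData, AliasCode, AliasCodeData };
    enum class OperationType { ChangePermissions, ChangePermissionsAndRefresh };

    struct PermissionPlan {
        MemoryState new_state;
        OperationType operation;
        bool invalidate_icache; // set when the pages become executable
    };

    PermissionPlan PlanPermissionChange(MemoryState old_state, SvcMemoryPermission old_perm,
                                        SvcMemoryPermission new_perm) {
        const auto has = [](SvcMemoryPermission p, SvcMemoryPermission flag) {
            return (static_cast<std::uint32_t>(p) & static_cast<std::uint32_t>(flag)) != 0;
        };
        const bool is_w = has(new_perm, SvcMemoryPermission::Write);
        const bool is_x = has(new_perm, SvcMemoryPermission::Execute);
        const bool was_x = has(old_perm, SvcMemoryPermission::Execute);
        assert(!(is_w && is_x)); // W^X: never writable and executable at once

        MemoryState new_state = old_state;
        if (is_w) {
            // Writable code regions are promoted to their *Data counterparts.
            new_state = (old_state == MemoryState::AliasCode) ? MemoryState::AliasCodeData
                                                              : MemoryState::CodeData;
        }
        return {new_state,
                was_x ? OperationType::ChangePermissionsAndRefresh
                      : OperationType::ChangePermissions,
                is_x};
    }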

View file

@@ -31,8 +31,8 @@ public:
                    KMemoryManager::Pool pool);
     ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
                               KMemoryPermission perm);
-    ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    ResultCode MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    ResultCode UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
     ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                                   VAddr src_addr);
     ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
@@ -43,7 +43,8 @@ public:
     ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
                         KMemoryPermission perm);
     ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
-    ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
+    ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
+                                          Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
     ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
     ResultCode ResetTransferMemory(VAddr addr, std::size_t size);

View file

@@ -509,7 +509,7 @@ VAddr KProcess::CreateTLSRegion() {
     const VAddr tls_page_addr{page_table
                                   ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
                                                          KMemoryState::ThreadLocal,
-                                                         KMemoryPermission::ReadAndWrite,
+                                                         KMemoryPermission::UserReadWrite,
                                                          tls_map_addr)
                                   .ValueOr(0)};
@@ -541,16 +541,16 @@ void KProcess::FreeTLSRegion(VAddr tls_address) {
 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
-                                      KMemoryPermission permission) {
+                                      Svc::MemoryPermission permission) {
         page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };
     kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
                                         code_set.memory.size());
-    ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute);
-    ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read);
-    ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite);
+    ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
+    ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
+    ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
 }
 bool KProcess::IsSignaled() const {
@@ -587,7 +587,7 @@ ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
     CASCADE_RESULT(main_thread_stack_top,
                    page_table->AllocateAndMapMemory(
                        main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
-                       KMemoryState::Stack, KMemoryPermission::ReadAndWrite));
+                       KMemoryState::Stack, KMemoryPermission::UserReadWrite));
     main_thread_stack_top += main_thread_stack_size;

View file

@@ -121,7 +121,7 @@ struct KernelCore::Impl {
         object_list_container.Finalize();
         // Ensures all service threads gracefully shutdown.
-        service_threads.clear();
+        ClearServiceThreads();
         next_object_id = 0;
         next_kernel_process_id = KProcess::InitialKIPIDMin;
@@ -713,11 +713,32 @@ struct KernelCore::Impl {
         return port;
     }
+    std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel,
+                                                             const std::string& name) {
+        std::lock_guard lk(service_threads_lock);
+        auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, 1, name);
+        service_threads.emplace(service_thread);
+        return service_thread;
+    }
+    void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
+        std::lock_guard lk(service_threads_lock);
+        if (auto strong_ptr = service_thread.lock()) {
+            service_threads.erase(strong_ptr);
+        }
+    }
+    void ClearServiceThreads() {
+        std::lock_guard lk(service_threads_lock);
+        service_threads.clear();
+    }
     std::mutex server_ports_lock;
     std::mutex server_sessions_lock;
     std::mutex registered_objects_lock;
     std::mutex registered_in_use_objects_lock;
     std::mutex dummy_thread_lock;
+    std::mutex service_threads_lock;
     std::atomic<u32> next_object_id{0};
     std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
@@ -1117,15 +1138,11 @@ void KernelCore::ExitSVCProfile() {
 }
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
-    auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
-    impl->service_threads.emplace(service_thread);
-    return service_thread;
+    return impl->CreateServiceThread(*this, name);
 }
 void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
-    if (auto strong_ptr = service_thread.lock()) {
-        impl->service_threads.erase(strong_ptr);
-    }
+    impl->ReleaseServiceThread(service_thread);
 }
 Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
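Note: the service-thread changes above move every mutation of the service_threads set into Impl helpers guarded by the new service_threads_lock, so creation, release, and shutdown can no longer race. A minimal sketch of the same ownership and locking pattern, using a placeholder Thread type instead of Kernel::ServiceThread:

    // Sketch of a mutex-guarded registry that owns shared_ptrs and hands out weak_ptrs.
    #include <memory>
    #include <mutex>
    #include <set>
    #include <string>

    struct Thread {
        explicit Thread(std::string name_) : name(std::move(name_)) {}
        std::string name;
    };

    class ServiceThreadRegistry {
    public:
        std::weak_ptr<Thread> Create(const std::string& name) {
            std::lock_guard lk(lock);
            auto thread = std::make_shared<Thread>(name);
            threads.emplace(thread);
            return thread;
        }

        void Release(std::weak_ptr<Thread> weak) {
            std::lock_guard lk(lock);
            // Only erase if the caller's handle still refers to a live thread.
            if (auto strong = weak.lock()) {
                threads.erase(strong);
            }
        }

        void Clear() {
            std::lock_guard lk(lock);
            threads.clear();
        }

    private:
        std::mutex lock;
        std::set<std::shared_ptr<Thread>> threads;
    };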

View file

@@ -16,17 +16,25 @@ namespace Kernel {
 PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
                            Core::CPUInterrupts& interrupts_)
     : core_index{core_index_}, system{system_}, scheduler{scheduler_},
-      interrupts{interrupts_}, guard{std::make_unique<Common::SpinLock>()} {}
+      interrupts{interrupts_}, guard{std::make_unique<Common::SpinLock>()} {
+#ifdef ARCHITECTURE_x86_64
+    // TODO(bunnei): Initialization relies on a core being available. We may later replace this
+    // with a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
+    auto& kernel = system.Kernel();
+    arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+        system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+#else
+#error Platform not supported yet.
+#endif
+}
 PhysicalCore::~PhysicalCore() = default;
 void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
 #ifdef ARCHITECTURE_x86_64
     auto& kernel = system.Kernel();
-    if (is_64_bit) {
-        arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
-            system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
-    } else {
+    if (!is_64_bit) {
+        // We already initialized a 64-bit core, replace with a 32-bit one.
         arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
             system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
     }
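Note: the constructor above now eagerly creates a 64-bit Dynarmic core, and Initialize() only swaps in a 32-bit core when the guest is not 64-bit. A tiny sketch of that construct-eagerly, downgrade-on-demand shape, with placeholder types rather than yuzu's actual interfaces:

    // CpuBackend, Dynarmic64, and Dynarmic32 are illustrative placeholders.
    #include <memory>

    struct CpuBackend {
        virtual ~CpuBackend() = default;
    };
    struct Dynarmic64 : CpuBackend {};
    struct Dynarmic32 : CpuBackend {};

    class Core {
    public:
        Core() : backend{std::make_unique<Dynarmic64>()} {} // eager 64-bit default

        void Initialize(bool is_64_bit) {
            if (!is_64_bit) {
                // A 64-bit backend already exists; replace it with a 32-bit one.
                backend = std::make_unique<Dynarmic32>();
            }
        }

    private:
        std::unique_ptr<CpuBackend> backend;
    };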

View file

@@ -1309,6 +1309,8 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
     R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
     R_UNLESS(size > 0, ResultInvalidSize);
     R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+    R_UNLESS(address == static_cast<uintptr_t>(address), ResultInvalidCurrentMemory);
+    R_UNLESS(size == static_cast<size_t>(size), ResultInvalidCurrentMemory);
     // Validate the memory permission.
     R_UNLESS(IsValidProcessMemoryPermission(perm), ResultInvalidNewMemoryPermission);
@@ -1323,7 +1325,7 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
     R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
     // Set the memory permission.
-    return page_table.SetProcessMemoryPermission(address, size, ConvertToKMemoryPermission(perm));
+    return page_table.SetProcessMemoryPermission(address, size, perm);
 }
 static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
@@ -1626,7 +1628,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
         return ResultInvalidMemoryRegion;
     }
-    return page_table.MapProcessCodeMemory(dst_address, src_address, size);
+    return page_table.MapCodeMemory(dst_address, src_address, size);
 }
 static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
@@ -1694,7 +1696,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
         return ResultInvalidMemoryRegion;
     }
-    return page_table.UnmapProcessCodeMemory(dst_address, src_address, size);
+    return page_table.UnmapCodeMemory(dst_address, src_address, size);
 }
 /// Exits the current process
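Note: the two R_UNLESS checks added above reject SVC arguments that would be truncated when the 64-bit values are narrowed to the host's uintptr_t and size_t. In isolation, the round-trip cast check looks like this; the helper names are illustrative and not part of the yuzu codebase:

    // A 64-bit value is accepted only if it survives narrowing unchanged.
    #include <cstddef>
    #include <cstdint>

    bool FitsAddress(std::uint64_t address) {
        return address == static_cast<std::uintptr_t>(address);
    }

    bool FitsSize(std::uint64_t size) {
        return size == static_cast<std::size_t>(size);
    }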

View file

@@ -14,6 +14,7 @@
 #include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_system_control.h"
 #include "core/hle/kernel/svc_results.h"
+#include "core/hle/kernel/svc_types.h"
 #include "core/hle/service/ldr/ldr.h"
 #include "core/hle/service/service.h"
 #include "core/loader/nro.h"
@@ -325,7 +326,7 @@ public:
         for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
             auto& page_table{process->PageTable()};
             const VAddr addr{GetRandomMapRegion(page_table, size)};
-            const ResultCode result{page_table.MapProcessCodeMemory(addr, baseAddress, size)};
+            const ResultCode result{page_table.MapCodeMemory(addr, baseAddress, size)};
             if (result == Kernel::ResultInvalidCurrentMemory) {
                 continue;
@@ -351,12 +352,12 @@ public:
         if (bss_size) {
             auto block_guard = detail::ScopeExit([&] {
-                page_table.UnmapProcessCodeMemory(addr + nro_size, bss_addr, bss_size);
-                page_table.UnmapProcessCodeMemory(addr, nro_addr, nro_size);
+                page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
+                page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
             });
             const ResultCode result{
-                page_table.MapProcessCodeMemory(addr + nro_size, bss_addr, bss_size)};
+                page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)};
             if (result == Kernel::ResultInvalidCurrentMemory) {
                 continue;
@@ -397,12 +398,12 @@ public:
             nro_header.segment_headers[DATA_INDEX].memory_size);
         CASCADE_CODE(process->PageTable().SetProcessMemoryPermission(
-            text_start, ro_start - text_start, Kernel::KMemoryPermission::ReadAndExecute));
+            text_start, ro_start - text_start, Kernel::Svc::MemoryPermission::ReadExecute));
         CASCADE_CODE(process->PageTable().SetProcessMemoryPermission(
-            ro_start, data_start - ro_start, Kernel::KMemoryPermission::Read));
+            ro_start, data_start - ro_start, Kernel::Svc::MemoryPermission::Read));
         return process->PageTable().SetProcessMemoryPermission(
-            data_start, bss_end_addr - data_start, Kernel::KMemoryPermission::ReadAndWrite);
+            data_start, bss_end_addr - data_start, Kernel::Svc::MemoryPermission::ReadWrite);
     }
     void LoadModule(Kernel::HLERequestContext& ctx) {
@@ -530,16 +531,19 @@ public:
     ResultCode UnmapNro(const NROInfo& info) {
         // Each region must be unmapped separately to validate memory state
         auto& page_table{system.CurrentProcess()->PageTable()};
-        CASCADE_CODE(page_table.UnmapProcessCodeMemory(info.nro_address + info.text_size +
-                                                           info.ro_size + info.data_size,
-                                                       info.bss_address, info.bss_size));
-        CASCADE_CODE(page_table.UnmapProcessCodeMemory(
-            info.nro_address + info.text_size + info.ro_size,
-            info.src_addr + info.text_size + info.ro_size, info.data_size));
-        CASCADE_CODE(page_table.UnmapProcessCodeMemory(
-            info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size));
-        CASCADE_CODE(
-            page_table.UnmapProcessCodeMemory(info.nro_address, info.src_addr, info.text_size));
+        if (info.bss_size != 0) {
+            CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size +
+                                                        info.ro_size + info.data_size,
+                                                    info.bss_address, info.bss_size));
+        }
+        CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
+                                                info.src_addr + info.text_size + info.ro_size,
+                                                info.data_size));
+        CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
+                                                info.src_addr + info.text_size, info.ro_size));
+        CASCADE_CODE(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
         return ResultSuccess;
     }
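Note: UnmapNro above now skips the bss unmap entirely when bss_size is zero and otherwise releases regions from the end of the image back toward the text segment. A rough, self-contained model of that ordering; NroInfo mirrors the fields used in the diff, and UnmapCode is a stub standing in for KPageTable::UnmapCodeMemory, not yuzu's API:

    #include <cstdint>

    // Placeholder mirror of the loader's NRO bookkeeping.
    struct NroInfo {
        std::uint64_t nro_address;
        std::uint64_t src_addr;
        std::uint64_t bss_address;
        std::uint64_t text_size;
        std::uint64_t ro_size;
        std::uint64_t data_size;
        std::uint64_t bss_size;
    };

    // Stub: always reports success here; the real call validates memory state per region.
    bool UnmapCode(std::uint64_t dst, std::uint64_t src, std::uint64_t size) {
        (void)dst; (void)src; (void)size;
        return true;
    }

    // Unmap in reverse order: optional bss first, then data, ro, and finally text.
    bool UnmapNro(const NroInfo& info) {
        if (info.bss_size != 0 &&
            !UnmapCode(info.nro_address + info.text_size + info.ro_size + info.data_size,
                       info.bss_address, info.bss_size)) {
            return false;
        }
        return UnmapCode(info.nro_address + info.text_size + info.ro_size,
                         info.src_addr + info.text_size + info.ro_size, info.data_size) &&
               UnmapCode(info.nro_address + info.text_size, info.src_addr + info.text_size,
                         info.ro_size) &&
               UnmapCode(info.nro_address, info.src_addr, info.text_size);
    }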

View file

@@ -16,6 +16,6 @@ add_executable(tests
 create_target_directory_groups(tests)
 target_link_libraries(tests PRIVATE common core input_common)
-target_link_libraries(tests PRIVATE ${PLATFORM_LIBRARIES} catch-single-include Threads::Threads)
+target_link_libraries(tests PRIVATE ${PLATFORM_LIBRARIES} Catch2::Catch2 Threads::Threads)
 add_test(NAME tests COMMAND tests)