early-access version 3260

pineappleEA 2022-12-29 04:45:17 +01:00
parent 644de7a4f3
commit 2ac96f28e4
16 changed files with 828 additions and 232 deletions

View File

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 3259.
+This is the source code for early-access 3260.
 ## Legal Notice

View File

@@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     R_UNLESS(!m_is_mapped, ResultInvalidState);
     // Map the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
+    R_TRY(kernel.CurrentProcess()->PageTable().MapPageGroup(
         address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
     // Mark ourselves as mapped.
@@ -91,7 +91,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);
     // Unmap the memory.
-    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, *m_page_group,
-                                                          KMemoryState::CodeOut));
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPageGroup(address, *m_page_group,
+                                                              KMemoryState::CodeOut));
     // Mark ourselves as unmapped.
@@ -125,8 +125,8 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
     }
     // Map the memory.
-    R_TRY(
-        m_owner->PageTable().MapPages(address, *m_page_group, KMemoryState::GeneratedCode, k_perm));
+    R_TRY(m_owner->PageTable().MapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode,
+                                            k_perm));
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
@@ -142,7 +142,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);
     // Unmap the memory.
-    R_TRY(m_owner->PageTable().UnmapPages(address, *m_page_group, KMemoryState::GeneratedCode));
+    R_TRY(m_owner->PageTable().UnmapPageGroup(address, *m_page_group, KMemoryState::GeneratedCode));
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
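
The KCodeMemory hunks above are part of a commit-wide rename: mappings that take a KPageGroup now go through MapPageGroup/UnmapPageGroup instead of overloads of MapPages/UnmapPages. A page group is an ordered list of physically contiguous block runs rather than one flat range; the toy model below sketches the shape the new entry points operate on, with made-up names (yuzu's real KPageGroup and KBlockInfo carry reference counting and more state).

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy stand-in for a page group: an ordered list of physically contiguous runs.
struct Block {
    std::uint64_t phys_addr;
    std::size_t num_pages;
};
using PageGroup = std::vector<Block>;

// Mapping a group walks the blocks and advances the virtual cursor by each
// run's size, which is the loop MapPageGroupImpl performs later in this commit.
template <typename MapFn>
void MapGroup(std::uint64_t virt_addr, const PageGroup& pg, std::size_t page_size, MapFn&& map) {
    for (const Block& block : pg) {
        map(virt_addr, block.phys_addr, block.num_pages);
        virt_addr += block.num_pages * page_size;
    }
}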

View File

@@ -435,6 +435,9 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     KPageGroup pg{m_kernel, m_block_info_manager};
     AddRegionToPages(src_address, num_pages, pg);
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
     // Reprotect the source as kernel-read/not mapped.
     const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
                                                          KMemoryPermission::NotMapped);
@@ -447,7 +450,10 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
     });
     // Map the alias pages.
-    R_TRY(MapPages(dst_address, pg, new_perm));
+    const KPageProperties dst_properties = {new_perm, false, false,
+                                            DisableMergeAttribute::DisableHead};
+    R_TRY(
+        this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
     // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
     // failure.
@@ -1881,7 +1887,8 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
     R_SUCCEED();
 }
-Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                             size_t size) {
     // Lock the table.
     KScopedLightLock lk(m_general_lock);
@@ -1902,53 +1909,73 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
                                  KMemoryAttribute::None));
     // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
+    Result src_allocator_result;
     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_src_allocator_blocks);
     R_TRY(src_allocator_result);
     // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
+    Result dst_allocator_result;
     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
     // Map the memory.
-    KPageGroup page_linked_list{m_kernel, m_block_info_manager};
-    const size_t num_pages{size / PageSize};
-    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
-        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
-    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
-    AddRegionToPages(src_address, num_pages, page_linked_list);
-    {
-        // Reprotect the source as kernel-read/not mapped.
-        auto block_guard = detail::ScopeExit([&] {
-            Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
-                    OperationType::ChangePermissions);
-        });
-        R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
-        R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));
-        block_guard.Cancel();
-    }
-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
-                                  new_src_perm, new_src_attr,
-                                  KMemoryBlockDisableMergeAttribute::Locked,
-                                  KMemoryBlockDisableMergeAttribute::None);
-    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
-                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+        // Create page groups for the memory being unmapped.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+        // Create the page group representing the source.
+        R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+        // Reprotect the source as kernel-read/not mapped.
+        const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+        const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+        const KPageProperties src_properties = {new_src_perm, false, false,
+                                                DisableMergeAttribute::DisableHeadBodyTail};
+        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+                            OperationType::ChangePermissions));
+        // Ensure that we unprotect the source pages on failure.
+        ON_RESULT_FAILURE {
+            const KPageProperties unprotect_properties = {
+                KMemoryPermission::UserReadWrite, false, false,
+                DisableMergeAttribute::EnableHeadBodyTail};
+            ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
+                                 OperationType::ChangePermissions) == ResultSuccess);
+        };
+        // Map the alias pages.
+        const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                    DisableMergeAttribute::DisableHead};
+        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+                                     false));
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+                                      src_state, new_src_perm, new_src_attr,
+                                      KMemoryBlockDisableMergeAttribute::Locked,
+                                      KMemoryBlockDisableMergeAttribute::None);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+    }
     R_SUCCEED();
 }
-Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
+Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+                               size_t size) {
     // Lock the table.
     KScopedLightLock lk(m_general_lock);
@@ -1970,108 +1997,208 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                  KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
     // Create an update allocator for the source.
-    Result src_allocator_result{ResultSuccess};
+    Result src_allocator_result;
     KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_src_allocator_blocks);
     R_TRY(src_allocator_result);
     // Create an update allocator for the destination.
-    Result dst_allocator_result{ResultSuccess};
+    Result dst_allocator_result;
     KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
                                                      m_memory_block_slab_manager,
                                                      num_dst_allocator_blocks);
     R_TRY(dst_allocator_result);
-    KPageGroup src_pages{m_kernel, m_block_info_manager};
-    KPageGroup dst_pages{m_kernel, m_block_info_manager};
-    const size_t num_pages{size / PageSize};
-    AddRegionToPages(src_address, num_pages, src_pages);
-    AddRegionToPages(dst_address, num_pages, dst_pages);
-    R_UNLESS(dst_pages.IsEquivalentTo(src_pages), ResultInvalidMemoryRegion);
-    {
-        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
-        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
-        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
-                      OperationType::ChangePermissions));
-        block_guard.Cancel();
-    }
-    // Apply the memory block updates.
-    m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
-                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Locked);
-    m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
-                                  KMemoryState::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::Normal);
+    // Unmap the memory.
+    {
+        // Determine the number of pages being operated on.
+        const size_t num_pages = size / PageSize;
+        // Create page groups for the memory being unmapped.
+        KPageGroup pg{m_kernel, m_block_info_manager};
+        // Create the page group representing the destination.
+        R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+        // Ensure the page group is the valid for the source.
+        R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+        // We're going to perform an update, so create a helper.
+        KScopedPageTableUpdater updater(this);
+        // Unmap the aliased copy of the pages.
+        const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+        R_TRY(
+            this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
+        // Ensure that we re-map the aliased pages on failure.
+        ON_RESULT_FAILURE {
+            this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+        };
+        // Try to set the permissions for the source pages back to what they should be.
+        const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+                                                DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+        R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
+                            OperationType::ChangePermissions));
+        // Apply the memory block updates.
+        m_memory_block_manager.Update(
+            std::addressof(src_allocator), src_address, num_pages, src_state,
+            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+        m_memory_block_manager.Update(
+            std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+            KMemoryPermission::None, KMemoryAttribute::None,
+            KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+    }
     R_SUCCEED();
 }
-Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
-                            KMemoryPermission perm) {
+Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                           size_t num_pages, KMemoryPermission perm) {
     ASSERT(this->IsLockedByCurrentThread());
-    VAddr cur_addr{addr};
-    for (const auto& node : page_linked_list) {
-        if (const auto result{
-                Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
-            result.IsError()) {
-            const size_t num_pages{(addr - cur_addr) / PageSize};
-            ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
-                       .IsSuccess());
-            R_RETURN(result);
-        }
-        cur_addr += node.GetNumPages() * PageSize;
-    }
-    R_SUCCEED();
-}
-Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
-                            KMemoryPermission perm) {
-    // Check that the map is in range.
-    const size_t num_pages{page_linked_list.GetNumPages()};
-    const size_t size{num_pages * PageSize};
-    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-    // Check the memory state.
-    R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
-                                 KMemoryPermission::None, KMemoryPermission::None,
-                                 KMemoryAttribute::None, KMemoryAttribute::None));
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-    // Map the pages.
-    R_TRY(MapPages(address, page_linked_list, perm));
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    // Create a page group to hold the pages we allocate.
+    KPageGroup pg{m_kernel, m_block_info_manager};
+    // Allocate the pages.
+    R_TRY(
+        m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+    // Ensure that the page group is closed when we're done working with it.
+    SCOPE_EXIT({ pg.Close(); });
+    // Clear all pages.
+    for (const auto& it : pg) {
+        std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
+                    it.GetSize());
+    }
+    // Map the pages.
+    R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
+}
+Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                                    const KPageGroup& pg, const KPageProperties properties,
+                                    bool reuse_ll) {
+    ASSERT(this->IsLockedByCurrentThread());
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    KProcessAddress cur_address = address;
+    // Ensure that we clean up on failure.
+    ON_RESULT_FAILURE {
+        ASSERT(!reuse_ll);
+        if (cur_address != start_address) {
+            const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                                      DisableMergeAttribute::None};
+            ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
+                                 unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
+        }
+    };
+    // Iterate, mapping all pages in the group.
+    for (const auto& block : pg) {
+        // Map and advance.
+        const KPageProperties cur_properties =
+            (cur_address == start_address)
+                ? properties
+                : KPageProperties{properties.perm, properties.io, properties.uncached,
+                                  DisableMergeAttribute::None};
+        this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
+                      block.GetAddress());
+        cur_address += block.GetSize();
+    }
+    // We succeeded!
     R_SUCCEED();
 }
-Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                            bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                                const KPageGroup& pg) {
+    ASSERT(this->IsLockedByCurrentThread());
+    // Note the current address, so that we can iterate.
+    const KProcessAddress start_address = address;
+    const KProcessAddress last_address = start_address + size - 1;
+    const KProcessAddress end_address = last_address + 1;
+    // Iterate over the memory.
+    auto pg_it = pg.begin();
+    ASSERT(pg_it != pg.end());
+    KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+    size_t pg_pages = pg_it->GetNumPages();
+    auto it = m_memory_block_manager.FindIterator(start_address);
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != m_memory_block_manager.end());
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
+        // Determine the range to map.
+        KProcessAddress map_address = std::max(info.GetAddress(), start_address);
+        const KProcessAddress map_end_address = std::min(info.GetEndAddress(), end_address);
+        ASSERT(map_end_address != map_address);
+        // Determine if we should disable head merge.
+        const bool disable_head_merge =
+            info.GetAddress() >= start_address &&
+            True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+        const KPageProperties map_properties = {
+            info.GetPermission(), false, false,
+            disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+        // While we have pages to map, map them.
+        size_t map_pages = (map_end_address - map_address) / PageSize;
+        while (map_pages > 0) {
+            // Check if we're at the end of the physical block.
+            if (pg_pages == 0) {
+                // Ensure there are more pages to map.
+                ASSERT(pg_it != pg.end());
+                // Advance our physical block.
+                ++pg_it;
+                pg_phys_addr = pg_it->GetAddress();
+                pg_pages = pg_it->GetNumPages();
+            }
+            // Map whatever we can.
+            const size_t cur_pages = std::min(pg_pages, map_pages);
+            ASSERT(this->Operate(map_address, map_pages, map_properties.perm, OperationType::Map,
+                                 pg_phys_addr) == ResultSuccess);
+            // Advance.
+            map_address += cur_pages * PageSize;
+            map_pages -= cur_pages;
+            pg_phys_addr += cur_pages * PageSize;
+            pg_pages -= cur_pages;
+        }
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
+        // Advance.
+        ++it;
+    }
+    // Check that we re-mapped precisely the page group.
+    ASSERT((++pg_it) == pg.end());
+}
+Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                            KPhysicalAddress phys_addr, bool is_pa_valid,
+                            KProcessAddress region_start, size_t region_num_pages,
                             KMemoryState state, KMemoryPermission perm) {
     ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
@@ -2084,26 +2211,30 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
     KScopedLightLock lk(m_general_lock);
     // Find a random address to map at.
-    VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
-                                    this->GetNumGuardPages());
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+                                              0, this->GetNumGuardPages());
     R_UNLESS(addr != 0, ResultOutOfMemory);
     ASSERT(Common::IsAligned(addr, alignment));
     ASSERT(this->CanContain(addr, num_pages * PageSize, state));
-    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
-                                  KMemoryPermission::None, KMemoryPermission::None,
-                                  KMemoryAttribute::None, KMemoryAttribute::None)
-               .IsSuccess());
+    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
     // Perform mapping operation.
     if (is_pa_valid) {
-        R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
+        const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+        R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
     } else {
-        UNIMPLEMENTED();
+        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
     }
     // Update the blocks.
@@ -2116,28 +2247,45 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
     R_SUCCEED();
 }
-Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
-    ASSERT(this->IsLockedByCurrentThread());
-    VAddr cur_addr{addr};
-    for (const auto& node : page_linked_list) {
-        if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
-                                      OperationType::Unmap)};
-            result.IsError()) {
-            R_RETURN(result);
-        }
-        cur_addr += node.GetNumPages() * PageSize;
-    }
+Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                            KMemoryPermission perm) {
+    // Check that the map is in range.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+    // Check the memory state.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+    // Map the pages.
+    R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
     R_SUCCEED();
 }
-Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
+Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
     // Check that the unmap is in range.
-    const size_t num_pages{page_linked_list.GetNumPages()};
-    const size_t size{num_pages * PageSize};
+    const size_t size = num_pages * PageSize;
     R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
     // Lock the table.
@@ -2151,13 +2299,18 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
                                  KMemoryAttribute::None));
     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
     // Perform the unmap.
-    R_TRY(UnmapPages(address, page_linked_list));
+    const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+                                              DisableMergeAttribute::None};
+    R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
     // Update the blocks.
     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2168,29 +2321,130 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
     R_SUCCEED();
 }
-Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
-    // Check that the unmap is in range.
-    const size_t size = num_pages * PageSize;
-    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-    // Lock the table.
-    KScopedLightLock lk(m_general_lock);
-    // Check the memory state.
-    size_t num_allocator_blocks{};
+Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                                KProcessAddress region_start, size_t region_num_pages,
+                                KMemoryState state, KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
+    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+    // Find a random address to map at.
+    KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+                                              0, this->GetNumGuardPages());
+    R_UNLESS(addr != 0, ResultOutOfMemory);
+    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryPermission::None,
+                                  KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager);
+    R_TRY(allocator_result);
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+    // We successfully mapped the pages.
+    *out_addr = addr;
+    R_SUCCEED();
+}
+Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+                                KMemoryPermission perm) {
+    ASSERT(!this->IsLockedByCurrentThread());
+    // Ensure this is a valid map request.
+    const size_t num_pages = pg.GetNumPages();
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+    // Check if state allows us to map.
+    size_t num_allocator_blocks;
+    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));
+    // Create an update allocator.
+    Result allocator_result;
+    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+                                                 m_memory_block_slab_manager, num_allocator_blocks);
+    R_TRY(allocator_result);
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+    // Perform mapping operation.
+    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
+                                        DisableMergeAttribute::DisableHead};
+    R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+    // Update the blocks.
+    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+                                  KMemoryBlockDisableMergeAttribute::None);
+    // We successfully mapped the pages.
+    R_SUCCEED();
+}
+Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+                                  KMemoryState state) {
+    ASSERT(!this->IsLockedByCurrentThread());
+    // Ensure this is a valid unmap request.
+    const size_t num_pages = pg.GetNumPages();
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+    // Lock the table.
+    KScopedLightLock lk(m_general_lock);
+    // Check if state allows us to unmap.
+    size_t num_allocator_blocks;
     R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                  KMemoryState::All, state, KMemoryPermission::None,
                                  KMemoryPermission::None, KMemoryAttribute::All,
                                  KMemoryAttribute::None));
+    // Check that the page group is valid.
+    R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
     // Create an update allocator.
-    Result allocator_result{ResultSuccess};
+    Result allocator_result;
     KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                  m_memory_block_slab_manager, num_allocator_blocks);
     R_TRY(allocator_result);
-    // Perform the unmap.
-    R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+    // We're going to perform an update, so create a helper.
+    KScopedPageTableUpdater updater(this);
+    // Perform unmapping operation.
+    const KPageProperties properties = {KMemoryPermission::None, false, false,
+                                        DisableMergeAttribute::None};
+    R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
     // Update the blocks.
     m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
@@ -2550,54 +2804,6 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
     }
 }
-ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align,
-                                                  bool is_map_only, VAddr region_start,
-                                                  size_t region_num_pages, KMemoryState state,
-                                                  KMemoryPermission perm, PAddr map_addr) {
-    KScopedLightLock lk(m_general_lock);
-    R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
-             ResultInvalidCurrentMemory);
-    R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
-    const VAddr addr{
-        AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
-    R_UNLESS(addr, ResultOutOfMemory);
-    // Create an update allocator.
-    Result allocator_result{ResultSuccess};
-    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
-                                                 m_memory_block_slab_manager);
-    if (is_map_only) {
-        R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
-    } else {
-        // Create a page group tohold the pages we allocate.
-        KPageGroup pg{m_kernel, m_block_info_manager};
-        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
-            &pg, needed_num_pages,
-            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-        // Ensure that the page group is closed when we're done working with it.
-        SCOPE_EXIT({ pg.Close(); });
-        // Clear all pages.
-        for (const auto& it : pg) {
-            std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()),
-                        m_heap_fill_value, it.GetSize());
-        }
-        R_TRY(Operate(addr, needed_num_pages, pg, OperationType::MapGroup));
-    }
-    // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
-                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
-                                  KMemoryBlockDisableMergeAttribute::None);
-    return addr;
-}
 Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
                                                 KMemoryPermission perm, bool is_aligned,
                                                 bool check_heap) {
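
A pattern worth noting across this file: the old code rolled back with detail::ScopeExit guards that had to be Cancel()ed on every success path, while the new code uses ON_RESULT_FAILURE blocks that run only when the scope exits with a failing Result. A standalone sketch of that rollback idiom follows; it illustrates the idea and is not yuzu's actual macro implementation.

#include <utility>

// Runs a cleanup callable unless dismissed, mirroring the guard-then-cancel
// flow that ON_RESULT_FAILURE replaces.
template <typename F>
class FailureGuard {
public:
    explicit FailureGuard(F&& f) : m_f(std::forward<F>(f)) {}
    ~FailureGuard() {
        if (m_armed) {
            m_f(); // failure path: undo the partial work
        }
    }
    void Dismiss() {
        m_armed = false; // success path: keep the work
    }

private:
    F m_f;
    bool m_armed = true;
};

// Usage, shaped like the old MapMemory body:
//   FailureGuard guard{[&] { UnprotectSource(); }};
//   if (!MapAlias()) { return Error; } // guard rolls the protection back
//   guard.Dismiss();                   // mapping succeeded, keep it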

View File

@@ -24,12 +24,36 @@ class System;
 namespace Kernel {
+enum class DisableMergeAttribute : u8 {
+    None = (0U << 0),
+    DisableHead = (1U << 0),
+    DisableHeadAndBody = (1U << 1),
+    EnableHeadAndBody = (1U << 2),
+    DisableTail = (1U << 3),
+    EnableTail = (1U << 4),
+    EnableAndMergeHeadBodyTail = (1U << 5),
+    EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+    DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+struct KPageProperties {
+    KMemoryPermission perm;
+    bool io;
+    bool uncached;
+    DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
 class KBlockInfoManager;
 class KMemoryBlockManager;
 class KResourceLimit;
 class KSystemResource;
 class KPageTable final {
+protected:
+    struct PageLinkedList;
 public:
     enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
@@ -57,27 +81,12 @@ public:
     Result UnmapPhysicalMemory(VAddr addr, size_t size);
     Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
     Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
-    Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
-                    KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                    KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
-    }
-    Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
-    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
     Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
     Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
     Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
     Result SetMaxHeapSize(size_t size);
     Result SetHeapSize(VAddr* out, size_t size);
-    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
-                                          VAddr region_start, size_t region_num_pages,
-                                          KMemoryState state, KMemoryPermission perm,
-                                          PAddr map_addr = 0);
     Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
                                         KMemoryPermission perm, bool is_aligned, bool check_heap);
     Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
@@ -113,6 +122,40 @@ public:
     bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+                                region_num_pages, state, perm));
+    }
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm) {
+        R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
+    }
+    Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+                    KMemoryPermission perm);
+    Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+    Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+                        KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+                        KMemoryPermission perm);
+    Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+                        KMemoryPermission perm);
+    Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+    void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+                        const KPageGroup& pg);
 protected:
     struct PageLinkedList {
     private:
@@ -166,11 +209,9 @@ private:
     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
         KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
-    Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
-                    KMemoryState state, KMemoryPermission perm);
-    Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
+    Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+                    KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+                    size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
     void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
@@ -265,6 +306,11 @@ private:
     void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
                                                  size_t size, KMemoryPermission prot_perm);
+    Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+                                   size_t num_pages, KMemoryPermission perm);
+    Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+                            const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
     mutable KLightLock m_general_lock;
     mutable KLightLock m_map_physical_memory_lock;
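
The header can assert that KPageProperties is trivial and exactly sizeof(u32) because all four members are byte-sized (KMemoryPermission and DisableMergeAttribute are u8-backed enums). A simplified mirror, using placeholder enum values rather than yuzu's real definitions, shows the whole property set round-tripping through a u32:

#include <bit>
#include <cstdint>
#include <type_traits>

enum class Perm : std::uint8_t { None = 0, UserReadWrite = 3 }; // placeholder values
enum class Merge : std::uint8_t { None = 0, DisableHead = 1 };  // placeholder values

struct Props {
    Perm perm;
    bool io;
    bool uncached;
    Merge disable_merge_attributes;
};
static_assert(std::is_trivial_v<Props>);
static_assert(sizeof(Props) == sizeof(std::uint32_t));

// Register-sized page properties are one bit_cast away:
inline std::uint32_t Pack(Props p) {
    return std::bit_cast<std::uint32_t>(p);
}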

View File

@@ -417,9 +417,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
 }
 void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
-    AllocateMainThreadStack(stack_size);
+    ASSERT(AllocateMainThreadStack(stack_size) == ResultSuccess);
     resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
-    resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size);
     const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
     ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
@@ -675,20 +674,28 @@ void KProcess::ChangeState(State new_state) {
 }
 Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
-    ASSERT(stack_size);
-    // The kernel always ensures that the given stack size is page aligned.
-    main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
-    const VAddr start{page_table.GetStackRegionStart()};
-    const std::size_t size{page_table.GetStackRegionEnd() - start};
-    CASCADE_RESULT(main_thread_stack_top,
-                   page_table.AllocateAndMapMemory(
-                       main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
-                       KMemoryState::Stack, KMemoryPermission::UserReadWrite));
-    main_thread_stack_top += main_thread_stack_size;
+    // Ensure that we haven't already allocated stack.
+    ASSERT(main_thread_stack_size == 0);
+    // Ensure that we're allocating a valid stack.
+    stack_size = Common::AlignUp(stack_size, PageSize);
+    // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
+    R_UNLESS(stack_size + image_size >= image_size, ResultOutOfMemory);
+    // Place a tentative reservation of memory for our new stack.
+    KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
+                                               stack_size);
+    R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
+    // Allocate and map our stack.
+    if (stack_size) {
+        KProcessAddress stack_bottom;
+        R_TRY(page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
+                                  KMemoryState::Stack, KMemoryPermission::UserReadWrite));
+        main_thread_stack_top = stack_bottom + stack_size;
+        main_thread_stack_size = stack_size;
+    }
     R_SUCCEED();
 }
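
AllocateMainThreadStack now reserves PhysicalMemoryMax through KScopedResourceReservation instead of Run() calling Reserve() directly, so a failed stack mapping no longer leaks the reservation. A minimal sketch of that scoped-reservation pattern, with hypothetical names; the Commit()-on-success step is inferred from the general pattern rather than taken from the hunk above.

#include <cstddef>

// Hypothetical stand-in for the kernel's resource limit bookkeeping.
struct ResourceLimit {
    std::size_t used = 0;
    std::size_t max = 0;
    bool Reserve(std::size_t amount) {
        if (used + amount > max) {
            return false;
        }
        used += amount;
        return true;
    }
    void Release(std::size_t amount) {
        used -= amount;
    }
};

class ScopedReservation {
public:
    ScopedReservation(ResourceLimit& limit, std::size_t amount)
        : m_limit(limit), m_amount(amount), m_succeeded(limit.Reserve(amount)) {}
    ~ScopedReservation() {
        if (m_succeeded && !m_committed) {
            m_limit.Release(m_amount); // early return: roll the reservation back
        }
    }
    bool Succeeded() const {
        return m_succeeded;
    }
    void Commit() {
        m_committed = true; // mapping succeeded: keep the reservation
    }

private:
    ResourceLimit& m_limit;
    std::size_t m_amount;
    bool m_succeeded;
    bool m_committed = false;
};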

View File

@@ -94,7 +94,7 @@ Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t m
         R_UNLESS(map_perm == test_perm, ResultInvalidNewMemoryPermission);
     }
-    return target_process.PageTable().MapPages(address, *page_group, KMemoryState::Shared,
-                                               ConvertToKMemoryPermission(map_perm));
+    return target_process.PageTable().MapPageGroup(address, *page_group, KMemoryState::Shared,
+                                                   ConvertToKMemoryPermission(map_perm));
 }
@@ -102,7 +102,7 @@ Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t
     // Validate the size.
     R_UNLESS(size == unmap_size, ResultInvalidSize);
-    return target_process.PageTable().UnmapPages(address, *page_group, KMemoryState::Shared);
+    return target_process.PageTable().UnmapPageGroup(address, *page_group, KMemoryState::Shared);
 }
 } // namespace Kernel

View File

@@ -1492,7 +1492,7 @@ static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle p
                                  KMemoryAttribute::All, KMemoryAttribute::None));
     // Map the group.
-    R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
-                          KMemoryPermission::UserReadWrite));
+    R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState::SharedCode,
+                              KMemoryPermission::UserReadWrite));
     return ResultSuccess;

View File

@@ -68,6 +68,8 @@ if (ENABLE_SDL2)
         helpers/joycon_protocol/generic_functions.cpp
         helpers/joycon_protocol/generic_functions.h
         helpers/joycon_protocol/joycon_types.h
+        helpers/joycon_protocol/irs.cpp
+        helpers/joycon_protocol/irs.h
         helpers/joycon_protocol/nfc.cpp
         helpers/joycon_protocol/nfc.h
         helpers/joycon_protocol/poller.cpp

View File

@@ -7,6 +7,7 @@
 #include "input_common/helpers/joycon_driver.h"
 #include "input_common/helpers/joycon_protocol/calibration.h"
 #include "input_common/helpers/joycon_protocol/generic_functions.h"
+#include "input_common/helpers/joycon_protocol/irs.h"
 #include "input_common/helpers/joycon_protocol/nfc.h"
 #include "input_common/helpers/joycon_protocol/poller.h"
 #include "input_common/helpers/joycon_protocol/ringcon.h"
@@ -78,6 +79,7 @@ DriverResult JoyconDriver::InitializeDevice() {
     // Initialize HW Protocols
     calibration_protocol = std::make_unique<CalibrationProtocol>(hidapi_handle);
     generic_protocol = std::make_unique<GenericProtocol>(hidapi_handle);
+    irs_protocol = std::make_unique<IrsProtocol>(hidapi_handle);
     nfc_protocol = std::make_unique<NfcProtocol>(hidapi_handle);
     ring_protocol = std::make_unique<RingConProtocol>(hidapi_handle);
     rumble_protocol = std::make_unique<RumbleProtocol>(hidapi_handle);
@@ -251,6 +253,20 @@ DriverResult JoyconDriver::SetPollingMode() {
         generic_protocol->EnableImu(false);
     }
+    if (irs_protocol->IsEnabled()) {
+        irs_protocol->DisableIrs();
+    }
+    if (irs_enabled && supported_features.irs) {
+        auto result = irs_protocol->EnableIrs();
+        if (result == DriverResult::Success) {
+            disable_input_thread = false;
+            return result;
+        }
+        irs_protocol->DisableIrs();
+        LOG_ERROR(Input, "Error enabling IRS");
+    }
     if (nfc_protocol->IsEnabled()) {
         amiibo_detected = false;
         nfc_protocol->DisableNfc();

View File

@@ -13,6 +13,7 @@
 namespace InputCommon::Joycon {
 class CalibrationProtocol;
 class GenericProtocol;
+class IrsProtocol;
 class NfcProtocol;
 class JoyconPoller;
 class RingConProtocol;
@@ -87,6 +88,7 @@ private:
     // Protocol Features
     std::unique_ptr<CalibrationProtocol> calibration_protocol;
     std::unique_ptr<GenericProtocol> generic_protocol;
+    std::unique_ptr<IrsProtocol> irs_protocol;
     std::unique_ptr<NfcProtocol> nfc_protocol;
    std::unique_ptr<JoyconPoller> joycon_poller;
    std::unique_ptr<RingConProtocol> ring_protocol;

View File

@@ -120,6 +120,19 @@ DriverResult JoyconCommonProtocol::SendSubCommand(SubCommand sc, std::span<const
     return DriverResult::Success;
 }
+DriverResult JoyconCommonProtocol::SendMcuCommand(SubCommand sc, std::span<const u8> buffer) {
+    std::vector<u8> local_buffer(MaxResponseSize);
+    local_buffer[0] = static_cast<u8>(OutputReport::MCU_DATA);
+    local_buffer[1] = GetCounter();
+    local_buffer[10] = static_cast<u8>(sc);
+    for (std::size_t i = 0; i < buffer.size(); ++i) {
+        local_buffer[11 + i] = buffer[i];
+    }
+    return SendData(local_buffer);
+}
 DriverResult JoyconCommonProtocol::SendVibrationReport(std::span<const u8> buffer) {
     std::vector<u8> local_buffer(MaxResponseSize);

View File

@@ -74,6 +74,13 @@ public:
      */
     DriverResult SendSubCommand(SubCommand sc, std::span<const u8> buffer, std::vector<u8>& output);
+    /**
+     * Sends a mcu command to the device
+     * @param sc sub command to be send
+     * @param buffer data to be send
+     */
+    DriverResult SendMcuCommand(SubCommand sc, std::span<const u8> buffer);
     /**
      * Sends vibration data to the joycon
      * @param buffer data to be send
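
SendMcuCommand writes at fixed offsets: byte 0 is the output report ID, byte 1 the rolling packet counter from GetCounter(), byte 10 the sub-command, and the payload starts at byte 11. A hypothetical packed mirror of that layout; the field names and the rumble-area reading of bytes 2 through 9 are assumptions from Joy-Con reverse-engineering notes, not definitions in this commit.

#include <cstddef>
#include <cstdint>

struct McuReportLayout {
    std::uint8_t report_id;      // local_buffer[0] = OutputReport::MCU_DATA
    std::uint8_t packet_counter; // local_buffer[1] = GetCounter()
    std::uint8_t rumble_data[8]; // bytes 2..9, left zeroed by SendMcuCommand
    std::uint8_t sub_command;    // local_buffer[10] = static_cast<u8>(sc)
    std::uint8_t payload[38];    // local_buffer[11 + i] = buffer[i]; the total
                                 // size depends on the driver's MaxResponseSize
};

static_assert(offsetof(McuReportLayout, sub_command) == 10);
static_assert(offsetof(McuReportLayout, payload) == 11);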

View File

@@ -0,0 +1,209 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <thread>
#include "common/logging/log.h"
#include "input_common/helpers/joycon_protocol/irs.h"
namespace InputCommon::Joycon {
IrsProtocol::IrsProtocol(std::shared_ptr<JoyconHandle> handle)
: JoyconCommonProtocol(std::move(handle)) {}
DriverResult IrsProtocol::EnableIrs() {
LOG_INFO(Input, "Enable IRS");
DriverResult result{DriverResult::Success};
SetBlocking();
if (result == DriverResult::Success) {
result = SetReportMode(ReportMode::NFC_IR_MODE_60HZ);
}
if (result == DriverResult::Success) {
result = EnableMCU(true);
}
if (result == DriverResult::Success) {
result = WaitSetMCUMode(ReportMode::NFC_IR_MODE_60HZ, MCUMode::Standby);
}
if (result == DriverResult::Success) {
const MCUConfig config{
.command = MCUCommand::ConfigureMCU,
.sub_command = MCUSubCommand::SetMCUMode,
.mode = MCUMode::IR,
.crc = {},
};
result = ConfigureMCU(config);
}
if (result == DriverResult::Success) {
result = WaitSetMCUMode(ReportMode::NFC_IR_MODE_60HZ, MCUMode::IR);
}
if (result == DriverResult::Success) {
result = ConfigureIrs();
}
if (result == DriverResult::Success) {
result = WriteRegistersStep1();
}
if (result == DriverResult::Success) {
result = WriteRegistersStep2();
}
is_enabled = true;
SetNonBlocking();
return result;
}
DriverResult IrsProtocol::DisableIrs() {
LOG_DEBUG(Input, "Disable IRS");
DriverResult result{DriverResult::Success};
SetBlocking();
if (result == DriverResult::Success) {
result = EnableMCU(false);
}
is_enabled = false;
SetNonBlocking();
return result;
}
DriverResult IrsProtocol::ConfigureIrs() {
LOG_DEBUG(Input, "Configure IRS");
constexpr std::size_t max_tries = 28;
std::vector<u8> output;
std::size_t tries = 0;
const IrsConfigure irs_configuration{
.command = MCUCommand::ConfigureIR,
.sub_command = MCUSubCommand::SetDeviceMode,
.irs_mode = IrsMode::ImageTransfer,
.number_of_fragments = 0x3,
.mcu_major_version = 0x0500,
.mcu_minor_version = 0x1800,
.crc = {},
};
std::vector<u8> request_data(sizeof(IrsConfigure));
memcpy(request_data.data(), &irs_configuration, sizeof(IrsConfigure));
request_data[37] = CalculateMCU_CRC8(request_data.data() + 1, 36);
do {
const auto result = SendSubCommand(SubCommand::SET_MCU_CONFIG, request_data, output);
if (result != DriverResult::Success) {
return result;
}
if (tries++ >= max_tries) {
return DriverResult::WrongReply;
}
} while (output[15] != 0x0b);
return DriverResult::Success;
}
DriverResult IrsProtocol::WriteRegistersStep1() {
LOG_DEBUG(Input, "Configure IRS");
DriverResult result{DriverResult::Success};
constexpr std::size_t max_tries = 28;
std::vector<u8> output;
std::size_t tries = 0;
const IrsWriteRegisters irs_registers{
.command = MCUCommand::ConfigureIR,
.sub_command = MCUSubCommand::WriteDeviceRegisters,
.number_of_registers = 0x9,
.registers =
{
IrsRegister{0x2e00, resolution},
{0x3001, static_cast<u8>(exposure & 0xff)},
{0x3101, static_cast<u8>(exposure >> 8)},
{0x3201, 0x00},
{0x1000, leds},
{0x2e01, static_cast<u8>((digital_gain & 0x0f) << 4)},
{0x2f01, static_cast<u8>((digital_gain & 0xf0) >> 4)},
{0x0e00, ex_light_filter},
{0x4301, 0xc8},
},
.crc = {},
};
std::vector<u8> request_data(sizeof(IrsWriteRegisters));
memcpy(request_data.data(), &irs_registers, sizeof(IrsWriteRegisters));
request_data[37] = CalculateMCU_CRC8(request_data.data() + 1, 36);
std::array<u8, 38> mcu_request{0x02};
mcu_request[36] = CalculateMCU_CRC8(mcu_request.data(), 36);
mcu_request[37] = 0xFF;
if (result != DriverResult::Success) {
return result;
}
do {
result = SendSubCommand(SubCommand::SET_MCU_CONFIG, request_data, output);
// First time we need to set the report mode
if (result == DriverResult::Success && tries == 0) {
result = SendMcuCommand(SubCommand::SET_REPORT_MODE, mcu_request);
}
if (result == DriverResult::Success && tries == 0) {
GetSubCommandResponse(SubCommand::SET_MCU_CONFIG, output);
}
if (result != DriverResult::Success) {
return result;
}
if (tries++ >= max_tries) {
return DriverResult::WrongReply;
}
} while (!(output[15] == 0x13 && output[17] == 0x07) && output[15] != 0x23);
return DriverResult::Success;
}
DriverResult IrsProtocol::WriteRegistersStep2() {
LOG_DEBUG(Input, "Configure IRS");
constexpr std::size_t max_tries = 28;
std::vector<u8> output;
std::size_t tries = 0;
const IrsWriteRegisters irs_registers{
.command = MCUCommand::ConfigureIR,
.sub_command = MCUSubCommand::WriteDeviceRegisters,
.number_of_registers = 0x8,
.registers =
{
IrsRegister{0x1100, static_cast<u8>(led_intensity >> 8)},
{0x1200, static_cast<u8>(led_intensity & 0xff)},
{0x2d00, image_flip},
{0x6701, static_cast<u8>((denoise >> 16) & 0xff)},
{0x6801, static_cast<u8>((denoise >> 8) & 0xff)},
{0x6901, static_cast<u8>(denoise & 0xff)},
{0x0400, 0x2d},
{0x0700, 0x01},
},
.crc = {},
};
std::vector<u8> request_data(sizeof(IrsWriteRegisters));
memcpy(request_data.data(), &irs_registers, sizeof(IrsWriteRegisters));
request_data[37] = CalculateMCU_CRC8(request_data.data() + 1, 36);
do {
const auto result = SendSubCommand(SubCommand::SET_MCU_CONFIG, request_data, output);
if (result != DriverResult::Success) {
return result;
}
if (tries++ >= max_tries) {
return DriverResult::WrongReply;
}
} while (output[15] != 0x13 && output[15] != 0x23);
return DriverResult::Success;
}
bool IrsProtocol::IsEnabled() const {
return is_enabled;
}
} // namespace InputCommon::Joycon
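
EnableIrs, ConfigureIrs, and both register-write steps all stamp a CalculateMCU_CRC8 checksum over bytes 1 through 36 of the request. Joy-Con reverse-engineering notes describe this MCU checksum as a CRC-8 with polynomial 0x07 and initial value 0x00; taking that parameterization as an assumption, a bitwise sketch looks like this:

#include <cstdint>
#include <span>

// Bitwise CRC-8, MSB first, polynomial 0x07, init 0x00 (assumed parameters).
std::uint8_t McuCrc8(std::span<const std::uint8_t> data) {
    std::uint8_t crc = 0x00;
    for (const std::uint8_t byte : data) {
        crc ^= byte;
        for (int bit = 0; bit < 8; ++bit) {
            crc = (crc & 0x80) != 0 ? static_cast<std::uint8_t>((crc << 1) ^ 0x07)
                                    : static_cast<std::uint8_t>(crc << 1);
        }
    }
    return crc;
}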

View File

@@ -0,0 +1,46 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
// Based on dkms-hid-nintendo implementation, CTCaer joycon toolkit and dekuNukem reverse
// engineering https://github.com/nicman23/dkms-hid-nintendo/blob/master/src/hid-nintendo.c
// https://github.com/CTCaer/jc_toolkit
// https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering
#pragma once
#include <vector>
#include "input_common/helpers/joycon_protocol/common_protocol.h"
#include "input_common/helpers/joycon_protocol/joycon_types.h"
namespace InputCommon::Joycon {
class IrsProtocol final : private JoyconCommonProtocol {
public:
explicit IrsProtocol(std::shared_ptr<JoyconHandle> handle);
DriverResult EnableIrs();
DriverResult DisableIrs();
bool IsEnabled() const;
private:
DriverResult ConfigureIrs();
DriverResult WriteRegistersStep1();
DriverResult WriteRegistersStep2();
bool is_enabled{};
u8 resolution = 0x69;
u8 leds = 0x00;
u8 ex_light_filter = 0x03;
u8 image_flip = 0x00;
u8 digital_gain = 0x01;
u16 exposure = 0x2490;
u16 led_intensity = 0x0f10;
u32 denoise = 0x012344;
};
} // namespace InputCommon::Joycon

View File

@@ -273,6 +273,17 @@ enum class NFCTagType : u8 {
     Ntag215 = 0x01,
 };
+enum class IrsMode : u8 {
+    None = 0x02,
+    Moment = 0x03,
+    Dpd = 0x04,
+    Clustering = 0x06,
+    ImageTransfer = 0x07,
+    Silhouette = 0x08,
+    TeraImage = 0x09,
+    SilhouetteTeraImage = 0x0A,
+};
 enum class DriverResult {
     Success,
     WrongReply,
@@ -456,6 +467,36 @@ struct NFCRequestState {
 };
 static_assert(sizeof(NFCRequestState) == 0x26, "NFCRequestState is an invalid size");
+struct IrsConfigure {
+    MCUCommand command;
+    MCUSubCommand sub_command;
+    IrsMode irs_mode;
+    u8 number_of_fragments;
+    u16 mcu_major_version;
+    u16 mcu_minor_version;
+    INSERT_PADDING_BYTES(0x1D);
+    u8 crc;
+};
+static_assert(sizeof(IrsConfigure) == 0x26, "IrsConfigure is an invalid size");
+#pragma pack(push, 1)
+struct IrsRegister {
+    u16 address;
+    u8 value;
+};
+static_assert(sizeof(IrsRegister) == 0x3, "IrsRegister is an invalid size");
+struct IrsWriteRegisters {
+    MCUCommand command;
+    MCUSubCommand sub_command;
+    u8 number_of_registers;
+    std::array<IrsRegister, 9> registers;
+    INSERT_PADDING_BYTES(0x7);
+    u8 crc;
+};
+static_assert(sizeof(IrsWriteRegisters) == 0x26, "IrsWriteRegisters is an invalid size");
+#pragma pack(pop)
 struct FirmwareVersion {
     u8 major;
     u8 minor;
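
IrsRegister sits inside #pragma pack(push, 1) because a {u16, u8} struct is normally padded to four bytes, which would make nine registers overflow the 0x26-byte report that the static_asserts above pin down. A minimal illustration; the four-byte unpacked size is typical ABI behavior rather than a standard guarantee.

#include <cstdint>

struct Unpacked {
    std::uint16_t address;
    std::uint8_t value;
}; // padded so the next array element stays 2-byte aligned
static_assert(sizeof(Unpacked) == 4); // typical, implementation-defined

#pragma pack(push, 1)
struct Packed {
    std::uint16_t address;
    std::uint8_t value;
};
#pragma pack(pop)
static_assert(sizeof(Packed) == 3); // nine of these fit the 0x26-byte report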

View File

@@ -30,8 +30,7 @@ constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB;
 constexpr VkDeviceSize STREAM_BUFFER_SIZE = 128_MiB;
 constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_SYNCS;
-constexpr VkMemoryPropertyFlags HOST_FLAGS =
-    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+constexpr VkMemoryPropertyFlags HOST_FLAGS = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 constexpr VkMemoryPropertyFlags STREAM_FLAGS = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | HOST_FLAGS;
 bool IsStreamHeap(VkMemoryHeap heap) noexcept {
@@ -93,9 +92,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
         .pNext = nullptr,
         .flags = 0,
         .size = STREAM_BUFFER_SIZE,
-        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
-                 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-                 VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT,
+        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
@@ -247,15 +244,19 @@ std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t s
 StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage usage,
                                                         bool deferred) {
     const u32 log2 = Common::Log2Ceil64(size);
+    VkBufferUsageFlags usage_flags{};
+    if (usage == MemoryUsage::Upload) {
+        usage_flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+    }
+    if (usage == MemoryUsage::Download) {
+        usage_flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+    }
     vk::Buffer buffer = device.GetLogical().CreateBuffer({
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
         .size = 1ULL << log2,
-        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
-                 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-                 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
-                 VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT,
+        .usage = usage_flags,
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
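
Dropping VK_MEMORY_PROPERTY_HOST_COHERENT_BIT from HOST_FLAGS means the allocator may now return non-coherent memory, and narrowing .usage to the transfer bits gives the driver more freedom when placing staging buffers. With a non-coherent mapping, CPU writes must be made visible explicitly; a minimal sketch, assuming the handles are valid and that offset/size respect VkPhysicalDeviceLimits::nonCoherentAtomSize:

#include <vulkan/vulkan.h>

void FlushStagingWrite(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset,
                       VkDeviceSize size) {
    const VkMappedMemoryRange range{
        .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
        .pNext = nullptr,
        .memory = memory,
        .offset = offset,
        .size = size,
    };
    // Required before the GPU reads host writes made through non-coherent memory.
    vkFlushMappedMemoryRanges(device, 1, &range);
}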