early-access version 2520
parent 09cf05ab91
commit a1e50a2b0d
5 changed files with 114 additions and 44 deletions

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2519.
+This is the source code for early-access 2520.
 
 ## Legal Notice
 
@@ -31,7 +31,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
     } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
         return KMemoryManager::Pool::SystemNonSecure;
     } else {
-        ASSERT_MSG("InvalidMemoryRegionType for conversion to Pool");
+        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
         return {};
     }
 }
@@ -322,7 +322,12 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t
     return ResultSuccess;
 }
 
-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+    // Validate the mapping request.
+    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+             ResultInvalidMemoryRegion);
+
+    // Lock the table.
     KScopedLightLock lk(general_lock);
 
     if (!size) {
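Note on the result macros used by the new validation path: the authoritative R_UNLESS/R_TRY definitions live in the project's result headers and are not part of this diff. As a rough, standalone sketch of the early-return behavior assumed here (stand-in types and values, not the project's actual definitions), they behave roughly like this:

// Illustrative stand-ins only; not the project's actual macro definitions.
#include <cstdio>

struct ResultCode {
    int raw{};
    constexpr bool IsError() const { return raw != 0; }
};
constexpr ResultCode ResultSuccess{0};
constexpr ResultCode ResultInvalidMemoryRegion{114}; // arbitrary stand-in value

// Bail out of the current function with `res` when the condition fails.
#define R_UNLESS(cond, res) if (!(cond)) { return (res); }
// Propagate a failing ResultCode from a sub-operation.
#define R_TRY(expr) if (const ResultCode rc_ = (expr); rc_.IsError()) { return rc_; }

ResultCode CheckMemoryStateSketch(bool ok) {
    return ok ? ResultSuccess : ResultInvalidMemoryRegion;
}

ResultCode UnmapSketch(bool range_ok, bool state_ok) {
    R_UNLESS(range_ok, ResultInvalidMemoryRegion); // early return on a bad range
    R_TRY(CheckMemoryStateSketch(state_ok));       // propagate a failing sub-result
    return ResultSuccess;
}

int main() {
    std::printf("%d %d\n", UnmapSketch(true, true).raw, UnmapSketch(false, true).raw); // 0 114
}
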
@@ -331,26 +336,66 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size
 
     const std::size_t num_pages{size / PageSize};
 
-    CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, nullptr, src_addr, size,
-                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    KMemoryState state{};
-    CASCADE_CODE(CheckMemoryState(
-        &state, nullptr, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias,
-        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-    CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None));
-    CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
-    block_manager->Update(src_addr, num_pages, KMemoryState::Normal,
-                          KMemoryPermission::UserReadWrite);
-
-    system.InvalidateCpuInstructionCacheRange(dst_addr, size);
+    // Verify that the source memory is locked normal heap.
+    std::size_t num_src_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::All,
+                                 KMemoryAttribute::Locked));
+
+    // Verify that the destination memory is aliasable code.
+    std::size_t num_dst_allocator_blocks{};
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
+        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Determine whether any pages being unmapped are code.
+    bool any_code_pages = false;
+    {
+        KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+        while (true) {
+            // Get the memory info.
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            // Check if the memory has code flag.
+            if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
+                any_code_pages = true;
+                break;
+            }
+
+            // Check if we're done.
+            if (dst_address + size - 1 <= info.GetLastAddress()) {
+                break;
+            }
+
+            // Advance.
+            ++it;
+        }
+    }
+
+    // Ensure that we maintain the instruction cache.
+    bool reprotected_pages = false;
+    SCOPE_EXIT({
+        if (reprotected_pages && any_code_pages) {
+            system.InvalidateCpuInstructionCacheRange(dst_address, size);
+        }
+    });
+
+    // Unmap.
+    {
+        // TODO(bunnei): We free the virtual address space, but do not nullptr the pointers in the
+        // backing page table. This is a workaround because of an issue where CPU emulation may have
+        // not quite finished running code when NROs are unloaded.
+
+        // Apply the memory block updates.
+        block_manager->Update(dst_address, num_pages, KMemoryState::None);
+        block_manager->Update(src_address, num_pages, KMemoryState::Normal,
+                              KMemoryPermission::UserReadWrite);
+
+        // Note that we reprotected pages.
+        reprotected_pages = true;
+    }
 
     return ResultSuccess;
 }
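The rewritten UnmapCodeMemory defers the instruction-cache maintenance with SCOPE_EXIT so it only fires after the memory blocks have actually been reprotected, and only when code pages were found in the range. A minimal standalone sketch of that deferred-cleanup pattern (a generic scope guard with stand-in names, not yuzu's SCOPE_EXIT macro) might look like:

// Minimal scope-guard sketch; stand-in for the SCOPE_EXIT usage in the hunk above.
#include <cstdio>
#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : func{std::forward<F>(f)} {}
    ~ScopeExit() { func(); }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func;
};

void UnmapSketch(bool any_code_pages) {
    bool reprotected_pages = false;
    ScopeExit guard{[&] {
        // Runs when the scope unwinds, mirroring the deferred
        // InvalidateCpuInstructionCacheRange call in the commit.
        if (reprotected_pages && any_code_pages) {
            std::puts("invalidate instruction cache range");
        }
    }};

    // ... block-manager updates would happen here; an early return before
    // this point leaves reprotected_pages false and skips the invalidation.
    reprotected_pages = true;
}

int main() {
    UnmapSketch(true);  // prints once
    UnmapSketch(false); // prints nothing
}
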
@@ -253,7 +253,9 @@ public:
     constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
+    constexpr std::size_t GetNumGuardPages() const {
+        return IsKernel() ? 1 : 4;
+    }
     PAddr GetPhysicalAddr(VAddr addr) const {
         const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
@@ -275,10 +277,6 @@ private:
         return is_aslr_enabled;
     }
 
-    constexpr std::size_t GetNumGuardPages() const {
-        return IsKernel() ? 1 : 4;
-    }
-
     constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
         return (address_space_start <= addr) &&
                (num_pages <= (address_space_end - address_space_start) / PageSize) &&
@@ -288,7 +288,7 @@ public:
     }
 
     bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
-        constexpr std::size_t padding_size{4 * Kernel::PageSize};
+        const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};
 
         if (start_info.state != Kernel::KMemoryState::Free) {
@@ -308,31 +308,56 @@ public:
         return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
     }
 
-    VAddr GetRandomMapRegion(const Kernel::KPageTable& page_table, std::size_t size) const {
-        VAddr addr{};
-        const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >>
-                                    Kernel::PageBits};
-        do {
-            addr = page_table.GetAliasCodeRegionStart() +
-                   (Kernel::KSystemControl::GenerateRandomRange(0, end_pages) << Kernel::PageBits);
-        } while (!page_table.IsInsideAddressSpace(addr, size) ||
-                 page_table.IsInsideHeapRegion(addr, size) ||
-                 page_table.IsInsideAliasRegion(addr, size));
-        return addr;
+    ResultCode GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
+        size = Common::AlignUp(size, Kernel::PageSize);
+        size += page_table.GetNumGuardPages() * Kernel::PageSize * 2;
+
+        auto is_region_available = [&](VAddr addr) {
+            const auto end_addr = addr + size;
+            while (addr < end_addr) {
+                if (system.Memory().IsValidVirtualAddress(addr)) {
+                    return false;
+                }
+                addr += Kernel::PageSize;
+            }
+            return true;
+        };
+
+        bool succeeded = false;
+        const auto map_region_end =
+            page_table.GetAliasCodeRegionStart() + page_table.GetAliasCodeRegionSize();
+        while (current_map_addr < map_region_end) {
+            if (is_region_available(current_map_addr)) {
+                succeeded = true;
+                break;
+            }
+            current_map_addr += 0x100000;
+        }
+
+        if (!succeeded) {
+            UNREACHABLE_MSG("Out of address space!");
+            return Kernel::ResultOutOfMemory;
+        }
+
+        out_addr = current_map_addr;
+        current_map_addr += size;
+
+        return ResultSuccess;
     }
 
-    ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr baseAddress,
-                                          u64 size) const {
-        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
-            auto& page_table{process->PageTable()};
-            const VAddr addr{GetRandomMapRegion(page_table, size)};
-            const ResultCode result{page_table.MapCodeMemory(addr, baseAddress, size)};
+    ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr base_addr, u64 size) {
+        auto& page_table{process->PageTable()};
+        VAddr addr{};
+
+        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
+            R_TRY(GetAvailableMapRegion(page_table, size, addr));
+
+            const ResultCode result{page_table.MapCodeMemory(addr, base_addr, size)};
             if (result == Kernel::ResultInvalidCurrentMemory) {
                 continue;
             }
 
-            CASCADE_CODE(result);
+            R_TRY(result);
 
             if (ValidateRegionForMap(page_table, addr, size)) {
                 return addr;
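For context on the hunk above: the new GetAvailableMapRegion drops the random-placement retry loop in favor of a linear walk of the alias-code region in 1 MiB steps from a persistent cursor, padding each request with guard pages on both sides and taking the first window whose pages are all unmapped. A simplified, self-contained sketch of that scan (hypothetical names, with a trivial predicate standing in for system.Memory().IsValidVirtualAddress) is:

// Simplified sketch of the linear scan; names and the predicate are illustrative.
#include <cstdint>
#include <cstdio>
#include <optional>

constexpr std::uint64_t kPageSize = 0x1000;
constexpr std::uint64_t kScanStep = 0x100000; // 1 MiB step, as in the commit

// Returns the first window of `size` bytes (plus guard padding) whose pages are
// all unmapped, scanning from `cursor` up to `region_end`.
template <typename IsMapped>
std::optional<std::uint64_t> FindFreeWindow(std::uint64_t cursor, std::uint64_t region_end,
                                            std::uint64_t size, std::uint64_t guard_pages,
                                            IsMapped&& is_mapped) {
    size = (size + kPageSize - 1) & ~(kPageSize - 1); // AlignUp to the page size
    size += guard_pages * kPageSize * 2;              // guard pages before and after

    for (; cursor < region_end; cursor += kScanStep) {
        bool free_window = true;
        for (std::uint64_t addr = cursor; addr < cursor + size; addr += kPageSize) {
            if (is_mapped(addr)) {
                free_window = false;
                break;
            }
        }
        if (free_window) {
            return cursor; // the caller then advances its cursor past this window
        }
    }
    return std::nullopt; // out of address space
}

int main() {
    // Pretend the first 2 MiB of the region are already mapped.
    const auto is_mapped = [](std::uint64_t addr) { return addr < 0x200000; };
    if (const auto addr = FindFreeWindow(0, 0x1000000, 0x5000, 4, is_mapped)) {
        std::printf("found window at 0x%llx\n", static_cast<unsigned long long>(*addr));
    }
}
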
@@ -343,7 +368,7 @@ public:
     }
 
     ResultVal<VAddr> MapNro(Kernel::KProcess* process, VAddr nro_addr, std::size_t nro_size,
-                            VAddr bss_addr, std::size_t bss_size, std::size_t size) const {
+                            VAddr bss_addr, std::size_t bss_size, std::size_t size) {
         for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
             auto& page_table{process->PageTable()};
             VAddr addr{};
@@ -597,6 +622,7 @@ public:
         LOG_WARNING(Service_LDR, "(STUBBED) called");
 
         initialized = true;
+        current_map_addr = system.CurrentProcess()->PageTable().GetAliasCodeRegionStart();
 
         IPC::ResponseBuilder rb{ctx, 2};
         rb.Push(ResultSuccess);
@@ -607,6 +633,7 @@ private:
 
     std::map<VAddr, NROInfo> nro;
     std::map<VAddr, std::vector<SHA256Hash>> nrr;
+    VAddr current_map_addr{};
 
     bool IsValidNROHash(const SHA256Hash& hash) const {
         return std::any_of(nrr.begin(), nrr.end(), [&hash](const auto& p) {