early-access version 2914

pineappleEA 2022-08-21 22:11:13 +02:00
parent d4d68be38e
commit c141471dce
31 changed files with 193 additions and 249 deletions

View File

@@ -40,6 +40,8 @@ option(YUZU_TESTS "Compile tests" ON)
 option(YUZU_USE_BUNDLED_VCPKG "Use vcpkg for yuzu dependencies" "${MSVC}")
+option(YUZU_CHECK_SUBMODULES "Check if submodules are present" ON)
 if (YUZU_USE_BUNDLED_VCPKG)
     if (YUZU_TESTS)
         list(APPEND VCPKG_MANIFEST_FEATURES "yuzu-tests")
@@ -81,7 +83,7 @@ function(check_submodules_present)
     endforeach()
 endfunction()
-if(EXISTS ${PROJECT_SOURCE_DIR}/.gitmodules)
+if(EXISTS ${PROJECT_SOURCE_DIR}/.gitmodules AND YUZU_CHECK_SUBMODULES)
     check_submodules_present()
 endif()
 configure_file(${PROJECT_SOURCE_DIR}/dist/compatibility_list/compatibility_list.qrc

View File

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2913.
+This is the source code for early-access 2914.
 
 ## Legal Notice

View File

@@ -128,7 +128,7 @@ endif()
 if (YUZU_USE_BUNDLED_OPUS)
     add_subdirectory(opus EXCLUDE_FROM_ALL)
 else()
-    find_package(opus 1.3 REQUIRED)
+    find_package(Opus 1.3 REQUIRED)
 endif()
 
 # FFMpeg

externals/find-modules/FindOpus.cmake (vendored executable file, 19 lines)
View File

@@ -0,0 +1,19 @@
+# SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+find_package(PkgConfig)
+
+if (PKG_CONFIG_FOUND)
+    pkg_search_module(opus IMPORTED_TARGET GLOBAL opus)
+    if (opus_FOUND)
+        add_library(Opus::opus ALIAS PkgConfig::opus)
+    endif()
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(Opus
+    REQUIRED_VARS
+        opus_LINK_LIBRARIES
+        opus_FOUND
+    VERSION_VAR opus_VERSION
+)

View File

@@ -1,56 +1,19 @@
-# SPDX-FileCopyrightText: 2020 yuzu Emulator Project
+# SPDX-FileCopyrightText: 2022 yuzu Emulator Project
 # SPDX-License-Identifier: GPL-2.0-or-later
 
-find_package(PkgConfig QUIET)
-pkg_check_modules(PC_lz4 QUIET lz4)
-
-find_path(lz4_INCLUDE_DIR
-  NAMES lz4.h
-  PATHS ${PC_lz4_INCLUDE_DIRS}
-)
-find_library(lz4_LIBRARY
-  NAMES lz4
-  PATHS ${PC_lz4_LIBRARY_DIRS}
-)
-
-if(lz4_INCLUDE_DIR)
-  file(STRINGS "${lz4_INCLUDE_DIR}/lz4.h" _lz4_version_lines
-    REGEX "#define[ \t]+LZ4_VERSION_(MAJOR|MINOR|RELEASE)")
-  string(REGEX REPLACE ".*LZ4_VERSION_MAJOR *\([0-9]*\).*" "\\1" _lz4_version_major "${_lz4_version_lines}")
-  string(REGEX REPLACE ".*LZ4_VERSION_MINOR *\([0-9]*\).*" "\\1" _lz4_version_minor "${_lz4_version_lines}")
-  string(REGEX REPLACE ".*LZ4_VERSION_RELEASE *\([0-9]*\).*" "\\1" _lz4_version_release "${_lz4_version_lines}")
-  set(lz4_VERSION "${_lz4_version_major}.${_lz4_version_minor}.${_lz4_version_release}")
-  unset(_lz4_version_major)
-  unset(_lz4_version_minor)
-  unset(_lz4_version_release)
-  unset(_lz4_version_lines)
-endif()
+find_package(PkgConfig)
+
+if (PKG_CONFIG_FOUND)
+    pkg_search_module(liblz4 IMPORTED_TARGET GLOBAL liblz4)
+    if (liblz4_FOUND)
+        add_library(lz4::lz4 ALIAS PkgConfig::liblz4)
+    endif()
+endif()
 
 include(FindPackageHandleStandardArgs)
 find_package_handle_standard_args(lz4
-  FOUND_VAR lz4_FOUND
-  REQUIRED_VARS
-    lz4_LIBRARY
-    lz4_INCLUDE_DIR
-  VERSION_VAR lz4_VERSION
-)
-
-if(lz4_FOUND)
-  set(lz4_LIBRARIES ${lz4_LIBRARY})
-  set(lz4_INCLUDE_DIRS ${lz4_INCLUDE_DIR})
-  set(lz4_DEFINITIONS ${PC_lz4_CFLAGS_OTHER})
-endif()
-
-if(lz4_FOUND AND NOT TARGET lz4::lz4)
-  add_library(lz4::lz4 UNKNOWN IMPORTED)
-  set_target_properties(lz4::lz4 PROPERTIES
-    IMPORTED_LOCATION "${lz4_LIBRARY}"
-    INTERFACE_COMPILE_OPTIONS "${PC_lz4_CFLAGS_OTHER}"
-    INTERFACE_INCLUDE_DIRECTORIES "${lz4_INCLUDE_DIR}"
-  )
-endif()
-
-mark_as_advanced(
-  lz4_INCLUDE_DIR
-  lz4_LIBRARY
+  REQUIRED_VARS
+    liblz4_LINK_LIBRARIES
+    liblz4_FOUND
+  VERSION_VAR liblz4_VERSION
 )

View File

@@ -1,57 +1,19 @@
-# SPDX-FileCopyrightText: 2020 yuzu Emulator Project
+# SPDX-FileCopyrightText: 2022 yuzu Emulator Project
 # SPDX-License-Identifier: GPL-2.0-or-later
 
-find_package(PkgConfig QUIET)
-pkg_check_modules(PC_zstd QUIET libzstd)
-
-find_path(zstd_INCLUDE_DIR
-  NAMES zstd.h
-  PATHS ${PC_zstd_INCLUDE_DIRS}
-)
-find_library(zstd_LIBRARY
-  NAMES zstd
-  PATHS ${PC_zstd_LIBRARY_DIRS}
-)
-
-if(zstd_INCLUDE_DIR)
-  file(STRINGS "${zstd_INCLUDE_DIR}/zstd.h" _zstd_version_lines
-    REGEX "#define[ \t]+ZSTD_VERSION_(MAJOR|MINOR|RELEASE)")
-  string(REGEX REPLACE ".*ZSTD_VERSION_MAJOR *\([0-9]*\).*" "\\1" _zstd_version_major "${_zstd_version_lines}")
-  string(REGEX REPLACE ".*ZSTD_VERSION_MINOR *\([0-9]*\).*" "\\1" _zstd_version_minor "${_zstd_version_lines}")
-  string(REGEX REPLACE ".*ZSTD_VERSION_RELEASE *\([0-9]*\).*" "\\1" _zstd_version_release "${_zstd_version_lines}")
-  set(zstd_VERSION "${_zstd_version_major}.${_zstd_version_minor}.${_zstd_version_release}")
-  unset(_zstd_version_major)
-  unset(_zstd_version_minor)
-  unset(_zstd_version_release)
-  unset(_zstd_version_lines)
-endif()
+find_package(PkgConfig)
+
+if (PKG_CONFIG_FOUND)
+    pkg_search_module(libzstd IMPORTED_TARGET GLOBAL libzstd)
+    if (libzstd_FOUND)
+        add_library(zstd::zstd ALIAS PkgConfig::libzstd)
+    endif()
+endif()
 
 include(FindPackageHandleStandardArgs)
 find_package_handle_standard_args(zstd
-  FOUND_VAR zstd_FOUND
-  REQUIRED_VARS
-    zstd_LIBRARY
-    zstd_INCLUDE_DIR
-    zstd_VERSION
-  VERSION_VAR zstd_VERSION
-)
-
-if(zstd_FOUND)
-  set(zstd_LIBRARIES ${zstd_LIBRARY})
-  set(zstd_INCLUDE_DIRS ${zstd_INCLUDE_DIR})
-  set(zstd_DEFINITIONS ${PC_zstd_CFLAGS_OTHER})
-endif()
-
-if(zstd_FOUND AND NOT TARGET zstd::zstd)
-  add_library(zstd::zstd UNKNOWN IMPORTED)
-  set_target_properties(zstd::zstd PROPERTIES
-    IMPORTED_LOCATION "${zstd_LIBRARY}"
-    INTERFACE_COMPILE_OPTIONS "${PC_zstd_CFLAGS_OTHER}"
-    INTERFACE_INCLUDE_DIRECTORIES "${zstd_INCLUDE_DIR}"
-  )
-endif()
-
-mark_as_advanced(
-  zstd_INCLUDE_DIR
-  zstd_LIBRARY
+  REQUIRED_VARS
+    libzstd_LINK_LIBRARIES
+    libzstd_FOUND
+  VERSION_VAR libzstd_VERSION
 )

View File

@@ -256,4 +256,4 @@ PRIVATE
     opus/src
 )
 
-add_library(Opus::Opus ALIAS opus)
+add_library(Opus::opus ALIAS opus)

View File

@@ -22,12 +22,3 @@ typedef void* HANDLE;
 #include <microprofile.h>
 
 #define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0)
-
-// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with
-// identifiers we use.
-#ifdef PAGE_SIZE
-#undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#undef PAGE_MASK
-#endif
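
The block removed above existed because, as its comment notes, Mach headers pulled in via <microprofile.h> can define PAGE_SIZE and PAGE_MASK as preprocessor macros. The YUZU_-prefixed renames throughout the rest of this commit remove that collision at the source, so the workaround is no longer needed. A small illustration (not from the commit; the macro value is hypothetical) of why a leaked macro breaks a same-named constant:

// Hypothetical stand-in for a macro leaked by a system header.
#define PAGE_SIZE 4096

// constexpr unsigned long long PAGE_SIZE = 1ULL << 12; // would expand to
//     "constexpr unsigned long long 4096 = ..." and fail to compile.
constexpr unsigned long long YUZU_PAGESIZE = 1ULL << 12; // prefixed name, no collision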

View File

@@ -788,7 +788,7 @@ endif()
 create_target_directory_groups(core)
 target_link_libraries(core PUBLIC common PRIVATE audio_core network video_core)
-target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls Opus::Opus)
+target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls Opus::opus)
 if (MINGW)
     target_link_libraries(core PRIVATE ${MSWSOCK_LIBRARY})
 endif()

View File

@@ -190,8 +190,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     config.callbacks = cb.get();
     config.coprocessors[15] = cp15;
     config.define_unpredictable_behaviour = true;
-    static constexpr std::size_t PAGE_BITS = 12;
-    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
+    static constexpr std::size_t YUZU_PAGEBITS = 12;
+    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);
     if (page_table) {
         config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
             page_table->pointers.data());

View File

@@ -217,7 +217,7 @@ void IPSwitchCompiler::Parse() {
             break;
         } else if (StartsWith(line, "@nsobid-")) {
             // NSO Build ID Specifier
-            const auto raw_build_id = fmt::format("{:0>64}", line.substr(8));
+            const auto raw_build_id = fmt::format("{:0<64}", line.substr(8));
             nso_build_id = Common::HexStringToArray<0x20>(raw_build_id);
         } else if (StartsWith(line, "#")) {
             // Mandatory Comment

View File

@@ -191,7 +191,7 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
 std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualDir>& patch_dirs,
                                                       const std::string& build_id) const {
     const auto& disabled = Settings::values.disabled_addons[title_id];
-    const auto nso_build_id = fmt::format("{:0>64}", build_id);
+    const auto nso_build_id = fmt::format("{:0<64}", build_id);
 
     std::vector<VirtualFile> out;
     out.reserve(patch_dirs.size());
@@ -206,7 +206,7 @@ std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualD
                 auto name = file->GetName();
 
                 const auto this_build_id =
-                    fmt::format("{:0>64}", name.substr(0, name.find('.')));
+                    fmt::format("{:0<64}", name.substr(0, name.find('.')));
                 if (nso_build_id == this_build_id)
                     out.push_back(file);
             } else if (file->GetExtension() == "pchtxt") {
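Both files above flip the fmt fill/align specifier for build IDs from "{:0>64}" to "{:0<64}": '>' right-aligns the value so the zero fill lands on the left, while '<' left-aligns it so the zeros pad on the right, which matches how shortened NSO build IDs are expanded to 64 hex digits before comparison. A minimal standalone sketch (not part of the commit) of the difference:

#include <fmt/format.h>
#include <string>

int main() {
    const std::string id = "ABCD1234";
    // Old spec: zeros on the left (value right-aligned in the 64-char field).
    fmt::print("{}\n", fmt::format("{:0>64}", id)); // 000...000ABCD1234
    // New spec: zeros on the right (value left-aligned in the 64-char field).
    fmt::print("{}\n", fmt::format("{:0<64}", id)); // ABCD1234000...000
}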

View File

@@ -9,7 +9,7 @@
 #include "core/memory.h"
 #include "video_core/host1x/host1x.h"
 
-using Core::Memory::PAGE_SIZE;
+using Core::Memory::YUZU_PAGESIZE;
 
 namespace Service::Nvidia::NvCore {
 NvMap::Handle::Handle(u64 size_, Id id_)
@@ -27,7 +27,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
     flags = pFlags;
     kind = pKind;
-    align = pAlign < PAGE_SIZE ? PAGE_SIZE : pAlign;
+    align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
 
     // This flag is only applicable for handles with an address passed
     if (pAddress) {
@@ -37,7 +37,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
                    "Mapping nvmap handles without a CPU side address is unimplemented!");
     }
-    size = Common::AlignUp(size, PAGE_SIZE);
+    size = Common::AlignUp(size, YUZU_PAGESIZE);
     aligned_size = Common::AlignUp(size, align);
     address = pAddress;
     allocated = true;
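
Common::AlignUp, used above to round handle sizes to page granularity, can be approximated by this hypothetical stand-in (assuming a power-of-two alignment, which YUZU_PAGESIZE always is; yuzu's real helper lives in common/alignment.h):

#include <cstddef>
#include <cstdint>

template <typename T>
constexpr T AlignUp(T value, std::size_t align) {
    const T mask = static_cast<T>(align - 1);
    return static_cast<T>((value + mask) & ~mask);
}

static_assert(AlignUp<std::uint64_t>(0x1801, 0x1000) == 0x2000); // rounds up to the next page
static_assert(AlignUp<std::uint64_t>(0x2000, 0x1000) == 0x2000); // already page-aligned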

View File

@@ -153,7 +153,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
         return NvResult::BadValue;
     }
 
-    if (params.page_size != VM::PAGE_SIZE && params.page_size != vm.big_page_size) {
+    if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) {
         return NvResult::BadValue;
     }
 
@@ -163,11 +163,11 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
         return NvResult::NotImplemented;
     }
 
-    const u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS
-                                                               : vm.big_page_size_bits};
+    const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+                                                                   : vm.big_page_size_bits};
 
-    auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator
-                                                      : *vm.big_page_allocator};
+    auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+                                                          : *vm.big_page_allocator};
 
     if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
         allocator.AllocateFixed(static_cast<u32>(params.offset >> page_size_bits), params.pages);
@@ -190,7 +190,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
         .mappings{},
         .page_size = params.page_size,
         .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
-        .big_pages = params.page_size != VM::PAGE_SIZE,
+        .big_pages = params.page_size != VM::YUZU_PAGESIZE,
     };
 
     std::memcpy(output.data(), &params, output.size());
@@ -248,10 +248,10 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
             gmmu->Unmap(params.offset, allocation.size);
         }
 
-        auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator
-                                                          : *vm.big_page_allocator};
-        u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS
-                                                             : vm.big_page_size_bits};
+        auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+                                                              : *vm.big_page_allocator};
+        u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+                                                                 : vm.big_page_size_bits};
 
         allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
                        static_cast<u32>(allocation.size >> page_size_bits));
@@ -369,7 +369,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
         bool big_page{[&]() {
             if (Common::IsAligned(handle->align, vm.big_page_size))
                 return true;
-            else if (Common::IsAligned(handle->align, VM::PAGE_SIZE))
+            else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
                 return false;
             else {
                 ASSERT(false);
@@ -396,7 +396,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
         } else {
             auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
-            u32 page_size{big_page ? vm.big_page_size : VM::PAGE_SIZE};
+            u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
             u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
 
             params.offset = static_cast<u64>(allocator.Allocate(
@@ -473,7 +473,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
     params.regions = std::array<VaRegion, 2>{
         VaRegion{
             .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS,
-            .page_size = VM::PAGE_SIZE,
+            .page_size = VM::YUZU_PAGESIZE,
             ._pad0_{},
             .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(),
         },

View File

@@ -188,8 +188,8 @@ private:
     std::mutex mutex; //!< Locks all AS operations
 
     struct VM {
-        static constexpr u32 PAGE_SIZE{0x1000};
-        static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(PAGE_SIZE)};
+        static constexpr u32 YUZU_PAGESIZE{0x1000};
+        static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)};
 
         static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
         static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};

View File

@@ -16,7 +16,7 @@
 #include "core/hle/service/nvdrv/devices/nvmap.h"
 #include "core/memory.h"
 
-using Core::Memory::PAGE_SIZE;
+using Core::Memory::YUZU_PAGESIZE;
 
 namespace Service::Nvidia::Devices {
 
@@ -75,7 +75,8 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output)
     LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
 
     std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
-    auto result = file.CreateHandle(Common::AlignUp(params.size, PAGE_SIZE), handle_description);
+    auto result =
+        file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description);
     if (result != NvResult::Success) {
         LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
         return result;
@@ -104,8 +105,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
     }
 
     // Force page size alignment at a minimum
-    if (params.align < PAGE_SIZE) {
-        params.align = PAGE_SIZE;
+    if (params.align < YUZU_PAGESIZE) {
+        params.align = YUZU_PAGESIZE;
     }
 
     auto handle_description{file.GetHandle(params.handle)};

View File

@@ -14,7 +14,7 @@ namespace Loader {
 namespace {
 
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 } // Anonymous namespace

View File

@@ -125,7 +125,7 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& nro_file) {
 }
 
 static constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 
 static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data) {

View File

@@ -45,7 +45,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
 }
 
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 } // Anonymous namespace
} // Anonymous namespace } // Anonymous namespace

View File

@@ -36,10 +36,11 @@ struct Memory::Impl {
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
+                 Common::PageType::Memory);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
@@ -47,9 +48,10 @@ struct Memory::Impl {
     }
 
     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
+                 Common::PageType::Unmapped);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Unmap(base, size);
@@ -57,7 +59,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (!paddr) {
             return {};
@@ -67,7 +69,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (paddr == 0) {
             return {};
@@ -176,13 +178,14 @@ struct Memory::Impl {
                      auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
         const auto& page_table = process.PageTable().PageTableImpl();
         std::size_t remaining_size = size;
-        std::size_t page_index = addr >> PAGE_BITS;
-        std::size_t page_offset = addr & PAGE_MASK;
+        std::size_t page_index = addr >> YUZU_PAGEBITS;
+        std::size_t page_offset = addr & YUZU_PAGEMASK;
 
         while (remaining_size) {
             const std::size_t copy_amount =
-                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
+            const auto current_vaddr =
+                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
 
             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {
@@ -192,7 +195,7 @@ struct Memory::Impl {
             }
             case Common::PageType::Memory: {
                 DEBUG_ASSERT(pointer);
-                u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
+                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
@@ -339,10 +342,10 @@ struct Memory::Impl {
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
         // The region is at a granularity of CPU pages.
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (debug) {
                 // Switch page type to debug if now debug
                 switch (page_type) {
@@ -354,7 +357,7 @@ struct Memory::Impl {
                     // Page is already marked.
                     break;
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::DebugMemory);
                     break;
                 default:
@@ -371,9 +374,9 @@ struct Memory::Impl {
                     // Don't mess with already non-debug or rasterizer memory.
                     break;
                 case Common::PageType::DebugMemory: {
-                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)};
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                        pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     break;
                 }
                 default:
@@ -398,10 +401,10 @@ struct Memory::Impl {
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
         // is different). This assumes the specified GPU address region is contiguous as well.
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (cached) {
                 // Switch page type to cached if now cached
                 switch (page_type) {
@@ -411,7 +414,7 @@ struct Memory::Impl {
                     break;
                 case Common::PageType::DebugMemory:
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::RasterizerCachedMemory);
                     break;
                 case Common::PageType::RasterizerCachedMemory:
@@ -434,16 +437,16 @@ struct Memory::Impl {
                     // that this area is already unmarked as cached.
                     break;
                 case Common::PageType::RasterizerCachedMemory: {
-                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                     if (pointer == nullptr) {
                         // It's possible that this function has been called while updating the
                         // pagetable after unmapping a VMA. In that case the underlying VMA will no
                        // longer exist, and we should just leave the pagetable entry blank.
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                             nullptr, Common::PageType::Unmapped);
                     } else {
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                            pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                            pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     }
                     break;
                 }
@@ -465,8 +468,8 @@ struct Memory::Impl {
      */
     void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                   Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
-                  (base + size) * PAGE_SIZE);
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
+                  (base + size) * YUZU_PAGESIZE);
 
         // During boot, current_page_table might not be set yet, in which case we need not flush
         if (system.IsPoweredOn()) {
@@ -474,7 +477,7 @@ struct Memory::Impl {
             for (u64 i = 0; i < size; i++) {
                 const auto page = base + i;
                 if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
-                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                 }
             }
         }
@@ -485,7 +488,7 @@ struct Memory::Impl {
         if (!target) {
             ASSERT_MSG(type != Common::PageType::Memory,
-                       "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
+                       "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);
 
             while (base != end) {
                 page_table.pointers[base].Store(nullptr, type);
@@ -496,14 +499,14 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
-                page_table.backing_addr[base] = target - (base << PAGE_BITS);
+                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+                page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
 
                 ASSERT_MSG(page_table.pointers[base].Pointer(),
                            "memory mapping base yield a nullptr within the table");
 
                 base += 1;
-                target += PAGE_SIZE;
+                target += YUZU_PAGESIZE;
             }
         }
     }
@@ -518,7 +521,7 @@ struct Memory::Impl {
        }
 
        // Avoid adding any extra logic to this fast-path block
-        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            return &pointer[vaddr];
        }
@@ -662,7 +665,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
 bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
     const Kernel::KProcess& process = *system.CurrentProcess();
     const auto& page_table = process.PageTable().PageTableImpl();
-    const size_t page = vaddr >> PAGE_BITS;
+    const size_t page = vaddr >> YUZU_PAGEBITS;
     if (page >= page_table.pointers.size()) {
         return false;
     }
@@ -673,9 +676,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
 bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
     VAddr end = base + size;
-    VAddr page = Common::AlignDown(base, PAGE_SIZE);
+    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
 
-    for (; page < end; page += PAGE_SIZE) {
+    for (; page < end; page += YUZU_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
             return false;
         }

View File

@@ -27,9 +27,9 @@ namespace Core::Memory {
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
  * be mapped.
  */
-constexpr std::size_t PAGE_BITS = 12;
-constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
-constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
+constexpr std::size_t YUZU_PAGEBITS = 12;
+constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
+constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
 
 /// Virtual user-space memory regions
 enum : VAddr {
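
A self-contained sketch (not from the commit; PageIndex and PageOffset are hypothetical helper names) of how the three renamed constants decompose a virtual address, mirroring the vaddr >> YUZU_PAGEBITS and vaddr & YUZU_PAGEMASK arithmetic used throughout memory.cpp:

#include <cstddef>
#include <cstdint>

constexpr std::size_t YUZU_PAGEBITS = 12;
constexpr std::uint64_t YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
constexpr std::uint64_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;

constexpr std::uint64_t PageIndex(std::uint64_t vaddr) { return vaddr >> YUZU_PAGEBITS; }
constexpr std::uint64_t PageOffset(std::uint64_t vaddr) { return vaddr & YUZU_PAGEMASK; }

static_assert(PageIndex(0x1234) == 0x1);    // with 4 KiB pages, address 0x1234 is in page 1
static_assert(PageOffset(0x1234) == 0x234); // at byte offset 0x234 within that page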

View File

@@ -22,8 +22,9 @@ constexpr VAddr c = 0x1328914000;
 class RasterizerInterface {
 public:
     void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-        const u64 page_start{addr >> Core::Memory::PAGE_BITS};
-        const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS};
+        const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS};
+        const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >>
+                           Core::Memory::YUZU_PAGEBITS};
         for (u64 page = page_start; page < page_end; ++page) {
             int& value = page_table[page];
             value += delta;
@@ -37,7 +38,7 @@ public:
     }
 
     [[nodiscard]] int Count(VAddr addr) const noexcept {
-        const auto it = page_table.find(addr >> Core::Memory::PAGE_BITS);
+        const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS);
         return it == page_table.end() ? 0 : it->second;
     }

View File

@@ -36,7 +36,7 @@ struct NullBufferParams {};
 template <class RasterizerInterface>
 class BufferBase {
     static constexpr u64 PAGES_PER_WORD = 64;
-    static constexpr u64 BYTES_PER_PAGE = Core::Memory::PAGE_SIZE;
+    static constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE;
     static constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;
 
     /// Vector tracking modified pages tightly packed with small vector optimization

View File

@@ -60,8 +60,8 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelI
     // Page size for caching purposes.
     // This is unrelated to the CPU page size and it can be changed as it seems optimal.
-    static constexpr u32 PAGE_BITS = 16;
-    static constexpr u64 PAGE_SIZE = u64{1} << PAGE_BITS;
+    static constexpr u32 YUZU_PAGEBITS = 16;
+    static constexpr u64 YUZU_PAGESIZE = u64{1} << YUZU_PAGEBITS;
 
     static constexpr bool IS_OPENGL = P::IS_OPENGL;
     static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS =
@@ -213,8 +213,8 @@ private:
     template <typename Func>
     void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) {
-        const u64 page_end = Common::DivCeil(cpu_addr + size, PAGE_SIZE);
-        for (u64 page = cpu_addr >> PAGE_BITS; page < page_end;) {
+        const u64 page_end = Common::DivCeil(cpu_addr + size, YUZU_PAGESIZE);
+        for (u64 page = cpu_addr >> YUZU_PAGEBITS; page < page_end;) {
             const BufferId buffer_id = page_table[page];
             if (!buffer_id) {
                 ++page;
@@ -224,7 +224,7 @@ private:
             func(buffer_id, buffer);
 
             const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-            page = Common::DivCeil(end_addr, PAGE_SIZE);
+            page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
         }
     }
@@ -259,8 +259,8 @@ private:
     }
 
     static bool IsRangeGranular(VAddr cpu_addr, size_t size) {
-        return (cpu_addr & ~Core::Memory::PAGE_MASK) ==
-               ((cpu_addr + size) & ~Core::Memory::PAGE_MASK);
+        return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) ==
+               ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
     }
 
     void RunGarbageCollector();
@@ -433,7 +433,7 @@ private:
     u64 minimum_memory = 0;
     u64 critical_memory = 0;
 
-    std::array<BufferId, ((1ULL << 39) >> PAGE_BITS)> page_table;
+    std::array<BufferId, ((1ULL << 39) >> YUZU_PAGEBITS)> page_table;
 };
 
 template <class P>
@@ -929,8 +929,8 @@ void BufferCache<P>::PopAsyncFlushes() {}
 template <class P>
 bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId image_id = page_table[page];
         if (!image_id) {
             ++page;
@@ -941,7 +941,7 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
             return true;
         }
         const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
@@ -949,8 +949,8 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
 template <class P>
 bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
     const VAddr end_addr = addr + size;
-    const u64 page_end = Common::DivCeil(end_addr, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(end_addr, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId buffer_id = page_table[page];
         if (!buffer_id) {
             ++page;
@@ -962,15 +962,15 @@ bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
         if (buf_start_addr < end_addr && addr < buf_end_addr) {
             return true;
         }
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
 
 template <class P>
 bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId image_id = page_table[page];
         if (!image_id) {
             ++page;
@@ -981,7 +981,7 @@ bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
             return true;
         }
         const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
@@ -1470,7 +1470,7 @@ BufferId BufferCache<P>::FindBuffer(VAddr cpu_addr, u32 size) {
     if (cpu_addr == 0) {
         return NULL_BUFFER_ID;
     }
-    const u64 page = cpu_addr >> PAGE_BITS;
+    const u64 page = cpu_addr >> YUZU_PAGEBITS;
     const BufferId buffer_id = page_table[page];
     if (!buffer_id) {
         return CreateBuffer(cpu_addr, size);
@@ -1491,8 +1491,9 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
     VAddr end = cpu_addr + wanted_size;
     int stream_score = 0;
     bool has_stream_leap = false;
-    for (; cpu_addr >> PAGE_BITS < Common::DivCeil(end, PAGE_SIZE); cpu_addr += PAGE_SIZE) {
-        const BufferId overlap_id = page_table[cpu_addr >> PAGE_BITS];
+    for (; cpu_addr >> YUZU_PAGEBITS < Common::DivCeil(end, YUZU_PAGESIZE);
+         cpu_addr += YUZU_PAGESIZE) {
+        const BufferId overlap_id = page_table[cpu_addr >> YUZU_PAGEBITS];
         if (!overlap_id) {
             continue;
         }
@@ -1518,11 +1519,11 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
             // as a stream buffer. Increase the size to skip constantly recreating buffers.
             has_stream_leap = true;
             if (expands_right) {
-                begin -= PAGE_SIZE * 256;
+                begin -= YUZU_PAGESIZE * 256;
                 cpu_addr = begin;
             }
             if (expands_left) {
-                end += PAGE_SIZE * 256;
+                end += YUZU_PAGESIZE * 256;
             }
         }
     }
@@ -1598,8 +1599,8 @@ void BufferCache<P>::ChangeRegister(BufferId buffer_id) {
     }
     const VAddr cpu_addr_begin = buffer.CpuAddr();
     const VAddr cpu_addr_end = cpu_addr_begin + size;
-    const u64 page_begin = cpu_addr_begin / PAGE_SIZE;
-    const u64 page_end = Common::DivCeil(cpu_addr_end, PAGE_SIZE);
+    const u64 page_begin = cpu_addr_begin / YUZU_PAGESIZE;
+    const u64 page_end = Common::DivCeil(cpu_addr_end, YUZU_PAGESIZE);
     for (u64 page = page_begin; page != page_end; ++page) {
         if constexpr (insert) {
             page_table[page] = buffer_id;
@@ -1848,7 +1849,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
     if (!cpu_addr || size == 0) {
         return NULL_BINDING;
    }
-    const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::PAGE_SIZE);
+    const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
     const Binding binding{
         .cpu_addr = *cpu_addr,
         .size = is_written ? size : static_cast<u32>(cpu_end - *cpu_addr),
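
The buffer cache's page walks above use Common::DivCeil to get the exclusive end page of [addr, addr + size). A hedged stand-in with the same rounding-up semantics (illustrative only, not yuzu's exact definition):

#include <cstdint>

template <typename N>
constexpr N DivCeil(N number, N divisor) {
    // Round the quotient up instead of truncating toward zero.
    return (number + divisor - 1) / divisor;
}

// The buffer cache uses its own 64 KiB pages (YUZU_PAGEBITS = 16 above).
constexpr std::uint64_t CACHE_PAGESIZE = 1ULL << 16;

// A region reaching one byte past a page boundary spans two cache pages.
static_assert(DivCeil(CACHE_PAGESIZE + 1, CACHE_PAGESIZE) == 2);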

View File

@@ -570,14 +570,14 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
             const std::size_t page{(page_index & big_page_mask) + size};
             return page <= big_page_size;
         }
-        const std::size_t page{(gpu_addr & Core::Memory::PAGE_MASK) + size};
-        return page <= Core::Memory::PAGE_SIZE;
+        const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+        return page <= Core::Memory::YUZU_PAGESIZE;
     }
     if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
         return false;
     }
-    const std::size_t page{(gpu_addr & Core::Memory::PAGE_MASK) + size};
-    return page <= Core::Memory::PAGE_SIZE;
+    const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+    return page <= Core::Memory::YUZU_PAGESIZE;
 }
 
 bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {

View File

@@ -214,8 +214,8 @@ private:
             return cache_begin < addr_end && addr_begin < cache_end;
         };
 
-        const u64 page_end = addr_end >> PAGE_BITS;
-        for (u64 page = addr_begin >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = addr_end >> YUZU_PAGEBITS;
+        for (u64 page = addr_begin >> YUZU_PAGEBITS; page <= page_end; ++page) {
            const auto& it = cached_queries.find(page);
            if (it == std::end(cached_queries)) {
                continue;
@@ -235,14 +235,14 @@ private:
     /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
     CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
         rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
-        const u64 page = static_cast<u64>(cpu_addr) >> PAGE_BITS;
+        const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
         return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
                                                   host_ptr);
     }
 
     /// Tries to a get a cached query. Returns nullptr on failure.
     CachedQuery* TryGet(VAddr addr) {
-        const u64 page = static_cast<u64>(addr) >> PAGE_BITS;
+        const u64 page = static_cast<u64>(addr) >> YUZU_PAGEBITS;
         const auto it = cached_queries.find(page);
         if (it == std::end(cached_queries)) {
             return nullptr;
@@ -260,8 +260,8 @@ private:
         uncommitted_flushes->push_back(addr);
     }
 
-    static constexpr std::uintptr_t PAGE_SIZE = 4096;
-    static constexpr unsigned PAGE_BITS = 12;
+    static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
+    static constexpr unsigned YUZU_PAGEBITS = 12;
 
     VideoCore::RasterizerInterface& rasterizer;

View File

@@ -24,8 +24,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
     u64 cache_bytes = 0;
 
     std::atomic_thread_fence(std::memory_order_acquire);
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page != page_end; ++page) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
         std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page);
 
         if (delta > 0) {
@@ -44,26 +44,27 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
             if (uncache_bytes == 0) {
                 uncache_begin = page;
             }
-            uncache_bytes += PAGE_SIZE;
+            uncache_bytes += YUZU_PAGESIZE;
         } else if (uncache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+            cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
+                                                  false);
             uncache_bytes = 0;
         }
 
         if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
             if (cache_bytes == 0) {
                 cache_begin = page;
             }
-            cache_bytes += PAGE_SIZE;
+            cache_bytes += YUZU_PAGESIZE;
         } else if (cache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
+            cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
             cache_bytes = 0;
         }
     }
 
     if (uncache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+        cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
    }
    if (cache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
+        cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
    }
 }

View File

@@ -120,8 +120,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
     const VAddr addr_end = addr + size;
     Entry* const entry = NewEntry(addr, addr_end, data.get());
 
-    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
         invalidation_cache[page].push_back(entry);
     }
 
@@ -132,8 +132,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
 void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
     const VAddr addr_end = addr + size;
-    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
         auto it = invalidation_cache.find(page);
         if (it == invalidation_cache.end()) {
             continue;
@@ -186,8 +186,8 @@ void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr
 }
 
 void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) {
-    const u64 page_end = (entry->addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = entry->addr_start >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (entry->addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = entry->addr_start >> YUZU_PAGEBITS; page < page_end; ++page) {
         const auto entries_it = invalidation_cache.find(page);
         ASSERT(entries_it != invalidation_cache.end());
         std::vector<Entry*>& entries = entries_it->second;

View File

@@ -34,8 +34,8 @@ struct ShaderInfo {
 };
 
 class ShaderCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
-    static constexpr u64 PAGE_BITS = 14;
-    static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;
+    static constexpr u64 YUZU_PAGEBITS = 14;
+    static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
 
     static constexpr size_t NUM_PROGRAMS = 6;

View File

@@ -600,7 +600,7 @@ void TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
 template <class P>
 typename P::ImageView* TextureCache<P>::TryFindFramebufferImageView(VAddr cpu_addr) {
     // TODO: Properly implement this
-    const auto it = page_table.find(cpu_addr >> PAGE_BITS);
+    const auto it = page_table.find(cpu_addr >> YUZU_PAGEBITS);
     if (it == page_table.end()) {
         return nullptr;
     }
@@ -1506,14 +1506,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
                                   selected_page_table) {
         const auto page_it = selected_page_table.find(page);
         if (page_it == selected_page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageId>& image_ids = page_it->second;
         const auto vector_it = std::ranges::find(image_ids, image_id);
         if (vector_it == image_ids.end()) {
             ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
-                       page << PAGE_BITS);
+                       page << YUZU_PAGEBITS);
             return;
         }
         image_ids.erase(vector_it);
@@ -1526,14 +1526,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;
         const auto vector_it = std::ranges::find(image_map_ids, map_id);
         if (vector_it == image_map_ids.end()) {
             ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
-                       page << PAGE_BITS);
+                       page << YUZU_PAGEBITS);
             return;
         }
         image_map_ids.erase(vector_it);
@@ -1554,7 +1554,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;

View File

@@ -82,7 +82,7 @@ public:
 template <class P>
 class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo> {
     /// Address shift for caching images into a hash table
-    static constexpr u64 PAGE_BITS = 20;
+    static constexpr u64 YUZU_PAGEBITS = 20;
 
     /// Enables debugging features to the texture cache
     static constexpr bool ENABLE_VALIDATION = P::ENABLE_VALIDATION;
@@ -210,8 +210,8 @@ private:
     template <typename Func>
     static void ForEachCPUPage(VAddr addr, size_t size, Func&& func) {
         static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
-        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
-        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
+        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
             if constexpr (RETURNS_BOOL) {
                 if (func(page)) {
                     break;
@@ -225,8 +225,8 @@ private:
     template <typename Func>
     static void ForEachGPUPage(GPUVAddr addr, size_t size, Func&& func) {
         static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
-        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
-        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
+        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
             if constexpr (RETURNS_BOOL) {
                 if (func(page)) {
                     break;