early-access version 2610
parent b3c57a4768
commit 3996303be5
15 changed files with 383 additions and 248 deletions

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2606.
+This is the source code for early-access 2610.
 
 ## Legal Notice
 

@@ -36,7 +36,6 @@ if (MSVC)
     # /GT - Supports fiber safety for data allocated using static thread-local storage
     add_compile_options(
         /MP
-        /Zf
         /Zi
         /Zm200
         /Zo
@@ -82,7 +81,7 @@ if (MSVC)
     add_compile_options("$<$<CONFIG:Release>:/GS->")
 
     set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG /MANIFEST:NO" CACHE STRING "" FORCE)
-    set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
+    set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
 else()
     add_compile_options(
         -Wall

@@ -32,14 +32,14 @@ assert_noinline_call(const Fn& fn) {
 
 #define ASSERT(_a_) \
     do \
-        if (!(_a_)) [[unlikely]] { \
+        if (!(_a_)) { \
             assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
         } \
     while (0)
 
 #define ASSERT_MSG(_a_, ...) \
     do \
-        if (!(_a_)) [[unlikely]] { \
+        if (!(_a_)) { \
             assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
         } \
     while (0)

@@ -70,7 +70,7 @@ assert_noinline_call(const Fn& fn) {
 #define ASSERT_OR_EXECUTE(_a_, _b_) \
     do { \
         ASSERT(_a_); \
-        if (!(_a_)) [[unlikely]] { \
+        if (!(_a_)) { \
             _b_ \
         } \
     } while (0)
@@ -79,7 +79,7 @@ assert_noinline_call(const Fn& fn) {
 #define ASSERT_OR_EXECUTE_MSG(_a_, _b_, ...) \
     do { \
         ASSERT_MSG(_a_, __VA_ARGS__); \
-        if (!(_a_)) [[unlikely]] { \
+        if (!(_a_)) { \
             _b_ \
         } \
     } while (0)

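Note on the four assertion hunks above: the commit removes the C++20 [[unlikely]] attribute from the failure branches (the diff does not record why), while keeping the do { ... } while (0) wrapper that makes each macro expand to exactly one statement. A minimal sketch of why that wrapper matters, using a hypothetical MY_ASSERT stand-in rather than yuzu's real macro:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for yuzu's ASSERT: the do/while(0) wrapper makes the
    // macro expand to a single statement, so it composes safely with if/else.
    #define MY_ASSERT(cond)                                  \
        do                                                   \
            if (!(cond)) {                                   \
                std::fprintf(stderr, "Assertion failed!\n"); \
                std::abort();                                \
            }                                                \
        while (0)

    int main() {
        int x = 1;
        // Without the wrapper, the trailing semicolon here would detach the else
        // from the if; with it, the macro use parses as one statement.
        if (x > 0)
            MY_ASSERT(x != 0);
        else
            std::puts("unreachable");
        return 0;
    }
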
@@ -28,7 +28,8 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize));
+    m_page_group =
+        KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
 
     // Lock the memory.
     R_TRY(page_table.LockForCodeMemory(addr, size))

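The KCodeMemory fix above corrects an address-space mix-up: the page group describes physical memory, but the old code seeded it with the virtual address addr, so it now translates through page_table.GetPhysicalAddr(addr) first. Common::DivideUp is round-up integer division, so the group covers every page the byte range touches. A self-contained sketch of that helper (the implementation shown is the usual ceiling division, written here for illustration rather than copied from yuzu):

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t PageSize = 0x1000;

    // Round-up division: how many whole pages are needed to cover `value` bytes.
    constexpr std::size_t DivideUp(std::size_t value, std::size_t divisor) {
        return (value + divisor - 1) / divisor;
    }

    int main() {
        assert(DivideUp(0x1000, PageSize) == 1); // exactly one page
        assert(DivideUp(0x1001, PageSize) == 2); // one byte past a boundary needs two
        return 0;
    }
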
@@ -89,6 +89,10 @@ public:
         return ResultSuccess;
     }
 
+    bool Empty() const {
+        return nodes.empty();
+    }
+
 private:
     std::list<Node> nodes;
 };

@@ -486,6 +486,58 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
     return address;
 }
 
+ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+
+    // We're making a new group, not adding to an existing one.
+    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+
+    // Begin traversal.
+    Common::PageTable::TraversalContext context;
+    Common::PageTable::TraversalEntry next_entry;
+    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+
+    // Prepare tracking variables.
+    PAddr cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, adding to group as we go.
+    const auto& memory_layout = system.Kernel().MemoryLayout();
+    while (tot_size < size) {
+        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+                 ResultInvalidCurrentMemory);
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            const size_t cur_pages = cur_size / PageSize;
+
+            R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+            R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we add the right amount for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    // Add the last block.
+    const size_t cur_pages = cur_size / PageSize;
+    R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+    R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+    return ResultSuccess;
+}
+
 ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                           KPageTable& src_page_table, VAddr src_addr) {
     KScopedLightLock lk(general_lock);

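MakePageGroup walks the software page table and coalesces physically contiguous mappings: while the next traversal entry starts exactly at cur_addr + cur_size the current run is extended, otherwise the finished run is flushed into the page group and a new run begins; the first run is also trimmed to its block alignment via the cur_addr & (block_size - 1) term. A standalone sketch of the same coalescing idea over a plain list of (phys_addr, size) entries, assuming page-aligned inputs:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Entry {
        std::uint64_t phys_addr;
        std::uint64_t size;
    };

    // Merge adjacent entries into maximal contiguous runs, mirroring the
    // cur_addr/cur_size bookkeeping in KPageTable::MakePageGroup above.
    std::vector<Entry> Coalesce(const std::vector<Entry>& entries) {
        std::vector<Entry> runs;
        for (const Entry& e : entries) {
            if (!runs.empty() && runs.back().phys_addr + runs.back().size == e.phys_addr) {
                runs.back().size += e.size; // extends the current run
            } else {
                runs.push_back(e); // gap in physical memory: start a new run
            }
        }
        return runs;
    }

    int main() {
        const std::vector<Entry> pages{{0x1000, 0x1000}, {0x2000, 0x1000}, {0x8000, 0x1000}};
        for (const Entry& run : Coalesce(pages)) {
            std::cout << std::hex << run.phys_addr << "+" << run.size << '\n';
        }
        // Prints 1000+2000 and 8000+1000: three pages become two blocks.
        return 0;
    }
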
@@ -1223,6 +1275,31 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
     return ResultSuccess;
 }
 
+ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+                                            KMemoryState state_mask, KMemoryState state,
+                                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                                            KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+    // Ensure that the page group isn't null.
+    ASSERT(out != nullptr);
+
+    // Make sure that the region we're mapping is valid for the table.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check if state allows us to create the group.
+    R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Create a new page group for the region.
+    R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+    return ResultSuccess;
+}
+
 ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                                   Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;

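The R_TRY and R_UNLESS macros used throughout these kernel functions are yuzu's result-plumbing helpers: R_TRY evaluates an expression yielding a ResultCode and early-returns it on failure, and R_UNLESS early-returns a given result when a condition does not hold. A reduced sketch of the pattern with a hypothetical Result type (the real macros live in yuzu's result headers and carry more machinery):

    #include <cstddef>
    #include <cstdio>

    struct Result {
        int code;
        bool IsError() const { return code != 0; }
    };
    constexpr Result ResultSuccess{0};
    constexpr Result ResultInvalidSize{0x65}; // hypothetical error value

    // Early-return plumbing in the spirit of yuzu's R_TRY / R_UNLESS.
    #define R_TRY(expr)                            \
        do {                                       \
            const Result r_try_result = (expr);    \
            if (r_try_result.IsError()) {          \
                return r_try_result;               \
            }                                      \
        } while (0)

    #define R_UNLESS(cond, res)                    \
        do {                                       \
            if (!(cond)) {                         \
                return res;                        \
            }                                      \
        } while (0)

    Result Validate(std::size_t size) {
        R_UNLESS(size != 0, ResultInvalidSize); // bail out with a specific result
        return ResultSuccess;
    }

    Result Use(std::size_t size) {
        R_TRY(Validate(size)); // propagate failure, continue on success
        std::puts("validated");
        return ResultSuccess;
    }

    int main() {
        return Use(0x1000).code;
    }
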
@@ -1605,57 +1682,21 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
 }
 
 ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
-
-    KMemoryPermission old_perm{};
-
-    if (const ResultCode result{CheckMemoryState(
-            nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-            KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
-        result.IsError()) {
-        return result;
-    }
-
-    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
-            block->ShareToDevice(permission);
-        },
-        new_perm);
-
-    return ResultSuccess;
+    return this->LockMemoryAndOpen(
+        nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
+        KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+        KMemoryAttribute::All, KMemoryAttribute::None,
+        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+                                       KMemoryPermission::KernelReadWrite),
+        KMemoryAttribute::Locked);
 }
 
 ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
-
-    KMemoryPermission old_perm{};
-
-    if (const ResultCode result{CheckMemoryState(
-            nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-            KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
-            KMemoryAttribute::All, KMemoryAttribute::Locked)};
-        result.IsError()) {
-        return result;
-    }
-
-    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
-            block->UnshareToDevice(permission);
-        },
-        new_perm);
-
-    return ResultSuccess;
+    return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
+                              KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+                              KMemoryPermission::None, KMemoryAttribute::All,
+                              KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                              KMemoryAttribute::Locked, nullptr);
 }
 
 ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {

@@ -1991,4 +2032,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
     return ResultSuccess;
 }
 
+ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
+                                         size_t size, KMemoryState state_mask, KMemoryState state,
+                                         KMemoryPermission perm_mask, KMemoryPermission perm,
+                                         KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                                         KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+    // Validate basic preconditions.
+    ASSERT((lock_attr & attr) == KMemoryAttribute::None);
+    ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    // Validate the lock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check that the output page group is empty, if it exists.
+    if (out_pg) {
+        ASSERT(out_pg->GetNumPages() == 0);
+    }
+
+    // Check the state.
+    KMemoryState old_state{};
+    KMemoryPermission old_perm{};
+    KMemoryAttribute old_attr{};
+    size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Get the physical address, if we're supposed to.
+    if (out_paddr != nullptr) {
+        ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+    }
+
+    // Make the page group, if we're supposed to.
+    if (out_pg != nullptr) {
+        R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+    }
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+    }
+
+    // Apply the memory block updates.
+    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+    return ResultSuccess;
+}
+
+ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+                                    KMemoryState state, KMemoryPermission perm_mask,
+                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                    KMemoryAttribute attr, KMemoryPermission new_perm,
+                                    KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
+    // Validate basic preconditions.
+    ASSERT((attr_mask & lock_attr) == lock_attr);
+    ASSERT((attr & lock_attr) == lock_attr);
+
+    // Validate the unlock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check the state.
+    KMemoryState old_state{};
+    KMemoryPermission old_perm{};
+    KMemoryAttribute old_attr{};
+    size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Check the page group.
+    if (pg != nullptr) {
+        UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
+    }
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+    }
+
+    // Apply the memory block updates.
+    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+    return ResultSuccess;
+}
+
 } // namespace Kernel

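The old per-function lock/unlock bodies above are deduplicated into these two deliberately symmetric helpers: locking ORs lock_attr into the block attributes (new_attr = old_attr | lock_attr) while unlocking masks it back out (old_attr & ~lock_attr), and both fall back to the caller's previous permission when new_perm is None. The enum-flag arithmetic is ordinary bit manipulation; a minimal sketch with a hypothetical attribute enum:

    #include <cassert>
    #include <cstdint>

    enum class Attr : std::uint32_t {
        None = 0,
        Locked = 1 << 0,
        DeviceShared = 1 << 1,
    };

    constexpr Attr operator|(Attr a, Attr b) {
        return static_cast<Attr>(static_cast<std::uint32_t>(a) | static_cast<std::uint32_t>(b));
    }
    constexpr Attr operator&(Attr a, Attr b) {
        return static_cast<Attr>(static_cast<std::uint32_t>(a) & static_cast<std::uint32_t>(b));
    }
    constexpr Attr operator~(Attr a) {
        return static_cast<Attr>(~static_cast<std::uint32_t>(a));
    }

    int main() {
        const Attr lock_attr = Attr::Locked;
        Attr attr = Attr::DeviceShared;

        attr = attr | lock_attr;  // lock: set the attribute, as LockMemoryAndOpen does
        assert((attr & Attr::Locked) == Attr::Locked);

        attr = attr & ~lock_attr; // unlock: clear it again, as UnlockMemory does
        assert((attr & Attr::Locked) == Attr::None);
        return 0;
    }
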
@@ -12,6 +12,7 @@
 #include "core/file_sys/program_metadata.h"
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/result.h"

@@ -71,6 +72,10 @@ public:
     ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
     ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
     ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
+    ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+                                    KMemoryState state_mask, KMemoryState state,
+                                    KMemoryPermission perm_mask, KMemoryPermission perm,
+                                    KMemoryAttribute attr_mask, KMemoryAttribute attr);
 
     Common::PageTable& PageTableImpl() {
         return page_table_impl;
@@ -159,10 +164,37 @@ private:
                                 attr_mask, attr, ignore_attr);
     }
 
+    ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+                                 KMemoryState state_mask, KMemoryState state,
+                                 KMemoryPermission perm_mask, KMemoryPermission perm,
+                                 KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                                 KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+    ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+                            const KPageLinkedList* pg);
+
+    ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
+
     bool IsLockedByCurrentThread() const {
         return general_lock.IsLockedByCurrentThread();
     }
 
+    bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+    }
+
+    bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        *out = GetPhysicalAddr(virt_addr);
+
+        return *out != 0;
+    }
+
     mutable KLightLock general_lock;
     mutable KLightLock map_physical_memory_lock;

@@ -322,6 +354,7 @@ private:
     bool is_aslr_enabled{};
 
     u32 heap_fill_value{};
+    const KMemoryRegion* cached_physical_heap_region{};
 
     KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
     KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};

@@ -1362,8 +1362,11 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
                      ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
-    KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+    KPageLinkedList pg;
+    R_TRY(src_pt.MakeAndOpenPageGroup(
+        std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
+        KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Map the group.
     R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
@@ -1408,8 +1411,8 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
 }
 
 static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
-    LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
-              static_cast<void*>(out), address, size);
+    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
+
     // Get kernel instance.
     auto& kernel = system.Kernel();

@@ -1664,7 +1667,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
         return ResultInvalidAddress;
     }
 
-    if (size == 0 || Common::Is4KBAligned(size)) {
+    if (size == 0 || !Common::Is4KBAligned(size)) {
         LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
         return ResultInvalidSize;
     }

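The one-character change above is a genuine bug fix: without the negation, UnmapProcessCodeMemory rejected every properly page-aligned size and accepted misaligned ones. Is4KBAligned is presumably the usual low-bits test; a tiny sketch of the corrected check (the helper body here is an assumption, written for illustration):

    #include <cassert>
    #include <cstdint>

    // Typical 4 KiB alignment test (an assumed implementation of
    // Common::Is4KBAligned): the low 12 bits must all be zero.
    constexpr bool Is4KBAligned(std::uint64_t value) {
        return (value & 0xFFF) == 0;
    }

    constexpr bool IsInvalidSize(std::uint64_t size) {
        return size == 0 || !Is4KBAligned(size); // the fixed condition from the diff
    }

    int main() {
        assert(!IsInvalidSize(0x2000)); // aligned, non-zero: accepted
        assert(IsInvalidSize(0x1234));  // misaligned: rejected
        assert(IsInvalidSize(0));       // zero: rejected
        return 0;
    }
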
@@ -21,7 +21,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
         return Status::BadValue;
     }
 
-    std::unique_lock lock(mutex);
+    std::scoped_lock lock(mutex);
 
     if (const auto status = AcquireBufferLocked(item, present_when); status != Status::NoError) {
         if (status != Status::NoBufferAvailable) {
@@ -40,7 +40,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
 }
 
 Status BufferItemConsumer::ReleaseBuffer(const BufferItem& item, Fence& release_fence) {
-    std::unique_lock lock(mutex);
+    std::scoped_lock lock(mutex);
 
     if (const auto status = AddReleaseFenceLocked(item.buf, item.graphic_buffer, release_fence);
         status != Status::NoError) {

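These std::unique_lock to std::scoped_lock swaps (repeated across the buffer-queue files below) trade an unlock/relock-capable lock for the simplest RAII guard: scoped_lock cannot be released early or moved, which documents that the mutex is held for the whole scope, and it can also acquire several mutexes at once deadlock-free. A short sketch of both uses:

    #include <mutex>

    std::mutex mutex_a;
    std::mutex mutex_b;
    int shared_value = 0;

    void SingleLock() {
        // Equivalent to the pattern in the diff: hold the mutex for the whole scope.
        std::scoped_lock lock(mutex_a);
        ++shared_value;
    }

    void DualLock() {
        // scoped_lock acquires both mutexes with a deadlock-avoidance algorithm;
        // unique_lock would need std::lock() plus adopt_lock to do the same.
        std::scoped_lock lock(mutex_a, mutex_b);
        ++shared_value;
    }

    int main() {
        SingleLock();
        DualLock();
        return shared_value == 2 ? 0 : 1;
    }
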
@@ -20,11 +20,7 @@ BufferQueueConsumer::~BufferQueueConsumer() = default;
 Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
                                           std::chrono::nanoseconds expected_present,
                                           u64 max_frame_number) {
-    s32 num_dropped_buffers{};
-    std::shared_ptr<IProducerListener> listener;
-    {
-        std::unique_lock lock(core->mutex);
+    std::scoped_lock lock(core->mutex);
 
     // Check that the consumer doesn't currently have the maximum number of buffers acquired.
     const s32 num_acquired_buffers{
@@ -49,13 +45,13 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
     if (expected_present.count() != 0) {
         constexpr auto MAX_REASONABLE_NSEC = 1000000000LL; // 1 second
 
-        // The expected_present argument indicates when the buffer is expected to be
-        // presented on-screen.
+        // The expected_present argument indicates when the buffer is expected to be presented
+        // on-screen.
         while (core->queue.size() > 1 && !core->queue[0].is_auto_timestamp) {
             const auto& buffer_item{core->queue[1]};
 
-            // If dropping entry[0] would leave us with a buffer that the consumer is not yet
-            // ready for, don't drop it.
+            // If dropping entry[0] would leave us with a buffer that the consumer is not yet ready
+            // for, don't drop it.
             if (max_frame_number && buffer_item.frame_number > max_frame_number) {
                 break;
             }
@@ -64,8 +60,7 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
             const auto desired_present = buffer_item.timestamp;
             if (desired_present < expected_present.count() - MAX_REASONABLE_NSEC ||
                 desired_present > expected_present.count()) {
-                // This buffer is set to display in the near future, or desired_present is
-                // garbage.
+                // This buffer is set to display in the near future, or desired_present is garbage.
                 LOG_DEBUG(Service_NVFlinger, "nodrop desire={} expect={}", desired_present,
                           expected_present.count());
                 break;
@@ -77,9 +72,6 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
             if (core->StillTracking(*front)) {
                 // Front buffer is still in mSlots, so mark the slot as free
                 slots[front->slot].buffer_state = BufferState::Free;
-                core->free_buffers.push_back(front->slot);
-                listener = core->connected_producer_listener;
-                ++num_dropped_buffers;
             }
 
             core->queue.erase(front);
@@ -88,13 +80,8 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
 
     // See if the front buffer is ready to be acquired.
     const auto desired_present = front->timestamp;
-    const auto buffer_is_due =
-        desired_present <= expected_present.count() ||
-        desired_present > expected_present.count() + MAX_REASONABLE_NSEC;
-    const auto consumer_is_ready =
-        max_frame_number > 0 ? front->frame_number <= max_frame_number : true;
-
-    if (!buffer_is_due || !consumer_is_ready) {
+    if (desired_present > expected_present.count() &&
+        desired_present < expected_present.count() + MAX_REASONABLE_NSEC) {
         LOG_DEBUG(Service_NVFlinger, "defer desire={} expect={}", desired_present,
                   expected_present.count());
         return Status::PresentLater;
@@ -117,8 +104,8 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
         slots[slot].fence = Fence::NoFence();
     }
 
-    // If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr
-    // to avoid unnecessarily remapping this buffer on the consumer side.
+    // If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr to
+    // avoid unnecessarily remapping this buffer on the consumer side.
     if (out_buffer->acquire_called) {
         out_buffer->graphic_buffer = nullptr;
     }
@@ -128,13 +115,6 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
         // We might have freed a slot while dropping old buffers, or the producer may be blocked
         // waiting for the number of buffers in the queue to decrease.
         core->SignalDequeueCondition();
-    }
-
-    if (listener != nullptr) {
-        for (s32 i = 0; i < num_dropped_buffers; ++i) {
-            listener->OnBufferReleased();
-        }
-    }
 
     return Status::NoError;
 }
@@ -147,7 +127,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
 
     std::shared_ptr<IProducerListener> listener;
     {
-        std::unique_lock lock(core->mutex);
+        std::scoped_lock lock(core->mutex);
 
         // If the frame number has changed because the buffer has been reallocated, we can ignore
         // this ReleaseBuffer for the old buffer.
@@ -170,8 +150,6 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
         slots[slot].fence = release_fence;
         slots[slot].buffer_state = BufferState::Free;
 
-        core->free_buffers.push_back(slot);
-
         listener = core->connected_producer_listener;
 
         LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot);
@@ -189,7 +167,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
         return Status::BadValue;
     }
 
-    core->dequeue_condition.notify_all();
+    core->SignalDequeueCondition();
 }
 
 // Call back without lock held
@@ -209,7 +187,7 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_
 
     LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app);
 
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");

@@ -10,16 +10,12 @@
 
 namespace Service::android {
 
-BufferQueueCore::BufferQueueCore() : lock{mutex, std::defer_lock} {
-    for (s32 slot = 0; slot < BufferQueueDefs::NUM_BUFFER_SLOTS; ++slot) {
-        free_slots.insert(slot);
-    }
-}
+BufferQueueCore::BufferQueueCore() = default;
 
 BufferQueueCore::~BufferQueueCore() = default;
 
 void BufferQueueCore::NotifyShutdown() {
-    std::unique_lock lk(mutex);
+    std::scoped_lock lock(mutex);
 
     is_shutting_down = true;
 
@@ -35,7 +31,7 @@ bool BufferQueueCore::WaitForDequeueCondition() {
         return false;
     }
 
-    dequeue_condition.wait(lock);
+    dequeue_condition.wait(mutex);
 
     return true;
 }

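WaitForDequeueCondition can wait on the mutex directly because the member becomes std::condition_variable_any later in this commit (see the buffer_queue_core.h hunks): condition_variable_any waits on anything satisfying BasicLockable, including a raw std::mutex, whereas plain std::condition_variable requires a std::unique_lock<std::mutex>. A minimal sketch of the pattern, assuming the caller already holds the mutex as these *Locked helpers do:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex mutex;
    std::condition_variable_any cond; // _any: can wait on the mutex itself
    bool ready = false;

    void Consumer() {
        std::scoped_lock lock(mutex);
        while (!ready) {
            cond.wait(mutex); // unlocks `mutex` while blocked, relocks before returning
        }
    }

    void Producer() {
        {
            std::scoped_lock lock(mutex);
            ready = true;
        }
        cond.notify_all();
    }

    int main() {
        std::thread consumer(Consumer);
        std::thread producer(Producer);
        consumer.join();
        producer.join();
        return 0;
    }
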
@@ -86,26 +82,15 @@ s32 BufferQueueCore::GetPreallocatedBufferCountLocked() const {
 void BufferQueueCore::FreeBufferLocked(s32 slot) {
     LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
 
-    const auto had_buffer = slots[slot].graphic_buffer != nullptr;
-
     slots[slot].graphic_buffer.reset();
 
     if (slots[slot].buffer_state == BufferState::Acquired) {
         slots[slot].needs_cleanup_on_release = true;
     }
 
-    if (slots[slot].buffer_state != BufferState::Free) {
-        free_slots.insert(slot);
-    } else if (had_buffer) {
-        // If the slot was FREE, but we had a buffer, we need to move this slot from the free
-        // buffers list to the the free slots list.
-        free_buffers.remove(slot);
-        free_slots.insert(slot);
-    }
-
     slots[slot].buffer_state = BufferState::Free;
+    slots[slot].frame_number = UINT32_MAX;
     slots[slot].acquire_called = false;
-    slots[slot].frame_number = 0;
     slots[slot].fence = Fence::NoFence();
 }
@@ -126,8 +111,7 @@ bool BufferQueueCore::StillTracking(const BufferItem& item) const {
 
 void BufferQueueCore::WaitWhileAllocatingLocked() const {
     while (is_allocating) {
-        std::unique_lock lk(mutex);
-        is_allocating_condition.wait(lk);
+        is_allocating_condition.wait(mutex);
     }
 }

@@ -49,24 +49,8 @@ private:
     bool StillTracking(const BufferItem& item) const;
     void WaitWhileAllocatingLocked() const;
 
-private:
-    class AutoLock final {
-    public:
-        AutoLock(std::shared_ptr<BufferQueueCore>& core_) : core{core_} {
-            core->lock.lock();
-        }
-
-        ~AutoLock() {
-            core->lock.unlock();
-        }
-
-    private:
-        std::shared_ptr<BufferQueueCore>& core;
-    };
-
 private:
     mutable std::mutex mutex;
-    mutable std::unique_lock<std::mutex> lock;
     bool is_abandoned{};
     bool consumer_controlled_by_app{};
     std::shared_ptr<IConsumerListener> consumer_listener;

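Dropping BufferQueueCore::AutoLock removes a risky construct: the old design kept a single std::unique_lock as a member of the shared core object and had AutoLock call lock()/unlock() on it, so the lock object itself was mutable state shared between threads rather than something each caller owns. The replacement is the idiomatic shape, a stack-local guard per call; a sketch with a hypothetical SharedState type:

    #include <mutex>

    struct SharedState {
        std::mutex mutex;
        int value = 0;
    };

    // New shape, matching the diff: a stack-local RAII guard per call. Each
    // thread owns its own guard object; only the mutex itself is shared.
    void Increment(SharedState& state) {
        std::scoped_lock lock(state.mutex);
        ++state.value;
    }

    int main() {
        SharedState state;
        Increment(state);
        return state.value == 1 ? 0 : 1;
    }
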
@@ -75,10 +59,8 @@ private:
     std::shared_ptr<IProducerListener> connected_producer_listener;
     BufferQueueDefs::SlotsType slots{};
     std::vector<BufferItem> queue;
-    std::set<s32> free_slots;
-    std::list<s32> free_buffers;
     s32 override_max_buffer_count{};
-    mutable std::condition_variable dequeue_condition;
+    mutable std::condition_variable_any dequeue_condition;
     const bool use_async_buffer{}; // This is always disabled on HOS
     bool dequeue_buffer_cannot_block{};
     PixelFormat default_buffer_format{PixelFormat::Rgba8888};
@@ -90,7 +72,7 @@ private:
     u64 frame_counter{};
     u32 transform_hint{};
     bool is_allocating{};
-    mutable std::condition_variable is_allocating_condition;
+    mutable std::condition_variable_any is_allocating_condition;
     bool allow_allocation{true};
     u64 buffer_age{};
     bool is_shutting_down{};

@@ -38,7 +38,7 @@ BufferQueueProducer::~BufferQueueProducer() {
 Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) {
     LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
 
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -65,7 +65,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
     std::shared_ptr<IConsumerListener> listener;
 
     {
-        BufferQueueCore::AutoLock lock(core);
+        std::scoped_lock lock(core->mutex);
         core->WaitWhileAllocatingLocked();
         if (core->is_abandoned) {
             LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -156,6 +156,14 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found,
             case BufferState::Acquired:
                 ++acquired_count;
                 break;
+            case BufferState::Free:
+                // We return the oldest of the free buffers to avoid stalling the producer if
+                // possible, since the consumer may still have pending reads of in-flight buffers
+                if (*found == BufferQueueCore::INVALID_BUFFER_SLOT ||
+                    slots[s].frame_number < slots[*found].frame_number) {
+                    *found = s;
+                }
+                break;
             default:
                 break;
             }

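With the free_slots / free_buffers lists gone, "free and available" is now encoded directly in the slot table: dequeue scans for BufferState::Free and prefers the smallest frame_number, so FreeBufferLocked's frame_number = UINT32_MAX pushes emptied slots to the back of the line while QueueBuffer's frame_number = 0 pulls a just-dropped buffer to the front. A standalone sketch of that selection rule (types are simplified stand-ins for the real slot table):

    #include <array>
    #include <cstdint>
    #include <iostream>

    enum class BufferState { Free, Dequeued, Queued, Acquired };

    struct Slot {
        BufferState state = BufferState::Free;
        std::uint32_t frame_number = UINT32_MAX; // UINT32_MAX: least preferred
    };

    constexpr int kInvalidSlot = -1;

    // Pick the free slot with the oldest (smallest) frame number, as
    // WaitForFreeSlotThenRelock and DetachNextBuffer now do.
    int FindOldestFreeSlot(const std::array<Slot, 8>& slots) {
        int found = kInvalidSlot;
        for (int s = 0; s < static_cast<int>(slots.size()); ++s) {
            if (slots[s].state != BufferState::Free) {
                continue;
            }
            if (found == kInvalidSlot || slots[s].frame_number < slots[found].frame_number) {
                found = s;
            }
        }
        return found;
    }

    int main() {
        std::array<Slot, 8> slots{};
        slots[2] = {BufferState::Free, 10};
        slots[5] = {BufferState::Free, 3}; // oldest free buffer
        slots[6] = {BufferState::Acquired, 1};
        std::cout << FindOldestFreeSlot(slots) << '\n'; // prints 5
        return 0;
    }
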
@@ -183,27 +191,12 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found,
             }
         }
 
-        *found = BufferQueueCore::INVALID_BUFFER_SLOT;
-
         // If we disconnect and reconnect quickly, we can be in a state where our slots are empty
         // but we have many buffers in the queue. This can cause us to run out of memory if we
         // outrun the consumer. Wait here if it looks like we have too many buffers queued up.
         const bool too_many_buffers = core->queue.size() > static_cast<size_t>(max_buffer_count);
         if (too_many_buffers) {
             LOG_ERROR(Service_NVFlinger, "queue size is {}, waiting", core->queue.size());
-        } else {
-            if (!core->free_buffers.empty()) {
-                auto slot = core->free_buffers.begin();
-                *found = *slot;
-                core->free_buffers.erase(slot);
-            } else if (core->allow_allocation && !core->free_slots.empty()) {
-                auto slot = core->free_slots.begin();
-                // Only return free slots up to the max buffer count
-                if (*slot < max_buffer_count) {
-                    *found = *slot;
-                    core->free_slots.erase(slot);
-                }
-            }
         }
 
         // If no buffer is found, or if the queue has too many buffers outstanding, wait for a
@@ -240,7 +233,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
     Status return_flags = Status::NoError;
     bool attached_by_consumer = false;
     {
-        BufferQueueCore::AutoLock lock(core);
+        std::scoped_lock lock(core->mutex);
         core->WaitWhileAllocatingLocked();
         if (format == PixelFormat::NoFormat) {
             format = core->default_buffer_format;
@@ -317,12 +310,13 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
         }
 
         {
-            BufferQueueCore::AutoLock lock(core);
+            std::scoped_lock lock(core->mutex);
             if (core->is_abandoned) {
                 LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
                 return Status::NoInit;
             }
 
+            slots[*out_slot].frame_number = UINT32_MAX;
             slots[*out_slot].graphic_buffer = graphic_buffer;
         }
     }
@@ -339,7 +333,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
 Status BufferQueueProducer::DetachBuffer(s32 slot) {
     LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
 
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
         return Status::NoInit;
@@ -374,7 +368,7 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
         return Status::BadValue;
     }
 
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
 
     core->WaitWhileAllocatingLocked();
 
@@ -382,12 +376,21 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
         return Status::NoInit;
     }
-    if (core->free_buffers.empty()) {
-        return Status::NoMemory;
+
+    // Find the oldest valid slot
+    int found = BufferQueueCore::INVALID_BUFFER_SLOT;
+    for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
+        if (slots[s].buffer_state == BufferState::Free && slots[s].graphic_buffer != nullptr) {
+            if (found == BufferQueueCore::INVALID_BUFFER_SLOT ||
+                slots[s].frame_number < slots[found].frame_number) {
+                found = s;
+            }
+        }
     }
 
-    const s32 found = core->free_buffers.front();
-    core->free_buffers.remove(found);
+    if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
+        return Status::NoMemory;
+    }
 
     LOG_DEBUG(Service_NVFlinger, "Detached slot {}", found);
 
@@ -409,7 +412,7 @@ Status BufferQueueProducer::AttachBuffer(s32* out_slot,
         return Status::BadValue;
     }
 
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
     core->WaitWhileAllocatingLocked();
 
     Status return_flags = Status::NoError;
@@ -469,7 +472,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
     BufferItem item;
 
     {
-        BufferQueueCore::AutoLock lock(core);
+        std::scoped_lock lock(core->mutex);
 
         if (core->is_abandoned) {
             LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -554,7 +557,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
             // mark it as freed
             if (core->StillTracking(*front)) {
                 slots[front->slot].buffer_state = BufferState::Free;
-                core->free_buffers.push_front(front->slot);
+                // Reset the frame number of the freed buffer so that it is the first in line to
+                // be dequeued again
+                slots[front->slot].frame_number = 0;
             }
             // Overwrite the droppable buffer with the incoming one
             *front = item;
@@ -582,10 +587,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
     // Call back without the main BufferQueue lock held, but with the callback lock held so we can
     // ensure that callbacks occur in order
     {
-        std::unique_lock lock(callback_mutex);
+        std::scoped_lock lock(callback_mutex);
         while (callback_ticket != current_callback_ticket) {
-            std::unique_lock<std::mutex> lk(callback_mutex);
-            callback_condition.wait(lk);
+            callback_condition.wait(callback_mutex);
         }
 
         if (frameAvailableListener != nullptr) {

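This hunk also fixes a latent double-lock: the old loop constructed a second std::unique_lock over callback_mutex while the outer lock already held it (recursively locking a std::mutex on the same thread is undefined behavior); the new code waits on the already-held mutex through the condition_variable_any member. The surrounding ticket scheme serializes producer callbacks: each caller takes a ticket under the lock and waits until its turn. A reduced sketch of ticket-ordered sections:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex callback_mutex;
    std::condition_variable_any callback_condition;
    int current_ticket = 0;

    // Run `fn` strictly in ticket order, even if callers arrive out of order.
    template <typename Fn>
    void RunInOrder(int ticket, Fn&& fn) {
        std::scoped_lock lock(callback_mutex);
        while (ticket != current_ticket) {
            callback_condition.wait(callback_mutex); // wait on the held mutex directly
        }
        fn();
        ++current_ticket;
        callback_condition.notify_all();
    }

    int main() {
        std::vector<std::thread> threads;
        for (int t = 2; t >= 0; --t) { // deliberately start in reverse order
            threads.emplace_back([t] { RunInOrder(t, [t] { std::cout << t << '\n'; }); });
        }
        for (auto& thread : threads) {
            thread.join();
        }
        return 0; // always prints 0, 1, 2
    }
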
@@ -604,7 +608,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
 void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
     LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
 
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
 
     if (core->is_abandoned) {
         LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -621,8 +625,8 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
         return;
     }
 
-    core->free_buffers.push_front(slot);
     slots[slot].buffer_state = BufferState::Free;
+    slots[slot].frame_number = 0;
     slots[slot].fence = fence;
 
     core->SignalDequeueCondition();
@@ -630,7 +634,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
 }
 
 Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
 
     if (out_value == nullptr) {
         LOG_ERROR(Service_NVFlinger, "outValue was nullptr");
@@ -687,7 +691,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
 Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener,
                                     NativeWindowApi api, bool producer_controlled_by_app,
                                     QueueBufferOutput* output) {
-    BufferQueueCore::AutoLock lock(core);
+    std::scoped_lock lock(core->mutex);
 
     LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api,
               producer_controlled_by_app);
|
@ -745,7 +749,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
|
||||||
std::shared_ptr<IConsumerListener> listener;
|
std::shared_ptr<IConsumerListener> listener;
|
||||||
|
|
||||||
{
|
{
|
||||||
BufferQueueCore::AutoLock lock(core);
|
std::scoped_lock lock(core->mutex);
|
||||||
|
|
||||||
core->WaitWhileAllocatingLocked();
|
core->WaitWhileAllocatingLocked();
|
||||||
|
|
||||||
|
@ -795,10 +799,11 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
|
||||||
return Status::BadValue;
|
return Status::BadValue;
|
||||||
}
|
}
|
||||||
|
|
||||||
BufferQueueCore::AutoLock lock(core);
|
std::scoped_lock lock(core->mutex);
|
||||||
|
|
||||||
slots[slot] = {};
|
slots[slot] = {};
|
||||||
slots[slot].graphic_buffer = buffer;
|
slots[slot].graphic_buffer = buffer;
|
||||||
|
slots[slot].frame_number = 0;
|
||||||
|
|
||||||
// Most games preallocate a buffer and pass a valid buffer here. However, it is possible for
|
// Most games preallocate a buffer and pass a valid buffer here. However, it is possible for
|
||||||
// this to be called with an empty buffer, Naruto Ultimate Ninja Storm is a game that does this.
|
// this to be called with an empty buffer, Naruto Ultimate Ninja Storm is a game that does this.
|
||||||
|
|
|
@ -77,7 +77,7 @@ private:
|
||||||
std::mutex callback_mutex;
|
std::mutex callback_mutex;
|
||||||
s32 next_callback_ticket{};
|
s32 next_callback_ticket{};
|
||||||
s32 current_callback_ticket{};
|
s32 current_callback_ticket{};
|
||||||
std::condition_variable callback_condition;
|
std::condition_variable_any callback_condition;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace Service::android
|
} // namespace Service::android
|
||||||
|
|
|
@ -18,7 +18,7 @@ ConsumerBase::ConsumerBase(std::unique_ptr<BufferQueueConsumer> consumer_)
|
||||||
: consumer{std::move(consumer_)} {}
|
: consumer{std::move(consumer_)} {}
|
||||||
|
|
||||||
ConsumerBase::~ConsumerBase() {
|
ConsumerBase::~ConsumerBase() {
|
||||||
std::unique_lock lock(mutex);
|
std::scoped_lock lock(mutex);
|
||||||
|
|
||||||
ASSERT_MSG(is_abandoned, "consumer is not abandoned!");
|
ASSERT_MSG(is_abandoned, "consumer is not abandoned!");
|
||||||
}
|
}
|
||||||
|
@ -36,17 +36,17 @@ void ConsumerBase::FreeBufferLocked(s32 slot_index) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConsumerBase::OnFrameAvailable(const BufferItem& item) {
|
void ConsumerBase::OnFrameAvailable(const BufferItem& item) {
|
||||||
std::unique_lock lock(mutex);
|
std::scoped_lock lock(mutex);
|
||||||
LOG_DEBUG(Service_NVFlinger, "called");
|
LOG_DEBUG(Service_NVFlinger, "called");
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConsumerBase::OnFrameReplaced(const BufferItem& item) {
|
void ConsumerBase::OnFrameReplaced(const BufferItem& item) {
|
||||||
std::unique_lock lock(mutex);
|
std::scoped_lock lock(mutex);
|
||||||
LOG_DEBUG(Service_NVFlinger, "called");
|
LOG_DEBUG(Service_NVFlinger, "called");
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConsumerBase::OnBuffersReleased() {
|
void ConsumerBase::OnBuffersReleased() {
|
||||||
std::unique_lock lock(mutex);
|
std::scoped_lock lock(mutex);
|
||||||
LOG_DEBUG(Service_NVFlinger, "called");
|
LOG_DEBUG(Service_NVFlinger, "called");
|
||||||
}
|
}
|
||||||
|
|
||||||