early-access version 2610

pineappleEA 2022-03-26 14:36:57 +01:00
parent b3c57a4768
commit 3996303be5
15 changed files with 383 additions and 248 deletions

View File

@@ -1,7 +1,7 @@
yuzu emulator early access
=============
This is the source code for early-access 2606.
This is the source code for early-access 2610.
## Legal Notice

View File

@@ -36,7 +36,6 @@ if (MSVC)
# /GT - Supports fiber safety for data allocated using static thread-local storage
add_compile_options(
/MP
/Zf
/Zi
/Zm200
/Zo
@@ -82,7 +81,7 @@ if (MSVC)
add_compile_options("$<$<CONFIG:Release>:/GS->")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG /MANIFEST:NO" CACHE STRING "" FORCE)
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
else()
add_compile_options(
-Wall

View File

@@ -32,14 +32,14 @@ assert_noinline_call(const Fn& fn) {
#define ASSERT(_a_) \
do \
if (!(_a_)) [[unlikely]] { \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) [[unlikely]] { \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
} \
while (0)
@@ -70,7 +70,7 @@ assert_noinline_call(const Fn& fn) {
#define ASSERT_OR_EXECUTE(_a_, _b_) \
do { \
ASSERT(_a_); \
if (!(_a_)) [[unlikely]] { \
if (!(_a_)) { \
_b_ \
} \
} while (0)
@@ -79,7 +79,7 @@ assert_noinline_call(const Fn& fn) {
#define ASSERT_OR_EXECUTE_MSG(_a_, _b_, ...) \
do { \
ASSERT_MSG(_a_, __VA_ARGS__); \
if (!(_a_)) [[unlikely]] { \
if (!(_a_)) { \
_b_ \
} \
} while (0)
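(Aside: a minimal sketch of why these assert macros wrap their body in do { ... } while (0) — the wrapper turns the whole expansion into a single statement, so the macro composes safely with if/else at call sites. The names below are hypothetical, not yuzu's.)

#include <cstdio>

// The do { ... } while (0) wrapper makes the expansion one statement.
#define CHECK_OR_LOG(cond)                     \
    do {                                       \
        if (!(cond)) {                         \
            std::puts("check failed: " #cond); \
        }                                      \
    } while (0)

int main() {
    int value = 3;
    // Without the wrapper, the semicolon after the macro call would close
    // the if-branch early and leave this else dangling.
    if (value > 0)
        CHECK_OR_LOG(value % 2 == 0);
    else
        std::puts("non-positive");
    return 0;
}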

View File

@@ -28,7 +28,8 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
auto& page_table = m_owner->PageTable();
// Construct the page group.
m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize));
m_page_group =
KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
// Lock the memory.
R_TRY(page_table.LockForCodeMemory(addr, size))
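(For context: the page group has to track the physical pages backing addr, hence the GetPhysicalAddr translation before constructing it. A rough sketch of that step against a hypothetical page-table interface, not the kernel's:)

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unordered_map>

using VAddr = std::uint64_t;
using PAddr = std::uint64_t;

// Hypothetical stand-in for a page table's virtual-to-physical lookup.
struct PageTable {
    std::unordered_map<VAddr, PAddr> mapping;
    PAddr GetPhysicalAddr(VAddr addr) const { return mapping.at(addr); }
};

// A page group records physical pages, so translate before construction.
struct PageGroup {
    PAddr base;
    std::size_t num_pages;
};

int main() {
    PageTable table;
    table.mapping[0x10000] = 0x80000; // virtual 0x10000 -> physical 0x80000
    const PageGroup group{table.GetPhysicalAddr(0x10000), 4};
    assert(group.base == 0x80000); // grouped by physical, not virtual, address
    return 0;
}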

View File

@@ -89,6 +89,10 @@ public:
return ResultSuccess;
}
bool Empty() const {
return nodes.empty();
}
private:
std::list<Node> nodes;
};

View File

@@ -486,6 +486,58 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
return address;
}
ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
// We're making a new group, not adding to an existing one.
R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
// Prepare tracking variables.
PAddr cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
// Iterate, adding to group as we go.
const auto& memory_layout = system.Kernel().MemoryLayout();
while (tot_size < size) {
R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
ResultInvalidCurrentMemory);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
const size_t cur_pages = cur_size / PageSize;
R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
R_TRY(pg.AddBlock(cur_addr, cur_pages));
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
// Ensure we add the right amount for the last block.
if (tot_size > size) {
cur_size -= (tot_size - size);
}
// Add the last block.
const size_t cur_pages = cur_size / PageSize;
R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
R_TRY(pg.AddBlock(cur_addr, cur_pages));
return ResultSuccess;
}
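(A standalone sketch of the coalescing idea in MakePageGroup: walk the mapped chunks in virtual order and merge runs whose physical addresses are contiguous into single blocks. Types and input are illustrative only:)

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct Block {
    std::uint64_t phys_addr;
    std::size_t size;
};

// Merge a virtually-ordered list of physical chunks into maximal
// physically-contiguous blocks, mirroring the traversal loop above.
std::vector<Block> Coalesce(const std::vector<Block>& chunks) {
    std::vector<Block> out;
    for (const Block& c : chunks) {
        if (!out.empty() && out.back().phys_addr + out.back().size == c.phys_addr) {
            out.back().size += c.size; // physically contiguous: extend the block
        } else {
            out.push_back(c); // discontinuity: start a new block
        }
    }
    return out;
}

int main() {
    // Three 4 KiB chunks; the first two are physically contiguous.
    const std::vector<Block> chunks{{0x1000, 0x1000}, {0x2000, 0x1000}, {0x8000, 0x1000}};
    for (const Block& b : Coalesce(chunks)) {
        std::cout << std::hex << b.phys_addr << " +" << b.size << '\n';
    }
    return 0; // prints "1000 +2000" then "8000 +1000"
}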
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
KScopedLightLock lk(general_lock);
@@ -1223,6 +1275,31 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
return ResultSuccess;
}
ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) {
// Ensure that the page group isn't null.
ASSERT(out != nullptr);
// Make sure that the region we're mapping is valid for the table.
const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(general_lock);
// Check if state allows us to create the group.
R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
attr_mask, attr));
// Create a new page group for the region.
R_TRY(this->MakePageGroup(*out, address, num_pages));
return ResultSuccess;
}
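(R_UNLESS and R_TRY are early-return helpers: R_UNLESS bails out with a given error when a condition fails, and R_TRY propagates a failing ResultCode to the caller. A simplified, self-contained sketch of the pattern — not the kernel's actual macros:)

#include <cstdio>

struct Result {
    int code;
    bool IsError() const { return code != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalid{1};

// Propagate a failing result to the caller.
#define R_TRY(expr)                     \
    do {                                \
        const Result r_try_rc = (expr); \
        if (r_try_rc.IsError()) {       \
            return r_try_rc;            \
        }                               \
    } while (0)

// Return the given error unless the condition holds.
#define R_UNLESS(cond, rc) \
    do {                   \
        if (!(cond)) {     \
            return (rc);   \
        }                  \
    } while (0)

Result Validate(int num_pages) {
    R_UNLESS(num_pages > 0, ResultInvalid);
    return ResultSuccess;
}

Result MapExample(int num_pages) {
    R_TRY(Validate(num_pages)); // early-returns ResultInvalid when num_pages <= 0
    return ResultSuccess;
}

int main() {
    std::printf("%d %d\n", MapExample(4).code, MapExample(0).code); // prints "0 1"
    return 0;
}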
ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
@@ -1605,57 +1682,21 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
}
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
KMemoryPermission old_perm{};
if (const ResultCode result{CheckMemoryState(
nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
result.IsError()) {
return result;
}
new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
block_manager->UpdateLock(
addr, size / PageSize,
[](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
block->ShareToDevice(permission);
},
new_perm);
return ResultSuccess;
return this->LockMemoryAndOpen(
nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::All, KMemoryAttribute::None,
static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
KMemoryPermission::KernelReadWrite),
KMemoryAttribute::Locked);
}
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
KMemoryPermission old_perm{};
if (const ResultCode result{CheckMemoryState(
nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::All, KMemoryAttribute::Locked)};
result.IsError()) {
return result;
}
new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
block_manager->UpdateLock(
addr, size / PageSize,
[](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
block->UnshareToDevice(permission);
},
new_perm);
return ResultSuccess;
return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
KMemoryAttribute::Locked, nullptr);
}
ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1991,4 +2032,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}
ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
// Validate basic preconditions.
ASSERT((lock_attr & attr) == KMemoryAttribute::None);
ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
// Validate the lock request.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(general_lock);
// Check that the output page group is empty, if it exists.
if (out_pg) {
ASSERT(out_pg->GetNumPages() == 0);
}
// Check the state.
KMemoryState old_state{};
KMemoryPermission old_perm{};
KMemoryAttribute old_attr{};
size_t num_allocator_blocks{};
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
std::addressof(old_attr), std::addressof(num_allocator_blocks),
addr, size, state_mask | KMemoryState::FlagReferenceCounted,
state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
attr_mask, attr));
// Get the physical address, if we're supposed to.
if (out_paddr != nullptr) {
ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
}
// Make the page group, if we're supposed to.
if (out_pg != nullptr) {
R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
}
// Decide on new perm and attr.
new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
// Update permission, if we need to.
if (new_perm != old_perm) {
R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
}
// Apply the memory block updates.
block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
return ResultSuccess;
}
ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryPermission new_perm,
KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
// Validate basic preconditions.
ASSERT((attr_mask & lock_attr) == lock_attr);
ASSERT((attr & lock_attr) == lock_attr);
// Validate the unlock request.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(general_lock);
// Check the state.
KMemoryState old_state{};
KMemoryPermission old_perm{};
KMemoryAttribute old_attr{};
size_t num_allocator_blocks{};
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
std::addressof(old_attr), std::addressof(num_allocator_blocks),
addr, size, state_mask | KMemoryState::FlagReferenceCounted,
state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
attr_mask, attr));
// Check the page group.
if (pg != nullptr) {
UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
}
// Decide on new perm and attr.
new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
// Update permission, if we need to.
if (new_perm != old_perm) {
R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
}
// Apply the memory block updates.
block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
return ResultSuccess;
}
} // namespace Kernel
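(Note the symmetry between the two helpers above: LockMemoryAndOpen folds the lock attribute in with old_attr | lock_attr, while UnlockMemory clears it with old_attr & ~lock_attr. A tiny flag sketch with a hypothetical attribute enum:)

#include <cassert>
#include <cstdint>

enum Attr : std::uint32_t {
    None = 0,
    Locked = 1u << 0,
    DeviceShared = 1u << 1,
};

int main() {
    std::uint32_t attr = DeviceShared;
    attr |= Locked; // lock: new_attr = old_attr | lock_attr
    assert(attr == (DeviceShared | Locked));
    attr &= ~static_cast<std::uint32_t>(Locked); // unlock: old_attr & ~lock_attr
    assert(attr == DeviceShared);
    return 0;
}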

View File

@@ -12,6 +12,7 @@
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@@ -71,6 +72,10 @@ public:
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -159,10 +164,37 @@ private:
attr_mask, attr, ignore_attr);
}
ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr);
ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr,
const KPageLinkedList* pg);
ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
}
bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
ASSERT(this->IsLockedByCurrentThread());
return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
}
bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
ASSERT(this->IsLockedByCurrentThread());
*out = GetPhysicalAddr(virt_addr);
return *out != 0;
}
mutable KLightLock general_lock;
mutable KLightLock map_physical_memory_lock;
@@ -322,6 +354,7 @@ private:
bool is_aslr_enabled{};
u32 heap_fill_value{};
const KMemoryRegion* cached_physical_heap_region{};
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};

View File

@@ -1362,8 +1362,11 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
ResultInvalidMemoryRegion);
// Create a new page group.
KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
KPageLinkedList pg;
R_TRY(src_pt.MakeAndOpenPageGroup(
std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::All, KMemoryAttribute::None));
// Map the group.
R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
@@ -1408,8 +1411,8 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
}
static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
static_cast<void*>(out), address, size);
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
// Get kernel instance.
auto& kernel = system.Kernel();
@@ -1664,7 +1667,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidAddress;
}
if (size == 0 || Common::Is4KBAligned(size)) {
if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
return ResultInvalidSize;
}
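(The corrected guard rejects a size that is zero or not page-aligned. A 4 KiB alignment check of this kind typically reduces to a low-bits mask test; a minimal sketch, with the helper reimplemented here for illustration:)

#include <cassert>
#include <cstddef>

// A 4 KiB-aligned value has its low 12 bits clear.
constexpr bool Is4KBAligned(std::size_t value) {
    return (value & 0xFFF) == 0;
}

int main() {
    assert(Is4KBAligned(0x2000));  // aligned: accepted
    assert(!Is4KBAligned(0x2001)); // misaligned: rejected
    // The fixed condition rejects size == 0 or a misaligned size.
    const std::size_t size = 0x1800;
    const bool invalid = (size == 0 || !Is4KBAligned(size));
    assert(invalid);
    return 0;
}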

View File

@@ -21,7 +21,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
return Status::BadValue;
}
std::unique_lock lock(mutex);
std::scoped_lock lock(mutex);
if (const auto status = AcquireBufferLocked(item, present_when); status != Status::NoError) {
if (status != Status::NoBufferAvailable) {
@ -40,7 +40,7 @@ Status BufferItemConsumer::AcquireBuffer(BufferItem* item, std::chrono::nanoseco
}
Status BufferItemConsumer::ReleaseBuffer(const BufferItem& item, Fence& release_fence) {
std::unique_lock lock(mutex);
std::scoped_lock lock(mutex);
if (const auto status = AddReleaseFenceLocked(item.buf, item.graphic_buffer, release_fence);
status != Status::NoError) {
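(std::scoped_lock is the simpler guard when the lock is held for the whole scope and never unlocked early, moved, or handed to a plain std::condition_variable; std::unique_lock carries extra ownership state to support those cases. A minimal sketch:)

#include <mutex>

std::mutex mutex;
int counter = 0;

void Increment() {
    // Acquires on construction, releases at scope exit; no unlock()
    // or ownership-transfer machinery, unlike std::unique_lock.
    std::scoped_lock lock(mutex);
    ++counter;
}

int main() {
    Increment();
    return counter == 1 ? 0 : 1;
}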

View File

@@ -20,122 +20,102 @@ BufferQueueConsumer::~BufferQueueConsumer() = default;
Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
std::chrono::nanoseconds expected_present,
u64 max_frame_number) {
s32 num_dropped_buffers{};
std::scoped_lock lock(core->mutex);
std::shared_ptr<IProducerListener> listener;
{
std::unique_lock lock(core->mutex);
// Check that the consumer doesn't currently have the maximum number of buffers acquired.
const s32 num_acquired_buffers{
static_cast<s32>(std::count_if(slots.begin(), slots.end(), [](const auto& slot) {
return slot.buffer_state == BufferState::Acquired;
}))};
// Check that the consumer doesn't currently have the maximum number of buffers acquired.
const s32 num_acquired_buffers{
static_cast<s32>(std::count_if(slots.begin(), slots.end(), [](const auto& slot) {
return slot.buffer_state == BufferState::Acquired;
}))};
if (num_acquired_buffers >= core->max_acquired_buffer_count + 1) {
LOG_ERROR(Service_NVFlinger, "max acquired buffer count reached: {} (max {})",
num_acquired_buffers, core->max_acquired_buffer_count);
return Status::InvalidOperation;
}
if (num_acquired_buffers >= core->max_acquired_buffer_count + 1) {
LOG_ERROR(Service_NVFlinger, "max acquired buffer count reached: {} (max {})",
num_acquired_buffers, core->max_acquired_buffer_count);
return Status::InvalidOperation;
}
// Check if the queue is empty.
if (core->queue.empty()) {
return Status::NoBufferAvailable;
}
// Check if the queue is empty.
if (core->queue.empty()) {
return Status::NoBufferAvailable;
}
auto front(core->queue.begin());
auto front(core->queue.begin());
// If expected_present is specified, we may not want to return a buffer yet.
if (expected_present.count() != 0) {
constexpr auto MAX_REASONABLE_NSEC = 1000000000LL; // 1 second
// If expected_present is specified, we may not want to return a buffer yet.
if (expected_present.count() != 0) {
constexpr auto MAX_REASONABLE_NSEC = 1000000000LL; // 1 second
// The expected_present argument indicates when the buffer is expected to be presented
// on-screen.
while (core->queue.size() > 1 && !core->queue[0].is_auto_timestamp) {
const auto& buffer_item{core->queue[1]};
// The expected_present argument indicates when the buffer is expected to be
// presented on-screen.
while (core->queue.size() > 1 && !core->queue[0].is_auto_timestamp) {
const auto& buffer_item{core->queue[1]};
// If dropping entry[0] would leave us with a buffer that the consumer is not yet
// ready for, don't drop it.
if (max_frame_number && buffer_item.frame_number > max_frame_number) {
break;
}
// If entry[1] is timely, drop entry[0] (and repeat).
const auto desired_present = buffer_item.timestamp;
if (desired_present < expected_present.count() - MAX_REASONABLE_NSEC ||
desired_present > expected_present.count()) {
// This buffer is set to display in the near future, or desired_present is
// garbage.
LOG_DEBUG(Service_NVFlinger, "nodrop desire={} expect={}", desired_present,
expected_present.count());
break;
}
LOG_DEBUG(Service_NVFlinger, "drop desire={} expect={} size={}", desired_present,
expected_present.count(), core->queue.size());
if (core->StillTracking(*front)) {
// Front buffer is still in mSlots, so mark the slot as free
slots[front->slot].buffer_state = BufferState::Free;
core->free_buffers.push_back(front->slot);
listener = core->connected_producer_listener;
++num_dropped_buffers;
}
core->queue.erase(front);
front = core->queue.begin();
// If dropping entry[0] would leave us with a buffer that the consumer is not yet ready
// for, don't drop it.
if (max_frame_number && buffer_item.frame_number > max_frame_number) {
break;
}
// See if the front buffer is ready to be acquired.
const auto desired_present = front->timestamp;
const auto buffer_is_due =
desired_present <= expected_present.count() ||
desired_present > expected_present.count() + MAX_REASONABLE_NSEC;
const auto consumer_is_ready =
max_frame_number > 0 ? front->frame_number <= max_frame_number : true;
if (!buffer_is_due || !consumer_is_ready) {
LOG_DEBUG(Service_NVFlinger, "defer desire={} expect={}", desired_present,
// If entry[1] is timely, drop entry[0] (and repeat).
const auto desired_present = buffer_item.timestamp;
if (desired_present < expected_present.count() - MAX_REASONABLE_NSEC ||
desired_present > expected_present.count()) {
// This buffer is set to display in the near future, or desired_present is garbage.
LOG_DEBUG(Service_NVFlinger, "nodrop desire={} expect={}", desired_present,
expected_present.count());
return Status::PresentLater;
break;
}
LOG_DEBUG(Service_NVFlinger, "accept desire={} expect={}", desired_present,
LOG_DEBUG(Service_NVFlinger, "drop desire={} expect={} size={}", desired_present,
expected_present.count(), core->queue.size());
if (core->StillTracking(*front)) {
// Front buffer is still in mSlots, so mark the slot as free
slots[front->slot].buffer_state = BufferState::Free;
}
core->queue.erase(front);
front = core->queue.begin();
}
// See if the front buffer is ready to be acquired.
const auto desired_present = front->timestamp;
if (desired_present > expected_present.count() &&
desired_present < expected_present.count() + MAX_REASONABLE_NSEC) {
LOG_DEBUG(Service_NVFlinger, "defer desire={} expect={}", desired_present,
expected_present.count());
return Status::PresentLater;
}
const auto slot = front->slot;
*out_buffer = *front;
LOG_DEBUG(Service_NVFlinger, "acquiring slot={}", slot);
// If the front buffer is still being tracked, update its slot state
if (core->StillTracking(*front)) {
slots[slot].acquire_called = true;
slots[slot].needs_cleanup_on_release = false;
slots[slot].buffer_state = BufferState::Acquired;
slots[slot].fence = Fence::NoFence();
}
// If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr
// to avoid unnecessarily remapping this buffer on the consumer side.
if (out_buffer->acquire_called) {
out_buffer->graphic_buffer = nullptr;
}
core->queue.erase(front);
// We might have freed a slot while dropping old buffers, or the producer may be blocked
// waiting for the number of buffers in the queue to decrease.
core->SignalDequeueCondition();
LOG_DEBUG(Service_NVFlinger, "accept desire={} expect={}", desired_present,
expected_present.count());
}
if (listener != nullptr) {
for (s32 i = 0; i < num_dropped_buffers; ++i) {
listener->OnBufferReleased();
}
const auto slot = front->slot;
*out_buffer = *front;
LOG_DEBUG(Service_NVFlinger, "acquiring slot={}", slot);
// If the front buffer is still being tracked, update its slot state
if (core->StillTracking(*front)) {
slots[slot].acquire_called = true;
slots[slot].needs_cleanup_on_release = false;
slots[slot].buffer_state = BufferState::Acquired;
slots[slot].fence = Fence::NoFence();
}
// If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr to
// avoid unnecessarily remapping this buffer on the consumer side.
if (out_buffer->acquire_called) {
out_buffer->graphic_buffer = nullptr;
}
core->queue.erase(front);
// We might have freed a slot while dropping old buffers, or the producer may be blocked
// waiting for the number of buffers in the queue to decrease.
core->SignalDequeueCondition();
return Status::NoError;
}
@@ -147,7 +127,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
std::shared_ptr<IProducerListener> listener;
{
std::unique_lock lock(core->mutex);
std::scoped_lock lock(core->mutex);
// If the frame number has changed because the buffer has been reallocated, we can ignore
// this ReleaseBuffer for the old buffer.
@@ -170,8 +150,6 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
slots[slot].fence = release_fence;
slots[slot].buffer_state = BufferState::Free;
core->free_buffers.push_back(slot);
listener = core->connected_producer_listener;
LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot);
@@ -189,7 +167,7 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
return Status::BadValue;
}
core->dequeue_condition.notify_all();
core->SignalDequeueCondition();
}
// Call back without lock held
@@ -209,7 +187,7 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_
LOG_DEBUG(Service_NVFlinger, "controlled_by_app={}", controlled_by_app);
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");

View File

@@ -10,16 +10,12 @@
namespace Service::android {
BufferQueueCore::BufferQueueCore() : lock{mutex, std::defer_lock} {
for (s32 slot = 0; slot < BufferQueueDefs::NUM_BUFFER_SLOTS; ++slot) {
free_slots.insert(slot);
}
}
BufferQueueCore::BufferQueueCore() = default;
BufferQueueCore::~BufferQueueCore() = default;
void BufferQueueCore::NotifyShutdown() {
std::unique_lock lk(mutex);
std::scoped_lock lock(mutex);
is_shutting_down = true;
@@ -35,7 +31,7 @@ bool BufferQueueCore::WaitForDequeueCondition() {
return false;
}
dequeue_condition.wait(lock);
dequeue_condition.wait(mutex);
return true;
}
@@ -86,26 +82,15 @@ s32 BufferQueueCore::GetPreallocatedBufferCountLocked() const {
void BufferQueueCore::FreeBufferLocked(s32 slot) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
const auto had_buffer = slots[slot].graphic_buffer != nullptr;
slots[slot].graphic_buffer.reset();
if (slots[slot].buffer_state == BufferState::Acquired) {
slots[slot].needs_cleanup_on_release = true;
}
if (slots[slot].buffer_state != BufferState::Free) {
free_slots.insert(slot);
} else if (had_buffer) {
// If the slot was FREE, but we had a buffer, we need to move this slot from the free
// buffers list to the free slots list.
free_buffers.remove(slot);
free_slots.insert(slot);
}
slots[slot].buffer_state = BufferState::Free;
slots[slot].frame_number = UINT32_MAX;
slots[slot].acquire_called = false;
slots[slot].frame_number = 0;
slots[slot].fence = Fence::NoFence();
}
@@ -126,8 +111,7 @@ bool BufferQueueCore::StillTracking(const BufferItem& item) const {
void BufferQueueCore::WaitWhileAllocatingLocked() const {
while (is_allocating) {
std::unique_lock lk(mutex);
is_allocating_condition.wait(lk);
is_allocating_condition.wait(mutex);
}
}
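(std::condition_variable_any can wait on any BasicLockable, including std::mutex itself — which is what allows these waits to happen while the caller's std::scoped_lock holds the mutex. A minimal sketch of the pattern:)

#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex mutex;
std::condition_variable_any cond; // waits on any BasicLockable, e.g. std::mutex
bool ready = false;

void Consumer() {
    std::scoped_lock lock(mutex);
    // wait() unlocks the mutex while blocked and relocks it before
    // returning, so the predicate is always checked under the lock.
    while (!ready) {
        cond.wait(mutex);
    }
}

void Producer() {
    {
        std::scoped_lock lock(mutex);
        ready = true;
    }
    cond.notify_all();
}

int main() {
    std::thread consumer(Consumer);
    std::thread producer(Producer);
    consumer.join();
    producer.join();
    return 0;
}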

View File

@@ -49,24 +49,8 @@ private:
bool StillTracking(const BufferItem& item) const;
void WaitWhileAllocatingLocked() const;
private:
class AutoLock final {
public:
AutoLock(std::shared_ptr<BufferQueueCore>& core_) : core{core_} {
core->lock.lock();
}
~AutoLock() {
core->lock.unlock();
}
private:
std::shared_ptr<BufferQueueCore>& core;
};
private:
mutable std::mutex mutex;
mutable std::unique_lock<std::mutex> lock;
bool is_abandoned{};
bool consumer_controlled_by_app{};
std::shared_ptr<IConsumerListener> consumer_listener;
@@ -75,10 +59,8 @@ private:
std::shared_ptr<IProducerListener> connected_producer_listener;
BufferQueueDefs::SlotsType slots{};
std::vector<BufferItem> queue;
std::set<s32> free_slots;
std::list<s32> free_buffers;
s32 override_max_buffer_count{};
mutable std::condition_variable dequeue_condition;
mutable std::condition_variable_any dequeue_condition;
const bool use_async_buffer{}; // This is always disabled on HOS
bool dequeue_buffer_cannot_block{};
PixelFormat default_buffer_format{PixelFormat::Rgba8888};
@@ -90,7 +72,7 @@ private:
u64 frame_counter{};
u32 transform_hint{};
bool is_allocating{};
mutable std::condition_variable is_allocating_condition;
mutable std::condition_variable_any is_allocating_condition;
bool allow_allocation{true};
u64 buffer_age{};
bool is_shutting_down{};

View File

@@ -38,7 +38,7 @@ BufferQueueProducer::~BufferQueueProducer() {
Status BufferQueueProducer::RequestBuffer(s32 slot, std::shared_ptr<GraphicBuffer>* buf) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -65,7 +65,7 @@ Status BufferQueueProducer::SetBufferCount(s32 buffer_count) {
std::shared_ptr<IConsumerListener> listener;
{
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -156,6 +156,14 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found,
case BufferState::Acquired:
++acquired_count;
break;
case BufferState::Free:
// We return the oldest of the free buffers to avoid stalling the producer if
// possible, since the consumer may still have pending reads of in-flight buffers
if (*found == BufferQueueCore::INVALID_BUFFER_SLOT ||
slots[s].frame_number < slots[*found].frame_number) {
*found = s;
}
break;
default:
break;
}
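(Returning the free slot with the smallest frame number hands the producer the buffer queued longest ago, the one least likely to still have pending consumer reads. A standalone sketch of that selection with a hypothetical slot type:)

#include <array>
#include <cstdint>
#include <iostream>

struct Slot {
    bool free;
    std::uint64_t frame_number;
};

constexpr int INVALID_SLOT = -1;

// Pick the free slot with the lowest frame number, i.e. the one whose
// previous contents were queued longest ago.
int FindOldestFreeSlot(const std::array<Slot, 4>& all_slots) {
    int found = INVALID_SLOT;
    for (int s = 0; s < static_cast<int>(all_slots.size()); ++s) {
        if (all_slots[s].free &&
            (found == INVALID_SLOT ||
             all_slots[s].frame_number < all_slots[found].frame_number)) {
            found = s;
        }
    }
    return found;
}

int main() {
    const std::array<Slot, 4> all_slots{{{false, 10}, {true, 7}, {true, 3}, {false, 1}}};
    std::cout << FindOldestFreeSlot(all_slots) << '\n'; // prints 2
    return 0;
}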
@ -183,27 +191,12 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found,
}
}
*found = BufferQueueCore::INVALID_BUFFER_SLOT;
// If we disconnect and reconnect quickly, we can be in a state where our slots are empty
// but we have many buffers in the queue. This can cause us to run out of memory if we
// outrun the consumer. Wait here if it looks like we have too many buffers queued up.
const bool too_many_buffers = core->queue.size() > static_cast<size_t>(max_buffer_count);
if (too_many_buffers) {
LOG_ERROR(Service_NVFlinger, "queue size is {}, waiting", core->queue.size());
} else {
if (!core->free_buffers.empty()) {
auto slot = core->free_buffers.begin();
*found = *slot;
core->free_buffers.erase(slot);
} else if (core->allow_allocation && !core->free_slots.empty()) {
auto slot = core->free_slots.begin();
// Only return free slots up to the max buffer count
if (*slot < max_buffer_count) {
*found = *slot;
core->free_slots.erase(slot);
}
}
}
// If no buffer is found, or if the queue has too many buffers outstanding, wait for a
@@ -240,7 +233,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
Status return_flags = Status::NoError;
bool attached_by_consumer = false;
{
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
if (format == PixelFormat::NoFormat) {
format = core->default_buffer_format;
@ -317,12 +310,13 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
}
{
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
return Status::NoInit;
}
slots[*out_slot].frame_number = UINT32_MAX;
slots[*out_slot].graphic_buffer = graphic_buffer;
}
}
@@ -339,7 +333,7 @@ Status BufferQueueProducer::DequeueBuffer(s32* out_slot, Fence* out_fence, bool
Status BufferQueueProducer::DetachBuffer(s32 slot) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
return Status::NoInit;
@@ -374,7 +368,7 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
return Status::BadValue;
}
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
@@ -382,12 +376,21 @@ Status BufferQueueProducer::DetachNextBuffer(std::shared_ptr<GraphicBuffer>* out
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
return Status::NoInit;
}
if (core->free_buffers.empty()) {
return Status::NoMemory;
// Find the oldest valid slot
int found = BufferQueueCore::INVALID_BUFFER_SLOT;
for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
if (slots[s].buffer_state == BufferState::Free && slots[s].graphic_buffer != nullptr) {
if (found == BufferQueueCore::INVALID_BUFFER_SLOT ||
slots[s].frame_number < slots[found].frame_number) {
found = s;
}
}
}
const s32 found = core->free_buffers.front();
core->free_buffers.remove(found);
if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
return Status::NoMemory;
}
LOG_DEBUG(Service_NVFlinger, "Detached slot {}", found);
@@ -409,7 +412,7 @@ Status BufferQueueProducer::AttachBuffer(s32* out_slot,
return Status::BadValue;
}
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
Status return_flags = Status::NoError;
@@ -469,7 +472,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
BufferItem item;
{
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -554,7 +557,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
// mark it as freed
if (core->StillTracking(*front)) {
slots[front->slot].buffer_state = BufferState::Free;
core->free_buffers.push_front(front->slot);
// Reset the frame number of the freed buffer so that it is the first in line to
// be dequeued again
slots[front->slot].frame_number = 0;
}
// Overwrite the droppable buffer with the incoming one
*front = item;
@@ -582,10 +587,9 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
// Call back without the main BufferQueue lock held, but with the callback lock held so we can
// ensure that callbacks occur in order
{
std::unique_lock lock(callback_mutex);
std::scoped_lock lock(callback_mutex);
while (callback_ticket != current_callback_ticket) {
std::unique_lock<std::mutex> lk(callback_mutex);
callback_condition.wait(lk);
callback_condition.wait(callback_mutex);
}
if (frameAvailableListener != nullptr) {
@@ -604,7 +608,7 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
LOG_DEBUG(Service_NVFlinger, "slot {}", slot);
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
if (core->is_abandoned) {
LOG_ERROR(Service_NVFlinger, "BufferQueue has been abandoned");
@@ -621,8 +625,8 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
return;
}
core->free_buffers.push_front(slot);
slots[slot].buffer_state = BufferState::Free;
slots[slot].frame_number = 0;
slots[slot].fence = fence;
core->SignalDequeueCondition();
@ -630,7 +634,7 @@ void BufferQueueProducer::CancelBuffer(s32 slot, const Fence& fence) {
}
Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
if (out_value == nullptr) {
LOG_ERROR(Service_NVFlinger, "outValue was nullptr");
@@ -687,7 +691,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
Status BufferQueueProducer::Connect(const std::shared_ptr<IProducerListener>& listener,
NativeWindowApi api, bool producer_controlled_by_app,
QueueBufferOutput* output) {
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
LOG_DEBUG(Service_NVFlinger, "api = {} producer_controlled_by_app = {}", api,
producer_controlled_by_app);
@@ -745,7 +749,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
std::shared_ptr<IConsumerListener> listener;
{
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
core->WaitWhileAllocatingLocked();
@@ -795,10 +799,11 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
return Status::BadValue;
}
BufferQueueCore::AutoLock lock(core);
std::scoped_lock lock(core->mutex);
slots[slot] = {};
slots[slot].graphic_buffer = buffer;
slots[slot].frame_number = 0;
// Most games preallocate a buffer and pass a valid buffer here. However, it is possible for
// this to be called with an empty buffer; Naruto Ultimate Ninja Storm is a game that does this.
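(Related: freed and cancelled slots get frame_number = 0 in this file so that the oldest-free-slot scan in WaitForFreeSlotThenRelock hands them out first on the next dequeue — every live frame number is positive, so zero always compares as oldest. A short illustration:)

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>

int main() {
    // Frame numbers of four free slots; a just-freed slot was reset to 0.
    const std::array<std::uint64_t, 4> frame_numbers{12, 9, 0, 27};
    // The oldest-free-slot scan picks the minimum, so the reset slot wins.
    const auto oldest = std::min_element(frame_numbers.begin(), frame_numbers.end());
    assert(oldest - frame_numbers.begin() == 2);
    return 0;
}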

View File

@@ -77,7 +77,7 @@ private:
std::mutex callback_mutex;
s32 next_callback_ticket{};
s32 current_callback_ticket{};
std::condition_variable callback_condition;
std::condition_variable_any callback_condition;
};
} // namespace Service::android

View File

@@ -18,7 +18,7 @@ ConsumerBase::ConsumerBase(std::unique_ptr<BufferQueueConsumer> consumer_)
: consumer{std::move(consumer_)} {}
ConsumerBase::~ConsumerBase() {
std::unique_lock lock(mutex);
std::scoped_lock lock(mutex);
ASSERT_MSG(is_abandoned, "consumer is not abandoned!");
}
@@ -36,17 +36,17 @@ void ConsumerBase::FreeBufferLocked(s32 slot_index) {
}
void ConsumerBase::OnFrameAvailable(const BufferItem& item) {
std::unique_lock lock(mutex);
std::scoped_lock lock(mutex);
LOG_DEBUG(Service_NVFlinger, "called");
}
void ConsumerBase::OnFrameReplaced(const BufferItem& item) {
std::unique_lock lock(mutex);
std::scoped_lock lock(mutex);
LOG_DEBUG(Service_NVFlinger, "called");
}
void ConsumerBase::OnBuffersReleased() {
std::unique_lock lock(mutex);
std::scoped_lock lock(mutex);
LOG_DEBUG(Service_NVFlinger, "called");
}