early-access version 3449
parent 0b99ea7c93
commit 9ecf62dac4
3 changed files with 28 additions and 80 deletions
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 3448.
+This is the source code for early-access 3449.
 
 ## Legal Notice
 
@@ -47,41 +47,24 @@ Scheduler::Scheduler(const Device& device_, StateTracker& state_tracker_)
 Scheduler::~Scheduler() = default;
 
 void Scheduler::Flush(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
-    // When flushing, we only send data to the worker thread; no waiting is necessary.
     SubmitExecution(signal_semaphore, wait_semaphore);
     AllocateNewContext();
 }
 
 void Scheduler::Finish(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
-    // When finishing, we need to wait for the submission to have executed on the device.
     const u64 presubmit_tick = CurrentTick();
     SubmitExecution(signal_semaphore, wait_semaphore);
-    DrainRequests();
+    WaitWorker();
     Wait(presubmit_tick);
     AllocateNewContext();
 }
 
-void Scheduler::DrainRequests() {
-    MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
-    DispatchWork();
-
-    // Wait until the queue is empty and the queue lock can be held.
-    // This drains the queue.
-    std::unique_lock ql{queue_mutex};
-    event_cv.wait(ql, [this] { return work_queue.empty(); });
-}
-
 void Scheduler::WaitWorker() {
     MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
     DispatchWork();
 
-    // Wait until the queue is empty and the execution lock can be held.
-    // This ensures Vulkan is aware of everything we have done when we return.
-    std::unique_lock el{execution_mutex};
-    event_cv.wait(el, [this] {
-        std::scoped_lock ql{queue_mutex};
-        return work_queue.empty();
-    });
+    std::unique_lock lock{work_mutex};
+    wait_cv.wait(lock, [this] { return work_queue.empty(); });
 }
 
 void Scheduler::DispatchWork() {
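With DrainRequests() gone, Finish() relies on WaitWorker(): dispatch whatever has been recorded, then block on wait_cv until work_queue is empty. The standalone sketch below is illustrative only (WorkQueue and its members are invented names, not the project's API); it shows the handshake this depends on: the worker signals wait_cv whenever it finds the queue empty, and it runs each job while still holding the lock, so an empty queue really means all dispatched work has finished.

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <stop_token>
#include <thread>

class WorkQueue {
public:
    WorkQueue() : worker{[this](std::stop_token token) { Run(token); }} {}

    void Push(std::function<void()> job) {
        {
            std::scoped_lock lock{mutex};
            queue.push(std::move(job));
        }
        work_cv.notify_one(); // wake the worker, outside the lock
    }

    // Analogue of WaitWorker(): block until everything pushed so far has run.
    void Drain() {
        std::unique_lock lock{mutex};
        wait_cv.wait(lock, [this] { return queue.empty(); });
    }

private:
    void Run(std::stop_token token) {
        std::unique_lock lock{mutex};
        while (!token.stop_requested()) {
            if (queue.empty()) {
                wait_cv.notify_all(); // tell Drain() callers we are idle
            }
            work_cv.wait(lock, token, [this] { return !queue.empty(); });
            if (token.stop_requested()) {
                break;
            }
            auto job = std::move(queue.front());
            queue.pop();
            job(); // executed under the lock, so Drain() cannot return mid-job
        }
    }

    std::mutex mutex;
    std::queue<std::function<void()>> queue;
    std::condition_variable_any work_cv;
    std::condition_variable wait_cv;
    std::jthread worker;
};
```

In these terms, Flush() roughly corresponds to pushing work without draining, while Finish() pushes and then drains before the caller continues.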
@@ -89,10 +72,10 @@ void Scheduler::DispatchWork() {
         return;
     }
     {
-        std::scoped_lock ql{queue_mutex};
+        std::scoped_lock lock{work_mutex};
         work_queue.push(std::move(chunk));
     }
-    event_cv.notify_all();
+    work_cv.notify_one();
     AcquireNewChunk();
 }
 
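DispatchWork() is the producer side of that queue: the chunk currently being recorded is moved into work_queue under work_mutex, work_cv is notified after the lock is released, and a fresh chunk is acquired so recording can continue immediately. A minimal sketch of that hand-off, producer side only and with invented names (Chunk, Recorder) standing in for the scheduler's types:

```cpp
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <utility>
#include <vector>

struct Chunk {
    std::vector<std::function<void()>> commands;
    bool Empty() const { return commands.empty(); }
};

class Recorder {
public:
    template <typename F>
    void Record(F&& command) {
        current->commands.emplace_back(std::forward<F>(command));
    }

    void Dispatch() {
        if (current->Empty()) {
            return; // nothing recorded since the last dispatch
        }
        {
            std::scoped_lock lock{work_mutex};
            work_queue.push(std::move(current));
        }
        work_cv.notify_one();                // wake the consumer, outside the lock
        current = std::make_unique<Chunk>(); // stand-in for AcquireNewChunk()
    }

private:
    std::unique_ptr<Chunk> current = std::make_unique<Chunk>();
    std::mutex work_mutex;
    std::queue<std::unique_ptr<Chunk>> work_queue;
    std::condition_variable_any work_cv;
};
```

The real scheduler records deferred Vulkan commands into the chunk rather than std::function objects, but the locking discipline is the same: keep the critical section to a single queue push and notify outside it.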
@@ -154,59 +137,30 @@ bool Scheduler::UpdateRescaling(bool is_rescaling) {
 
 void Scheduler::WorkerThread(std::stop_token stop_token) {
     Common::SetCurrentThreadName("VulkanWorker");
-
-    const auto TryPopQueue{[this](auto& work) -> bool {
-        std::scoped_lock ql{queue_mutex};
-        if (work_queue.empty()) {
-            return false;
-        }
-
-        work = std::move(work_queue.front());
-        work_queue.pop();
-        event_cv.notify_all();
-        return true;
-    }};
-
-    while (!stop_token.stop_requested()) {
+    do {
         std::unique_ptr<CommandChunk> work;
-
+        bool has_submit{false};
         {
-            std::unique_lock el{execution_mutex};
-
-            // Wait for work.
-            Common::CondvarWait(event_cv, el, stop_token, [&] {
-                std::scoped_lock ql{queue_mutex};
-                return !work_queue.empty();
-            });
-
-            // If we've been asked to stop, we're done.
-            if (stop_token.stop_requested()) {
-                return;
+            std::unique_lock lock{work_mutex};
+            if (work_queue.empty()) {
+                wait_cv.notify_all();
             }
-
-            // If we don't have any work, restart from the top.
-            if (!TryPopQueue(work)) {
+            Common::CondvarWait(work_cv, lock, stop_token, [&] { return !work_queue.empty(); });
+            if (stop_token.stop_requested()) {
                 continue;
             }
+            work = std::move(work_queue.front());
+            work_queue.pop();
 
-            // Perform the work, tracking whether the chunk was a submission
-            // before executing.
-            const bool has_submit = work->HasSubmit();
+            has_submit = work->HasSubmit();
             work->ExecuteAll(current_cmdbuf);
-
-            // If the chunk was a submission, reallocate the command buffer.
-            if (has_submit) {
-                AllocateWorkerCommandBuffer();
-            }
         }
-
-        {
-            std::scoped_lock rl{reserve_mutex};
-
-            // Recycle the chunk back to the reserve.
-            chunk_reserve.emplace_back(std::move(work));
+        if (has_submit) {
+            AllocateWorkerCommandBuffer();
         }
-    }
+        std::scoped_lock reserve_lock{reserve_mutex};
+        chunk_reserve.push_back(std::move(work));
+    } while (!stop_token.stop_requested());
 }
 
 void Scheduler::AllocateWorkerCommandBuffer() {
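The added worker loop pairs std::jthread with a stop_token-aware condition-variable wait so shutdown can interrupt a worker blocked on an empty queue. Common::CondvarWait is a yuzu helper that does not appear in this diff; judging from the call site it behaves like the stop_token overload of std::condition_variable_any::wait, which is what the stand-in below assumes.

```cpp
#include <condition_variable>
#include <mutex>
#include <queue>
#include <stop_token>
#include <thread>
#include <utility>

template <typename Condvar, typename Lock, typename Pred>
void CondvarWait(Condvar& cv, Lock& lock, std::stop_token token, Pred&& pred) {
    // Returns when pred() is true or when stop is requested, whichever comes first.
    cv.wait(lock, token, std::forward<Pred>(pred));
}

int main() {
    std::mutex mutex;
    std::condition_variable_any cv;
    std::queue<int> queue;

    std::jthread worker{[&](std::stop_token token) {
        do {
            std::unique_lock lock{mutex};
            CondvarWait(cv, lock, token, [&] { return !queue.empty(); });
            if (token.stop_requested()) {
                continue; // fall through to the loop condition and exit
            }
            queue.pop(); // "execute" one unit of work
        } while (!token.stop_requested());
    }};

    {
        std::scoped_lock lock{mutex};
        queue.push(42);
    }
    cv.notify_one();
    // ~jthread requests stop and joins; the stop request wakes the blocked wait.
}
```

The continue on stop_requested mirrors the added code: instead of returning from inside the critical section, the loop releases the lock, falls through to its while condition, and exits.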
@@ -335,16 +289,13 @@ void Scheduler::EndRenderPass() {
 }
 
 void Scheduler::AcquireNewChunk() {
-    std::scoped_lock rl{reserve_mutex};
-
+    std::scoped_lock lock{reserve_mutex};
     if (chunk_reserve.empty()) {
-        // If we don't have anything reserved, we need to make a new chunk.
         chunk = std::make_unique<CommandChunk>();
-    } else {
-        // Otherwise, we can just take from the reserve.
-        chunk = std::make_unique<CommandChunk>();
-        chunk_reserve.pop_back();
+        return;
     }
+    chunk = std::move(chunk_reserve.back());
+    chunk_reserve.pop_back();
 }
 
 } // namespace Vulkan
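AcquireNewChunk() is a small object pool: take a chunk back from chunk_reserve when one is available, otherwise allocate a fresh one; the worker returns executed chunks to the reserve (see the WorkerThread hunk above). A generic sketch of the pattern, assuming recycled objects are reset elsewhere before reuse (the names Pool, Acquire, and Release are invented):

```cpp
#include <memory>
#include <mutex>
#include <vector>

template <typename T>
class Pool {
public:
    std::unique_ptr<T> Acquire() {
        std::scoped_lock lock{mutex};
        if (reserve.empty()) {
            return std::make_unique<T>(); // nothing to reuse, allocate fresh
        }
        std::unique_ptr<T> item = std::move(reserve.back());
        reserve.pop_back();
        return item;
    }

    void Release(std::unique_ptr<T> item) {
        std::scoped_lock lock{mutex};
        reserve.push_back(std::move(item)); // recycle for the next Acquire()
    }

private:
    std::mutex mutex;
    std::vector<std::unique_ptr<T>> reserve;
};
```

Keeping the reserve as a vector of unique_ptr makes recycling a pointer move, and the pool only ever grows, which suits a steady per-frame allocation pattern.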
@@ -39,9 +39,6 @@ public:
     /// Sends the current execution context to the GPU and waits for it to complete.
     void Finish(VkSemaphore signal_semaphore = nullptr, VkSemaphore wait_semaphore = nullptr);
 
-    /// Waits for the worker thread to begin executing everything.
-    void DrainRequests();
-
     /// Waits for the worker thread to finish executing everything. After this function returns it's
     /// safe to touch worker resources.
     void WaitWorker();
@@ -235,10 +232,10 @@ private:
 
     std::queue<std::unique_ptr<CommandChunk>> work_queue;
     std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
-    std::mutex execution_mutex;
     std::mutex reserve_mutex;
-    std::mutex queue_mutex;
-    std::condition_variable_any event_cv;
+    std::mutex work_mutex;
+    std::condition_variable_any work_cv;
+    std::condition_variable wait_cv;
     std::jthread worker_thread;
 };
 