early-access version 1972

This commit is contained in:
parent 03ad7255d8
commit 957c2233d1

28 changed files with 229 additions and 139 deletions
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 1971.
+This is the source code for early-access 1972.
 
 ## Legal Notice
 
@@ -494,12 +494,6 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }
 
-std::size_t System::CurrentCoreIndex() const {
-    std::size_t core = impl->kernel.GetCurrentHostThreadID();
-    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
-    return core;
-}
-
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }

@@ -205,9 +205,6 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;
 
-    /// Gets the index of the currently running CPU core
-    [[nodiscard]] std::size_t CurrentCoreIndex() const;
-
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
 
@@ -21,34 +21,25 @@ namespace Core {
 CpuManager::CpuManager(System& system_) : system{system_} {}
 CpuManager::~CpuManager() = default;
 
-void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
-    cpu_manager.RunThread(core);
+void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
+                             std::size_t core) {
+    cpu_manager.RunThread(stop_token, core);
 }
 
 void CpuManager::Initialize() {
     running_mode = true;
     if (is_multicore) {
         for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
-            core_data[core].host_thread =
-                std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
+            core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
         }
     } else {
-        core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
+        core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
     }
 }
 
 void CpuManager::Shutdown() {
     running_mode = false;
     Pause(false);
-    if (is_multicore) {
-        for (auto& data : core_data) {
-            data.host_thread->join();
-            data.host_thread.reset();
-        }
-    } else {
-        core_data[0].host_thread->join();
-        core_data[0].host_thread.reset();
-    }
 }
 
 std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {

@@ -127,17 +118,18 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
-        physical_core->ArmInterface().ClearExclusiveState();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        {
+            Kernel::KScopedDisableDispatch dd(kernel);
+            physical_core->ArmInterface().ClearExclusiveState();
+        }
     }
 }
 
 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        auto& physical_core = kernel.CurrentPhysicalCore();
-        physical_core.Idle();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        Kernel::KScopedDisableDispatch dd(kernel);
+        kernel.CurrentPhysicalCore().Idle();
     }
 }
 

@@ -145,12 +137,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.GetCurrentHostThreadID();
+        auto core = kernel.CurrentPhysicalCoreIndex();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.GetCurrentHostThreadID());
+        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
         scheduler.RescheduleCurrentCore();
     }
 }

@@ -317,7 +309,7 @@ void CpuManager::Pause(bool paused) {
     }
 }
 
-void CpuManager::RunThread(std::size_t core) {
+void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
     /// Initialization
     system.RegisterCoreThread(core);
     std::string name;

@@ -361,6 +353,10 @@ void CpuManager::RunThread(std::size_t core) {
             return;
         }
 
+        if (stop_token.stop_requested()) {
+            break;
+        }
+
         auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
         data.is_running = true;
         Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
@@ -78,9 +78,9 @@ private:
     void SingleCoreRunSuspendThread();
     void SingleCorePause(bool paused);
 
-    static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
+    static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
 
-    void RunThread(std::size_t core);
+    void RunThread(std::stop_token stop_token, std::size_t core);
 
     struct CoreData {
         std::shared_ptr<Common::Fiber> host_context;

@@ -89,7 +89,7 @@ private:
         std::atomic<bool> is_running;
         std::atomic<bool> is_paused;
        std::atomic<bool> initialized;
-        std::unique_ptr<std::thread> host_thread;
+        std::jthread host_thread;
     };
 
     std::atomic<bool> running_mode{};
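Note: the CpuManager hunks above replace std::unique_ptr<std::thread> plus manual join()/reset() with std::jthread, whose destructor requests stop and joins automatically and which hands a std::stop_token to the thread function as its first argument. The following is a minimal, self-contained sketch of that pattern, not code from this commit; the worker name, the polling loop, and the timings are invented for illustration.

// Sketch of the std::jthread + std::stop_token pattern (C++20).
#include <chrono>
#include <cstdio>
#include <thread>

static void WorkerLoop(std::stop_token stop_token, int core) {
    // Poll the token the same way the new RunThread checks stop_requested().
    while (!stop_token.stop_requested()) {
        std::printf("core %d still running\n", core);
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    std::printf("core %d asked to stop\n", core);
}

int main() {
    std::jthread worker(WorkerLoop, 0);
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    // No explicit join()/reset(): ~jthread() calls request_stop() and then join(),
    // which is why Shutdown() above no longer joins the host threads by hand.
}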
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.

@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.

@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();

@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);

@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {

@@ -61,6 +63,7 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.

@@ -83,6 +86,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.

@@ -93,6 +97,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.

@@ -111,6 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -69,6 +69,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {

@@ -123,6 +124,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
@@ -59,6 +59,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
@@ -376,21 +376,19 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
-    }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
-    }
-    RescheduleCores(kernel, cores_needing_scheduling);
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
+
+    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
+        GetCurrentThreadPointer(kernel)->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
+    }
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
     if (IsSchedulerUpdateNeeded(kernel)) {
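Note: the hunk above makes the dispatch-disable counter behave like a re-entrant guard: only the outermost release (the count dropping to its base level) actually triggers rescheduling, while nested releases merely decrement. The sketch below illustrates that counting discipline only; FakeCore and its members are invented stand-ins, not the yuzu scheduler API.

// Illustrative counter semantics for nested "disable dispatch" sections.
#include <cassert>
#include <cstdio>

struct FakeCore {
    int disable_count = 0;

    void DisableDispatch() { ++disable_count; }

    void EnableDispatch() {
        assert(disable_count >= 1);
        --disable_count;
        if (disable_count == 0) {
            // Outermost release: the only point where a reschedule happens.
            std::printf("reschedule now\n");
        }
    }
};

int main() {
    FakeCore core;
    core.DisableDispatch(); // outer critical section
    core.DisableDispatch(); // nested critical section
    core.EnableDispatch();  // inner release: no reschedule
    core.EnableDispatch();  // outer release: reschedule happens here
}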
@@ -617,13 +615,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
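Note: splitting the cleanup out of the destructor into an explicit Finalize(), and leaving only an assertion in the destructor, is a two-phase teardown pattern: the owner (here KernelCore) decides the safe point at which each scheduler may release its idle thread. A generic sketch of the idea follows; the class and member names are invented and it is not the yuzu implementation.

// Generic two-phase teardown: the owner calls Finalize() at a safe point,
// the destructor only verifies that it happened.
#include <cassert>
#include <memory>

class Resource {};

class Scheduler {
public:
    void Finalize() {
        // Release resources while the rest of the system is still alive.
        idle_thread.reset();
    }

    ~Scheduler() {
        // By the time the destructor runs, cleanup must already be done.
        assert(idle_thread == nullptr);
    }

private:
    std::unique_ptr<Resource> idle_thread = std::make_unique<Resource>();
};

int main() {
    Scheduler s;
    s.Finalize(); // the owner controls the teardown point
}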
@@ -642,10 +644,12 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
 
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }

@@ -655,27 +659,34 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread) {
-        if (thread->IsCallingSvc()) {
-            thread->ClearIsCallingSvc();
-        }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;
-
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.SaveContext(thread->GetContext32());
-            cpu_core.SaveContext(thread->GetContext64());
-            // Save the TPIDR_EL0 system register in case it was modified.
-            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        } else {
-            prev_thread = nullptr;
-        }
-
-        thread->context_guard.Unlock();
-    }
+    if (thread->IsCallingSvc()) {
+        thread->ClearIsCallingSvc();
+    }
+
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
+
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
+    } else {
+        prev_thread = nullptr;
+    }
+
+    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
     LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
@@ -683,11 +694,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());

@@ -705,7 +711,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;

@@ -717,10 +723,15 @@ void KScheduler::ScheduleImpl() {
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();

@@ -731,11 +742,7 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);

@@ -768,7 +775,7 @@ void KScheduler::SwitchToCurrent() {
                 next_thread->context_guard.Unlock();
                 break;
             }
-            if (next_thread->GetActiveCore() != core_id) {
+            if (next_thread->GetCurrentCore() != core_id) {
                 next_thread->context_guard.Unlock();
                 break;
             }
@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 
@@ -188,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 1;
+    sp.disable_count = 0;
     SetInExceptionHandler();
 
     // Set thread ID.

@@ -970,6 +970,9 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
+
+        DisableDispatch();
+
         return ResultSuccess;
     }
 }
@@ -1054,4 +1057,16 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
+        auto scheduler = kernel.CurrentScheduler();
+
+        if (scheduler) {
+            scheduler->RescheduleCurrentCore();
+        }
+    } else {
+        GetCurrentThread(kernel).EnableDispatch();
+    }
+}
+
 } // namespace Kernel
@@ -450,16 +450,35 @@ public:
         sleeping_queue = q;
     }
 
+    [[nodiscard]] bool IsKernelThread() const {
+        return GetActiveCore() == 3;
+    }
+
     [[nodiscard]] s32 GetDisableDispatchCount() const {
+        if (IsKernelThread()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return 1;
+        }
+
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
+        if (IsKernelThread()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
+        if (IsKernelThread()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -752,4 +771,16 @@ public:
     }
 };
 
+class KScopedDisableDispatch {
+public:
+    explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+        GetCurrentThread(kernel).DisableDispatch();
+    }
+
+    ~KScopedDisableDispatch();
+
+private:
+    KernelCore& kernel;
+};
+
 } // namespace Kernel
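Note: KScopedDisableDispatch, declared above with its destructor defined in the k_thread.cpp hunk, is a plain RAII guard: construction disables dispatch for the current thread, destruction re-enables it or reschedules. That is what lets the KHandleTable hunks cover every early return inside their lock scopes without explicit re-enable calls. The sketch below shows the same RAII shape with dummy free functions standing in for the real kernel calls; it is illustrative only.

// RAII scope guard in the shape of KScopedDisableDispatch.
#include <cstdio>

void DummyDisable() { std::printf("dispatch disabled\n"); }
void DummyEnable() { std::printf("dispatch enabled\n"); }

class ScopedDisableDispatch {
public:
    ScopedDisableDispatch() { DummyDisable(); }
    ~ScopedDisableDispatch() { DummyEnable(); }

    // Non-copyable: the scope owns exactly one disable/enable pair.
    ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
    ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;
};

bool RemoveHandle(bool valid) {
    ScopedDisableDispatch dd; // taken before the table lock in the real code
    if (!valid) {
        return false;          // the guard re-enables dispatch on this path too
    }
    return true;               // ...and on this one
}

int main() {
    RemoveHandle(true);
    RemoveHandle(false);
}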
@@ -85,8 +85,9 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (auto& core : cores) {
-            core.Initialize(current_process->Is64BitProcess());
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            cores[core_id].Initialize(current_process->Is64BitProcess());
+            system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
 

@@ -131,15 +132,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id].reset();
-        }
-
         cores.clear();
 
         global_handle_table->Finalize();

@@ -167,6 +159,16 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id]->Finalize();
+            schedulers[core_id].reset();
+        }
+
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;

@@ -257,14 +259,6 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }
 
     /// Creates a new host thread ID, should only be called by GetHostThreadId
@@ -824,16 +818,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
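Note: CurrentPhysicalCoreIndex above replaces the old ASSERT on the host thread ID with a clamp, so host threads that are not one of the emulated core threads map onto the last core instead of tripping an assert. A tiny self-contained sketch of that mapping follows; the constant and the example IDs are made up for illustration.

// Sketch of mapping an arbitrary host-thread ID onto a valid core index.
#include <cstddef>
#include <cstdio>

constexpr std::size_t NUM_CPU_CORES = 4;

std::size_t CurrentPhysicalCoreIndex(std::size_t host_thread_id) {
    if (host_thread_id >= NUM_CPU_CORES) {
        // Non-core host threads are treated as if they ran on the last core.
        return NUM_CPU_CORES - 1;
    }
    return host_thread_id;
}

int main() {
    std::printf("%zu\n", CurrentPhysicalCoreIndex(2)); // 2: a real core thread
    std::printf("%zu\n", CurrentPhysicalCoreIndex(7)); // 3: clamped host thread
}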
@@ -1026,6 +1024,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }

@@ -1040,13 +1041,11 @@ void KernelCore::ExceptionalExit() {
 }
 
 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
@@ -146,6 +146,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 

@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 
@@ -9,17 +9,20 @@
 #include "core/core.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/nvflinger/buffer_queue.h"
 
 namespace Service::NVFlinger {
 
-BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_)
-    : id(id_), layer_id(layer_id_), buffer_wait_event{kernel} {
-    Kernel::KAutoObject::Create(std::addressof(buffer_wait_event));
-    buffer_wait_event.Initialize("BufferQueue:WaitEvent");
+BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
+                         KernelHelpers::ServiceContext& service_context_)
+    : id(id_), layer_id(layer_id_), service_context{service_context_} {
+    buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent");
 }
 
-BufferQueue::~BufferQueue() = default;
+BufferQueue::~BufferQueue() {
+    service_context.CloseEvent(buffer_wait_event);
+}
 
 void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) {
     ASSERT(slot < buffer_slots);

@@ -41,7 +44,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
         .multi_fence = {},
     };
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width,

@@ -123,7 +126,7 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {

@@ -158,7 +161,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 void BufferQueue::Connect() {

@@ -173,7 +176,7 @@ void BufferQueue::Disconnect() {
         std::unique_lock lock{queue_sequence_mutex};
         queue_sequence.clear();
     }
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
     is_connect = false;
     free_buffers_condition.notify_one();
 }

@@ -193,11 +196,11 @@ u32 BufferQueue::Query(QueryType type) {
 }
 
 Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() {
-    return buffer_wait_event.GetWritableEvent();
+    return buffer_wait_event->GetWritableEvent();
 }
 
 Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() {
-    return buffer_wait_event.GetReadableEvent();
+    return buffer_wait_event->GetReadableEvent();
 }
 
 } // namespace Service::NVFlinger
@@ -24,6 +24,10 @@ class KReadableEvent;
 class KWritableEvent;
 } // namespace Kernel
 
+namespace Service::KernelHelpers {
+class ServiceContext;
+} // namespace Service::KernelHelpers
+
 namespace Service::NVFlinger {
 
 constexpr u32 buffer_slots = 0x40;

@@ -54,7 +58,8 @@ public:
         NativeWindowFormat = 2,
     };
 
-    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_);
+    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
+                         KernelHelpers::ServiceContext& service_context_);
     ~BufferQueue();
 
     enum class BufferTransformFlags : u32 {

@@ -131,12 +136,14 @@ private:
     std::list<u32> free_buffers;
     std::array<Buffer, buffer_slots> buffers;
     std::list<u32> queue_sequence;
-    Kernel::KEvent buffer_wait_event;
+    Kernel::KEvent* buffer_wait_event{};
 
     std::mutex free_buffers_mutex;
     std::condition_variable free_buffers_condition;
 
     std::mutex queue_sequence_mutex;
+
+    KernelHelpers::ServiceContext& service_context;
 };
 
 } // namespace Service::NVFlinger
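Note: the BufferQueue (and, below, Display) hunks swap a by-value Kernel::KEvent that was manually Create()'d and Initialize()'d for a pointer obtained from a KernelHelpers::ServiceContext via CreateEvent and returned via CloseEvent in the destructor. Only those two calls are visible in this diff; the sketch below illustrates the create-in-constructor / close-in-destructor ownership pattern with a hypothetical factory class, not the real ServiceContext internals.

// Ownership pattern sketch: an event handed out by a context object and
// returned to it on destruction. EventFactory and Event are hypothetical
// stand-ins for KernelHelpers::ServiceContext and Kernel::KEvent.
#include <memory>
#include <string>
#include <vector>

struct Event {
    std::string name;
};

class EventFactory {
public:
    Event* CreateEvent(std::string name) {
        events.push_back(std::make_unique<Event>(Event{std::move(name)}));
        return events.back().get();
    }
    void CloseEvent(Event* event) {
        std::erase_if(events, [event](const auto& e) { return e.get() == event; });
    }

private:
    std::vector<std::unique_ptr<Event>> events;
};

class BufferQueueLike {
public:
    explicit BufferQueueLike(EventFactory& factory_) : factory{factory_} {
        wait_event = factory.CreateEvent("BufferQueue:WaitEvent");
    }
    ~BufferQueueLike() { factory.CloseEvent(wait_event); }

private:
    EventFactory& factory;
    Event* wait_event{};
};

int main() {
    EventFactory factory;
    BufferQueueLike queue{factory}; // event is closed when queue goes out of scope
}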
@@ -61,12 +61,13 @@ void NVFlinger::SplitVSync() {
     }
 }
 
-NVFlinger::NVFlinger(Core::System& system_) : system(system_) {
-    displays.emplace_back(0, "Default", system);
-    displays.emplace_back(1, "External", system);
-    displays.emplace_back(2, "Edid", system);
-    displays.emplace_back(3, "Internal", system);
-    displays.emplace_back(4, "Null", system);
+NVFlinger::NVFlinger(Core::System& system_)
+    : system(system_), service_context(system_, "nvflinger") {
+    displays.emplace_back(0, "Default", service_context, system);
+    displays.emplace_back(1, "External", service_context, system);
+    displays.emplace_back(2, "Edid", service_context, system);
+    displays.emplace_back(3, "Internal", service_context, system);
+    displays.emplace_back(4, "Null", service_context, system);
     guard = std::make_shared<std::mutex>();
 
     // Schedule the screen composition events

@@ -146,7 +147,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
 void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) {
     const u32 buffer_queue_id = next_buffer_queue_id++;
     buffer_queues.emplace_back(
-        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id));
+        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id, service_context));
     display.CreateLayer(layer_id, *buffer_queues.back());
 }
 

@@ -15,6 +15,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "core/hle/service/kernel_helpers.h"
 
 namespace Common {
 class Event;

@@ -137,6 +138,8 @@ private:
     std::unique_ptr<std::thread> vsync_thread;
     std::unique_ptr<Common::Event> wait_event;
     std::atomic<bool> is_running{};
+
+    KernelHelpers::ServiceContext service_context;
 };
 
 } // namespace Service::NVFlinger
@@ -12,18 +12,21 @@
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/vi/display/vi_display.h"
 #include "core/hle/service/vi/layer/vi_layer.h"
 
 namespace Service::VI {
 
-Display::Display(u64 id, std::string name_, Core::System& system)
-    : display_id{id}, name{std::move(name_)}, vsync_event{system.Kernel()} {
-    Kernel::KAutoObject::Create(std::addressof(vsync_event));
-    vsync_event.Initialize(fmt::format("Display VSync Event {}", id));
+Display::Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
+                 Core::System& system_)
+    : display_id{id}, name{std::move(name_)}, service_context{service_context_} {
+    vsync_event = service_context.CreateEvent(fmt::format("Display VSync Event {}", id));
 }
 
-Display::~Display() = default;
+Display::~Display() {
+    service_context.CloseEvent(vsync_event);
+}
 
 Layer& Display::GetLayer(std::size_t index) {
     return *layers.at(index);

@@ -34,11 +37,11 @@ const Layer& Display::GetLayer(std::size_t index) const {
 }
 
 Kernel::KReadableEvent& Display::GetVSyncEvent() {
-    return vsync_event.GetReadableEvent();
+    return vsync_event->GetReadableEvent();
 }
 
 void Display::SignalVSyncEvent() {
-    vsync_event.GetWritableEvent().Signal();
+    vsync_event->GetWritableEvent().Signal();
 }
 
 void Display::CreateLayer(u64 layer_id, NVFlinger::BufferQueue& buffer_queue) {

@@ -18,6 +18,9 @@ class KEvent;
 namespace Service::NVFlinger {
 class BufferQueue;
 }
+namespace Service::KernelHelpers {
+class ServiceContext;
+} // namespace Service::KernelHelpers
 
 namespace Service::VI {
 

@@ -32,9 +35,12 @@ public:
     /// Constructs a display with a given unique ID and name.
     ///
     /// @param id The unique ID for this display.
+    /// @param service_context_ The ServiceContext for the owning service.
     /// @param name_ The name for this display.
+    /// @param system_ The global system instance.
     ///
-    Display(u64 id, std::string name_, Core::System& system);
+    Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
+            Core::System& system_);
     ~Display();
 
     /// Gets the unique ID assigned to this display.

@@ -98,9 +104,10 @@ public:
 private:
     u64 display_id;
     std::string name;
+    KernelHelpers::ServiceContext& service_context;
 
     std::vector<std::shared_ptr<Layer>> layers;
-    Kernel::KEvent vsync_event;
+    Kernel::KEvent* vsync_event{};
 };
 
 } // namespace Service::VI
@@ -122,6 +122,10 @@ void EmuWindow_SDL2::OnResize() {
     UpdateCurrentFramebufferLayout(width, height);
 }
 
+void EmuWindow_SDL2::ShowCursor(bool show_cursor) {
+    SDL_ShowCursor(show_cursor ? SDL_ENABLE : SDL_DISABLE);
+}
+
 void EmuWindow_SDL2::Fullscreen() {
     switch (Settings::values.fullscreen_mode.GetValue()) {
     case Settings::FullscreenMode::Exclusive:

@@ -67,6 +67,9 @@ protected:
     /// Called by WaitEvent when any event that may cause the window to be resized occurs
     void OnResize();
 
+    /// Called when users want to hide the mouse cursor
+    void ShowCursor(bool show_cursor);
+
     /// Called when user passes the fullscreen parameter flag
     void Fullscreen();
 
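Note: the SDL frontend hunks add EmuWindow_SDL2::ShowCursor, a thin wrapper over SDL_ShowCursor, and call it with false when entering fullscreen. A minimal SDL2 usage sketch follows; the window title, size, and delay are arbitrary, and it only demonstrates the cursor call, not the emulator's window setup.

// Minimal SDL2 sketch: hide the cursor while a fullscreen window is shown.
// Build with something like: c++ example.cpp $(sdl2-config --cflags --libs)
#include <SDL.h>

int main(int argc, char** argv) {
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        return 1;
    }
    SDL_Window* window = SDL_CreateWindow("demo", SDL_WINDOWPOS_CENTERED,
                                          SDL_WINDOWPOS_CENTERED, 640, 480,
                                          SDL_WINDOW_FULLSCREEN_DESKTOP);
    // The same call the new ShowCursor(false) path reduces to.
    SDL_ShowCursor(SDL_DISABLE);
    SDL_Delay(1000);
    SDL_ShowCursor(SDL_ENABLE);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}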
@@ -111,6 +111,7 @@ EmuWindow_SDL2_GL::EmuWindow_SDL2_GL(InputCommon::InputSubsystem* input_subsyste
 
     if (fullscreen) {
         Fullscreen();
+        ShowCursor(false);
     }
 
     window_context = SDL_GL_CreateContext(render_window);

@@ -45,6 +45,7 @@ EmuWindow_SDL2_VK::EmuWindow_SDL2_VK(InputCommon::InputSubsystem* input_subsyste
 
     if (fullscreen) {
         Fullscreen();
+        ShowCursor(false);
    }
 
     switch (wm.subsystem) {