Merge pull request #5131 from bunnei/scheduler-rewrite
Rewrite Kernel scheduler based on Atmosphere
Commit 1279c7ce7a
41 changed files with 2218 additions and 1874 deletions
src/common/CMakeLists.txt

@@ -104,6 +104,7 @@ add_library(common STATIC
     detached_tasks.h
     bit_cast.h
     bit_field.h
+    bit_set.h
     bit_util.h
     cityhash.cpp
     cityhash.h
@@ -140,7 +141,6 @@ add_library(common STATIC
     microprofile.h
     microprofileui.h
     misc.cpp
-    multi_level_queue.h
     page_table.cpp
     page_table.h
     param_package.cpp
src/common/bit_set.h (new file)
@@ -0,0 +1,99 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

#include <array>
#include <bit>

#include "common/alignment.h"
#include "common/bit_util.h"
#include "common/common_types.h"

namespace Common {

namespace impl {

template <typename Storage, size_t N>
class BitSet {

public:
    constexpr BitSet() = default;

    constexpr void SetBit(size_t i) {
        this->words[i / FlagsPerWord] |= GetBitMask(i % FlagsPerWord);
    }

    constexpr void ClearBit(size_t i) {
        this->words[i / FlagsPerWord] &= ~GetBitMask(i % FlagsPerWord);
    }

    constexpr size_t CountLeadingZero() const {
        for (size_t i = 0; i < NumWords; i++) {
            if (this->words[i]) {
                return FlagsPerWord * i + CountLeadingZeroImpl(this->words[i]);
            }
        }
        return FlagsPerWord * NumWords;
    }

    constexpr size_t GetNextSet(size_t n) const {
        for (size_t i = (n + 1) / FlagsPerWord; i < NumWords; i++) {
            Storage word = this->words[i];
            if (!IsAligned(n + 1, FlagsPerWord)) {
                word &= GetBitMask(n % FlagsPerWord) - 1;
            }
            if (word) {
                return FlagsPerWord * i + CountLeadingZeroImpl(word);
            }
        }
        return FlagsPerWord * NumWords;
    }

private:
    static_assert(std::is_unsigned_v<Storage>);
    static_assert(sizeof(Storage) <= sizeof(u64));

    static constexpr size_t FlagsPerWord = BitSize<Storage>();
    static constexpr size_t NumWords = AlignUp(N, FlagsPerWord) / FlagsPerWord;

    static constexpr auto CountLeadingZeroImpl(Storage word) {
        return std::countl_zero(static_cast<unsigned long long>(word)) -
               (BitSize<unsigned long long>() - FlagsPerWord);
    }

    static constexpr Storage GetBitMask(size_t bit) {
        return Storage(1) << (FlagsPerWord - 1 - bit);
    }

    std::array<Storage, NumWords> words{};
};

} // namespace impl

template <size_t N>
using BitSet8 = impl::BitSet<u8, N>;

template <size_t N>
using BitSet16 = impl::BitSet<u16, N>;

template <size_t N>
using BitSet32 = impl::BitSet<u32, N>;

template <size_t N>
using BitSet64 = impl::BitSet<u64, N>;

} // namespace Common
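A brief illustration of how this container behaves may help when reading the scheduler code later in this diff. The snippet below is a hypothetical usage sketch, not part of the change; the values are made up. Note that bits are stored most-significant-first, so CountLeadingZero() yields the lowest set index, which is exactly what a priority scan wants.

// Hypothetical usage sketch (not part of the PR): exercising Common::BitSet64.
#include "common/bit_set.h"

void BitSetExample() {
    Common::BitSet64<64> bits{};                   // 64 flags, all clear
    bits.SetBit(12);
    bits.SetBit(40);

    const size_t lowest = bits.CountLeadingZero(); // 12: first set index
    const size_t next = bits.GetNextSet(lowest);   // 40: next set index after 12
    bits.ClearBit(12);
    // When nothing is set, both queries return FlagsPerWord * NumWords (64 here).
}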
src/common/multi_level_queue.h (deleted file)
@@ -1,345 +0,0 @@
// Copyright 2019 TuxSH
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <iterator>
#include <list>
#include <utility>

#include "common/bit_util.h"
#include "common/common_types.h"

namespace Common {

/**
 * A MultiLevelQueue is a type of priority queue which has the following characteristics:
 * - iteratable through each of its elements.
 * - back can be obtained.
 * - O(1) add, lookup (both front and back)
 * - discrete priorities and a max of 64 priorities (limited domain)
 * This type of priority queue is normaly used for managing threads within an scheduler
 */
template <typename T, std::size_t Depth>
class MultiLevelQueue {
public:
    using value_type = T;
    using reference = value_type&;
    using const_reference = const value_type&;
    using pointer = value_type*;
    using const_pointer = const value_type*;

    using difference_type = typename std::pointer_traits<pointer>::difference_type;
    using size_type = std::size_t;

    template <bool is_constant>
    class iterator_impl {
    public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = T;
        using pointer = std::conditional_t<is_constant, T*, const T*>;
        using reference = std::conditional_t<is_constant, const T&, T&>;
        using difference_type = typename std::pointer_traits<pointer>::difference_type;

        friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
            if (lhs.IsEnd() && rhs.IsEnd())
                return true;
            return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
        }

        friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
            return !operator==(lhs, rhs);
        }

        reference operator*() const {
            return *it;
        }

        pointer operator->() const {
            return it.operator->();
        }

        iterator_impl& operator++() {
            if (IsEnd()) {
                return *this;
            }

            ++it;

            if (it == GetEndItForPrio()) {
                u64 prios = mlq.used_priorities;
                prios &= ~((1ULL << (current_priority + 1)) - 1);
                if (prios == 0) {
                    current_priority = static_cast<u32>(mlq.depth());
                } else {
                    current_priority = CountTrailingZeroes64(prios);
                    it = GetBeginItForPrio();
                }
            }
            return *this;
        }

        iterator_impl& operator--() {
            if (IsEnd()) {
                if (mlq.used_priorities != 0) {
                    current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
                    it = GetEndItForPrio();
                    --it;
                }
            } else if (it == GetBeginItForPrio()) {
                u64 prios = mlq.used_priorities;
                prios &= (1ULL << current_priority) - 1;
                if (prios != 0) {
                    current_priority = CountTrailingZeroes64(prios);
                    it = GetEndItForPrio();
                    --it;
                }
            } else {
                --it;
            }
            return *this;
        }

        iterator_impl operator++(int) {
            const iterator_impl v{*this};
            ++(*this);
            return v;
        }

        iterator_impl operator--(int) {
            const iterator_impl v{*this};
            --(*this);
            return v;
        }

        // allow implicit const->non-const
        iterator_impl(const iterator_impl<false>& other)
            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}

        iterator_impl(const iterator_impl<true>& other)
            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}

        iterator_impl& operator=(const iterator_impl<false>& other) {
            mlq = other.mlq;
            it = other.it;
            current_priority = other.current_priority;
            return *this;
        }

        friend class iterator_impl<true>;
        iterator_impl() = default;

    private:
        friend class MultiLevelQueue;
        using container_ref =
            std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
        using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
                                                 typename std::list<T>::iterator>;

        explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
            : mlq(mlq), it(it), current_priority(current_priority) {}
        explicit iterator_impl(container_ref mlq, u32 current_priority)
            : mlq(mlq), it(), current_priority(current_priority) {}

        bool IsEnd() const {
            return current_priority == mlq.depth();
        }

        list_iterator GetBeginItForPrio() const {
            return mlq.levels[current_priority].begin();
        }

        list_iterator GetEndItForPrio() const {
            return mlq.levels[current_priority].end();
        }

        container_ref mlq;
        list_iterator it;
        u32 current_priority;
    };

    using iterator = iterator_impl<false>;
    using const_iterator = iterator_impl<true>;

    void add(const T& element, u32 priority, bool send_back = true) {
        if (send_back)
            levels[priority].push_back(element);
        else
            levels[priority].push_front(element);
        used_priorities |= 1ULL << priority;
    }

    void remove(const T& element, u32 priority) {
        auto it = ListIterateTo(levels[priority], element);
        if (it == levels[priority].end())
            return;
        levels[priority].erase(it);
        if (levels[priority].empty()) {
            used_priorities &= ~(1ULL << priority);
        }
    }

    void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
        remove(element, old_priority);
        add(element, new_priority, !adjust_front);
    }
    void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
        adjust(*it, old_priority, new_priority, adjust_front);
    }

    void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
        ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
                   ListIterateTo(levels[priority], element));

        other.used_priorities |= 1ULL << priority;

        if (levels[priority].empty()) {
            used_priorities &= ~(1ULL << priority);
        }
    }

    void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
        transfer_to_front(*it, priority, other);
    }

    void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
        ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
                   ListIterateTo(levels[priority], element));

        other.used_priorities |= 1ULL << priority;

        if (levels[priority].empty()) {
            used_priorities &= ~(1ULL << priority);
        }
    }

    void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
        transfer_to_back(*it, priority, other);
    }

    void yield(u32 priority, std::size_t n = 1) {
        ListShiftForward(levels[priority], n);
    }

    [[nodiscard]] std::size_t depth() const {
        return Depth;
    }

    [[nodiscard]] std::size_t size(u32 priority) const {
        return levels[priority].size();
    }

    [[nodiscard]] std::size_t size() const {
        u64 priorities = used_priorities;
        std::size_t size = 0;
        while (priorities != 0) {
            const u64 current_priority = CountTrailingZeroes64(priorities);
            size += levels[current_priority].size();
            priorities &= ~(1ULL << current_priority);
        }
        return size;
    }

    [[nodiscard]] bool empty() const {
        return used_priorities == 0;
    }

    [[nodiscard]] bool empty(u32 priority) const {
        return (used_priorities & (1ULL << priority)) == 0;
    }

    [[nodiscard]] u32 highest_priority_set(u32 max_priority = 0) const {
        const u64 priorities =
            max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
        return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
    }

    [[nodiscard]] u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
        const u64 priorities = min_priority >= Depth - 1
                                   ? used_priorities
                                   : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
        return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
    }

    [[nodiscard]] const_iterator cbegin(u32 max_prio = 0) const {
        const u32 priority = highest_priority_set(max_prio);
        return priority == Depth ? cend()
                                 : const_iterator{*this, levels[priority].cbegin(), priority};
    }
    [[nodiscard]] const_iterator begin(u32 max_prio = 0) const {
        return cbegin(max_prio);
    }
    [[nodiscard]] iterator begin(u32 max_prio = 0) {
        const u32 priority = highest_priority_set(max_prio);
        return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
    }

    [[nodiscard]] const_iterator cend(u32 min_prio = Depth - 1) const {
        return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
    }
    [[nodiscard]] const_iterator end(u32 min_prio = Depth - 1) const {
        return cend(min_prio);
    }
    [[nodiscard]] iterator end(u32 min_prio = Depth - 1) {
        return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
    }

    [[nodiscard]] T& front(u32 max_priority = 0) {
        const u32 priority = highest_priority_set(max_priority);
        return levels[priority == Depth ? 0 : priority].front();
    }
    [[nodiscard]] const T& front(u32 max_priority = 0) const {
        const u32 priority = highest_priority_set(max_priority);
        return levels[priority == Depth ? 0 : priority].front();
    }

    [[nodiscard]] T& back(u32 min_priority = Depth - 1) {
        const u32 priority = lowest_priority_set(min_priority); // intended
        return levels[priority == Depth ? 63 : priority].back();
    }
    [[nodiscard]] const T& back(u32 min_priority = Depth - 1) const {
        const u32 priority = lowest_priority_set(min_priority); // intended
        return levels[priority == Depth ? 63 : priority].back();
    }

    void clear() {
        used_priorities = 0;
        for (std::size_t i = 0; i < Depth; i++) {
            levels[i].clear();
        }
    }

private:
    using const_list_iterator = typename std::list<T>::const_iterator;

    static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
        if (shift >= list.size()) {
            return;
        }

        const auto begin_range = list.begin();
        const auto end_range = std::next(begin_range, shift);
        list.splice(list.end(), list, begin_range, end_range);
    }

    static void ListSplice(std::list<T>& in_list, const_list_iterator position,
                           std::list<T>& out_list, const_list_iterator element) {
        in_list.splice(position, out_list, element);
    }

    [[nodiscard]] static const_list_iterator ListIterateTo(const std::list<T>& list,
                                                           const T& element) {
        auto it = list.cbegin();
        while (it != list.cend() && *it != element) {
            ++it;
        }
        return it;
    }

    std::array<std::list<T>, Depth> levels;
    u64 used_priorities = 0;
};

} // namespace Common
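For reference while reviewing the removal, the sketch below shows roughly how the old container was driven. It is hypothetical, not taken from the PR; the element type and values are invented, and it only compiles against the pre-PR tree where the header still exists.

// Hypothetical sketch of the removed interface (pre-PR code only, not part of this change).
#include "common/multi_level_queue.h"

void MultiLevelQueueExample() {
    Common::MultiLevelQueue<int, 64> queue; // 64 priority levels, lower index = higher priority

    queue.add(7, 32);               // enqueue element 7 at the back of priority 32
    queue.adjust(7, 32, 16);        // move it to priority 16
    const int next = queue.front(); // O(1): front element of the highest occupied priority
    queue.yield(16);                // rotate level 16 by one (round-robin within a priority)
    queue.remove(7, 16);
    (void)next;
}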
src/core/CMakeLists.txt

@@ -149,10 +149,19 @@ add_library(core STATIC
     hle/kernel/code_set.cpp
     hle/kernel/code_set.h
     hle/kernel/errors.h
+    hle/kernel/global_scheduler_context.cpp
+    hle/kernel/global_scheduler_context.h
     hle/kernel/handle_table.cpp
     hle/kernel/handle_table.h
     hle/kernel/hle_ipc.cpp
     hle/kernel/hle_ipc.h
+    hle/kernel/k_affinity_mask.h
+    hle/kernel/k_priority_queue.h
+    hle/kernel/k_scheduler.cpp
+    hle/kernel/k_scheduler.h
+    hle/kernel/k_scheduler_lock.h
+    hle/kernel/k_scoped_lock.h
+    hle/kernel/k_scoped_scheduler_lock_and_sleep.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
     hle/kernel/memory/address_space_info.cpp
@@ -187,8 +196,6 @@ add_library(core STATIC
     hle/kernel/readable_event.h
     hle/kernel/resource_limit.cpp
     hle/kernel/resource_limit.h
-    hle/kernel/scheduler.cpp
-    hle/kernel/scheduler.h
     hle/kernel/server_port.cpp
     hle/kernel/server_port.h
     hle/kernel/server_session.cpp
src/core/arm/dynarmic/arm_dynarmic_32.cpp

@@ -294,6 +294,9 @@ void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
 }
 
 void ARM_Dynarmic_32::ClearExclusiveState() {
+    if (!jit) {
+        return;
+    }
     jit->ClearExclusiveState();
 }
 
src/core/arm/dynarmic/arm_dynarmic_64.cpp

@@ -15,8 +15,8 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/hardware_properties.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/svc.h"
 #include "core/memory.h"
 #include "core/settings.h"
@@ -330,6 +330,9 @@ void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
 }
 
 void ARM_Dynarmic_64::ClearExclusiveState() {
+    if (!jit) {
+        return;
+    }
     jit->ClearExclusiveState();
 }
 
src/core/core.cpp

@@ -27,10 +27,10 @@
 #include "core/file_sys/vfs_real.h"
 #include "core/hardware_interrupt_manager.h"
 #include "core/hle/kernel/client_port.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/service/am/applets/applets.h"
 #include "core/hle/service/apm/controller.h"
@@ -507,14 +507,6 @@ std::size_t System::CurrentCoreIndex() const {
     return core;
 }
 
-Kernel::Scheduler& System::CurrentScheduler() {
-    return impl->kernel.CurrentScheduler();
-}
-
-const Kernel::Scheduler& System::CurrentScheduler() const {
-    return impl->kernel.CurrentScheduler();
-}
-
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }
@@ -523,22 +515,14 @@ const Kernel::PhysicalCore& System::CurrentPhysicalCore() const {
     return impl->kernel.CurrentPhysicalCore();
 }
 
-Kernel::Scheduler& System::Scheduler(std::size_t core_index) {
-    return impl->kernel.Scheduler(core_index);
-}
-
-const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
-    return impl->kernel.Scheduler(core_index);
-}
-
-/// Gets the global scheduler
-Kernel::GlobalScheduler& System::GlobalScheduler() {
-    return impl->kernel.GlobalScheduler();
-}
-
-/// Gets the global scheduler
-const Kernel::GlobalScheduler& System::GlobalScheduler() const {
-    return impl->kernel.GlobalScheduler();
-}
+/// Gets the global scheduler
+Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() {
+    return impl->kernel.GlobalSchedulerContext();
+}
+
+/// Gets the global scheduler
+const Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() const {
+    return impl->kernel.GlobalSchedulerContext();
+}
 
 Kernel::Process* System::CurrentProcess() {
src/core/core.h

@@ -26,11 +26,11 @@ class VfsFilesystem;
 } // namespace FileSys
 
 namespace Kernel {
-class GlobalScheduler;
+class GlobalSchedulerContext;
 class KernelCore;
 class PhysicalCore;
 class Process;
-class Scheduler;
+class KScheduler;
 } // namespace Kernel
 
 namespace Loader {
@@ -213,12 +213,6 @@ public:
     /// Gets the index of the currently running CPU core
     [[nodiscard]] std::size_t CurrentCoreIndex() const;
 
-    /// Gets the scheduler for the CPU core that is currently running
-    [[nodiscard]] Kernel::Scheduler& CurrentScheduler();
-
-    /// Gets the scheduler for the CPU core that is currently running
-    [[nodiscard]] const Kernel::Scheduler& CurrentScheduler() const;
-
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
 
@@ -261,17 +255,11 @@ public:
     /// Gets an immutable reference to the renderer.
     [[nodiscard]] const VideoCore::RendererBase& Renderer() const;
 
-    /// Gets the scheduler for the CPU core with the specified index
-    [[nodiscard]] Kernel::Scheduler& Scheduler(std::size_t core_index);
-
-    /// Gets the scheduler for the CPU core with the specified index
-    [[nodiscard]] const Kernel::Scheduler& Scheduler(std::size_t core_index) const;
-
-    /// Gets the global scheduler
-    [[nodiscard]] Kernel::GlobalScheduler& GlobalScheduler();
-
-    /// Gets the global scheduler
-    [[nodiscard]] const Kernel::GlobalScheduler& GlobalScheduler() const;
+    /// Gets the global scheduler
+    [[nodiscard]] Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
+
+    /// Gets the global scheduler
+    [[nodiscard]] const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
 
     /// Gets the manager for the guest device memory
     [[nodiscard]] Core::DeviceMemory& DeviceMemory();
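A hypothetical call site, sketched here only to show how these accessor changes look from a caller's side after this PR: the per-core scheduler is reached through the kernel and now comes back as a pointer, while the global state lives behind GlobalSchedulerContext. Includes are omitted and the function name is invented.

// Hypothetical caller (not from the PR), assuming a Core::System& named system.
void InspectSchedulers(Core::System& system) {
    auto& kernel = system.Kernel();

    // Per-core scheduler: now reached via the kernel and returned as a pointer.
    Kernel::Thread* current = kernel.CurrentScheduler()->GetCurrentThread();

    // Global scheduling state: replaces the old System::GlobalScheduler() accessor.
    Kernel::GlobalSchedulerContext& ctx = system.GlobalSchedulerContext();
    const auto& threads = ctx.GetThreadList();
    (void)current;
    (void)threads;
}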
src/core/cpu_manager.cpp

@@ -10,9 +10,9 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/cpu_manager.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "video_core/gpu.h"
 
@@ -109,11 +109,8 @@ void* CpuManager::GetStartFuncParamater() {
 
 void CpuManager::MultiCoreRunGuestThread() {
     auto& kernel = system.Kernel();
-    {
-        auto& sched = kernel.CurrentScheduler();
-        sched.OnThreadStart();
-    }
-    auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+    kernel.CurrentScheduler()->OnThreadStart();
+    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
     auto& host_context = thread->GetHostContext();
     host_context->SetRewindPoint(GuestRewindFunction, this);
     MultiCoreRunGuestLoop();
@@ -130,8 +127,8 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
-        auto& scheduler = kernel.CurrentScheduler();
-        scheduler.TryDoContextSwitch();
+        physical_core->ArmInterface().ClearExclusiveState();
+        kernel.CurrentScheduler()->RescheduleCurrentCore();
     }
 }
 
@@ -140,25 +137,21 @@ void CpuManager::MultiCoreRunIdleThread() {
     while (true) {
         auto& physical_core = kernel.CurrentPhysicalCore();
         physical_core.Idle();
-        auto& scheduler = kernel.CurrentScheduler();
-        scheduler.TryDoContextSwitch();
+        kernel.CurrentScheduler()->RescheduleCurrentCore();
     }
 }
 
 void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
-    {
-        auto& sched = kernel.CurrentScheduler();
-        sched.OnThreadStart();
-    }
+    kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
         auto core = kernel.GetCurrentHostThreadID();
-        auto& scheduler = kernel.CurrentScheduler();
+        auto& scheduler = *kernel.CurrentScheduler();
         Kernel::Thread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
         ASSERT(core == kernel.GetCurrentHostThreadID());
-        scheduler.TryDoContextSwitch();
+        scheduler.RescheduleCurrentCore();
     }
 }
 
@@ -206,11 +199,8 @@ void CpuManager::MultiCorePause(bool paused) {
 
 void CpuManager::SingleCoreRunGuestThread() {
     auto& kernel = system.Kernel();
-    {
-        auto& sched = kernel.CurrentScheduler();
-        sched.OnThreadStart();
-    }
-    auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+    kernel.CurrentScheduler()->OnThreadStart();
+    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
     auto& host_context = thread->GetHostContext();
     host_context->SetRewindPoint(GuestRewindFunction, this);
     SingleCoreRunGuestLoop();
@@ -218,7 +208,7 @@ void CpuManager::SingleCoreRunGuestThread() {
 
 void CpuManager::SingleCoreRunGuestLoop() {
     auto& kernel = system.Kernel();
-    auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
     while (true) {
         auto* physical_core = &kernel.CurrentPhysicalCore();
         system.EnterDynarmicProfile();
@@ -230,9 +220,10 @@ void CpuManager::SingleCoreRunGuestLoop() {
         thread->SetPhantomMode(true);
         system.CoreTiming().Advance();
         thread->SetPhantomMode(false);
+        physical_core->ArmInterface().ClearExclusiveState();
         PreemptSingleCore();
         auto& scheduler = kernel.Scheduler(current_core);
-        scheduler.TryDoContextSwitch();
+        scheduler.RescheduleCurrentCore();
     }
 }
 
@@ -244,51 +235,53 @@ void CpuManager::SingleCoreRunIdleThread() {
         system.CoreTiming().AddTicks(1000U);
         idle_count++;
         auto& scheduler = physical_core.Scheduler();
-        scheduler.TryDoContextSwitch();
+        scheduler.RescheduleCurrentCore();
     }
 }
 
 void CpuManager::SingleCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
-    {
-        auto& sched = kernel.CurrentScheduler();
-        sched.OnThreadStart();
-    }
+    kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
         auto core = kernel.GetCurrentHostThreadID();
-        auto& scheduler = kernel.CurrentScheduler();
+        auto& scheduler = *kernel.CurrentScheduler();
         Kernel::Thread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
         ASSERT(scheduler.ContextSwitchPending());
         ASSERT(core == kernel.GetCurrentHostThreadID());
-        scheduler.TryDoContextSwitch();
+        scheduler.RescheduleCurrentCore();
     }
 }
 
 void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
-    std::size_t old_core = current_core;
-    auto& scheduler = system.Kernel().Scheduler(old_core);
-    Kernel::Thread* current_thread = scheduler.GetCurrentThread();
-    if (idle_count >= 4 || from_running_enviroment) {
-        if (!from_running_enviroment) {
-            system.CoreTiming().Idle();
-            idle_count = 0;
-        }
-        current_thread->SetPhantomMode(true);
-        system.CoreTiming().Advance();
-        current_thread->SetPhantomMode(false);
-    }
-    current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
-    system.CoreTiming().ResetTicks();
-    scheduler.Unload();
-    auto& next_scheduler = system.Kernel().Scheduler(current_core);
-    Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
-    /// May have changed scheduler
-    auto& current_scheduler = system.Kernel().Scheduler(current_core);
-    current_scheduler.Reload();
-    auto* currrent_thread2 = current_scheduler.GetCurrentThread();
-    if (!currrent_thread2->IsIdleThread()) {
-        idle_count = 0;
-    }
+    {
+        auto& scheduler = system.Kernel().Scheduler(current_core);
+        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+        if (idle_count >= 4 || from_running_enviroment) {
+            if (!from_running_enviroment) {
+                system.CoreTiming().Idle();
+                idle_count = 0;
+            }
+            current_thread->SetPhantomMode(true);
+            system.CoreTiming().Advance();
+            current_thread->SetPhantomMode(false);
+        }
+        current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
+        system.CoreTiming().ResetTicks();
+        scheduler.Unload(scheduler.GetCurrentThread());
+
+        auto& next_scheduler = system.Kernel().Scheduler(current_core);
+        Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
+    }
+
+    // May have changed scheduler
+    {
+        auto& scheduler = system.Kernel().Scheduler(current_core);
+        scheduler.Reload(scheduler.GetCurrentThread());
+        auto* currrent_thread2 = scheduler.GetCurrentThread();
+        if (!currrent_thread2->IsIdleThread()) {
+            idle_count = 0;
+        }
+    }
 }
 
@@ -369,8 +362,7 @@ void CpuManager::RunThread(std::size_t core) {
         return;
     }
 
-    auto& scheduler = system.Kernel().CurrentScheduler();
-    Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+    auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
     data.is_running = true;
     Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
     data.is_running = false;
src/core/hle/kernel/address_arbiter.cpp

@@ -12,8 +12,9 @@
 #include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/result.h"
@@ -58,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
 }
 
 ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     const std::vector<std::shared_ptr<Thread>> waiting_threads =
         GetThreadsWaitingOnAddress(address);
     WakeThreads(waiting_threads, num_to_wake);
@@ -67,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
 
 ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                               s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -92,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
 
 ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                          s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -153,11 +154,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
                                                     bool should_decrement) {
     auto& memory = system.Memory();
     auto& kernel = system.Kernel();
-    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
 
     Handle event_handle = InvalidHandle;
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
 
         if (current_thread->IsPendingTermination()) {
             lock.CancelSleep();
@@ -210,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
@@ -223,11 +224,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
 ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
     auto& memory = system.Memory();
     auto& kernel = system.Kernel();
-    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
 
     Handle event_handle = InvalidHandle;
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
 
         if (current_thread->IsPendingTermination()) {
             lock.CancelSleep();
@@ -265,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
 }
 
 {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
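The locking pattern the arbiter relies on is visible in the hunks above. The sketch below condenses it; it is hypothetical rather than actual PR code, error-result details are omitted, and only the constructor shapes and CancelSleep() call seen in this diff are assumed.

// Hypothetical condensed sketch (not PR code) of a wait under the renamed scoped locks.
void SketchWaitPattern(Kernel::KernelCore& kernel, Kernel::Thread* current_thread, s64 timeout) {
    Kernel::Handle event_handle = Kernel::InvalidHandle;
    {
        // Takes the global scheduler lock and arms a wake-up event for 'timeout' nanoseconds;
        // both are committed when the scope ends.
        Kernel::KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep(); // abort so the destructor does not schedule a wake-up
        }
        // ... queue the thread on the address being waited on ...
    }
    {
        Kernel::KScopedSchedulerLock lock(kernel); // plain scoped scheduler lock, no sleep
        // ... post-wake cleanup, e.g. removing the thread from the wait list ...
    }
}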
src/core/hle/kernel/global_scheduler_context.cpp (new file)
@@ -0,0 +1,52 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <mutex>

#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
    : kernel{kernel}, scheduler_lock{kernel} {}

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.push_back(std::move(thread));
}

void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
}

void GlobalSchedulerContext::PreemptThreads() {
    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
    // ordered from Core 0 to Core 3.
    static constexpr std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities{
        59,
        59,
        59,
        63,
    };

    ASSERT(IsLocked());
    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        const u32 priority = preemption_priorities[core_id];
        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
    }
}

bool GlobalSchedulerContext::IsLocked() const {
    return scheduler_lock.IsLockedByCurrentThread();
}

} // namespace Kernel
src/core/hle/kernel/global_scheduler_context.h (new file)
@@ -0,0 +1,81 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <vector>

#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

class KernelCore;
class SchedulerLock;

using KSchedulerPriorityQueue =
    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
constexpr s32 HighestCoreMigrationAllowedPriority = 2;

class GlobalSchedulerContext final {
    friend class KScheduler;

public:
    using LockType = KAbstractSchedulerLock<KScheduler>;

    explicit GlobalSchedulerContext(KernelCore& kernel);
    ~GlobalSchedulerContext();

    /// Adds a new thread to the scheduler
    void AddThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the scheduler
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Returns a list of all threads managed by the scheduler
    [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
        return thread_list;
    }

    /**
     * Rotates the scheduling queues of threads at a preemption priority and then does
     * some core rebalancing. Preemption priorities can be found in the array
     * 'preemption_priorities'.
     *
     * @note This operation happens every 10ms.
     */
    void PreemptThreads();

    /// Returns true if the global scheduler lock is acquired
    bool IsLocked() const;

    [[nodiscard]] LockType& SchedulerLock() {
        return scheduler_lock;
    }

    [[nodiscard]] const LockType& SchedulerLock() const {
        return scheduler_lock;
    }

private:
    friend class KScopedSchedulerLock;
    friend class KScopedSchedulerLockAndSleep;

    KernelCore& kernel;

    std::atomic_bool scheduler_update_needed{};
    KSchedulerPriorityQueue priority_queue;
    LockType scheduler_lock;

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<std::shared_ptr<Thread>> thread_list;
    Common::SpinLock global_list_guard{};
};

} // namespace Kernel
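The accessors above are what KScheduler and the scoped lock types use. The sketch below is hypothetical (function name and call site invented) and only shows how code can honour the locking contract documented here: PreemptThreads() asserts IsLocked(), so callers must already hold the scheduler lock.

// Hypothetical sketch (not PR code): honouring the locking contract of this class.
void RotateAllCores(Kernel::KernelCore& kernel, Kernel::GlobalSchedulerContext& ctx) {
    // PreemptThreads() asserts IsLocked(), so take the scoped scheduler lock first.
    Kernel::KScopedSchedulerLock lock(kernel);
    ASSERT(ctx.IsLocked());
    ctx.PreemptThreads(); // rotates each core's queue at its preemption priority
}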
src/core/hle/kernel/handle_table.cpp

@@ -8,9 +8,9 @@
 #include "core/core.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 
 namespace Kernel {
@@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const {
 
 std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
     if (handle == CurrentThread) {
-        return SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+        return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
     } else if (handle == CurrentProcess) {
         return SharedFrom(kernel.CurrentProcess());
     }
src/core/hle/kernel/hle_ipc.cpp

@@ -17,11 +17,12 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
@@ -56,9 +57,9 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
         writable_event = pair.writable;
     }
 
+    Handle event_handle = InvalidHandle;
     {
-        Handle event_handle = InvalidHandle;
-        SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
         thread->SetHLECallback(
             [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
                 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
@@ -74,9 +75,8 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
         thread->SetStatus(ThreadStatus::WaitHLEEvent);
         thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
         readable_event->AddWaitingThread(thread);
-        lock.Release();
-        thread->SetHLETimeEvent(event_handle);
     }
+    thread->SetHLETimeEvent(event_handle);
 
     is_thread_waiting = true;
 
src/core/hle/kernel/k_affinity_mask.h (new file)
@@ -0,0 +1,58 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hardware_properties.h"

namespace Kernel {

class KAffinityMask {
public:
    constexpr KAffinityMask() = default;

    [[nodiscard]] constexpr u64 GetAffinityMask() const {
        return this->mask;
    }

    constexpr void SetAffinityMask(u64 new_mask) {
        ASSERT((new_mask & ~AllowedAffinityMask) == 0);
        this->mask = new_mask;
    }

    [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
        return this->mask & GetCoreBit(core);
    }

    constexpr void SetAffinity(s32 core, bool set) {
        ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));

        if (set) {
            this->mask |= GetCoreBit(core);
        } else {
            this->mask &= ~GetCoreBit(core);
        }
    }

    constexpr void SetAll() {
        this->mask = AllowedAffinityMask;
    }

private:
    [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
        ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        return (1ULL << core);
    }

    static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;

    u64 mask{};
};

} // namespace Kernel
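A hypothetical illustration of the mask semantics defined above; the core numbers are examples only and the snippet is not part of the PR.

// Hypothetical usage sketch (not part of the PR).
#include "core/hle/kernel/k_affinity_mask.h"

void AffinityExample() {
    Kernel::KAffinityMask affinity{};
    affinity.SetAffinity(0, true);                  // allow core 0
    affinity.SetAffinity(3, true);                  // allow core 3
    const bool runs_on_1 = affinity.GetAffinity(1); // false: core 1 not allowed
    affinity.SetAll();                              // allow every core (0b1111 for 4 cores)
    const u64 raw = affinity.GetAffinityMask();
    (void)runs_on_1;
    (void)raw;
}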
src/core/hle/kernel/k_priority_queue.h (new file)
@@ -0,0 +1,449 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <array>

#include "common/assert.h"
#include "common/bit_set.h"
#include "common/bit_util.h"
#include "common/common_types.h"

namespace Kernel {

class Thread;

template <typename T>
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
    { t.GetAffinityMask() }
    ->std::convertible_to<u64>;
    {t.SetAffinityMask(std::declval<u64>())};

    { t.GetAffinity(std::declval<int32_t>()) }
    ->std::same_as<bool>;
    {t.SetAffinity(std::declval<int32_t>(), std::declval<bool>())};
    {t.SetAll()};
};

template <typename T>
concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
    {typename T::QueueEntry()};
    {(typename T::QueueEntry()).Initialize()};
    {(typename T::QueueEntry()).SetPrev(std::addressof(t))};
    {(typename T::QueueEntry()).SetNext(std::addressof(t))};
    { (typename T::QueueEntry()).GetNext() }
    ->std::same_as<T*>;
    { (typename T::QueueEntry()).GetPrev() }
    ->std::same_as<T*>;
    { t.GetPriorityQueueEntry(std::declval<s32>()) }
    ->std::same_as<typename T::QueueEntry&>;

    {t.GetAffinityMask()};
    { typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() }
    ->KPriorityQueueAffinityMask;

    { t.GetActiveCore() }
    ->std::convertible_to<s32>;
    { t.GetPriority() }
    ->std::convertible_to<s32>;
};

template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
requires KPriorityQueueMember<Member> class KPriorityQueue {
public:
    using AffinityMaskType = typename std::remove_cv_t<
        typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;

    static_assert(LowestPriority >= 0);
    static_assert(HighestPriority >= 0);
    static_assert(LowestPriority >= HighestPriority);
    static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
    static constexpr size_t NumCores = _NumCores;

    static constexpr bool IsValidCore(s32 core) {
        return 0 <= core && core < static_cast<s32>(NumCores);
    }

    static constexpr bool IsValidPriority(s32 priority) {
        return HighestPriority <= priority && priority <= LowestPriority + 1;
    }

private:
    using Entry = typename Member::QueueEntry;

public:
    class KPerCoreQueue {
    private:
        std::array<Entry, NumCores> root{};

    public:
        constexpr KPerCoreQueue() {
            for (auto& per_core_root : root) {
                per_core_root.Initialize();
            }
        }

        constexpr bool PushBack(s32 core, Member* member) {
            // Get the entry associated with the member.
            Entry& member_entry = member->GetPriorityQueueEntry(core);

            // Get the entry associated with the end of the queue.
            Member* tail = this->root[core].GetPrev();
            Entry& tail_entry =
                (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];

            // Link the entries.
            member_entry.SetPrev(tail);
            member_entry.SetNext(nullptr);
            tail_entry.SetNext(member);
            this->root[core].SetPrev(member);

            return tail == nullptr;
        }

        constexpr bool PushFront(s32 core, Member* member) {
            // Get the entry associated with the member.
            Entry& member_entry = member->GetPriorityQueueEntry(core);

            // Get the entry associated with the front of the queue.
            Member* head = this->root[core].GetNext();
            Entry& head_entry =
                (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];

            // Link the entries.
            member_entry.SetPrev(nullptr);
            member_entry.SetNext(head);
            head_entry.SetPrev(member);
            this->root[core].SetNext(member);

            return (head == nullptr);
        }

        constexpr bool Remove(s32 core, Member* member) {
            // Get the entry associated with the member.
            Entry& member_entry = member->GetPriorityQueueEntry(core);

            // Get the entries associated with next and prev.
            Member* prev = member_entry.GetPrev();
            Member* next = member_entry.GetNext();
            Entry& prev_entry =
                (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
            Entry& next_entry =
                (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];

            // Unlink.
            prev_entry.SetNext(next);
            next_entry.SetPrev(prev);

            return (this->GetFront(core) == nullptr);
        }

        constexpr Member* GetFront(s32 core) const {
            return this->root[core].GetNext();
        }
    };

    class KPriorityQueueImpl {
    public:
        constexpr KPriorityQueueImpl() = default;

        constexpr void PushBack(s32 priority, s32 core, Member* member) {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
if (priority > LowestPriority) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this->queues[priority].PushBack(core, member)) {
|
||||||
|
this->available_priorities[core].SetBit(priority);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void PushFront(s32 priority, s32 core, Member* member) {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
if (priority > LowestPriority) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this->queues[priority].PushFront(core, member)) {
|
||||||
|
this->available_priorities[core].SetBit(priority);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void Remove(s32 priority, s32 core, Member* member) {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
if (priority > LowestPriority) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this->queues[priority].Remove(core, member)) {
|
||||||
|
this->available_priorities[core].ClearBit(priority);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetFront(s32 core) const {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
|
||||||
|
const s32 priority =
|
||||||
|
static_cast<s32>(this->available_priorities[core].CountLeadingZero());
|
||||||
|
if (priority <= LowestPriority) {
|
||||||
|
return this->queues[priority].GetFront(core);
|
||||||
|
} else {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetFront(s32 priority, s32 core) const {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
if (priority <= LowestPriority) {
|
||||||
|
return this->queues[priority].GetFront(core);
|
||||||
|
} else {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetNext(s32 core, const Member* member) const {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
|
||||||
|
Member* next = member->GetPriorityQueueEntry(core).GetNext();
|
||||||
|
if (next == nullptr) {
|
||||||
|
const s32 priority = static_cast<s32>(
|
||||||
|
this->available_priorities[core].GetNextSet(member->GetPriority()));
|
||||||
|
if (priority <= LowestPriority) {
|
||||||
|
next = this->queues[priority].GetFront(core);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return next;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void MoveToFront(s32 priority, s32 core, Member* member) {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
if (priority <= LowestPriority) {
|
||||||
|
this->queues[priority].Remove(core, member);
|
||||||
|
this->queues[priority].PushFront(core, member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) {
|
||||||
|
ASSERT(IsValidCore(core));
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
if (priority <= LowestPriority) {
|
||||||
|
this->queues[priority].Remove(core, member);
|
||||||
|
this->queues[priority].PushBack(core, member);
|
||||||
|
return this->queues[priority].GetFront(core);
|
||||||
|
} else {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
std::array<KPerCoreQueue, NumPriority> queues{};
|
||||||
|
std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
|
||||||
|
};
|
||||||
|
|
||||||
|
private:
|
||||||
|
KPriorityQueueImpl scheduled_queue;
|
||||||
|
KPriorityQueueImpl suggested_queue;
|
||||||
|
|
||||||
|
private:
|
||||||
|
constexpr void ClearAffinityBit(u64& affinity, s32 core) {
|
||||||
|
affinity &= ~(u64(1) << core);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr s32 GetNextCore(u64& affinity) {
|
||||||
|
const s32 core = Common::CountTrailingZeroes64(affinity);
|
||||||
|
ClearAffinityBit(affinity, core);
|
||||||
|
return core;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void PushBack(s32 priority, Member* member) {
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
// Push onto the scheduled queue for its core, if we can.
|
||||||
|
u64 affinity = member->GetAffinityMask().GetAffinityMask();
|
||||||
|
if (const s32 core = member->GetActiveCore(); core >= 0) {
|
||||||
|
this->scheduled_queue.PushBack(priority, core, member);
|
||||||
|
ClearAffinityBit(affinity, core);
|
||||||
|
}
|
||||||
|
|
||||||
|
// And suggest the thread for all other cores.
|
||||||
|
while (affinity) {
|
||||||
|
this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void PushFront(s32 priority, Member* member) {
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
// Push onto the scheduled queue for its core, if we can.
|
||||||
|
u64 affinity = member->GetAffinityMask().GetAffinityMask();
|
||||||
|
if (const s32 core = member->GetActiveCore(); core >= 0) {
|
||||||
|
this->scheduled_queue.PushFront(priority, core, member);
|
||||||
|
ClearAffinityBit(affinity, core);
|
||||||
|
}
|
||||||
|
|
||||||
|
// And suggest the thread for all other cores.
|
||||||
|
// Note: Nintendo pushes onto the back of the suggested queue, not the front.
|
||||||
|
while (affinity) {
|
||||||
|
this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void Remove(s32 priority, Member* member) {
|
||||||
|
ASSERT(IsValidPriority(priority));
|
||||||
|
|
||||||
|
// Remove from the scheduled queue for its core.
|
||||||
|
u64 affinity = member->GetAffinityMask().GetAffinityMask();
|
||||||
|
if (const s32 core = member->GetActiveCore(); core >= 0) {
|
||||||
|
this->scheduled_queue.Remove(priority, core, member);
|
||||||
|
ClearAffinityBit(affinity, core);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from the suggested queue for all other cores.
|
||||||
|
while (affinity) {
|
||||||
|
this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
constexpr KPriorityQueue() = default;
|
||||||
|
|
||||||
|
// Getters.
|
||||||
|
constexpr Member* GetScheduledFront(s32 core) const {
|
||||||
|
return this->scheduled_queue.GetFront(core);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
|
||||||
|
return this->scheduled_queue.GetFront(priority, core);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetSuggestedFront(s32 core) const {
|
||||||
|
return this->suggested_queue.GetFront(core);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
|
||||||
|
return this->suggested_queue.GetFront(priority, core);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
|
||||||
|
return this->scheduled_queue.GetNext(core, member);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
|
||||||
|
return this->suggested_queue.GetNext(core, member);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
|
||||||
|
return member->GetPriorityQueueEntry(core).GetNext();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutators.
|
||||||
|
constexpr void PushBack(Member* member) {
|
||||||
|
this->PushBack(member->GetPriority(), member);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void Remove(Member* member) {
|
||||||
|
this->Remove(member->GetPriority(), member);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void MoveToScheduledFront(Member* member) {
|
||||||
|
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Thread* MoveToScheduledBack(Member* member) {
|
||||||
|
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
|
||||||
|
member);
|
||||||
|
}
|
||||||
|
|
||||||
|
// First class fancy operations.
|
||||||
|
constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
|
||||||
|
ASSERT(IsValidPriority(prev_priority));
|
||||||
|
|
||||||
|
// Remove the member from the queues.
|
||||||
|
const s32 new_priority = member->GetPriority();
|
||||||
|
this->Remove(prev_priority, member);
|
||||||
|
|
||||||
|
// And enqueue. If the member is running, we want to keep it running.
|
||||||
|
if (is_running) {
|
||||||
|
this->PushFront(new_priority, member);
|
||||||
|
} else {
|
||||||
|
this->PushBack(new_priority, member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
|
||||||
|
Member* member) {
|
||||||
|
// Get the new information.
|
||||||
|
const s32 priority = member->GetPriority();
|
||||||
|
const AffinityMaskType& new_affinity = member->GetAffinityMask();
|
||||||
|
const s32 new_core = member->GetActiveCore();
|
||||||
|
|
||||||
|
// Remove the member from all queues it was in before.
|
||||||
|
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
|
||||||
|
if (prev_affinity.GetAffinity(core)) {
|
||||||
|
if (core == prev_core) {
|
||||||
|
this->scheduled_queue.Remove(priority, core, member);
|
||||||
|
} else {
|
||||||
|
this->suggested_queue.Remove(priority, core, member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// And add the member to all queues it should be in now.
|
||||||
|
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
|
||||||
|
if (new_affinity.GetAffinity(core)) {
|
||||||
|
if (core == new_core) {
|
||||||
|
this->scheduled_queue.PushBack(priority, core, member);
|
||||||
|
} else {
|
||||||
|
this->suggested_queue.PushBack(priority, core, member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
|
||||||
|
// Get the new information.
|
||||||
|
const s32 new_core = member->GetActiveCore();
|
||||||
|
const s32 priority = member->GetPriority();
|
||||||
|
|
||||||
|
// We don't need to do anything if the core is the same.
|
||||||
|
if (prev_core != new_core) {
|
||||||
|
// Remove from the scheduled queue for the previous core.
|
||||||
|
if (prev_core >= 0) {
|
||||||
|
this->scheduled_queue.Remove(priority, prev_core, member);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from the suggested queue and add to the scheduled queue for the new core.
|
||||||
|
if (new_core >= 0) {
|
||||||
|
this->suggested_queue.Remove(priority, new_core, member);
|
||||||
|
if (to_front) {
|
||||||
|
this->scheduled_queue.PushFront(priority, new_core, member);
|
||||||
|
} else {
|
||||||
|
this->scheduled_queue.PushBack(priority, new_core, member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add to the suggested queue for the previous core.
|
||||||
|
if (prev_core >= 0) {
|
||||||
|
this->suggested_queue.PushBack(priority, prev_core, member);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace Kernel
|
784
src/core/hle/kernel/k_scheduler.cpp
Normal file
784
src/core/hle/kernel/k_scheduler.cpp
Normal file
|
@ -0,0 +1,784 @@
|
||||||
|
// Copyright 2020 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||||
|
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||||
|
|
||||||
|
#include "common/assert.h"
|
||||||
|
#include "common/bit_util.h"
|
||||||
|
#include "common/fiber.h"
|
||||||
|
#include "common/logging/log.h"
|
||||||
|
#include "core/arm/arm_interface.h"
|
||||||
|
#include "core/core.h"
|
||||||
|
#include "core/core_timing.h"
|
||||||
|
#include "core/cpu_manager.h"
|
||||||
|
#include "core/hle/kernel/k_scheduler.h"
|
||||||
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||||
|
#include "core/hle/kernel/kernel.h"
|
||||||
|
#include "core/hle/kernel/physical_core.h"
|
||||||
|
#include "core/hle/kernel/process.h"
|
||||||
|
#include "core/hle/kernel/thread.h"
|
||||||
|
#include "core/hle/kernel/time_manager.h"
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
static void IncrementScheduledCount(Kernel::Thread* thread) {
|
||||||
|
if (auto process = thread->GetOwnerProcess(); process) {
|
||||||
|
process->IncrementScheduledCount();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
|
||||||
|
Core::EmuThreadHandle global_thread) {
|
||||||
|
u32 current_core = global_thread.host_handle;
|
||||||
|
bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
|
||||||
|
(current_core < Core::Hardware::NUM_CPU_CORES);
|
||||||
|
|
||||||
|
while (cores_pending_reschedule != 0) {
|
||||||
|
u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
|
||||||
|
ASSERT(core < Core::Hardware::NUM_CPU_CORES);
|
||||||
|
if (!must_context_switch || core != current_core) {
|
||||||
|
auto& phys_core = kernel.PhysicalCore(core);
|
||||||
|
phys_core.Interrupt();
|
||||||
|
} else {
|
||||||
|
must_context_switch = true;
|
||||||
|
}
|
||||||
|
cores_pending_reschedule &= ~(1ULL << core);
|
||||||
|
}
|
||||||
|
if (must_context_switch) {
|
||||||
|
auto core_scheduler = kernel.CurrentScheduler();
|
||||||
|
kernel.ExitSVCProfile();
|
||||||
|
core_scheduler->RescheduleCurrentCore();
|
||||||
|
kernel.EnterSVCProfile();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
|
||||||
|
std::scoped_lock lock{guard};
|
||||||
|
if (Thread* prev_highest_thread = this->state.highest_priority_thread;
|
||||||
|
prev_highest_thread != highest_thread) {
|
||||||
|
if (prev_highest_thread != nullptr) {
|
||||||
|
IncrementScheduledCount(prev_highest_thread);
|
||||||
|
prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
|
||||||
|
}
|
||||||
|
if (this->state.should_count_idle) {
|
||||||
|
if (highest_thread != nullptr) {
|
||||||
|
// if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
|
||||||
|
// process->SetRunningThread(this->core_id, highest_thread,
|
||||||
|
// this->state.idle_count);
|
||||||
|
//}
|
||||||
|
} else {
|
||||||
|
this->state.idle_count++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
this->state.highest_priority_thread = highest_thread;
|
||||||
|
this->state.needs_scheduling = true;
|
||||||
|
return (1ULL << this->core_id);
|
||||||
|
} else {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
|
||||||
|
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||||
|
|
||||||
|
// Clear that we need to update.
|
||||||
|
ClearSchedulerUpdateNeeded(kernel);
|
||||||
|
|
||||||
|
u64 cores_needing_scheduling = 0, idle_cores = 0;
|
||||||
|
Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
|
||||||
|
auto& priority_queue = GetPriorityQueue(kernel);
|
||||||
|
|
||||||
|
/// We want to go over all cores, finding the highest priority thread and determining if
|
||||||
|
/// scheduling is needed for that core.
|
||||||
|
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
|
||||||
|
Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
|
||||||
|
if (top_thread != nullptr) {
|
||||||
|
// If the thread has no waiters, we need to check if the process has a thread pinned.
|
||||||
|
// TODO(bunnei): Implement thread pinning
|
||||||
|
} else {
|
||||||
|
idle_cores |= (1ULL << core_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
top_threads[core_id] = top_thread;
|
||||||
|
cores_needing_scheduling |=
|
||||||
|
kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
|
||||||
|
while (idle_cores != 0) {
|
||||||
|
u32 core_id = Common::CountTrailingZeroes64(idle_cores);
|
||||||
|
if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
|
||||||
|
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
|
||||||
|
size_t num_candidates = 0;
|
||||||
|
|
||||||
|
// While we have a suggested thread, try to migrate it!
|
||||||
|
while (suggested != nullptr) {
|
||||||
|
// Check if the suggested thread is the top thread on its core.
|
||||||
|
const s32 suggested_core = suggested->GetActiveCore();
|
||||||
|
if (Thread* top_thread =
|
||||||
|
(suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
|
||||||
|
top_thread != suggested) {
|
||||||
|
// Make sure we're not dealing with threads too high priority for migration.
|
||||||
|
if (top_thread != nullptr &&
|
||||||
|
top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The suggested thread isn't bound to its core, so we can migrate it!
|
||||||
|
suggested->SetActiveCore(core_id);
|
||||||
|
priority_queue.ChangeCore(suggested_core, suggested);
|
||||||
|
|
||||||
|
top_threads[core_id] = suggested;
|
||||||
|
cores_needing_scheduling |=
|
||||||
|
kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note this core as a candidate for migration.
|
||||||
|
ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
|
||||||
|
migration_candidates[num_candidates++] = suggested_core;
|
||||||
|
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
|
||||||
|
// candidate cores' top threads.
|
||||||
|
if (suggested == nullptr) {
|
||||||
|
for (size_t i = 0; i < num_candidates; i++) {
|
||||||
|
// Check if there's some other thread that can run on the candidate core.
|
||||||
|
const s32 candidate_core = migration_candidates[i];
|
||||||
|
suggested = top_threads[candidate_core];
|
||||||
|
if (Thread* next_on_candidate_core =
|
||||||
|
priority_queue.GetScheduledNext(candidate_core, suggested);
|
||||||
|
next_on_candidate_core != nullptr) {
|
||||||
|
// The candidate core can run some other thread! We'll migrate its current
|
||||||
|
// top thread to us.
|
||||||
|
top_threads[candidate_core] = next_on_candidate_core;
|
||||||
|
cores_needing_scheduling |=
|
||||||
|
kernel.Scheduler(candidate_core)
|
||||||
|
.UpdateHighestPriorityThread(top_threads[candidate_core]);
|
||||||
|
|
||||||
|
// Perform the migration.
|
||||||
|
suggested->SetActiveCore(core_id);
|
||||||
|
priority_queue.ChangeCore(candidate_core, suggested);
|
||||||
|
|
||||||
|
top_threads[core_id] = suggested;
|
||||||
|
cores_needing_scheduling |=
|
||||||
|
kernel.Scheduler(core_id).UpdateHighestPriorityThread(
|
||||||
|
top_threads[core_id]);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
idle_cores &= ~(1ULL << core_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
return cores_needing_scheduling;
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
|
||||||
|
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||||
|
|
||||||
|
// Check if the state has changed, because if it hasn't there's nothing to do.
|
||||||
|
const auto cur_state = thread->scheduling_state;
|
||||||
|
if (cur_state == old_state) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the priority queues.
|
||||||
|
if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||||
|
// If we were previously runnable, then we're not runnable now, and we should remove.
|
||||||
|
GetPriorityQueue(kernel).Remove(thread);
|
||||||
|
IncrementScheduledCount(thread);
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
} else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||||
|
// If we're now runnable, then we weren't previously, and we should add.
|
||||||
|
GetPriorityQueue(kernel).PushBack(thread);
|
||||||
|
IncrementScheduledCount(thread);
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
|
||||||
|
u32 old_priority) {
|
||||||
|
|
||||||
|
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||||
|
|
||||||
|
// If the thread is runnable, we want to change its priority in the queue.
|
||||||
|
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||||
|
GetPriorityQueue(kernel).ChangePriority(
|
||||||
|
old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
|
||||||
|
IncrementScheduledCount(thread);
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
|
||||||
|
const KAffinityMask& old_affinity, s32 old_core) {
|
||||||
|
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||||
|
|
||||||
|
// If the thread is runnable, we want to change its affinity in the queue.
|
||||||
|
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||||
|
GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
|
||||||
|
IncrementScheduledCount(thread);
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
|
||||||
|
ASSERT(system.GlobalSchedulerContext().IsLocked());
|
||||||
|
|
||||||
|
// Get a reference to the priority queue.
|
||||||
|
auto& kernel = system.Kernel();
|
||||||
|
auto& priority_queue = GetPriorityQueue(kernel);
|
||||||
|
|
||||||
|
// Rotate the front of the queue to the end.
|
||||||
|
Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
|
||||||
|
Thread* next_thread = nullptr;
|
||||||
|
if (top_thread != nullptr) {
|
||||||
|
next_thread = priority_queue.MoveToScheduledBack(top_thread);
|
||||||
|
if (next_thread != top_thread) {
|
||||||
|
IncrementScheduledCount(top_thread);
|
||||||
|
IncrementScheduledCount(next_thread);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// While we have a suggested thread, try to migrate it!
|
||||||
|
{
|
||||||
|
Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
|
||||||
|
while (suggested != nullptr) {
|
||||||
|
// Check if the suggested thread is the top thread on its core.
|
||||||
|
const s32 suggested_core = suggested->GetActiveCore();
|
||||||
|
if (Thread* top_on_suggested_core =
|
||||||
|
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
|
||||||
|
: nullptr;
|
||||||
|
top_on_suggested_core != suggested) {
|
||||||
|
// If the next thread is a new thread that has been waiting longer than our
|
||||||
|
// suggestion, we prefer it to our suggestion.
|
||||||
|
if (top_thread != next_thread && next_thread != nullptr &&
|
||||||
|
next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
|
||||||
|
suggested = nullptr;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're allowed to do a migration, do one.
|
||||||
|
// NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
|
||||||
|
// to the front of the queue.
|
||||||
|
if (top_on_suggested_core == nullptr ||
|
||||||
|
top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
|
||||||
|
suggested->SetActiveCore(core_id);
|
||||||
|
priority_queue.ChangeCore(suggested_core, suggested, true);
|
||||||
|
IncrementScheduledCount(suggested);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the next suggestion.
|
||||||
|
suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now that we might have migrated a thread with the same priority, check if we can do better.
|
||||||
|
|
||||||
|
{
|
||||||
|
Thread* best_thread = priority_queue.GetScheduledFront(core_id);
|
||||||
|
if (best_thread == GetCurrentThread()) {
|
||||||
|
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the best thread we can choose has a priority the same or worse than ours, try to
|
||||||
|
// migrate a higher priority thread.
|
||||||
|
if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
|
||||||
|
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
|
||||||
|
while (suggested != nullptr) {
|
||||||
|
// If the suggestion's priority is the same as ours, don't bother.
|
||||||
|
if (suggested->GetPriority() >= best_thread->GetPriority()) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the suggested thread is the top thread on its core.
|
||||||
|
const s32 suggested_core = suggested->GetActiveCore();
|
||||||
|
if (Thread* top_on_suggested_core =
|
||||||
|
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
|
||||||
|
: nullptr;
|
||||||
|
top_on_suggested_core != suggested) {
|
||||||
|
// If we're allowed to do a migration, do one.
|
||||||
|
// NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
|
||||||
|
// suggestion to the front of the queue.
|
||||||
|
if (top_on_suggested_core == nullptr ||
|
||||||
|
top_on_suggested_core->GetPriority() >=
|
||||||
|
HighestCoreMigrationAllowedPriority) {
|
||||||
|
suggested->SetActiveCore(core_id);
|
||||||
|
priority_queue.ChangeCore(suggested_core, suggested, true);
|
||||||
|
IncrementScheduledCount(suggested);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the next suggestion.
|
||||||
|
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// After a rotation, we need a scheduler update.
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool KScheduler::CanSchedule(KernelCore& kernel) {
|
||||||
|
return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
|
||||||
|
return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
|
||||||
|
kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
|
||||||
|
kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::DisableScheduling(KernelCore& kernel) {
|
||||||
|
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
|
||||||
|
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
|
||||||
|
scheduler->GetCurrentThread()->DisableDispatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
|
||||||
|
Core::EmuThreadHandle global_thread) {
|
||||||
|
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
|
||||||
|
scheduler->GetCurrentThread()->EnableDispatch();
|
||||||
|
}
|
||||||
|
RescheduleCores(kernel, cores_needing_scheduling, global_thread);
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
|
||||||
|
if (IsSchedulerUpdateNeeded(kernel)) {
|
||||||
|
return UpdateHighestPriorityThreadsImpl(kernel);
|
||||||
|
} else {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
|
||||||
|
return kernel.GlobalSchedulerContext().priority_queue;
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::YieldWithoutCoreMigration() {
|
||||||
|
auto& kernel = system.Kernel();
|
||||||
|
|
||||||
|
// Validate preconditions.
|
||||||
|
ASSERT(CanSchedule(kernel));
|
||||||
|
ASSERT(kernel.CurrentProcess() != nullptr);
|
||||||
|
|
||||||
|
// Get the current thread and process.
|
||||||
|
Thread& cur_thread = *GetCurrentThread();
|
||||||
|
Process& cur_process = *kernel.CurrentProcess();
|
||||||
|
|
||||||
|
// If the thread's yield count matches, there's nothing for us to do.
|
||||||
|
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a reference to the priority queue.
|
||||||
|
auto& priority_queue = GetPriorityQueue(kernel);
|
||||||
|
|
||||||
|
// Perform the yield.
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock lock(kernel);
|
||||||
|
|
||||||
|
const auto cur_state = cur_thread.scheduling_state;
|
||||||
|
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||||
|
// Put the current thread at the back of the queue.
|
||||||
|
Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
|
||||||
|
IncrementScheduledCount(std::addressof(cur_thread));
|
||||||
|
|
||||||
|
// If the next thread is different, we have an update to perform.
|
||||||
|
if (next_thread != std::addressof(cur_thread)) {
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
} else {
|
||||||
|
// Otherwise, set the thread's yield count so that we won't waste work until the
|
||||||
|
// process is scheduled again.
|
||||||
|
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::YieldWithCoreMigration() {
|
||||||
|
auto& kernel = system.Kernel();
|
||||||
|
|
||||||
|
// Validate preconditions.
|
||||||
|
ASSERT(CanSchedule(kernel));
|
||||||
|
ASSERT(kernel.CurrentProcess() != nullptr);
|
||||||
|
|
||||||
|
// Get the current thread and process.
|
||||||
|
Thread& cur_thread = *GetCurrentThread();
|
||||||
|
Process& cur_process = *kernel.CurrentProcess();
|
||||||
|
|
||||||
|
// If the thread's yield count matches, there's nothing for us to do.
|
||||||
|
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a reference to the priority queue.
|
||||||
|
auto& priority_queue = GetPriorityQueue(kernel);
|
||||||
|
|
||||||
|
// Perform the yield.
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock lock(kernel);
|
||||||
|
|
||||||
|
const auto cur_state = cur_thread.scheduling_state;
|
||||||
|
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||||
|
// Get the current active core.
|
||||||
|
const s32 core_id = cur_thread.GetActiveCore();
|
||||||
|
|
||||||
|
// Put the current thread at the back of the queue.
|
||||||
|
Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
|
||||||
|
IncrementScheduledCount(std::addressof(cur_thread));
|
||||||
|
|
||||||
|
// While we have a suggested thread, try to migrate it!
|
||||||
|
bool recheck = false;
|
||||||
|
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
|
||||||
|
while (suggested != nullptr) {
|
||||||
|
// Check if the suggested thread is the thread running on its core.
|
||||||
|
const s32 suggested_core = suggested->GetActiveCore();
|
||||||
|
|
||||||
|
if (Thread* running_on_suggested_core =
|
||||||
|
(suggested_core >= 0)
|
||||||
|
? kernel.Scheduler(suggested_core).state.highest_priority_thread
|
||||||
|
: nullptr;
|
||||||
|
running_on_suggested_core != suggested) {
|
||||||
|
// If the current thread's priority is higher than our suggestion's we prefer
|
||||||
|
// the next thread to the suggestion. We also prefer the next thread when the
|
||||||
|
// current thread's priority is equal to the suggestions, but the next thread
|
||||||
|
// has been waiting longer.
|
||||||
|
if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
|
||||||
|
(suggested->GetPriority() == cur_thread.GetPriority() &&
|
||||||
|
next_thread != std::addressof(cur_thread) &&
|
||||||
|
next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
|
||||||
|
suggested = nullptr;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're allowed to do a migration, do one.
|
||||||
|
// NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
|
||||||
|
// suggestion to the front of the queue.
|
||||||
|
if (running_on_suggested_core == nullptr ||
|
||||||
|
running_on_suggested_core->GetPriority() >=
|
||||||
|
HighestCoreMigrationAllowedPriority) {
|
||||||
|
suggested->SetActiveCore(core_id);
|
||||||
|
priority_queue.ChangeCore(suggested_core, suggested, true);
|
||||||
|
IncrementScheduledCount(suggested);
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
// We couldn't perform a migration, but we should check again on a future
|
||||||
|
// yield.
|
||||||
|
recheck = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the next suggestion.
|
||||||
|
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we still have a suggestion or the next thread is different, we have an update to
|
||||||
|
// perform.
|
||||||
|
if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
} else if (!recheck) {
|
||||||
|
// Otherwise if we don't need to re-check, set the thread's yield count so that we
|
||||||
|
// won't waste work until the process is scheduled again.
|
||||||
|
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::YieldToAnyThread() {
|
||||||
|
auto& kernel = system.Kernel();
|
||||||
|
|
||||||
|
// Validate preconditions.
|
||||||
|
ASSERT(CanSchedule(kernel));
|
||||||
|
ASSERT(kernel.CurrentProcess() != nullptr);
|
||||||
|
|
||||||
|
// Get the current thread and process.
|
||||||
|
Thread& cur_thread = *GetCurrentThread();
|
||||||
|
Process& cur_process = *kernel.CurrentProcess();
|
||||||
|
|
||||||
|
// If the thread's yield count matches, there's nothing for us to do.
|
||||||
|
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a reference to the priority queue.
|
||||||
|
auto& priority_queue = GetPriorityQueue(kernel);
|
||||||
|
|
||||||
|
// Perform the yield.
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock lock(kernel);
|
||||||
|
|
||||||
|
const auto cur_state = cur_thread.scheduling_state;
|
||||||
|
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||||
|
// Get the current active core.
|
||||||
|
const s32 core_id = cur_thread.GetActiveCore();
|
||||||
|
|
||||||
|
// Migrate the current thread to core -1.
|
||||||
|
cur_thread.SetActiveCore(-1);
|
||||||
|
priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
|
||||||
|
IncrementScheduledCount(std::addressof(cur_thread));
|
||||||
|
|
||||||
|
// If there's nothing scheduled, we can try to perform a migration.
|
||||||
|
if (priority_queue.GetScheduledFront(core_id) == nullptr) {
|
||||||
|
// While we have a suggested thread, try to migrate it!
|
||||||
|
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
|
||||||
|
while (suggested != nullptr) {
|
||||||
|
// Check if the suggested thread is the top thread on its core.
|
||||||
|
const s32 suggested_core = suggested->GetActiveCore();
|
||||||
|
if (Thread* top_on_suggested_core =
|
||||||
|
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
|
||||||
|
: nullptr;
|
||||||
|
top_on_suggested_core != suggested) {
|
||||||
|
// If we're allowed to do a migration, do one.
|
||||||
|
if (top_on_suggested_core == nullptr ||
|
||||||
|
top_on_suggested_core->GetPriority() >=
|
||||||
|
HighestCoreMigrationAllowedPriority) {
|
||||||
|
suggested->SetActiveCore(core_id);
|
||||||
|
priority_queue.ChangeCore(suggested_core, suggested);
|
||||||
|
IncrementScheduledCount(suggested);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Regardless of whether we migrated, we had a candidate, so we're done.
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the next suggestion.
|
||||||
|
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the suggestion is different from the current thread, we need to perform an
|
||||||
|
// update.
|
||||||
|
if (suggested != std::addressof(cur_thread)) {
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
} else {
|
||||||
|
// Otherwise, set the thread's yield count so that we won't waste work until the
|
||||||
|
// process is scheduled again.
|
||||||
|
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Otherwise, we have an update to perform.
|
||||||
|
SetSchedulerUpdateNeeded(kernel);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
KScheduler::KScheduler(Core::System& system, std::size_t core_id)
|
||||||
|
: system(system), core_id(core_id) {
|
||||||
|
switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
|
||||||
|
this->state.needs_scheduling = true;
|
||||||
|
this->state.interrupt_task_thread_runnable = false;
|
||||||
|
this->state.should_count_idle = false;
|
||||||
|
this->state.idle_count = 0;
|
||||||
|
this->state.idle_thread_stack = nullptr;
|
||||||
|
this->state.highest_priority_thread = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
KScheduler::~KScheduler() = default;
|
||||||
|
|
||||||
|
Thread* KScheduler::GetCurrentThread() const {
|
||||||
|
if (current_thread) {
|
||||||
|
return current_thread;
|
||||||
|
}
|
||||||
|
return idle_thread;
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 KScheduler::GetLastContextSwitchTicks() const {
|
||||||
|
return last_context_switch_time;
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::RescheduleCurrentCore() {
|
||||||
|
ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
|
||||||
|
|
||||||
|
auto& phys_core = system.Kernel().PhysicalCore(core_id);
|
||||||
|
if (phys_core.IsInterrupted()) {
|
||||||
|
phys_core.ClearInterrupt();
|
||||||
|
}
|
||||||
|
guard.lock();
|
||||||
|
if (this->state.needs_scheduling) {
|
||||||
|
Schedule();
|
||||||
|
} else {
|
||||||
|
guard.unlock();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::OnThreadStart() {
|
||||||
|
SwitchContextStep2();
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::Unload(Thread* thread) {
|
||||||
|
if (thread) {
|
||||||
|
thread->SetIsRunning(false);
|
||||||
|
if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
|
||||||
|
system.ArmInterface(core_id).ExceptionalExit();
|
||||||
|
thread->SetContinuousOnSVC(false);
|
||||||
|
}
|
||||||
|
if (!thread->IsHLEThread() && !thread->HasExited()) {
|
||||||
|
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
|
||||||
|
cpu_core.SaveContext(thread->GetContext32());
|
||||||
|
cpu_core.SaveContext(thread->GetContext64());
|
||||||
|
// Save the TPIDR_EL0 system register in case it was modified.
|
||||||
|
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
|
||||||
|
cpu_core.ClearExclusiveState();
|
||||||
|
}
|
||||||
|
thread->context_guard.unlock();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::Reload(Thread* thread) {
|
||||||
|
if (thread) {
|
||||||
|
ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
|
||||||
|
"Thread must be runnable.");
|
||||||
|
|
||||||
|
// Cancel any outstanding wakeup events for this thread
|
||||||
|
thread->SetIsRunning(true);
|
||||||
|
thread->SetWasRunning(false);
|
||||||
|
|
||||||
|
auto* const thread_owner_process = thread->GetOwnerProcess();
|
||||||
|
if (thread_owner_process != nullptr) {
|
||||||
|
system.Kernel().MakeCurrentProcess(thread_owner_process);
|
||||||
|
}
|
||||||
|
if (!thread->IsHLEThread()) {
|
||||||
|
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
|
||||||
|
cpu_core.LoadContext(thread->GetContext32());
|
||||||
|
cpu_core.LoadContext(thread->GetContext64());
|
||||||
|
cpu_core.SetTlsAddress(thread->GetTLSAddress());
|
||||||
|
cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
|
||||||
|
cpu_core.ClearExclusiveState();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::SwitchContextStep2() {
|
||||||
|
// Load context of new thread
|
||||||
|
Reload(current_thread);
|
||||||
|
|
||||||
|
RescheduleCurrentCore();
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::ScheduleImpl() {
|
||||||
|
Thread* previous_thread = current_thread;
|
||||||
|
current_thread = state.highest_priority_thread;
|
||||||
|
|
||||||
|
this->state.needs_scheduling = false;
|
||||||
|
|
||||||
|
if (current_thread == previous_thread) {
|
||||||
|
guard.unlock();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
Process* const previous_process = system.Kernel().CurrentProcess();
|
||||||
|
|
||||||
|
UpdateLastContextSwitchTime(previous_thread, previous_process);
|
||||||
|
|
||||||
|
// Save context for previous thread
|
||||||
|
Unload(previous_thread);
|
||||||
|
|
||||||
|
std::shared_ptr<Common::Fiber>* old_context;
|
||||||
|
if (previous_thread != nullptr) {
|
||||||
|
old_context = &previous_thread->GetHostContext();
|
||||||
|
} else {
|
||||||
|
old_context = &idle_thread->GetHostContext();
|
||||||
|
}
|
||||||
|
guard.unlock();
|
||||||
|
|
||||||
|
Common::Fiber::YieldTo(*old_context, switch_fiber);
|
||||||
|
/// When a thread wakes up, the scheduler may have changed to other in another core.
|
||||||
|
auto& next_scheduler = *system.Kernel().CurrentScheduler();
|
||||||
|
next_scheduler.SwitchContextStep2();
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::OnSwitch(void* this_scheduler) {
|
||||||
|
KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
|
||||||
|
sched->SwitchToCurrent();
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::SwitchToCurrent() {
|
||||||
|
while (true) {
|
||||||
|
{
|
||||||
|
std::scoped_lock lock{guard};
|
||||||
|
current_thread = state.highest_priority_thread;
|
||||||
|
this->state.needs_scheduling = false;
|
||||||
|
}
|
||||||
|
const auto is_switch_pending = [this] {
|
||||||
|
std::scoped_lock lock{guard};
|
||||||
|
return state.needs_scheduling.load(std::memory_order_relaxed);
|
||||||
|
};
|
||||||
|
do {
|
||||||
|
if (current_thread != nullptr && !current_thread->IsHLEThread()) {
|
||||||
|
current_thread->context_guard.lock();
|
||||||
|
if (!current_thread->IsRunnable()) {
|
||||||
|
current_thread->context_guard.unlock();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
|
||||||
|
current_thread->context_guard.unlock();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
std::shared_ptr<Common::Fiber>* next_context;
|
||||||
|
if (current_thread != nullptr) {
|
||||||
|
next_context = ¤t_thread->GetHostContext();
|
||||||
|
} else {
|
||||||
|
next_context = &idle_thread->GetHostContext();
|
||||||
|
}
|
||||||
|
Common::Fiber::YieldTo(switch_fiber, *next_context);
|
||||||
|
} while (!is_switch_pending());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
|
||||||
|
const u64 prev_switch_ticks = last_context_switch_time;
|
||||||
|
const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
|
||||||
|
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
|
||||||
|
|
||||||
|
if (thread != nullptr) {
|
||||||
|
thread->UpdateCPUTimeTicks(update_ticks);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (process != nullptr) {
|
||||||
|
process->UpdateCPUTimeTicks(update_ticks);
|
||||||
|
}
|
||||||
|
|
||||||
|
last_context_switch_time = most_recent_switch_ticks;
|
||||||
|
}
|
||||||
|
|
||||||
|
void KScheduler::Initialize() {
|
||||||
|
std::string name = "Idle Thread Id:" + std::to_string(core_id);
|
||||||
|
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
|
||||||
|
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||||
|
ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
|
||||||
|
auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
|
||||||
|
nullptr, std::move(init_func), init_func_parameter);
|
||||||
|
idle_thread = thread_res.Unwrap().get();
|
||||||
|
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock lock{system.Kernel()};
|
||||||
|
idle_thread->SetStatus(ThreadStatus::Ready);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
|
||||||
|
: KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
|
||||||
|
|
||||||
|
KScopedSchedulerLock::~KScopedSchedulerLock() = default;
|
||||||
|
|
||||||
|
} // namespace Kernel
|
201
src/core/hle/kernel/k_scheduler.h
Normal file
201
src/core/hle/kernel/k_scheduler.h
Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
// Copyright 2020 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||||
|
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
|
||||||
|
#include "common/common_types.h"
|
||||||
|
#include "common/spin_lock.h"
|
||||||
|
#include "core/hle/kernel/global_scheduler_context.h"
|
||||||
|
#include "core/hle/kernel/k_priority_queue.h"
|
||||||
|
#include "core/hle/kernel/k_scheduler_lock.h"
|
||||||
|
#include "core/hle/kernel/k_scoped_lock.h"
|
||||||
|
|
||||||
|
namespace Common {
|
||||||
|
class Fiber;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace Core {
|
||||||
|
class System;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
class KernelCore;
|
||||||
|
class Process;
|
||||||
|
class SchedulerLock;
|
||||||
|
class Thread;
|
||||||
|
|
||||||
|
class KScheduler final {
|
||||||
|
public:
|
||||||
|
explicit KScheduler(Core::System& system, std::size_t core_id);
|
||||||
|
~KScheduler();
|
||||||
|
|
||||||
|
/// Reschedules to the next available thread (call after current thread is suspended)
|
||||||
|
void RescheduleCurrentCore();
|
||||||
|
|
||||||
|
/// Reschedules cores pending reschedule, to be called on EnableScheduling.
|
||||||
|
static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
|
||||||
|
Core::EmuThreadHandle global_thread);
|
||||||
|
|
||||||
|
/// The next two are for SingleCore Only.
|
||||||
|
/// Unload current thread before preempting core.
|
||||||
|
void Unload(Thread* thread);
|
||||||
|
|
||||||
|
/// Reload current thread after core preemption.
|
||||||
|
void Reload(Thread* thread);
|
||||||
|
|
||||||
|
/// Gets the current running thread
|
||||||
|
[[nodiscard]] Thread* GetCurrentThread() const;
|
||||||
|
|
||||||
|
/// Gets the timestamp for the last context switch in ticks.
|
||||||
|
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
|
||||||
|
|
||||||
|
[[nodiscard]] bool ContextSwitchPending() const {
|
||||||
|
return state.needs_scheduling.load(std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
void Initialize();
|
||||||
|
|
||||||
|
void OnThreadStart();
|
||||||
|
|
||||||
|
[[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
|
||||||
|
return switch_fiber;
|
||||||
|
}
|
||||||
|
|
||||||
|
[[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
|
||||||
|
return switch_fiber;
|
||||||
|
}
|
||||||
|
|
||||||
|
[[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Takes a thread and moves it to the back of the it's priority list.
|
||||||
|
*
|
||||||
|
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
||||||
|
*/
|
||||||
|
void YieldWithoutCoreMigration();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Takes a thread and moves it to the back of the it's priority list.
|
||||||
|
* Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
|
||||||
|
* a better priority than the next thread in the core.
|
||||||
|
*
|
||||||
|
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
||||||
|
*/
|
||||||
|
void YieldWithCoreMigration();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Takes a thread and moves it out of the scheduling queue.
|
||||||
|
* and into the suggested queue. If no thread can be scheduled afterwards in that core,
|
||||||
|
* a suggested thread is obtained instead.
|
||||||
|
*
|
||||||
|
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
||||||
|
*/
|
||||||
|
void YieldToAnyThread();
|
||||||
|
|
||||||
|
/// Notify the scheduler a thread's status has changed.
|
||||||
|
static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
|
||||||
|
|
||||||
|
/// Notify the scheduler a thread's priority has changed.
|
||||||
|
    static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
                                        u32 old_priority);

    /// Notify the scheduler a thread's core and/or affinity mask has changed.
    static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
                                            const KAffinityMask& old_affinity, s32 old_core);

    static bool CanSchedule(KernelCore& kernel);
    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
    static void DisableScheduling(KernelCore& kernel);
    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
                                 Core::EmuThreadHandle global_thread);
    [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);

private:
    friend class GlobalSchedulerContext;

    /**
     * Takes care of selecting the new scheduled threads in three steps:
     *
     * 1. First, a thread is selected from the top of the priority queue. If no thread
     *    is obtained, we move on to step two; otherwise we are done.
     *
     * 2. Second, we try to get a suggested thread that is not assigned to any core, or
     *    that is not the top thread on its core.
     *
     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
     *    thread on another core and swap it with that core's current thread.
     *
     * Returns the cores needing scheduling.
     */
    [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);

    [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);

    void RotateScheduledQueue(s32 core_id, s32 priority);

    void Schedule() {
        ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
        this->ScheduleImpl();
    }

    /// Switches the CPU's active thread context to that of the specified thread
    void ScheduleImpl();

    /// When a thread wakes up, it must run this through its new scheduler
    void SwitchContextStep2();

    /**
     * Called on every context switch to update the internal timestamp.
     * This also updates the running time ticks for the given thread and
     * process using the following difference:
     *
     * ticks += most_recent_ticks - last_context_switch_ticks
     *
     * The internal tick timestamp for the scheduler is simply the
     * most recent tick count retrieved. No special arithmetic is
     * applied to it.
     */
    void UpdateLastContextSwitchTime(Thread* thread, Process* process);

    static void OnSwitch(void* this_scheduler);
    void SwitchToCurrent();

    Thread* current_thread{};
    Thread* idle_thread{};

    std::shared_ptr<Common::Fiber> switch_fiber{};

    struct SchedulingState {
        std::atomic<bool> needs_scheduling;
        bool interrupt_task_thread_runnable{};
        bool should_count_idle{};
        u64 idle_count{};
        Thread* highest_priority_thread{};
        void* idle_thread_stack{};
    };

    SchedulingState state;

    Core::System& system;
    u64 last_context_switch_time{};
    const std::size_t core_id;

    Common::SpinLock guard{};
};

class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
public:
    explicit KScopedSchedulerLock(KernelCore& kernel);
    ~KScopedSchedulerLock();
};

} // namespace Kernel
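As an aside for readers skimming the new header: the following is a minimal, self-contained C++ sketch of the three-step selection that the UpdateHighestPriorityThreadsImpl comment above describes. Everything in it (SketchThread, PriorityQueues, NUM_CORES) is a stand-in invented for illustration; it is not the KScheduler implementation, and step 3 is reduced to a comment.

#include <array>
#include <cstdint>
#include <deque>

namespace sketch {

constexpr std::size_t NUM_CORES = 4;

struct SketchThread {
    int priority;      // lower value = higher priority
    int assigned_core; // -1 means not pinned to a core
};

struct PriorityQueues {
    // Per-core queue of runnable threads; front() is the best candidate.
    std::array<std::deque<SketchThread*>, NUM_CORES> scheduled{};
    // Per-core list of threads that may migrate to that core.
    std::array<std::deque<SketchThread*>, NUM_CORES> suggested{};
};

// Returns a bitmask of cores whose selected thread changed and therefore need rescheduling.
inline std::uint64_t SelectHighestPriorityThreads(PriorityQueues& queues,
                                                  std::array<SketchThread*, NUM_CORES>& selected) {
    std::uint64_t cores_needing_scheduling = 0;
    for (std::size_t core = 0; core < NUM_CORES; ++core) {
        SketchThread* top = nullptr;

        // Step 1: take the thread at the top of this core's priority queue, if any.
        if (!queues.scheduled[core].empty()) {
            top = queues.scheduled[core].front();
        }

        // Step 2: otherwise, look for a suggested thread that is either unassigned
        // or not currently the top thread of the core it is assigned to.
        if (top == nullptr) {
            for (SketchThread* candidate : queues.suggested[core]) {
                const int src = candidate->assigned_core;
                const bool top_of_its_core = src >= 0 && !queues.scheduled[src].empty() &&
                                             queues.scheduled[src].front() == candidate;
                if (!top_of_its_core) {
                    top = candidate;
                    break;
                }
            }
            // Step 3 (omitted here): if still nothing, take a running thread from another
            // core and swap it with that core's current thread.
        }

        if (top != selected[core]) {
            selected[core] = top;
            cores_needing_scheduling |= std::uint64_t{1} << core;
        }
    }
    return cores_needing_scheduling;
}

} // namespace sketch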
src/core/hle/kernel/k_scheduler_lock.h (new file, 74 lines)

@@ -0,0 +1,74 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/assert.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"

namespace Kernel {

class KernelCore;

template <typename SchedulerType>
class KAbstractSchedulerLock {
public:
    explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}

    bool IsLockedByCurrentThread() const {
        return this->owner_thread == kernel.GetCurrentEmuThreadID();
    }

    void Lock() {
        if (this->IsLockedByCurrentThread()) {
            // If we already own the lock, we can just increment the count.
            ASSERT(this->lock_count > 0);
            this->lock_count++;
        } else {
            // Otherwise, we want to disable scheduling and acquire the spinlock.
            SchedulerType::DisableScheduling(kernel);
            this->spin_lock.lock();

            // For debug, ensure that our state is valid.
            ASSERT(this->lock_count == 0);
            ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());

            // Increment count, take ownership.
            this->lock_count = 1;
            this->owner_thread = kernel.GetCurrentEmuThreadID();
        }
    }

    void Unlock() {
        ASSERT(this->IsLockedByCurrentThread());
        ASSERT(this->lock_count > 0);

        // Release an instance of the lock.
        if ((--this->lock_count) == 0) {
            // We're no longer going to hold the lock. Take note of what cores need scheduling.
            const u64 cores_needing_scheduling =
                SchedulerType::UpdateHighestPriorityThreads(kernel);
            Core::EmuThreadHandle leaving_thread = owner_thread;

            // Note that we no longer hold the lock, and unlock the spinlock.
            this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
            this->spin_lock.unlock();

            // Enable scheduling, and perform a rescheduling operation.
            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
        }
    }

private:
    KernelCore& kernel;
    Common::SpinLock spin_lock{};
    s32 lock_count{};
    Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
};

} // namespace Kernel
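A brief usage note on this lock. The alias below mirrors how the lock appears to be instantiated elsewhere in the patch (as GlobalSchedulerContext::LockType over KScheduler); the call sequence is only an illustration of the reentrancy and the reschedule-on-final-unlock behaviour, not code taken from the PR.

using SchedulerLockType = Kernel::KAbstractSchedulerLock<Kernel::KScheduler>;

void LockTwiceSketch(SchedulerLockType& lock) {
    lock.Lock();   // disables scheduling, acquires the spinlock, records the owner thread
    lock.Lock();   // same owner: only lock_count is incremented
    lock.Unlock(); // lock_count goes 2 -> 1, nothing else happens
    lock.Unlock(); // final release: selects new highest-priority threads, releases the
                   // spinlock, then re-enables scheduling so affected cores reschedule
}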
src/core/hle/kernel/k_scoped_lock.h (new file, 41 lines)

@@ -0,0 +1,41 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"

namespace Kernel {

template <typename T>
concept KLockable = !std::is_reference_v<T> && requires(T & t) {
    { t.Lock() }
    ->std::same_as<void>;
    { t.Unlock() }
    ->std::same_as<void>;
};

template <typename T>
requires KLockable<T> class KScopedLock {
public:
    explicit KScopedLock(T* l) : lock_ptr(l) {
        this->lock_ptr->Lock();
    }
    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
    }
    ~KScopedLock() {
        this->lock_ptr->Unlock();
    }

    KScopedLock(const KScopedLock&) = delete;
    KScopedLock(KScopedLock&&) = delete;

private:
    T* lock_ptr;
};

} // namespace Kernel
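As a usage illustration only: any non-reference type with void Lock()/Unlock() satisfies KLockable and can be guarded by KScopedLock. DummyLock below is a made-up stand-in, not a yuzu type.

#include <mutex>

class DummyLock {
public:
    void Lock() {
        mutex.lock();
    }
    void Unlock() {
        mutex.unlock();
    }

private:
    std::mutex mutex;
};

void GuardedSection(DummyLock& lock) {
    Kernel::KScopedLock guard{lock}; // Lock() now, Unlock() automatically at scope exit
    // ... critical section ...
}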
src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h (new file, 50 lines)

@@ -0,0 +1,50 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

class KScopedSchedulerLockAndSleep {
public:
    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
                                          s64 timeout)
        : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
        event_handle = InvalidHandle;

        // Lock the scheduler.
        kernel.GlobalSchedulerContext().scheduler_lock.Lock();
    }

    ~KScopedSchedulerLockAndSleep() {
        // Register the sleep.
        if (this->timeout_tick > 0) {
            kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
        }

        // Unlock the scheduler.
        kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
    }

    void CancelSleep() {
        this->timeout_tick = 0;
    }

private:
    KernelCore& kernel;
    Handle& event_handle;
    Thread* thread{};
    s64 timeout_tick{};
};

} // namespace Kernel
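A hedged sketch of the intended usage pattern for this helper (the wait-state handling inside the scope is an assumption, not code from this PR): the scheduler stays locked for the scope, and CancelSleep() zeroes the timeout so the destructor will not register a time event.

void TimedWaitSketch(Kernel::KernelCore& kernel, Kernel::Thread* current_thread,
                     s64 timeout_ticks, bool already_signaled) {
    Kernel::Handle event_handle = Kernel::InvalidHandle;
    {
        Kernel::KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread,
                                                  timeout_ticks);
        if (already_signaled) {
            // Nothing to wait for: cancel so no time event is scheduled on destruction.
            lock.CancelSleep();
            return;
        }
        // Otherwise the caller would put the thread into its wait state here; when the
        // scope ends, the destructor registers the timeout and unlocks the scheduler.
    }
}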
src/core/hle/kernel/kernel.cpp

@@ -27,6 +27,7 @@
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_layout.h"
 #include "core/hle/kernel/memory/memory_manager.h"
@@ -34,7 +35,6 @@
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/thread.h"
@@ -49,17 +49,18 @@ namespace Kernel {
 
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
-        : global_scheduler{kernel}, synchronization{system}, time_manager{system},
-          global_handle_table{kernel}, system{system} {}
+        : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
+                                                                                          system} {}
 
     void SetMulticore(bool is_multicore) {
         this->is_multicore = is_multicore;
     }
 
     void Initialize(KernelCore& kernel) {
-        Shutdown();
         RegisterHostThread();
 
+        global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
+
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
         InitializeMemoryLayout();
@@ -86,29 +87,20 @@ struct KernelCore::Impl {
             }
         }
 
-        for (std::size_t i = 0; i < cores.size(); i++) {
-            cores[i].Shutdown();
-            schedulers[i].reset();
-        }
         cores.clear();
 
         process_list.clear();
 
         current_process = nullptr;
 
         system_resource_limit = nullptr;
 
         global_handle_table.Clear();
 
         preemption_event = nullptr;
 
-        global_scheduler.Shutdown();
 
         named_ports.clear();
 
-        for (auto& core : cores) {
-            core.Shutdown();
-        }
-        cores.clear();
 
         exclusive_monitor.reset();
 
         num_host_threads = 0;
@@ -121,7 +113,7 @@ struct KernelCore::Impl {
         exclusive_monitor =
             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-            schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
+            schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
             cores.emplace_back(i, system, *schedulers[i], interrupts);
         }
     }
@@ -154,8 +146,8 @@ struct KernelCore::Impl {
         preemption_event = Core::Timing::CreateEvent(
             "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                 {
-                    SchedulerLock lock(kernel);
-                    global_scheduler.PreemptThreads();
+                    KScopedSchedulerLock lock(kernel);
+                    global_scheduler_context->PreemptThreads();
                 }
                 const auto time_interval = std::chrono::nanoseconds{
                     Core::Timing::msToCycles(std::chrono::milliseconds(10))};
@@ -245,7 +237,7 @@ struct KernelCore::Impl {
         if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
             return result;
         }
-        const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
+        const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
         const Kernel::Thread* current = sched.GetCurrentThread();
         if (current != nullptr && !current->IsPhantomMode()) {
             result.guest_handle = current->GetGlobalHandle();
@@ -314,7 +306,7 @@ struct KernelCore::Impl {
     // Lists all processes that exist in the current session.
     std::vector<std::shared_ptr<Process>> process_list;
     Process* current_process = nullptr;
-    Kernel::GlobalScheduler global_scheduler;
+    std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
    Kernel::Synchronization synchronization;
    Kernel::TimeManager time_manager;
 
@@ -355,7 +347,7 @@ struct KernelCore::Impl {
 
     std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
-    std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
+    std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
 
     bool is_multicore{};
     std::thread::id single_core_thread_id{};
@@ -415,19 +407,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const
     return impl->process_list;
 }
 
-Kernel::GlobalScheduler& KernelCore::GlobalScheduler() {
-    return impl->global_scheduler;
+Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
+    return *impl->global_scheduler_context;
 }
 
-const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
-    return impl->global_scheduler;
+const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
+    return *impl->global_scheduler_context;
 }
 
-Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
+Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
     return *impl->schedulers[id];
 }
 
-const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
+const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
     return *impl->schedulers[id];
 }
 
@@ -451,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
     return impl->cores[core_id];
 }
 
-Kernel::Scheduler& KernelCore::CurrentScheduler() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return *impl->schedulers[core_id];
-}
-
-const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
+Kernel::KScheduler* KernelCore::CurrentScheduler() {
     u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return *impl->schedulers[core_id];
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        // This is expected when called from not a guest thread
+        return {};
+    }
+    return impl->schedulers[core_id].get();
 }
 
 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
@@ -623,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
 void KernelCore::Suspend(bool in_suspention) {
     const bool should_suspend = exception_exited || in_suspention;
     {
-        SchedulerLock lock(*this);
+        KScopedSchedulerLock lock(*this);
         ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             impl->suspend_threads[i]->SetStatus(status);
|
|
src/core/hle/kernel/kernel.h

@@ -35,12 +35,12 @@ class SlabHeap;
 
 class AddressArbiter;
 class ClientPort;
-class GlobalScheduler;
+class GlobalSchedulerContext;
 class HandleTable;
 class PhysicalCore;
 class Process;
 class ResourceLimit;
-class Scheduler;
+class KScheduler;
 class SharedMemory;
 class Synchronization;
 class Thread;
@@ -102,16 +102,16 @@ public:
     const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
 
     /// Gets the sole instance of the global scheduler
-    Kernel::GlobalScheduler& GlobalScheduler();
+    Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
 
     /// Gets the sole instance of the global scheduler
-    const Kernel::GlobalScheduler& GlobalScheduler() const;
+    const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
 
     /// Gets the sole instance of the Scheduler assoviated with cpu core 'id'
-    Kernel::Scheduler& Scheduler(std::size_t id);
+    Kernel::KScheduler& Scheduler(std::size_t id);
 
     /// Gets the sole instance of the Scheduler assoviated with cpu core 'id'
-    const Kernel::Scheduler& Scheduler(std::size_t id) const;
+    const Kernel::KScheduler& Scheduler(std::size_t id) const;
 
     /// Gets the an instance of the respective physical CPU core.
     Kernel::PhysicalCore& PhysicalCore(std::size_t id);
@@ -120,10 +120,7 @@ public:
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
     /// Gets the sole instance of the Scheduler at the current running core.
-    Kernel::Scheduler& CurrentScheduler();
-
-    /// Gets the sole instance of the Scheduler at the current running core.
-    const Kernel::Scheduler& CurrentScheduler() const;
+    Kernel::KScheduler* CurrentScheduler();
 
     /// Gets the an instance of the current physical CPU core.
     Kernel::PhysicalCore& CurrentPhysicalCore();
|
|
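One consequence of this kernel.h change worth calling out: KernelCore::CurrentScheduler() now returns a pointer and yields nullptr when called from a host thread that is not running a guest core, so callers that can run outside a guest context need a null check. A hedged illustration (not code from the PR):

if (Kernel::KScheduler* scheduler = kernel.CurrentScheduler()) {
    Kernel::Thread* current = scheduler->GetCurrentThread();
    // ... operate on the current guest thread ...
} else {
    // Called from a non-guest (host) thread; there is no per-core scheduler here.
}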
src/core/hle/kernel/mutex.cpp

@@ -11,11 +11,11 @@
 #include "core/core.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
@@ -73,9 +73,9 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
 
     auto& kernel = system.Kernel();
     std::shared_ptr<Thread> current_thread =
-        SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         // The mutex address must be 4-byte aligned
         if ((address % sizeof(u32)) != 0) {
             return ERR_INVALID_ADDRESS;
@@ -114,7 +114,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         auto* owner = current_thread->GetLockOwner();
         if (owner != nullptr) {
             owner->RemoveMutexWaiter(current_thread);
@@ -153,10 +153,10 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr
 
 ResultCode Mutex::Release(VAddr address) {
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
 
     std::shared_ptr<Thread> current_thread =
-        SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
 
     auto [result, new_owner] = Unlock(current_thread, address);
|
|
src/core/hle/kernel/physical_core.cpp

@@ -7,14 +7,14 @@
 #include "core/arm/dynarmic/arm_dynarmic_32.h"
 #include "core/arm/dynarmic/arm_dynarmic_64.h"
 #include "core/core.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/scheduler.h"
 
 namespace Kernel {
 
 PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system,
-                           Kernel::Scheduler& scheduler, Core::CPUInterrupts& interrupts)
+                           Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts)
     : core_index{core_index}, system{system}, scheduler{scheduler},
       interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {}
 
@@ -43,10 +43,6 @@ void PhysicalCore::Idle() {
     interrupts[core_index].AwaitInterrupt();
 }
 
-void PhysicalCore::Shutdown() {
-    scheduler.Shutdown();
-}
-
 bool PhysicalCore::IsInterrupted() const {
     return interrupts[core_index].IsInterrupted();
 }

src/core/hle/kernel/physical_core.h

@@ -15,7 +15,7 @@ class SpinLock;
 }
 
 namespace Kernel {
-class Scheduler;
+class KScheduler;
 } // namespace Kernel
 
 namespace Core {
@@ -28,7 +28,7 @@ namespace Kernel {
 
 class PhysicalCore {
 public:
-    PhysicalCore(std::size_t core_index, Core::System& system, Kernel::Scheduler& scheduler,
+    PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler,
                  Core::CPUInterrupts& interrupts);
     ~PhysicalCore();
 
@@ -55,9 +55,6 @@ public:
     /// Check if this core is interrupted
     bool IsInterrupted() const;
 
-    // Shutdown this physical core.
-    void Shutdown();
-
     bool IsInitialized() const {
         return arm_interface != nullptr;
     }
@@ -82,18 +79,18 @@ public:
         return core_index;
     }
 
-    Kernel::Scheduler& Scheduler() {
+    Kernel::KScheduler& Scheduler() {
         return scheduler;
     }
 
-    const Kernel::Scheduler& Scheduler() const {
+    const Kernel::KScheduler& Scheduler() const {
         return scheduler;
     }
 
 private:
     const std::size_t core_index;
     Core::System& system;
-    Kernel::Scheduler& scheduler;
+    Kernel::KScheduler& scheduler;
     Core::CPUInterrupts& interrupts;
     std::unique_ptr<Common::SpinLock> guard;
     std::unique_ptr<Core::ARM_Interface> arm_interface;
|
|
src/core/hle/kernel/process.cpp

@@ -15,13 +15,13 @@
 #include "core/file_sys/program_metadata.h"
 #include "core/hle/kernel/code_set.h"
 #include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block_manager.h"
 #include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/memory/slab_heap.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/lock.h"
 #include "core/memory.h"
@@ -54,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
     {
-        SchedulerLock lock{kernel};
+        KScopedSchedulerLock lock{kernel};
         thread->SetStatus(ThreadStatus::Ready);
     }
 }
@@ -213,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) {
 }
 
 ResultCode Process::ClearSignalState() {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     if (status == ProcessStatus::Exited) {
         LOG_ERROR(Kernel, "called on a terminated process instance.");
         return ERR_INVALID_STATE;
@@ -314,7 +314,7 @@ void Process::PrepareForTermination() {
         if (thread->GetOwnerProcess() != this)
             continue;
 
-        if (thread.get() == system.CurrentScheduler().GetCurrentThread())
+        if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
             continue;
 
         // TODO(Subv): When are the other running/ready threads terminated?
@@ -325,7 +325,7 @@ void Process::PrepareForTermination() {
         }
     };
 
-    stop_threads(system.GlobalScheduler().GetThreadList());
+    stop_threads(system.GlobalSchedulerContext().GetThreadList());
 
     FreeTLSRegion(tls_region_address);
     tls_region_address = 0;
@@ -347,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
 }
 
 VAddr Process::CreateTLSRegion() {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
         tls_page_iter != tls_pages.cend()) {
         return *tls_page_iter->ReserveSlot();
@@ -378,7 +378,7 @@ VAddr Process::CreateTLSRegion() {
 }
 
 void Process::FreeTLSRegion(VAddr tls_address) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
     auto iter =
         std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
|
|
src/core/hle/kernel/process.h

@@ -216,6 +216,16 @@ public:
         total_process_running_time_ticks += ticks;
     }
 
+    /// Gets the process schedule count, used for thread yielding
+    s64 GetScheduledCount() const {
+        return schedule_count;
+    }
+
+    /// Increments the process schedule count, used for thread yielding.
+    void IncrementScheduledCount() {
+        ++schedule_count;
+    }
+
     /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
     u64 GetRandomEntropy(std::size_t index) const {
         return random_entropy.at(index);
@@ -397,6 +407,9 @@ private:
     /// Name of this process
     std::string name;
 
+    /// Schedule count of this process
+    s64 schedule_count{};
+
     /// System context
     Core::System& system;
 };
|
|
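The new schedule_count accessors above exist to support thread yielding. A plausible reading (an assumption based on the Atmosphere design this PR follows, not something stated in the diff): the scheduler increments the owning process's count whenever it schedules, and a yielding thread records the count so a later yield can detect that nothing was rescheduled in the meantime and skip redundant work, roughly as sketched below.

// Hypothetical helper; the name and the recorded-count parameter are placeholders.
bool YieldWouldBeRedundant(const Kernel::Process& process, s64 thread_recorded_count) {
    return process.GetScheduledCount() == thread_recorded_count;
}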
src/core/hle/kernel/readable_event.cpp

@@ -6,10 +6,10 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 
 namespace Kernel {
@@ -39,7 +39,7 @@ void ReadableEvent::Clear() {
 }
 
 ResultCode ReadableEvent::Reset() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (!is_signaled) {
         LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
                   GetObjectId(), GetTypeName(), GetName());
|
|
@ -1,819 +0,0 @@
|
||||||
// Copyright 2018 yuzu emulator team
|
|
||||||
// Licensed under GPLv2 or any later version
|
|
||||||
// Refer to the license.txt file included.
|
|
||||||
//
|
|
||||||
// SelectThreads, Yield functions originally by TuxSH.
|
|
||||||
// licensed under GPLv2 or later under exception provided by the author.
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <mutex>
|
|
||||||
#include <set>
|
|
||||||
#include <unordered_set>
|
|
||||||
#include <utility>
|
|
||||||
|
|
||||||
#include "common/assert.h"
|
|
||||||
#include "common/bit_util.h"
|
|
||||||
#include "common/fiber.h"
|
|
||||||
#include "common/logging/log.h"
|
|
||||||
#include "core/arm/arm_interface.h"
|
|
||||||
#include "core/core.h"
|
|
||||||
#include "core/core_timing.h"
|
|
||||||
#include "core/cpu_manager.h"
|
|
||||||
#include "core/hle/kernel/kernel.h"
|
|
||||||
#include "core/hle/kernel/physical_core.h"
|
|
||||||
#include "core/hle/kernel/process.h"
|
|
||||||
#include "core/hle/kernel/scheduler.h"
|
|
||||||
#include "core/hle/kernel/time_manager.h"
|
|
||||||
|
|
||||||
namespace Kernel {
|
|
||||||
|
|
||||||
GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
|
|
||||||
|
|
||||||
GlobalScheduler::~GlobalScheduler() = default;
|
|
||||||
|
|
||||||
void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
|
|
||||||
std::scoped_lock lock{global_list_guard};
|
|
||||||
thread_list.push_back(std::move(thread));
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
|
|
||||||
std::scoped_lock lock{global_list_guard};
|
|
||||||
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
|
|
||||||
thread_list.end());
|
|
||||||
}
|
|
||||||
|
|
||||||
u32 GlobalScheduler::SelectThreads() {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
const auto update_thread = [](Thread* thread, Scheduler& sched) {
|
|
||||||
std::scoped_lock lock{sched.guard};
|
|
||||||
if (thread != sched.selected_thread_set.get()) {
|
|
||||||
if (thread == nullptr) {
|
|
||||||
++sched.idle_selection_count;
|
|
||||||
}
|
|
||||||
sched.selected_thread_set = SharedFrom(thread);
|
|
||||||
}
|
|
||||||
const bool reschedule_pending =
|
|
||||||
sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
|
|
||||||
sched.is_context_switch_pending = reschedule_pending;
|
|
||||||
std::atomic_thread_fence(std::memory_order_seq_cst);
|
|
||||||
return reschedule_pending;
|
|
||||||
};
|
|
||||||
if (!is_reselection_pending.load()) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
|
|
||||||
|
|
||||||
u32 idle_cores{};
|
|
||||||
|
|
||||||
// Step 1: Get top thread in schedule queue.
|
|
||||||
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
|
|
||||||
Thread* top_thread =
|
|
||||||
scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
|
|
||||||
if (top_thread != nullptr) {
|
|
||||||
// TODO(Blinkhawk): Implement Thread Pinning
|
|
||||||
} else {
|
|
||||||
idle_cores |= (1U << core);
|
|
||||||
}
|
|
||||||
top_threads[core] = top_thread;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (idle_cores != 0) {
|
|
||||||
u32 core_id = Common::CountTrailingZeroes32(idle_cores);
|
|
||||||
|
|
||||||
if (!suggested_queue[core_id].empty()) {
|
|
||||||
std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
|
|
||||||
std::size_t num_candidates = 0;
|
|
||||||
auto iter = suggested_queue[core_id].begin();
|
|
||||||
Thread* suggested = nullptr;
|
|
||||||
// Step 2: Try selecting a suggested thread.
|
|
||||||
while (iter != suggested_queue[core_id].end()) {
|
|
||||||
suggested = *iter;
|
|
||||||
iter++;
|
|
||||||
s32 suggested_core_id = suggested->GetProcessorID();
|
|
||||||
Thread* top_thread =
|
|
||||||
suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
|
|
||||||
if (top_thread != suggested) {
|
|
||||||
if (top_thread != nullptr &&
|
|
||||||
top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
|
|
||||||
suggested = nullptr;
|
|
||||||
break;
|
|
||||||
// There's a too high thread to do core migration, cancel
|
|
||||||
}
|
|
||||||
TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
suggested = nullptr;
|
|
||||||
migration_candidates[num_candidates++] = suggested_core_id;
|
|
||||||
}
|
|
||||||
// Step 3: Select a suggested thread from another core
|
|
||||||
if (suggested == nullptr) {
|
|
||||||
for (std::size_t i = 0; i < num_candidates; i++) {
|
|
||||||
s32 candidate_core = migration_candidates[i];
|
|
||||||
suggested = top_threads[candidate_core];
|
|
||||||
auto it = scheduled_queue[candidate_core].begin();
|
|
||||||
it++;
|
|
||||||
Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
|
|
||||||
if (next != nullptr) {
|
|
||||||
TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
|
|
||||||
suggested);
|
|
||||||
top_threads[candidate_core] = next;
|
|
||||||
break;
|
|
||||||
} else {
|
|
||||||
suggested = nullptr;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
top_threads[core_id] = suggested;
|
|
||||||
}
|
|
||||||
|
|
||||||
idle_cores &= ~(1U << core_id);
|
|
||||||
}
|
|
||||||
u32 cores_needing_context_switch{};
|
|
||||||
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
|
|
||||||
Scheduler& sched = kernel.Scheduler(core);
|
|
||||||
ASSERT(top_threads[core] == nullptr ||
|
|
||||||
static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
|
|
||||||
if (update_thread(top_threads[core], sched)) {
|
|
||||||
cores_needing_context_switch |= (1U << core);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return cores_needing_context_switch;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
// Note: caller should use critical section, etc.
|
|
||||||
if (!yielding_thread->IsRunnable()) {
|
|
||||||
// Normally this case shouldn't happen except for SetThreadActivity.
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
|
|
||||||
const u32 priority = yielding_thread->GetPriority();
|
|
||||||
|
|
||||||
// Yield the thread
|
|
||||||
Reschedule(priority, core_id, yielding_thread);
|
|
||||||
const Thread* const winner = scheduled_queue[core_id].front();
|
|
||||||
if (kernel.GetCurrentHostThreadID() != core_id) {
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
}
|
|
||||||
|
|
||||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
|
|
||||||
// etc.
|
|
||||||
if (!yielding_thread->IsRunnable()) {
|
|
||||||
// Normally this case shouldn't happen except for SetThreadActivity.
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
|
|
||||||
const u32 priority = yielding_thread->GetPriority();
|
|
||||||
|
|
||||||
// Yield the thread
|
|
||||||
Reschedule(priority, core_id, yielding_thread);
|
|
||||||
|
|
||||||
std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
|
|
||||||
for (std::size_t i = 0; i < current_threads.size(); i++) {
|
|
||||||
current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
|
|
||||||
}
|
|
||||||
|
|
||||||
Thread* next_thread = scheduled_queue[core_id].front(priority);
|
|
||||||
Thread* winner = nullptr;
|
|
||||||
for (auto& thread : suggested_queue[core_id]) {
|
|
||||||
const s32 source_core = thread->GetProcessorID();
|
|
||||||
if (source_core >= 0) {
|
|
||||||
if (current_threads[source_core] != nullptr) {
|
|
||||||
if (thread == current_threads[source_core] ||
|
|
||||||
current_threads[source_core]->GetPriority() < min_regular_priority) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
|
|
||||||
next_thread->GetPriority() < thread->GetPriority()) {
|
|
||||||
if (thread->GetPriority() <= priority) {
|
|
||||||
winner = thread;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (winner != nullptr) {
|
|
||||||
if (winner != yielding_thread) {
|
|
||||||
TransferToCore(winner->GetPriority(), s32(core_id), winner);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
winner = next_thread;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (kernel.GetCurrentHostThreadID() != core_id) {
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
}
|
|
||||||
|
|
||||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
|
|
||||||
// etc.
|
|
||||||
if (!yielding_thread->IsRunnable()) {
|
|
||||||
// Normally this case shouldn't happen except for SetThreadActivity.
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
Thread* winner = nullptr;
|
|
||||||
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
|
|
||||||
|
|
||||||
// Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
|
|
||||||
TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
|
|
||||||
|
|
||||||
// If the core is idle, perform load balancing, excluding the threads that have just used this
|
|
||||||
// function...
|
|
||||||
if (scheduled_queue[core_id].empty()) {
|
|
||||||
// Here, "current_threads" is calculated after the ""yield"", unlike yield -1
|
|
||||||
std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
|
|
||||||
for (std::size_t i = 0; i < current_threads.size(); i++) {
|
|
||||||
current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
|
|
||||||
}
|
|
||||||
for (auto& thread : suggested_queue[core_id]) {
|
|
||||||
const s32 source_core = thread->GetProcessorID();
|
|
||||||
if (source_core < 0 || thread == current_threads[source_core]) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (current_threads[source_core] == nullptr ||
|
|
||||||
current_threads[source_core]->GetPriority() >= min_regular_priority) {
|
|
||||||
winner = thread;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (winner != nullptr) {
|
|
||||||
if (winner != yielding_thread) {
|
|
||||||
TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
winner = yielding_thread;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
winner = scheduled_queue[core_id].front();
|
|
||||||
}
|
|
||||||
|
|
||||||
if (kernel.GetCurrentHostThreadID() != core_id) {
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
}
|
|
||||||
|
|
||||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::PreemptThreads() {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
|
|
||||||
const u32 priority = preemption_priorities[core_id];
|
|
||||||
|
|
||||||
if (scheduled_queue[core_id].size(priority) > 0) {
|
|
||||||
if (scheduled_queue[core_id].size(priority) > 1) {
|
|
||||||
scheduled_queue[core_id].front(priority)->IncrementYieldCount();
|
|
||||||
}
|
|
||||||
scheduled_queue[core_id].yield(priority);
|
|
||||||
if (scheduled_queue[core_id].size(priority) > 1) {
|
|
||||||
scheduled_queue[core_id].front(priority)->IncrementYieldCount();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Thread* current_thread =
|
|
||||||
scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
|
|
||||||
Thread* winner = nullptr;
|
|
||||||
for (auto& thread : suggested_queue[core_id]) {
|
|
||||||
const s32 source_core = thread->GetProcessorID();
|
|
||||||
if (thread->GetPriority() != priority) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (source_core >= 0) {
|
|
||||||
Thread* next_thread = scheduled_queue[source_core].empty()
|
|
||||||
? nullptr
|
|
||||||
: scheduled_queue[source_core].front();
|
|
||||||
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (next_thread == thread) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (current_thread != nullptr &&
|
|
||||||
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
|
|
||||||
winner = thread;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (winner != nullptr) {
|
|
||||||
TransferToCore(winner->GetPriority(), s32(core_id), winner);
|
|
||||||
current_thread =
|
|
||||||
winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (current_thread != nullptr && current_thread->GetPriority() > priority) {
|
|
||||||
for (auto& thread : suggested_queue[core_id]) {
|
|
||||||
const s32 source_core = thread->GetProcessorID();
|
|
||||||
if (thread->GetPriority() < priority) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (source_core >= 0) {
|
|
||||||
Thread* next_thread = scheduled_queue[source_core].empty()
|
|
||||||
? nullptr
|
|
||||||
: scheduled_queue[source_core].front();
|
|
||||||
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (next_thread == thread) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (current_thread != nullptr &&
|
|
||||||
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
|
|
||||||
winner = thread;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (winner != nullptr) {
|
|
||||||
TransferToCore(winner->GetPriority(), s32(core_id), winner);
|
|
||||||
current_thread = winner;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
|
|
||||||
Core::EmuThreadHandle global_thread) {
|
|
||||||
u32 current_core = global_thread.host_handle;
|
|
||||||
bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
|
|
||||||
(current_core < Core::Hardware::NUM_CPU_CORES);
|
|
||||||
while (cores_pending_reschedule != 0) {
|
|
||||||
u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
|
|
||||||
ASSERT(core < Core::Hardware::NUM_CPU_CORES);
|
|
||||||
if (!must_context_switch || core != current_core) {
|
|
||||||
auto& phys_core = kernel.PhysicalCore(core);
|
|
||||||
phys_core.Interrupt();
|
|
||||||
} else {
|
|
||||||
must_context_switch = true;
|
|
||||||
}
|
|
||||||
cores_pending_reschedule &= ~(1U << core);
|
|
||||||
}
|
|
||||||
if (must_context_switch) {
|
|
||||||
auto& core_scheduler = kernel.CurrentScheduler();
|
|
||||||
kernel.ExitSVCProfile();
|
|
||||||
core_scheduler.TryDoContextSwitch();
|
|
||||||
kernel.EnterSVCProfile();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
suggested_queue[core].add(thread, priority);
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
suggested_queue[core].remove(thread, priority);
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
|
|
||||||
scheduled_queue[core].add(thread, priority);
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
|
|
||||||
scheduled_queue[core].add(thread, priority, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
scheduled_queue[core].remove(thread, priority);
|
|
||||||
scheduled_queue[core].add(thread, priority);
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
scheduled_queue[core].remove(thread, priority);
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
|
|
||||||
ASSERT(is_locked);
|
|
||||||
const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
|
|
||||||
const s32 source_core = thread->GetProcessorID();
|
|
||||||
if (source_core == destination_core || !schedulable) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
thread->SetProcessorID(destination_core);
|
|
||||||
if (source_core >= 0) {
|
|
||||||
Unschedule(priority, static_cast<u32>(source_core), thread);
|
|
||||||
}
|
|
||||||
if (destination_core >= 0) {
|
|
||||||
Unsuggest(priority, static_cast<u32>(destination_core), thread);
|
|
||||||
Schedule(priority, static_cast<u32>(destination_core), thread);
|
|
||||||
}
|
|
||||||
if (source_core >= 0) {
|
|
||||||
Suggest(priority, static_cast<u32>(source_core), thread);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
|
|
||||||
const Thread* winner) {
|
|
||||||
if (current_thread == winner) {
|
|
||||||
current_thread->IncrementYieldCount();
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
is_reselection_pending.store(true, std::memory_order_release);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
|
|
||||||
if (old_flags == thread->scheduling_state) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
ASSERT(is_locked);
|
|
||||||
|
|
||||||
if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
|
||||||
// In this case the thread was running, now it's pausing/exitting
|
|
||||||
if (thread->processor_id >= 0) {
|
|
            Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }

        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            if (core != static_cast<u32>(thread->processor_id) &&
                ((thread->affinity_mask >> core) & 1) != 0) {
                Unsuggest(thread->current_priority, core, thread);
            }
        }
    } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
        // The thread is now set to running from being stopped
        if (thread->processor_id >= 0) {
            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }

        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            if (core != static_cast<u32>(thread->processor_id) &&
                ((thread->affinity_mask >> core) & 1) != 0) {
                Suggest(thread->current_priority, core, thread);
            }
        }
    }

    SetReselectionPending();
}

void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
    if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
        return;
    }
    ASSERT(is_locked);
    if (thread->processor_id >= 0) {
        Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (core != static_cast<u32>(thread->processor_id) &&
            ((thread->affinity_mask >> core) & 1) != 0) {
            Unsuggest(old_priority, core, thread);
        }
    }

    if (thread->processor_id >= 0) {
        if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
            SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
                            thread);
        } else {
            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (core != static_cast<u32>(thread->processor_id) &&
            ((thread->affinity_mask >> core) & 1) != 0) {
            Suggest(thread->current_priority, core, thread);
        }
    }
    thread->IncrementYieldCount();
    SetReselectionPending();
}

void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
                                                 s32 old_core) {
    if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
        thread->current_priority >= THREADPRIO_COUNT) {
        return;
    }
    ASSERT(is_locked);

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (((old_affinity_mask >> core) & 1) != 0) {
            if (core == static_cast<u32>(old_core)) {
                Unschedule(thread->current_priority, core, thread);
            } else {
                Unsuggest(thread->current_priority, core, thread);
            }
        }
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (((thread->affinity_mask >> core) & 1) != 0) {
            if (core == static_cast<u32>(thread->processor_id)) {
                Schedule(thread->current_priority, core, thread);
            } else {
                Suggest(thread->current_priority, core, thread);
            }
        }
    }

    thread->IncrementYieldCount();
    SetReselectionPending();
}

void GlobalScheduler::Shutdown() {
    for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        scheduled_queue[core].clear();
        suggested_queue[core].clear();
    }
    thread_list.clear();
}

void GlobalScheduler::Lock() {
    Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
    ASSERT(!current_thread.IsInvalid());
    if (current_thread == current_owner) {
        ++scope_lock;
    } else {
        inner_lock.lock();
        is_locked = true;
        current_owner = current_thread;
        ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
        scope_lock = 1;
    }
}

void GlobalScheduler::Unlock() {
    if (--scope_lock != 0) {
        ASSERT(scope_lock > 0);
        return;
    }
    u32 cores_pending_reschedule = SelectThreads();
    Core::EmuThreadHandle leaving_thread = current_owner;
    current_owner = Core::EmuThreadHandle::InvalidHandle();
    scope_lock = 1;
    is_locked = false;
    inner_lock.unlock();
    EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
}
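Lock() and Unlock() above form a re-entrant ownership lock: the first Lock() by an emu thread takes the inner mutex and records the owner, nested Lock() calls from the same owner only bump scope_lock, and the final Unlock() reselects threads before releasing. A minimal, self-contained sketch of that pattern; the std::thread::id owner tracking is a simplified stand-in for the EmuThreadHandle bookkeeping, not the code from this change:

#include <cassert>
#include <mutex>
#include <thread>

class RecursiveScopeLock {
public:
    void Lock() {
        const auto me = std::this_thread::get_id();
        if (owner == me) {
            ++depth; // re-entrant acquire: only count, mutex already held
            return;
        }
        inner.lock(); // first acquire by this owner
        owner = me;
        depth = 1;
    }

    void Unlock() {
        assert(depth > 0);
        if (--depth != 0) {
            return; // still held by an outer scope
        }
        owner = {}; // last release: the real code reselects threads here
        inner.unlock();
    }

private:
    std::mutex inner;
    std::thread::id owner{};
    int depth{0};
};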
Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
    switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
}

Scheduler::~Scheduler() = default;

bool Scheduler::HaveReadyThreads() const {
    return system.GlobalScheduler().HaveReadyThreads(core_id);
}

Thread* Scheduler::GetCurrentThread() const {
    if (current_thread) {
        return current_thread.get();
    }
    return idle_thread.get();
}

Thread* Scheduler::GetSelectedThread() const {
    return selected_thread.get();
}

u64 Scheduler::GetLastContextSwitchTicks() const {
    return last_context_switch_time;
}

void Scheduler::TryDoContextSwitch() {
    auto& phys_core = system.Kernel().CurrentPhysicalCore();
    if (phys_core.IsInterrupted()) {
        phys_core.ClearInterrupt();
    }
    guard.lock();
    if (is_context_switch_pending) {
        SwitchContext();
    } else {
        guard.unlock();
    }
}

void Scheduler::OnThreadStart() {
    SwitchContextStep2();
}

void Scheduler::Unload(Thread* thread) {
    if (thread) {
        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
        thread->SetIsRunning(false);
        if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
            system.ArmInterface(core_id).ExceptionalExit();
            thread->SetContinuousOnSVC(false);
        }
        if (!thread->IsHLEThread() && !thread->HasExited()) {
            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
            cpu_core.SaveContext(thread->GetContext32());
            cpu_core.SaveContext(thread->GetContext64());
            // Save the TPIDR_EL0 system register in case it was modified.
            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
            cpu_core.ClearExclusiveState();
        }
        thread->context_guard.unlock();
    }
}

void Scheduler::Unload() {
    Unload(current_thread.get());
}

void Scheduler::Reload(Thread* thread) {
    if (thread) {
        ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
                   "Thread must be runnable.");

        // Cancel any outstanding wakeup events for this thread
        thread->SetIsRunning(true);
        thread->SetWasRunning(false);
        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();

        auto* const thread_owner_process = thread->GetOwnerProcess();
        if (thread_owner_process != nullptr) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }
        if (!thread->IsHLEThread()) {
            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
            cpu_core.LoadContext(thread->GetContext32());
            cpu_core.LoadContext(thread->GetContext64());
            cpu_core.SetTlsAddress(thread->GetTLSAddress());
            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
            cpu_core.ClearExclusiveState();
        }
    }
}

void Scheduler::Reload() {
    Reload(current_thread.get());
}

void Scheduler::SwitchContextStep2() {
    // Load context of new thread
    Reload(selected_thread.get());

    TryDoContextSwitch();
}

void Scheduler::SwitchContext() {
    current_thread_prev = current_thread;
    selected_thread = selected_thread_set;
    Thread* previous_thread = current_thread_prev.get();
    Thread* new_thread = selected_thread.get();
    current_thread = selected_thread;

    is_context_switch_pending = false;

    if (new_thread == previous_thread) {
        guard.unlock();
        return;
    }

    Process* const previous_process = system.Kernel().CurrentProcess();

    UpdateLastContextSwitchTime(previous_thread, previous_process);

    // Save context for previous thread
    Unload(previous_thread);

    std::shared_ptr<Common::Fiber>* old_context;
    if (previous_thread != nullptr) {
        old_context = &previous_thread->GetHostContext();
    } else {
        old_context = &idle_thread->GetHostContext();
    }
    guard.unlock();

    Common::Fiber::YieldTo(*old_context, switch_fiber);
    /// When a thread wakes up, the scheduler may have changed to that of another core.
    auto& next_scheduler = system.Kernel().CurrentScheduler();
    next_scheduler.SwitchContextStep2();
}

void Scheduler::OnSwitch(void* this_scheduler) {
    Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
    sched->SwitchToCurrent();
}

void Scheduler::SwitchToCurrent() {
    while (true) {
        {
            std::scoped_lock lock{guard};
            selected_thread = selected_thread_set;
            current_thread = selected_thread;
            is_context_switch_pending = false;
        }
        const auto is_switch_pending = [this] {
            std::scoped_lock lock{guard};
            return is_context_switch_pending;
        };
        do {
            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                current_thread->context_guard.lock();
                if (!current_thread->IsRunnable()) {
                    current_thread->context_guard.unlock();
                    break;
                }
                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
                    current_thread->context_guard.unlock();
                    break;
                }
            }
            std::shared_ptr<Common::Fiber>* next_context;
            if (current_thread != nullptr) {
                next_context = &current_thread->GetHostContext();
            } else {
                next_context = &idle_thread->GetHostContext();
            }
            Common::Fiber::YieldTo(switch_fiber, *next_context);
        } while (!is_switch_pending());
    }
}

void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
    const u64 prev_switch_ticks = last_context_switch_time;
    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
    const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;

    if (thread != nullptr) {
        thread->UpdateCPUTimeTicks(update_ticks);
    }

    if (process != nullptr) {
        process->UpdateCPUTimeTicks(update_ticks);
    }

    last_context_switch_time = most_recent_switch_ticks;
}

void Scheduler::Initialize() {
    std::string name = "Idle Thread Id:" + std::to_string(core_id);
    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
                                     nullptr, std::move(init_func), init_func_parameter);
    idle_thread = std::move(thread_res).Unwrap();
}

void Scheduler::Shutdown() {
    current_thread = nullptr;
    selected_thread = nullptr;
}

SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
    kernel.GlobalScheduler().Lock();
}

SchedulerLock::~SchedulerLock() {
    kernel.GlobalScheduler().Unlock();
}

SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
                                             Thread* time_task, s64 nanoseconds)
    : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
                                                                                   nanoseconds} {
    event_handle = InvalidHandle;
}

SchedulerLockAndSleep::~SchedulerLockAndSleep() {
    if (sleep_cancelled) {
        return;
    }
    auto& time_manager = kernel.TimeManager();
    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
}

void SchedulerLockAndSleep::Release() {
    if (sleep_cancelled) {
        return;
    }
    auto& time_manager = kernel.TimeManager();
    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
    sleep_cancelled = true;
}

} // namespace Kernel
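SchedulerLockAndSleep above only arms the wakeup event when the guard is destroyed without CancelSleep() having been called; the waiting code sets up its state while the lock is held and lets the destructor do the timing. A self-contained sketch of that "arm on destruction unless cancelled" idea, with simplified types that are assumptions for illustration rather than the kernel's own:

#include <cstdint>
#include <functional>

class ScopedTimedWait {
public:
    ScopedTimedWait(std::function<void(std::int64_t)> arm_timeout, std::int64_t ns)
        : arm_timeout(std::move(arm_timeout)), ns(ns) {}

    ~ScopedTimedWait() {
        if (!cancelled) {
            arm_timeout(ns); // real code: TimeManager::ScheduleTimeEvent(...)
        }
    }

    void CancelSleep() {
        cancelled = true; // the wait finished (or was aborted) before the guard died
    }

private:
    std::function<void(std::int64_t)> arm_timeout;
    std::int64_t ns;
    bool cancelled = false;
};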
@@ -1,320 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <memory>
#include <mutex>
#include <vector>

#include "common/common_types.h"
#include "common/multi_level_queue.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/thread.h"

namespace Common {
class Fiber;
}

namespace Core {
class ARM_Interface;
class System;
} // namespace Core

namespace Kernel {

class KernelCore;
class Process;
class SchedulerLock;

class GlobalScheduler final {
public:
    explicit GlobalScheduler(KernelCore& kernel);
    ~GlobalScheduler();

    /// Adds a new thread to the scheduler
    void AddThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the scheduler
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Returns a list of all threads managed by the scheduler
    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
        return thread_list;
    }

    /// Notify the scheduler a thread's status has changed.
    void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);

    /// Notify the scheduler a thread's priority has changed.
    void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);

    /// Notify the scheduler a thread's core and/or affinity mask has changed.
    void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);

    /**
     * Takes care of selecting the new scheduled threads in three steps:
     *
     * 1. First a thread is selected from the top of the priority queue. If no thread
     *    is obtained then we move to step two, else we are done.
     *
     * 2. Second we try to get a suggested thread that's not assigned to any core or
     *    that is not the top thread in that core.
     *
     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
     *    thread in another core and swap it with its current thread.
     *
     * Returns the cores needing scheduling.
     * (A simplified sketch of this three-step pass follows the end of this header.)
     */
    u32 SelectThreads();

    bool HaveReadyThreads(std::size_t core_id) const {
        return !scheduled_queue[core_id].empty();
    }

    /**
     * Takes a thread and moves it to the back of its priority list.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThread(Thread* thread);

    /**
     * Takes a thread and moves it to the back of its priority list.
     * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
     * a better priority than the next thread in the core.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThreadAndBalanceLoad(Thread* thread);

    /**
     * Takes a thread and moves it out of the scheduling queue
     * and into the suggested queue. If no thread can be scheduled afterwards in that core,
     * a suggested thread is obtained instead.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThreadAndWaitForLoadBalancing(Thread* thread);

    /**
     * Rotates the scheduling queues of threads at a preemption priority and then does
     * some core rebalancing. Preemption priorities can be found in the array
     * 'preemption_priorities'.
     *
     * @note This operation happens every 10ms.
     */
    void PreemptThreads();

    u32 CpuCoresCount() const {
        return Core::Hardware::NUM_CPU_CORES;
    }

    void SetReselectionPending() {
        is_reselection_pending.store(true, std::memory_order_release);
    }

    bool IsReselectionPending() const {
        return is_reselection_pending.load(std::memory_order_acquire);
    }

    void Shutdown();

private:
    friend class SchedulerLock;

    /// Lock the scheduler to the current thread.
    void Lock();

    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
    /// and reschedules the current core if needed.
    void Unlock();

    void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                    Core::EmuThreadHandle global_thread);

    /**
     * Add a thread to the suggested queue of a cpu core. Suggested threads may be
     * picked if no thread is scheduled to run on the core.
     */
    void Suggest(u32 priority, std::size_t core, Thread* thread);

    /**
     * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
     * picked if no thread is scheduled to run on the core.
     */
    void Unsuggest(u32 priority, std::size_t core, Thread* thread);

    /**
     * Add a thread to the scheduling queue of a cpu core. The thread is added at the
     * back of the queue in its priority level.
     */
    void Schedule(u32 priority, std::size_t core, Thread* thread);

    /**
     * Add a thread to the scheduling queue of a cpu core. The thread is added at the
     * front of the queue in its priority level.
     */
    void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);

    /// Reschedule an already scheduled thread based on a new priority
    void Reschedule(u32 priority, std::size_t core, Thread* thread);

    /// Unschedules a thread.
    void Unschedule(u32 priority, std::size_t core, Thread* thread);

    /**
     * Transfers a thread into a specific core. If the destination_core is -1
     * it will be unscheduled from its source core and added into its suggested
     * queue.
     */
    void TransferToCore(u32 priority, s32 destination_core, Thread* thread);

    bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);

    static constexpr u32 min_regular_priority = 2;
    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
        scheduled_queue;
    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
        suggested_queue;
    std::atomic<bool> is_reselection_pending{false};

    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
    // ordered from Core 0 to Core 3.
    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};

    /// Scheduler lock mechanisms.
    bool is_locked{};
    std::mutex inner_lock;
    std::atomic<s64> scope_lock{};
    Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};

    Common::SpinLock global_list_guard{};

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<std::shared_ptr<Thread>> thread_list;
    KernelCore& kernel;
};

class Scheduler final {
public:
    explicit Scheduler(Core::System& system, std::size_t core_id);
    ~Scheduler();

    /// Returns whether there are any threads that are ready to run.
    bool HaveReadyThreads() const;

    /// Reschedules to the next available thread (call after current thread is suspended)
    void TryDoContextSwitch();

    /// The next two are for SingleCore Only.
    /// Unload current thread before preempting core.
    void Unload(Thread* thread);
    void Unload();
    /// Reload current thread after core preemption.
    void Reload(Thread* thread);
    void Reload();

    /// Gets the current running thread
    Thread* GetCurrentThread() const;

    /// Gets the currently selected thread from the top of the multilevel queue
    Thread* GetSelectedThread() const;

    /// Gets the timestamp for the last context switch in ticks.
    u64 GetLastContextSwitchTicks() const;

    bool ContextSwitchPending() const {
        return is_context_switch_pending;
    }

    void Initialize();

    /// Shuts down the scheduler.
    void Shutdown();

    void OnThreadStart();

    std::shared_ptr<Common::Fiber>& ControlContext() {
        return switch_fiber;
    }

    const std::shared_ptr<Common::Fiber>& ControlContext() const {
        return switch_fiber;
    }

private:
    friend class GlobalScheduler;

    /// Switches the CPU's active thread context to that of the specified thread
    void SwitchContext();

    /// When a thread wakes up, it must run this through its new scheduler
    void SwitchContextStep2();

    /**
     * Called on every context switch to update the internal timestamp.
     * This also updates the running time ticks for the given thread and
     * process using the following difference:
     *
     * ticks += most_recent_ticks - last_context_switch_ticks
     *
     * The internal tick timestamp for the scheduler is simply the
     * most recent tick count retrieved. No special arithmetic is
     * applied to it.
     */
    void UpdateLastContextSwitchTime(Thread* thread, Process* process);

    static void OnSwitch(void* this_scheduler);
    void SwitchToCurrent();

    std::shared_ptr<Thread> current_thread = nullptr;
    std::shared_ptr<Thread> selected_thread = nullptr;
    std::shared_ptr<Thread> current_thread_prev = nullptr;
    std::shared_ptr<Thread> selected_thread_set = nullptr;
    std::shared_ptr<Thread> idle_thread = nullptr;

    std::shared_ptr<Common::Fiber> switch_fiber = nullptr;

    Core::System& system;
    u64 last_context_switch_time = 0;
    u64 idle_selection_count = 0;
    const std::size_t core_id;

    Common::SpinLock guard{};

    bool is_context_switch_pending = false;
};

class SchedulerLock {
public:
    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
    ~SchedulerLock();

protected:
    KernelCore& kernel;
};

class SchedulerLockAndSleep : public SchedulerLock {
public:
    explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
                                   s64 nanoseconds);
    ~SchedulerLockAndSleep();

    void CancelSleep() {
        sleep_cancelled = true;
    }

    void Release();

private:
    Handle& event_handle;
    Thread* time_task;
    s64 nanoseconds;
    bool sleep_cancelled{};
};

} // namespace Kernel
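The SelectThreads() documentation above describes a three-step pass: take the top of the core's own priority queue, otherwise adopt a suggested thread, otherwise steal a running thread from another core. A compressed, self-contained sketch of that control flow; the flat per-core queues are simplified stand-ins (the removed implementation works on Common::MultiLevelQueue keyed by priority and also handles migration and locking):

#include <array>
#include <cstddef>
#include <deque>

namespace sketch {

struct Thread;
constexpr std::size_t NumCores = 4;

// Simplified per-core queues; the real scheduler keys these by priority.
std::array<std::deque<Thread*>, NumCores> scheduled; // threads pinned to this core
std::array<std::deque<Thread*>, NumCores> suggested; // threads that could migrate here

Thread* SelectThreadForCore(std::size_t core) {
    // Step 1: highest-priority thread already scheduled on this core.
    if (!scheduled[core].empty()) {
        return scheduled[core].front();
    }
    // Step 2: a suggested thread that is not the top thread of another core.
    for (Thread* candidate : suggested[core]) {
        bool runs_elsewhere = false;
        for (std::size_t other = 0; other < NumCores; ++other) {
            if (other != core && !scheduled[other].empty() &&
                scheduled[other].front() == candidate) {
                runs_elsewhere = true; // about to run elsewhere, skip it
                break;
            }
        }
        if (!runs_elsewhere) {
            return candidate; // would be migrated onto this core
        }
    }
    // Step 3: nothing found; the real code does a second pass that swaps a
    // running thread from another core onto this one. Idle otherwise.
    return nullptr;
}

} // namespace sketch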
@@ -14,9 +14,9 @@
 #include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/session.h"
 #include "core/hle/kernel/thread.h"

@@ -170,7 +170,7 @@ ResultCode ServerSession::CompleteSyncRequest() {
 
     // Some service requests require the thread to block
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (!context.IsThreadWaiting()) {
             context.GetThread().ResumeFromWait();
             context.GetThread().SetSynchronizationResults(nullptr, result);

@@ -24,6 +24,8 @@
 #include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block.h"
 #include "core/hle/kernel/memory/page_table.h"

@@ -32,7 +34,6 @@
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
 #include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/svc.h"
 #include "core/hle/kernel/svc_types.h"

@@ -329,7 +330,8 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
 
 /// Makes a blocking IPC call to an OS service.
 static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
-    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+    auto& kernel = system.Kernel();
+    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
     std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
     if (!session) {
         LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);

@@ -338,9 +340,9 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
 
     LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
 
-    auto thread = system.CurrentScheduler().GetCurrentThread();
+    auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
-        SchedulerLock lock(system.Kernel());
+        KScopedSchedulerLock lock(kernel);
         thread->InvalidateHLECallback();
         thread->SetStatus(ThreadStatus::WaitIPC);
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());

@@ -349,12 +351,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     if (thread->HasHLECallback()) {
         Handle event_handle = thread->GetHLETimeEvent();
         if (event_handle != InvalidHandle) {
-            auto& time_manager = system.Kernel().TimeManager();
+            auto& time_manager = kernel.TimeManager();
             time_manager.UnscheduleTimeEvent(event_handle);
         }
 
         {
-            SchedulerLock lock(system.Kernel());
+            KScopedSchedulerLock lock(kernel);
             auto* sync_object = thread->GetHLESyncObject();
             sync_object->RemoveWaitingThread(SharedFrom(thread));
         }

@@ -654,7 +656,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
         info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
 
     if (!break_reason.signal_debugger) {
-        SchedulerLock lock(system.Kernel());
         LOG_CRITICAL(
             Debug_Emulated,
             "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",

@@ -662,13 +663,9 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
 
         handle_debug_buffer(info1, info2);
 
-        auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+        auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
         const auto thread_processor_id = current_thread->GetProcessorID();
         system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
 
-        // Kill the current thread
-        system.Kernel().ExceptionalExit();
-        current_thread->Stop();
     }
 }

@@ -918,7 +915,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
         }
 
         const auto& core_timing = system.CoreTiming();
-        const auto& scheduler = system.CurrentScheduler();
+        const auto& scheduler = *system.Kernel().CurrentScheduler();
         const auto* const current_thread = scheduler.GetCurrentThread();
         const bool same_thread = current_thread == thread.get();
 

@@ -1086,7 +1083,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
         return ERR_INVALID_HANDLE;
     }
 
-    if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
+    if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
         LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
         return ERR_BUSY;
     }

@@ -1119,7 +1116,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
         return ERR_INVALID_HANDLE;
     }
 
-    if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
+    if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
         LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
         return ERR_BUSY;
     }

@@ -1475,7 +1472,7 @@ static void ExitProcess(Core::System& system) {
     current_process->PrepareForTermination();
 
     // Kill the current thread
-    system.CurrentScheduler().GetCurrentThread()->Stop();
+    system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop();
 }
 
 static void ExitProcess32(Core::System& system) {

@@ -1575,8 +1572,8 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
 static void ExitThread(Core::System& system) {
     LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
 
-    auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
-    system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
+    auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+    system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
     current_thread->Stop();
 }
 

@@ -1589,44 +1586,31 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
     LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds);
 
     enum class SleepType : s64 {
-        YieldWithoutLoadBalancing = 0,
-        YieldWithLoadBalancing = -1,
+        YieldWithoutCoreMigration = 0,
+        YieldWithCoreMigration = -1,
         YieldAndWaitForLoadBalancing = -2,
     };
 
-    auto& scheduler = system.CurrentScheduler();
-    auto* const current_thread = scheduler.GetCurrentThread();
-    bool is_redundant = false;
+    auto& scheduler = *system.Kernel().CurrentScheduler();
 
     if (nanoseconds <= 0) {
        switch (static_cast<SleepType>(nanoseconds)) {
-        case SleepType::YieldWithoutLoadBalancing: {
-            auto pair = current_thread->YieldSimple();
-            is_redundant = pair.second;
+        case SleepType::YieldWithoutCoreMigration: {
+            scheduler.YieldWithoutCoreMigration();
             break;
        }
-        case SleepType::YieldWithLoadBalancing: {
-            auto pair = current_thread->YieldAndBalanceLoad();
-            is_redundant = pair.second;
+        case SleepType::YieldWithCoreMigration: {
+            scheduler.YieldWithCoreMigration();
             break;
        }
        case SleepType::YieldAndWaitForLoadBalancing: {
-            auto pair = current_thread->YieldAndWaitForLoadBalancing();
-            is_redundant = pair.second;
+            scheduler.YieldToAnyThread();
            break;
        }
        default:
            UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
        }
     } else {
-        current_thread->Sleep(nanoseconds);
-    }
-
-    if (is_redundant && !system.Kernel().IsMulticore()) {
-        system.Kernel().ExitSVCProfile();
-        system.CoreTiming().AddTicks(1000U);
-        system.GetCpuManager().PreemptSingleCore();
-        system.Kernel().EnterSVCProfile();
+        scheduler.GetCurrentThread()->Sleep(nanoseconds);
     }
 }
 
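SleepThread() above overloads non-positive durations as yield requests: 0 yields without core migration, -1 yields and allows migration, and -2 parks the thread until load balancing picks it up again; positive values sleep for that many nanoseconds. A small self-contained sketch of that dispatch, independent of the kernel types (the print statements stand in for the scheduler calls):

#include <cstdint>
#include <iostream>

// Mirrors the special values used by SleepThread(); the handlers are placeholders.
enum class SleepType : std::int64_t {
    YieldWithoutCoreMigration = 0,
    YieldWithCoreMigration = -1,
    YieldAndWaitForLoadBalancing = -2,
};

void HandleSleep(std::int64_t nanoseconds) {
    if (nanoseconds > 0) {
        std::cout << "sleep for " << nanoseconds << " ns\n"; // real code: thread->Sleep(ns)
        return;
    }
    switch (static_cast<SleepType>(nanoseconds)) {
    case SleepType::YieldWithoutCoreMigration:
        std::cout << "yield, stay on this core\n";
        break;
    case SleepType::YieldWithCoreMigration:
        std::cout << "yield, allow migration to another core\n";
        break;
    case SleepType::YieldAndWaitForLoadBalancing:
        std::cout << "yield to any thread and wait to be rebalanced\n";
        break;
    default:
        std::cout << "unsupported yield value\n";
        break;
    }
}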
@@ -1661,10 +1645,10 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
     ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
     auto& kernel = system.Kernel();
     Handle event_handle;
-    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
-    auto* const current_process = system.Kernel().CurrentProcess();
+    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
+    auto* const current_process = kernel.CurrentProcess();
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
         const auto& handle_table = current_process->GetHandleTable();
         std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
         ASSERT(thread);

@@ -1700,7 +1684,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
 
         auto* owner = current_thread->GetLockOwner();
         if (owner != nullptr) {

@@ -1731,7 +1715,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
 
     // Retrieve a list of all threads that are waiting for this condition variable.
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     auto* const current_process = kernel.CurrentProcess();
     std::vector<std::shared_ptr<Thread>> waiting_threads =
         current_process->GetConditionVariableThreads(condition_variable_addr);

@@ -1993,7 +1977,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
     }
 
     *core = thread->GetIdealCore();
-    *mask = thread->GetAffinityMask();
+    *mask = thread->GetAffinityMask().GetAffinityMask();
 
     return RESULT_SUCCESS;
 }

@@ -2629,7 +2613,7 @@ void Call(Core::System& system, u32 immediate) {
     auto& kernel = system.Kernel();
     kernel.EnterSVCProfile();
 
-    auto* thread = system.CurrentScheduler().GetCurrentThread();
+    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
     thread->SetContinuousOnSVC(true);
 
     const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)

@@ -5,8 +5,9 @@
 #include "core/core.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/synchronization_object.h"
 #include "core/hle/kernel/thread.h"

@@ -18,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {}
 
 void Synchronization::SignalObject(SynchronizationObject& obj) const {
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (obj.IsSignaled()) {
         for (auto thread : obj.GetWaitingThreads()) {
             if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {

@@ -37,10 +38,10 @@ void Synchronization::SignalObject(SynchronizationObject& obj) const {
 std::pair<ResultCode, Handle> Synchronization::WaitFor(
     std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
     auto& kernel = system.Kernel();
-    auto* const thread = system.CurrentScheduler().GetCurrentThread();
+    auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
     Handle event_handle = InvalidHandle;
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
         const auto itr =
             std::find_if(sync_objects.begin(), sync_objects.end(),
                          [thread](const std::shared_ptr<SynchronizationObject>& object) {

@@ -89,7 +90,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         ResultCode signaling_result = thread->GetSignalingResult();
         SynchronizationObject* signaling_object = thread->GetSignalingObject();
         thread->SetSynchronizationObjects(nullptr);

@@ -17,10 +17,11 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/result.h"

@@ -50,7 +51,7 @@ Thread::~Thread() = default;
 
 void Thread::Stop() {
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         SetStatus(ThreadStatus::Dead);
         Signal();
         kernel.GlobalHandleTable().Close(global_handle);

@@ -67,7 +68,7 @@ void Thread::Stop() {
 }
 
 void Thread::ResumeFromWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     switch (status) {
     case ThreadStatus::Paused:
     case ThreadStatus::WaitSynch:

@@ -99,19 +100,18 @@ void Thread::ResumeFromWait() {
 }
 
 void Thread::OnWakeUp() {
-    SchedulerLock lock(kernel);
-
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
 }
 
 ResultCode Thread::Start() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
     return RESULT_SUCCESS;
 }
 
 void Thread::CancelWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
         is_sync_cancelled = true;
         return;

@@ -186,12 +186,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->status = ThreadStatus::Dormant;
     thread->entry_point = entry_point;
     thread->stack_top = stack_top;
+    thread->disable_count = 1;
     thread->tpidr_el0 = 0;
     thread->nominal_priority = thread->current_priority = priority;
-    thread->last_running_ticks = 0;
+    thread->schedule_count = -1;
+    thread->last_scheduled_tick = 0;
     thread->processor_id = processor_id;
     thread->ideal_core = processor_id;
-    thread->affinity_mask = 1ULL << processor_id;
+    thread->affinity_mask.SetAffinity(processor_id, true);
     thread->wait_objects = nullptr;
     thread->mutex_wait_address = 0;
     thread->condvar_wait_address = 0;

@@ -201,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->owner_process = owner_process;
     thread->type = type_flags;
     if ((type_flags & THREADTYPE_IDLE) == 0) {
-        auto& scheduler = kernel.GlobalScheduler();
+        auto& scheduler = kernel.GlobalSchedulerContext();
         scheduler.AddThread(thread);
     }
     if (owner_process) {

@@ -225,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
 }
 
 void Thread::SetPriority(u32 priority) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
                "Invalid priority value.");
     nominal_priority = priority;

@@ -362,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
 }
 
 ResultCode Thread::SetActivity(ThreadActivity value) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
 
     auto sched_status = GetSchedulingStatus();
 

@@ -391,7 +393,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
 ResultCode Thread::Sleep(s64 nanoseconds) {
     Handle event_handle{};
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
         SetStatus(ThreadStatus::WaitSleep);
     }
 

@@ -402,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     return RESULT_SUCCESS;
 }
 
-std::pair<ResultCode, bool> Thread::YieldSimple() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThread(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
 void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
     const u32 old_state = scheduling_state;
     pausing_state |= static_cast<u32>(flag);
     const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
     scheduling_state = base_scheduling | pausing_state;
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {

@@ -442,23 +417,24 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
     pausing_state &= ~static_cast<u32>(flag);
     const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
     scheduling_state = base_scheduling | pausing_state;
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
     const u32 old_state = scheduling_state;
     scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
                        static_cast<u32>(new_status);
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::SetCurrentPriority(u32 new_priority) {
     const u32 old_priority = std::exchange(current_priority, new_priority);
-    kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
+    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
+                                        old_priority);
 }
 
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
         for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
             if (((mask >> core) & 1) != 0) {

@@ -479,20 +455,21 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     }
     if (use_override) {
         ideal_core_override = new_core;
-        affinity_mask_override = new_affinity_mask;
     } else {
-        const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
+        const auto old_affinity_mask = affinity_mask;
+        affinity_mask.SetAffinityMask(new_affinity_mask);
         ideal_core = new_core;
-        if (old_affinity_mask != new_affinity_mask) {
+        if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
             const s32 old_core = processor_id;
-            if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
+            if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
                 if (static_cast<s32>(ideal_core) < 0) {
-                    processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
+                    processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
+                                                  Core::Hardware::NUM_CPU_CORES);
                 } else {
                     processor_id = ideal_core;
                 }
             }
-            kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
+            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
         }
     }
     return RESULT_SUCCESS;
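The thread changes above replace the raw u64 affinity_mask with a KAffinityMask object (SetAffinity, SetAffinityMask, GetAffinity, GetAffinityMask). A minimal self-contained sketch of what such a wrapper looks like; the method names follow the calls made in this diff, the rest of the implementation is an assumption for illustration:

#include <cstdint>

class AffinityMaskSketch {
public:
    constexpr std::uint64_t GetAffinityMask() const {
        return mask;
    }

    constexpr bool GetAffinity(std::int32_t core) const {
        return (mask >> core) & 1; // is this core allowed?
    }

    constexpr void SetAffinity(std::int32_t core, bool allowed) {
        if (allowed) {
            mask |= (std::uint64_t{1} << core);
        } else {
            mask &= ~(std::uint64_t{1} << core);
        }
    }

    constexpr void SetAffinityMask(std::uint64_t new_mask) {
        mask = new_mask;
    }

private:
    std::uint64_t mask{};
};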
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include <array>
 #include <functional>
 #include <string>
 #include <utility>
@@ -12,6 +13,7 @@
 #include "common/common_types.h"
 #include "common/spin_lock.h"
 #include "core/arm/arm_interface.h"
+#include "core/hle/kernel/k_affinity_mask.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/synchronization_object.h"
 #include "core/hle/result.h"
@@ -27,10 +29,10 @@ class System;
 
 namespace Kernel {
 
-class GlobalScheduler;
+class GlobalSchedulerContext;
 class KernelCore;
 class Process;
-class Scheduler;
+class KScheduler;
 
 enum ThreadPriority : u32 {
     THREADPRIO_HIGHEST = 0, ///< Highest thread priority
@@ -345,8 +347,12 @@ public:
 
     void SetStatus(ThreadStatus new_status);
 
-    u64 GetLastRunningTicks() const {
-        return last_running_ticks;
+    s64 GetLastScheduledTick() const {
+        return this->last_scheduled_tick;
+    }
+
+    void SetLastScheduledTick(s64 tick) {
+        this->last_scheduled_tick = tick;
     }
 
     u64 GetTotalCPUTimeTicks() const {
@@ -361,10 +367,18 @@ public:
         return processor_id;
     }
 
+    s32 GetActiveCore() const {
+        return GetProcessorID();
+    }
+
     void SetProcessorID(s32 new_core) {
         processor_id = new_core;
     }
 
+    void SetActiveCore(s32 new_core) {
+        processor_id = new_core;
+    }
+
     Process* GetOwnerProcess() {
         return owner_process;
     }
@@ -469,7 +483,7 @@ public:
         return ideal_core;
     }
 
-    u64 GetAffinityMask() const {
+    const KAffinityMask& GetAffinityMask() const {
         return affinity_mask;
     }
 
@@ -478,21 +492,12 @@ public:
     /// Sleeps this thread for the given amount of nanoseconds.
    ResultCode Sleep(s64 nanoseconds);
 
-    /// Yields this thread without rebalancing loads.
-    std::pair<ResultCode, bool> YieldSimple();
-
-    /// Yields this thread and does a load rebalancing.
-    std::pair<ResultCode, bool> YieldAndBalanceLoad();
-
-    /// Yields this thread and, if the core is left idle, loads are rebalanced.
-    std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
-
-    void IncrementYieldCount() {
-        yield_count++;
+    s64 GetYieldScheduleCount() const {
+        return this->schedule_count;
     }
 
-    u64 GetYieldCount() const {
-        return yield_count;
+    void SetYieldScheduleCount(s64 count) {
+        this->schedule_count = count;
     }
 
     ThreadSchedStatus GetSchedulingStatus() const {
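With this hunk the yield bookkeeping (and the three Yield* entry points) moves out of Thread; what remains is a schedule_count that the scheduler stamps on the thread. The scheduler side is not part of this excerpt, so the snippet below is only an illustrative sketch of how such a counter can distinguish a redundant yield (nothing else ran in between) from one that needs a requeue; the function and parameter names are assumptions, not the commit's API.

    // Illustrative sketch: deciding whether a yield did any work by comparing
    // the thread's recorded schedule count against the core's current count.
    #include <cstdint>

    // `thread_schedule_count` is what the thread stored the last time it was
    // scheduled; `core_schedule_count` would advance every time the core's
    // scheduler picks a new thread to run.
    inline bool YieldRequiresReschedule(int64_t thread_schedule_count,
                                        int64_t core_schedule_count) {
        // Equal counts mean no other thread was scheduled on this core since
        // the caller last ran, so the yield can be treated as a no-op.
        return thread_schedule_count != core_schedule_count;
    }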
|
@ -568,9 +573,59 @@ public:
|
||||||
return has_exited;
|
return has_exited;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class QueueEntry {
|
||||||
|
public:
|
||||||
|
constexpr QueueEntry() = default;
|
||||||
|
|
||||||
|
constexpr void Initialize() {
|
||||||
|
this->prev = nullptr;
|
||||||
|
this->next = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr Thread* GetPrev() const {
|
||||||
|
return this->prev;
|
||||||
|
}
|
||||||
|
constexpr Thread* GetNext() const {
|
||||||
|
return this->next;
|
||||||
|
}
|
||||||
|
constexpr void SetPrev(Thread* thread) {
|
||||||
|
this->prev = thread;
|
||||||
|
}
|
||||||
|
constexpr void SetNext(Thread* thread) {
|
||||||
|
this->next = thread;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
Thread* prev{};
|
||||||
|
Thread* next{};
|
||||||
|
};
|
||||||
|
|
||||||
|
QueueEntry& GetPriorityQueueEntry(s32 core) {
|
||||||
|
return this->per_core_priority_queue_entry[core];
|
||||||
|
}
|
||||||
|
|
||||||
|
const QueueEntry& GetPriorityQueueEntry(s32 core) const {
|
||||||
|
return this->per_core_priority_queue_entry[core];
|
||||||
|
}
|
||||||
|
|
||||||
|
s32 GetDisableDispatchCount() const {
|
||||||
|
return disable_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
void DisableDispatch() {
|
||||||
|
ASSERT(GetDisableDispatchCount() >= 0);
|
||||||
|
disable_count++;
|
||||||
|
}
|
||||||
|
|
||||||
|
void EnableDispatch() {
|
||||||
|
ASSERT(GetDisableDispatchCount() > 0);
|
||||||
|
disable_count--;
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
friend class GlobalScheduler;
|
friend class GlobalSchedulerContext;
|
||||||
friend class Scheduler;
|
friend class KScheduler;
|
||||||
|
friend class Process;
|
||||||
|
|
||||||
void SetSchedulingStatus(ThreadSchedStatus new_status);
|
void SetSchedulingStatus(ThreadSchedStatus new_status);
|
||||||
void AddSchedulingFlag(ThreadSchedFlags flag);
|
void AddSchedulingFlag(ThreadSchedFlags flag);
|
||||||
|
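QueueEntry gives every thread one prev/next pair per CPU core, i.e. an intrusive doubly linked list node, so a scheduler can enqueue or remove the thread from a per-core run queue in O(1) without allocating. The actual priority queue lives in the new scheduler files, which are not shown in this excerpt; the snippet below is only a self-contained sketch of the linking pattern, using a stripped-down stand-in for Thread rather than the commit's types.

    // Sketch of an intrusive FIFO built from per-thread links (stand-in types,
    // not the commit's priority queue).
    struct ThreadStub {
        ThreadStub* prev{};
        ThreadStub* next{};
    };

    struct RunQueueSketch {
        ThreadStub* front{};
        ThreadStub* back{};

        // O(1) enqueue: the links live inside the thread object itself.
        void PushBack(ThreadStub* t) {
            t->prev = back;
            t->next = nullptr;
            if (back != nullptr) {
                back->next = t;
            } else {
                front = t;
            }
            back = t;
        }

        // O(1) removal from anywhere in the queue, given only the thread.
        void Remove(ThreadStub* t) {
            if (t->prev != nullptr) {
                t->prev->next = t->next;
            } else {
                front = t->next;
            }
            if (t->next != nullptr) {
                t->next->prev = t->prev;
            } else {
                back = t->prev;
            }
            t->prev = nullptr;
            t->next = nullptr;
        }
    };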
@@ -583,12 +638,14 @@ private:
     ThreadContext64 context_64{};
     std::shared_ptr<Common::Fiber> host_context{};
 
-    u64 thread_id = 0;
-
     ThreadStatus status = ThreadStatus::Dormant;
+    u32 scheduling_state = 0;
+
+    u64 thread_id = 0;
 
     VAddr entry_point = 0;
     VAddr stack_top = 0;
+    std::atomic_int disable_count = 0;
 
     ThreadType type;
 
@@ -602,9 +659,8 @@ private:
     u32 current_priority = 0;
 
     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
-    u64 last_running_ticks = 0;   ///< CPU tick when thread was last running
-    u64 yield_count = 0;          ///< Number of redundant yields carried by this thread.
-                                  ///< a redundant yield is one where no scheduling is changed
+    s64 schedule_count{};
+    s64 last_scheduled_tick{};
 
     s32 processor_id = 0;
 
@@ -646,16 +702,16 @@ private:
     Handle hle_time_event;
     SynchronizationObject* hle_object;
 
-    Scheduler* scheduler = nullptr;
+    KScheduler* scheduler = nullptr;
+
+    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
 
     u32 ideal_core{0xFFFFFFFF};
-    u64 affinity_mask{0x1};
+    KAffinityMask affinity_mask{};
 
     s32 ideal_core_override = -1;
-    u64 affinity_mask_override = 0x1;
     u32 affinity_override_count = 0;
 
-    u32 scheduling_state = 0;
     u32 pausing_state = 0;
     bool is_running = false;
     bool is_waiting_on_sync = false;
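The new disable_count member is an atomic counter behind the DisableDispatch/EnableDispatch pair added earlier in this header. A common way to keep such a counter balanced is an RAII guard; whether this commit ships one is not visible in this excerpt, so the following is purely an illustrative sketch built on the two methods shown above.

    // Illustrative RAII guard (assumed, not taken from the commit): pairs every
    // DisableDispatch() with an EnableDispatch(), even on early returns.
    template <typename ThreadLike>
    class ScopedDisableDispatch {
    public:
        explicit ScopedDisableDispatch(ThreadLike& thread_) : thread{thread_} {
            thread.DisableDispatch();
        }
        ~ScopedDisableDispatch() {
            thread.EnableDispatch();
        }

        ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
        ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;

    private:
        ThreadLike& thread;
    };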
@@ -7,8 +7,8 @@
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 
@@ -18,12 +18,18 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
         "Kernel::TimeManagerCallback",
         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
-            const SchedulerLock lock(system.Kernel());
+            const KScopedSchedulerLock lock(system.Kernel());
             const auto proper_handle = static_cast<Handle>(thread_handle);
-            if (cancelled_events[proper_handle]) {
-                return;
+
+            std::shared_ptr<Thread> thread;
+            {
+                std::lock_guard lock{mutex};
+                if (cancelled_events[proper_handle]) {
+                    return;
+                }
+                thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
             }
-            auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
+
             if (thread) {
                 // Thread can be null if process has exited
                 thread->OnWakeUp();
@@ -56,6 +62,7 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
 }
 
 void TimeManager::CancelTimeEvent(Thread* time_task) {
+    std::lock_guard lock{mutex};
     const Handle event_handle = time_task->GetGlobalHandle();
     UnscheduleTimeEvent(event_handle);
 }
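Both TimeManager hunks route access to cancelled_events through the same mutex, so the timer callback running on the core-timing thread can no longer race with CancelTimeEvent called from elsewhere. The snippet below is a stripped-down sketch of that pattern with illustrative names; it is not the commit's TimeManager.

    // Sketch of the locking pattern: one mutex guards the cancellation map
    // that the timer callback reads and other threads write.
    #include <mutex>
    #include <unordered_map>

    class CancellationMapSketch {
    public:
        // Called from the thread cancelling a pending wakeup.
        void Cancel(unsigned handle) {
            std::lock_guard lock{mutex};
            cancelled[handle] = true;
        }

        // Called from the timer callback; returns false if the wakeup was
        // cancelled before the callback fired.
        bool ShouldFire(unsigned handle) {
            std::lock_guard lock{mutex};
            return !cancelled[handle];
        }

    private:
        std::mutex mutex;
        std::unordered_map<unsigned, bool> cancelled;
    };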
@@ -10,8 +10,8 @@
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/client_session.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/service/time/interface.h"
 #include "core/hle/service/time/time.h"
 #include "core/hle/service/time/time_sharedmemory.h"
 
@@ -2,7 +2,6 @@ add_executable(tests
     common/bit_field.cpp
     common/bit_utils.cpp
     common/fibers.cpp
-    common/multi_level_queue.cpp
     common/param_package.cpp
     common/ring_buffer.cpp
     core/arm/arm_test_common.cpp
 
@@ -1,55 +0,0 @@
-// Copyright 2019 Yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <catch2/catch.hpp>
-#include <math.h>
-#include "common/common_types.h"
-#include "common/multi_level_queue.h"
-
-namespace Common {
-
-TEST_CASE("MultiLevelQueue", "[common]") {
-    std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
-    Common::MultiLevelQueue<f32, 64> mlq;
-    REQUIRE(mlq.empty());
-    mlq.add(values[2], 2);
-    mlq.add(values[7], 7);
-    mlq.add(values[3], 3);
-    mlq.add(values[4], 4);
-    mlq.add(values[0], 0);
-    mlq.add(values[5], 5);
-    mlq.add(values[6], 6);
-    mlq.add(values[1], 1);
-    u32 index = 0;
-    bool all_set = true;
-    for (auto& f : mlq) {
-        all_set &= (f == values[index]);
-        index++;
-    }
-    REQUIRE(all_set);
-    REQUIRE(!mlq.empty());
-    f32 v = 8.0;
-    mlq.add(v, 2);
-    v = -7.0;
-    mlq.add(v, 2, false);
-    REQUIRE(mlq.front(2) == -7.0);
-    mlq.yield(2);
-    REQUIRE(mlq.front(2) == values[2]);
-    REQUIRE(mlq.back(2) == -7.0);
-    REQUIRE(mlq.empty(8));
-    v = 10.0;
-    mlq.add(v, 8);
-    mlq.adjust(v, 8, 9);
-    REQUIRE(mlq.front(9) == v);
-    REQUIRE(mlq.empty(8));
-    REQUIRE(!mlq.empty(9));
-    mlq.adjust(values[0], 0, 9);
-    REQUIRE(mlq.highest_priority_set() == 1);
-    REQUIRE(mlq.lowest_priority_set() == 9);
-    mlq.remove(values[1], 1);
-    REQUIRE(mlq.highest_priority_set() == 2);
-    REQUIRE(mlq.empty(1));
-}
-
-} // namespace Common
@@ -13,10 +13,10 @@
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/synchronization_object.h"
 #include "core/hle/kernel/thread.h"
 #include "core/memory.h"
@@ -101,7 +101,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
     };
 
     const auto& system = Core::System::GetInstance();
-    add_threads(system.GlobalScheduler().GetThreadList());
+    add_threads(system.GlobalSchedulerContext().GetThreadList());
 
     return item_list;
 }
@@ -349,14 +349,14 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
     list.push_back(
         std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore())));
-    list.push_back(
-        std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.GetAffinityMask())));
+    list.push_back(std::make_unique<WaitTreeText>(
+        tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask())));
     list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
     list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
                                                       .arg(thread.GetPriority())
                                                       .arg(thread.GetNominalPriority())));
     list.push_back(std::make_unique<WaitTreeText>(
-        tr("last running ticks = %1").arg(thread.GetLastRunningTicks())));
+        tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
 
     const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
     if (mutex_wait_address != 0) {