early-access version 2929

pineappleEA 2022-09-01 09:37:35 +02:00
parent d6cf83dcef
commit 4521504d2e
54 changed files with 8468 additions and 8686 deletions

View File

@@ -1,7 +1,7 @@
yuzu emulator early access
=============
This is the source code for early-access 2927.
This is the source code for early-access 2929.
## Legal Notice

Vendored translation files under dist/languages/ were regenerated; each per-file diff is suppressed because it is too large. The number after each file is its changed-line count:

dist/languages/ca.ts: 628
dist/languages/cs.ts: 628
dist/languages/da.ts: 628
dist/languages/de.ts: 630
dist/languages/el.ts: 628
dist/languages/es.ts: 922
dist/languages/fr.ts: 730
dist/languages/id.ts: 628
dist/languages/it.ts: 1604
dist/languages/ja_JP.ts: 1025
dist/languages/nb.ts: 628
dist/languages/nl.ts: 628
dist/languages/pl.ts: 882
dist/languages/sv.ts: 628
dist/languages/vi.ts: 628

Eight further vendored language files also have suppressed diffs; their file names are missing from the capture.

View File

@@ -46,19 +46,19 @@ public:
void ReserveRange(u64 start, std::size_t size);
[[nodiscard]] constexpr const BaseAddr& operator[](std::size_t index) const {
[[nodiscard]] const BaseAddr& operator[](std::size_t index) const {
return base_ptr[index];
}
[[nodiscard]] constexpr BaseAddr& operator[](std::size_t index) {
[[nodiscard]] BaseAddr& operator[](std::size_t index) {
return base_ptr[index];
}
[[nodiscard]] constexpr BaseAddr* data() {
[[nodiscard]] BaseAddr* data() {
return base_ptr;
}
[[nodiscard]] constexpr const BaseAddr* data() const {
[[nodiscard]] const BaseAddr* data() const {
return base_ptr;
}

View File

@@ -10,9 +10,11 @@
namespace Service::Nvidia::NvCore {
struct ContainerImpl {
ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {}
explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
: file{host1x_}, manager{host1x_}, device_file_data{} {}
NvMap file;
SyncpointManager manager;
Container::Host1xDeviceFileData device_file_data;
};
Container::Container(Tegra::Host1x::Host1x& host1x_) {
@@ -29,6 +31,14 @@ const NvMap& Container::GetNvMapFile() const {
return impl->file;
}
Container::Host1xDeviceFileData& Container::Host1xDeviceFile() {
return impl->device_file_data;
}
const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const {
return impl->device_file_data;
}
SyncpointManager& Container::GetSyncpointManager() {
return impl->manager;
}
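
The practical effect of these hunks: NVDEC/VIC bookkeeping that used to live in static members of the device classes now lives inside each Container, behind its pimpl. A minimal sketch of the pattern, with simplified names (Container, DeviceFileData, and DeviceFile here are illustrative, not the exact yuzu API):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    class Container {
    public:
        // Per-container state; previously this was static and shared process-wide.
        struct DeviceFileData {
            std::unordered_map<std::int32_t, std::uint32_t> fd_to_id;
            std::uint32_t next_id{};
        };

        Container() : impl{std::make_unique<Impl>()} {}

        DeviceFileData& DeviceFile() { return impl->data; }
        const DeviceFileData& DeviceFile() const { return impl->data; }

    private:
        struct Impl {
            DeviceFileData data;
        };
        std::unique_ptr<Impl> impl;
    };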

View File

@@ -4,15 +4,15 @@
#pragma once
#include <deque>
#include <memory>
#include <unordered_map>
namespace Tegra {
#include "core/hle/service/nvdrv/nvdata.h"
namespace Host1x {
namespace Tegra::Host1x {
class Host1x;
} // namespace Host1x
} // namespace Tegra
} // namespace Tegra::Host1x
namespace Service::Nvidia::NvCore {
@@ -23,7 +23,7 @@ struct ContainerImpl;
class Container {
public:
Container(Tegra::Host1x::Host1x& host1x);
explicit Container(Tegra::Host1x::Host1x& host1x);
~Container();
NvMap& GetNvMapFile();
@@ -34,6 +34,17 @@ public:
const SyncpointManager& GetSyncpointManager() const;
struct Host1xDeviceFileData {
std::unordered_map<DeviceFD, u32> fd_to_id{};
std::deque<u32> syncpts_accumulated{};
u32 nvdec_next_id{};
u32 vic_next_id{};
};
Host1xDeviceFileData& Host1xDeviceFile();
const Host1xDeviceFileData& Host1xDeviceFile() const;
private:
std::unique_ptr<ContainerImpl> impl;
};

View File

@@ -119,7 +119,7 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
std::scoped_lock lock(handles_lock);
try {
return handles.at(handle);
} catch ([[maybe_unused]] std::out_of_range& e) {
} catch (std::out_of_range&) {
return nullptr;
}
}
@@ -128,7 +128,7 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
std::scoped_lock lock(handles_lock);
try {
return handles.at(handle)->address;
} catch ([[maybe_unused]] std::out_of_range& e) {
} catch (std::out_of_range&) {
return 0;
}
}
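
A side note on the catch-clause change: an exception declaration with no name cannot produce an unused-variable warning, so both the binding and the [[maybe_unused]] attribute go away. A minimal sketch of the resulting shape:

    #include <stdexcept>
    #include <unordered_map>

    int Lookup(const std::unordered_map<int, int>& map, int key) {
        try {
            return map.at(key);                 // throws std::out_of_range on miss
        } catch (const std::out_of_range&) {    // unnamed: no warning, no attribute needed
            return 0;
        }
    }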

View File

@@ -98,35 +98,6 @@ public:
}
};
private:
std::list<std::shared_ptr<Handle>> unmap_queue{};
std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
handles{}; //!< Main owning map of handles
std::mutex handles_lock; //!< Protects access to `handles`
static constexpr u32 HandleIdIncrement{
4}; //!< Each new handle ID is an increment of 4 from the previous
std::atomic<u32> next_handle_id{HandleIdIncrement};
Tegra::Host1x::Host1x& host1x;
void AddHandle(std::shared_ptr<Handle> handle);
/**
* @brief Unmaps and frees the SMMU memory region a handle is mapped to
* @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
*/
void UnmapHandle(Handle& handle_description);
/**
* @brief Removes a handle from the map taking its dupes into account
* @note handle_description.mutex MUST be locked when calling this
* @return If the handle was removed from the map
*/
bool TryRemoveHandle(const Handle& handle_description);
public:
/**
* @brief Encapsulates the result of a FreeHandle operation
*/
@@ -136,7 +107,7 @@ public:
bool was_uncached; //!< If the handle was allocated as uncached
};
NvMap(Tegra::Host1x::Host1x& host1x);
explicit NvMap(Tegra::Host1x::Host1x& host1x);
/**
* @brief Creates an unallocated handle of the given size
@@ -172,5 +143,33 @@ public:
* describing the prior state of the handle
*/
std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
private:
std::list<std::shared_ptr<Handle>> unmap_queue{};
std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
handles{}; //!< Main owning map of handles
std::mutex handles_lock; //!< Protects access to `handles`
static constexpr u32 HandleIdIncrement{
4}; //!< Each new handle ID is an increment of 4 from the previous
std::atomic<u32> next_handle_id{HandleIdIncrement};
Tegra::Host1x::Host1x& host1x;
void AddHandle(std::shared_ptr<Handle> handle);
/**
* @brief Unmaps and frees the SMMU memory region a handle is mapped to
* @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
*/
void UnmapHandle(Handle& handle_description);
/**
* @brief Removes a handle from the map taking its dupes into account
* @note handle_description.mutex MUST be locked when calling this
* @return If the handle was removed from the map
*/
bool TryRemoveHandle(const Handle& handle_description);
};
} // namespace Service::Nvidia::NvCore

View File

@@ -18,23 +18,23 @@ SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host
ReserveSyncpoint(VBlank0SyncpointId, true);
ReserveSyncpoint(VBlank1SyncpointId, true);
for (u32 syncpointId : channel_syncpoints) {
if (syncpointId) {
ReserveSyncpoint(syncpointId, false);
for (u32 syncpoint_id : channel_syncpoints) {
if (syncpoint_id) {
ReserveSyncpoint(syncpoint_id, false);
}
}
}
SyncpointManager::~SyncpointManager() = default;
u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) {
u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) {
if (syncpoints.at(id).reserved) {
ASSERT_MSG(false, "Requested syncpoint is in use");
return 0;
}
syncpoints.at(id).reserved = true;
syncpoints.at(id).interfaceManaged = clientManaged;
syncpoints.at(id).interface_managed = client_managed;
return id;
}
@@ -49,9 +49,9 @@ u32 SyncpointManager::FindFreeSyncpoint() {
return 0;
}
u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) {
u32 SyncpointManager::AllocateSyncpoint(bool client_managed) {
std::lock_guard lock(reservation_lock);
return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged);
return ReserveSyncpoint(FindFreeSyncpoint(), client_managed);
}
void SyncpointManager::FreeSyncpoint(u32 id) {
@@ -64,7 +64,7 @@ bool SyncpointManager::IsSyncpointAllocated(u32 id) {
return (id <= SyncpointCount) && syncpoints[id].reserved;
}
bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const {
const SyncpointInfo& syncpoint{syncpoints.at(id)};
if (!syncpoint.reserved) {
@@ -74,10 +74,10 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
// If the interface manages counters then we don't keep track of the maximum value as it handles
// sanity checking the values then
if (syncpoint.interfaceManaged) {
return static_cast<s32>(syncpoint.counterMin - threshold) >= 0;
if (syncpoint.interface_managed) {
return static_cast<s32>(syncpoint.counter_min - threshold) >= 0;
} else {
return (syncpoint.counterMax - threshold) >= (syncpoint.counterMin - threshold);
return (syncpoint.counter_max - threshold) >= (syncpoint.counter_min - threshold);
}
}
@@ -87,7 +87,7 @@ u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
return 0;
}
return syncpoints.at(id).counterMax += amount;
return syncpoints.at(id).counter_max += amount;
}
u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
@@ -96,7 +96,7 @@ u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
return 0;
}
return syncpoints.at(id).counterMin;
return syncpoints.at(id).counter_min;
}
u32 SyncpointManager::UpdateMin(u32 id) {
@@ -105,8 +105,8 @@ u32 SyncpointManager::UpdateMin(u32 id) {
return 0;
}
syncpoints.at(id).counterMin = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
return syncpoints.at(id).counterMin;
syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
return syncpoints.at(id).counter_min;
}
NvFence SyncpointManager::GetSyncpointFence(u32 id) {
@@ -115,7 +115,7 @@ NvFence SyncpointManager::GetSyncpointFence(u32 id) {
return NvFence{};
}
return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counterMax};
return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counter_max};
}
} // namespace Service::Nvidia::NvCore
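
The interface-managed branch of HasSyncpointExpired depends on wrapping unsigned arithmetic: counter_min - threshold wraps modulo 2^32, and casting the difference to s32 asks whether counter_min sits at or past threshold on that circular number line, which stays correct even after the 32-bit counter overflows. A worked sketch using the same u32/s32 aliases:

    #include <cstdint>

    using u32 = std::uint32_t;
    using s32 = std::int32_t;

    // True when a wrapping 32-bit counter has reached 'threshold'.
    bool HasExpired(u32 counter_min, u32 threshold) {
        return static_cast<s32>(counter_min - threshold) >= 0;
    }

    // Example around the wrap point:
    //   threshold = 0xFFFFFFF0, counter_min = 0x00000010 (counter wrapped)
    //   counter_min - threshold == 0x20, cast to s32 -> 32 >= 0, so it expired,
    //   even though counter_min < threshold as plain unsigned values.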

View File

@@ -11,13 +11,9 @@
#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"
namespace Tegra {
namespace Host1x {
namespace Tegra::Host1x {
class Host1x;
} // namespace Host1x
} // namespace Tegra
} // namespace Tegra::Host1x
namespace Service::Nvidia::NvCore {
@@ -54,15 +50,15 @@ public:
* @brief Finds a free syncpoint and reserves it
* @return The ID of the reserved syncpoint
*/
u32 AllocateSyncpoint(bool clientManaged);
u32 AllocateSyncpoint(bool client_managed);
/**
* @url
* https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259
*/
bool HasSyncpointExpired(u32 id, u32 threshold);
bool HasSyncpointExpired(u32 id, u32 threshold) const;
bool IsFenceSignalled(NvFence fence) {
bool IsFenceSignalled(NvFence fence) const {
return HasSyncpointExpired(fence.id, fence.value);
}
@@ -107,7 +103,7 @@ private:
/**
* @note reservation_lock should be locked when calling this
*/
u32 ReserveSyncpoint(u32 id, bool clientManaged);
u32 ReserveSyncpoint(u32 id, bool client_managed);
/**
* @return The ID of the first free syncpoint
@@ -115,15 +111,15 @@ private:
u32 FindFreeSyncpoint();
struct SyncpointInfo {
std::atomic<u32> counterMin; //!< The least value the syncpoint can be (The value it was
std::atomic<u32> counter_min; //!< The least value the syncpoint can be (The value it was
//!< when it was last synchronized with host1x)
std::atomic<u32> counterMax; //!< The maximum value the syncpoint can reach according to the
//!< current usage
bool interfaceManaged; //!< If the syncpoint is managed by a host1x client interface, a
std::atomic<u32> counter_max; //!< The maximum value the syncpoint can reach according to
//!< the current usage
bool interface_managed; //!< If the syncpoint is managed by a host1x client interface, a
//!< client interface is a HW block that can handle host1x
//!< transactions on behalf of a host1x client (Which would otherwise
//!< need to be manually synced using PIO which is synchronous and
//!< requires direct cooperation of the CPU)
//!< transactions on behalf of a host1x client (Which would
//!< otherwise need to be manually synced using PIO which is
//!< synchronous and requires direct cooperation of the CPU)
bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved
//!< value
};

View File

@@ -106,7 +106,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
return NvResult::BadValue;
}
if (!(params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES)) {
if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) {
LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size);
return NvResult::BadValue;
}
@@ -124,12 +124,13 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
vm.va_range_end = params.va_range_end;
}
const u64 start_pages{vm.va_range_start >> VM::PAGE_SIZE_BITS};
const u64 end_pages{vm.va_range_split >> VM::PAGE_SIZE_BITS};
const auto start_pages{static_cast<u32>(vm.va_range_start >> VM::PAGE_SIZE_BITS)};
const auto end_pages{static_cast<u32>(vm.va_range_split >> VM::PAGE_SIZE_BITS)};
vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages);
const u64 start_big_pages{vm.va_range_split >> vm.big_page_size_bits};
const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits};
const auto start_big_pages{static_cast<u32>(vm.va_range_split >> vm.big_page_size_bits)};
const auto end_big_pages{
static_cast<u32>((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)};
vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);
gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
@@ -210,10 +211,11 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
// Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
// Only FreeSpace can unmap them fully
if (mapping->sparse_alloc)
if (mapping->sparse_alloc) {
gmmu->MapSparse(offset, mapping->size, mapping->big_page);
else
} else {
gmmu->Unmap(offset, mapping->size);
}
mapping_map.erase(offset);
}
@@ -256,7 +258,7 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
static_cast<u32>(allocation.size >> page_size_bits));
allocation_map.erase(params.offset);
} catch ([[maybe_unused]] const std::out_of_range& e) {
} catch (const std::out_of_range&) {
return NvResult::BadValue;
}
@@ -351,7 +353,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
return NvResult::Success;
} catch ([[maybe_unused]] const std::out_of_range& e) {
} catch (const std::out_of_range&) {
LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
params.offset);
return NvResult::BadValue;
@@ -367,11 +369,11 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
bool big_page{[&]() {
if (Common::IsAligned(handle->align, vm.big_page_size))
if (Common::IsAligned(handle->align, vm.big_page_size)) {
return true;
else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
} else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
return false;
else {
} else {
ASSERT(false);
return false;
}
@@ -450,7 +452,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
}
mapping_map.erase(params.offset);
} catch ([[maybe_unused]] const std::out_of_range& e) {
} catch (const std::out_of_range&) {
LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
}
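
One style point in this file: the big-page-size guard now compares the mask test against zero instead of negating it, which reads more naturally for bit operations. A hedged sketch of the check's shape (the mask value below is invented for illustration only):

    #include <cstdint>

    using u32 = std::uint32_t;

    // Hypothetical mask: each set bit marks one supported power-of-two page size.
    constexpr u32 SUPPORTED_BIG_PAGE_SIZES = 0x30000; // e.g. 64 KiB | 128 KiB

    // A requested power-of-two size is accepted only if its bit is in the mask.
    bool IsSupportedBigPageSize(u32 big_page_size) {
        return (big_page_size & SUPPORTED_BIG_PAGE_SIZES) != 0;
    }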

View File

@@ -79,7 +79,7 @@ private:
// Used for waiting on a syncpoint & canceling it.
Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{};
bool IsBeingUsed() {
bool IsBeingUsed() const {
const auto current_status = status.load(std::memory_order_acquire);
return current_status == EventState::Waiting ||
current_status == EventState::Cancelling ||

View File

@@ -184,7 +184,7 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
params.unk3);
if (channel_state->initiated) {
if (channel_state->initialized) {
LOG_CRITICAL(Service_NVDRV, "Already allocated!");
return NvResult::AlreadyAllocated;
}

View File

@@ -4,13 +4,12 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
#include "video_core/renderer_base.h"
namespace Service::Nvidia::Devices {
u32 nvhost_nvdec::next_id{};
nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
: nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
nvhost_nvdec::~nvhost_nvdec() = default;
@@ -21,8 +20,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
case 0x0:
switch (command.cmd) {
case 0x1: {
if (!fd_to_id.contains(fd)) {
fd_to_id[fd] = next_id++;
auto& host1x_file = core.Host1xDeviceFile();
if (!host1x_file.fd_to_id.contains(fd)) {
host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
}
return Submit(fd, input, output);
}
@@ -70,8 +70,9 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) {}
void nvhost_nvdec::OnClose(DeviceFD fd) {
LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
const auto iter = fd_to_id.find(fd);
if (iter != fd_to_id.end()) {
auto& host1x_file = core.Host1xDeviceFile();
const auto iter = host1x_file.fd_to_id.find(fd);
if (iter != host1x_file.fd_to_id.end()) {
system.GPU().ClearCdmaInstance(iter->second);
}
}

View File

@@ -22,9 +22,6 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
private:
static u32 next_id;
};
} // namespace Service::Nvidia::Devices

View File

@@ -46,13 +46,11 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
}
} // Anonymous namespace
std::unordered_map<DeviceFD, u32> nvhost_nvdec_common::fd_to_id{};
std::deque<u32> nvhost_nvdec_common::syncpts_accumulated{};
nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
NvCore::ChannelType channel_type_)
: nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated;
if (syncpts_accumulated.empty()) {
channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
} else {
@@ -60,8 +58,9 @@ nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Containe
syncpts_accumulated.pop_front();
}
}
nvhost_nvdec_common::~nvhost_nvdec_common() {
syncpts_accumulated.push_back(channel_syncpoint);
core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
}
NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
@@ -108,7 +107,7 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
cmdlist.size() * sizeof(u32));
gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
}
std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
// Some games expect command_buffers to be written back
@@ -186,8 +185,4 @@ Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
return nullptr;
}
void nvhost_nvdec_common::Reset() {
fd_to_id.clear();
}
} // namespace Service::Nvidia::Devices
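
The constructor and destructor above now recycle channel syncpoints through the deque owned by the container's Host1xDeviceFileData rather than a static: a new channel reuses a previously released syncpoint when one is queued, and returns its own when it is destroyed. A condensed sketch of that acquire/release pattern (SyncpointPool and its members are illustrative names):

    #include <cstdint>
    #include <deque>

    using u32 = std::uint32_t;

    struct SyncpointPool {
        std::deque<u32> syncpts_accumulated; // returned by destroyed channels
        u32 next_fresh{};                    // stand-in for AllocateSyncpoint(false)

        u32 Acquire() {
            if (syncpts_accumulated.empty()) {
                return next_fresh++;         // allocate a fresh syncpoint
            }
            const u32 id = syncpts_accumulated.front();
            syncpts_accumulated.pop_front(); // reuse the oldest released one
            return id;
        }

        void Release(u32 id) {
            syncpts_accumulated.push_back(id);
        }
    };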

View File

@@ -25,8 +25,6 @@ public:
NvCore::ChannelType channel_type);
~nvhost_nvdec_common() override;
static void Reset();
protected:
struct IoctlSetNvmapFD {
s32_le nvmap_fd{};
@@ -119,7 +117,6 @@ protected:
Kernel::KEvent* QueryEvent(u32 event_id) override;
static std::unordered_map<DeviceFD, u32> fd_to_id;
u32 channel_syncpoint;
s32_le nvmap_fd{};
u32_le submit_timeout{};
@@ -128,8 +125,6 @@ protected:
NvCore::NvMap& nvmap;
NvCore::ChannelType channel_type;
std::array<u32, MaxSyncPoints> device_syncpoints{};
static std::deque<u32> syncpts_accumulated;
};
}; // namespace Devices
} // namespace Service::Nvidia

View File

@@ -4,13 +4,12 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
#include "video_core/renderer_base.h"
namespace Service::Nvidia::Devices {
u32 nvhost_vic::next_id{};
nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
: nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
@@ -21,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
switch (command.group) {
case 0x0:
switch (command.cmd) {
case 0x1:
if (!fd_to_id.contains(fd)) {
fd_to_id[fd] = next_id++;
case 0x1: {
auto& host1x_file = core.Host1xDeviceFile();
if (!host1x_file.fd_to_id.contains(fd)) {
host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
}
return Submit(fd, input, output);
}
case 0x2:
return GetSyncpoint(input, output);
case 0x3:
@@ -69,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
void nvhost_vic::OnOpen(DeviceFD fd) {}
void nvhost_vic::OnClose(DeviceFD fd) {
const auto iter = fd_to_id.find(fd);
if (iter != fd_to_id.end()) {
auto& host1x_file = core.Host1xDeviceFile();
const auto iter = host1x_file.fd_to_id.find(fd);
if (iter != host1x_file.fd_to_id.end()) {
system.GPU().ClearCdmaInstance(iter->second);
}
}

View File

@@ -21,8 +21,5 @@ public:
void OnOpen(DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
private:
static u32 next_id;
};
} // namespace Service::Nvidia::Devices

View File

@@ -23,8 +23,8 @@ public:
explicit nvmap(Core::System& system_, NvCore::Container& container);
~nvmap() override;
nvmap(nvmap const&) = delete;
nvmap& operator=(nvmap const&) = delete;
nvmap(const nvmap&) = delete;
nvmap& operator=(const nvmap&) = delete;
NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
std::vector<u8>& output) override;

View File

@@ -101,9 +101,7 @@ Module::Module(Core::System& system)
};
}
Module::~Module() {
Devices::nvhost_nvdec_common::Reset();
}
Module::~Module() {}
NvResult Module::VerifyFD(DeviceFD fd) const {
if (fd < 0) {

View File

@@ -46,7 +46,7 @@ class Module;
class EventInterface {
public:
EventInterface(Module& module_);
explicit EventInterface(Module& module_);
~EventInterface();
Kernel::KEvent* CreateEvent(std::string name);

View File

@@ -14,10 +14,7 @@
namespace Tegra::Control {
ChannelState::ChannelState(s32 bind_id_) {
bind_id = bind_id_;
initiated = false;
}
ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}
void ChannelState::Init(Core::System& system, GPU& gpu) {
ASSERT(memory_manager);
@@ -27,7 +24,7 @@ void ChannelState::Init(Core::System& system, GPU& gpu) {
kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
initiated = true;
initialized = true;
}
void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {

View File

@@ -34,7 +34,7 @@ class DmaPusher;
namespace Control {
struct ChannelState {
ChannelState(s32 bind_id);
explicit ChannelState(s32 bind_id);
ChannelState(const ChannelState& state) = delete;
ChannelState& operator=(const ChannelState&) = delete;
ChannelState(ChannelState&& other) noexcept = default;
@@ -60,7 +60,7 @@ struct ChannelState {
std::unique_ptr<DmaPusher> dma_pusher;
bool initiated{};
bool initialized{};
};
} // namespace Control

View File

@@ -32,7 +32,7 @@ namespace VideoCommon {
class ChannelInfo {
public:
ChannelInfo() = delete;
ChannelInfo(Tegra::Control::ChannelState& state);
explicit ChannelInfo(Tegra::Control::ChannelState& state);
ChannelInfo(const ChannelInfo& state) = delete;
ChannelInfo& operator=(const ChannelInfo&) = delete;
ChannelInfo(ChannelInfo&& other) = default;
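
This commit also marks most single-argument constructors explicit (Container, NvMap, ChannelState, ChannelInfo, Scheduler, Host1x, EventInterface), which blocks accidental implicit conversions from the argument type. A minimal illustration:

    struct Channel {
        explicit Channel(int bind_id_) : bind_id{bind_id_} {}
        int bind_id;
    };

    void Bind(const Channel& channel);

    // Bind(5);          // ill-formed now: no implicit int -> Channel conversion
    // Bind(Channel{5}); // still fine: the construction is explicit at the call site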

View File

@@ -3,6 +3,7 @@
#include <memory>
#include "common/assert.h"
#include "video_core/control/channel_state.h"
#include "video_core/control/scheduler.h"
#include "video_core/gpu.h"
@@ -13,8 +14,9 @@ Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
Scheduler::~Scheduler() = default;
void Scheduler::Push(s32 channel, CommandList&& entries) {
std::unique_lock<std::mutex> lk(scheduling_guard);
std::unique_lock lk(scheduling_guard);
auto it = channels.find(channel);
ASSERT(it != channels.end());
auto channel_state = it->second;
gpu.BindChannel(channel_state->bind_id);
channel_state->dma_pusher->Push(std::move(entries));
@@ -23,7 +25,7 @@ void Scheduler::Push(s32 channel, CommandList&& entries) {
void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
s32 channel = new_channel->bind_id;
std::unique_lock<std::mutex> lk(scheduling_guard);
std::unique_lock lk(scheduling_guard);
channels.emplace(channel, new_channel);
}
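
The lock declarations shrink because of C++17 class template argument deduction: std::unique_lock lk(scheduling_guard) deduces std::unique_lock<std::mutex> from the constructor argument, so spelling out the mutex type is redundant. For example:

    #include <mutex>

    std::mutex scheduling_guard;

    void Example() {
        std::unique_lock lk(scheduling_guard); // deduced: std::unique_lock<std::mutex>
        // ... critical section ...
    }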

View File

@@ -19,7 +19,7 @@ struct ChannelState;
class Scheduler {
public:
Scheduler(GPU& gpu_);
explicit Scheduler(GPU& gpu_);
~Scheduler();
void Push(s32 channel, CommandList&& entries);

View File

@@ -195,7 +195,7 @@ public:
BitField<24, 2, u32> num_dst_components_minus_one;
};
Swizzle GetComponent(size_t i) {
Swizzle GetComponent(size_t i) const {
const u32 raw = dst_components_raw;
return static_cast<Swizzle>((raw >> (i * 3)) & 0x7);
}
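
GetComponent unpacks one 3-bit swizzle selector per destination component from dst_components_raw: shift right by i * 3, then mask with 0x7. A worked example with an invented raw value:

    #include <cstddef>
    #include <cstdint>

    using u32 = std::uint32_t;

    u32 Component(u32 raw, std::size_t i) {
        return (raw >> (i * 3)) & 0x7; // each selector occupies 3 bits
    }

    // raw = 0b100'011'010'001 packs selectors 4, 3, 2, 1 for components 3..0:
    //   Component(raw, 0) == 1, Component(raw, 1) == 2,
    //   Component(raw, 2) == 3, Component(raw, 3) == 4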

View File

@@ -355,7 +355,7 @@ struct GPU::Impl {
std::condition_variable sync_cv;
std::list<std::function<void(void)>> sync_requests;
std::list<std::function<void()>> sync_requests;
std::atomic<u64> current_sync_fence{};
u64 last_sync_fence{};
std::mutex sync_request_mutex;

View File

@@ -19,7 +19,7 @@ namespace Host1x {
class Host1x {
public:
Host1x(Core::System& system);
explicit Host1x(Core::System& system);
SyncpointManager& GetSyncpointManager() {
return syncpoint_manager;

View File

@@ -12,13 +12,13 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
std::function<void(void)>& action) {
std::function<void()>&& action) {
if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
action();
return {};
}
std::unique_lock<std::mutex> lk(guard);
std::unique_lock lk(guard);
if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
action();
return {};
@@ -30,12 +30,12 @@ SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
}
++it;
}
return action_storage.emplace(it, expected_value, action);
return action_storage.emplace(it, expected_value, std::move(action));
}
void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
ActionHandle& handle) {
std::unique_lock<std::mutex> lk(guard);
std::unique_lock lk(guard);
action_storage.erase(handle);
}
@@ -68,7 +68,7 @@ void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_var
std::list<RegisteredAction>& action_storage) {
auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};
std::unique_lock<std::mutex> lk(guard);
std::unique_lock lk(guard);
auto it = action_storage.begin();
while (it != action_storage.end()) {
if (it->expected_value > new_value) {
@@ -87,7 +87,7 @@ void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable
return;
}
std::unique_lock<std::mutex> lk(guard);
std::unique_lock lk(guard);
wait_cv.wait(lk, pred);
}

View File

@@ -18,34 +18,34 @@ namespace Host1x {
class SyncpointManager {
public:
u32 GetGuestSyncpointValue(u32 id) {
u32 GetGuestSyncpointValue(u32 id) const {
return syncpoints_guest[id].load(std::memory_order_acquire);
}
u32 GetHostSyncpointValue(u32 id) {
u32 GetHostSyncpointValue(u32 id) const {
return syncpoints_host[id].load(std::memory_order_acquire);
}
struct RegisteredAction {
RegisteredAction(u32 expected_value_, std::function<void(void)>& action_)
: expected_value{expected_value_}, action{action_} {}
explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
: expected_value{expected_value_}, action{std::move(action_)} {}
u32 expected_value;
std::function<void(void)> action;
std::function<void()> action;
};
using ActionHandle = std::list<RegisteredAction>::iterator;
template <typename Func>
ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
std::function<void(void)> func(action);
std::function<void()> func(action);
return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
expected_value, func);
expected_value, std::move(func));
}
template <typename Func>
ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
std::function<void(void)> func(action);
std::function<void()> func(action);
return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
expected_value, func);
expected_value, std::move(func));
}
void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
@@ -60,11 +60,11 @@ public:
void WaitHost(u32 syncpoint_id, u32 expected_value);
bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) {
bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
}
bool IsReadyHost(u32 syncpoint_id, u32 expected_value) {
bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
}
@@ -74,7 +74,7 @@ private:
ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
std::list<RegisteredAction>& action_storage, u32 expected_value,
std::function<void(void)>& action);
std::function<void()>&& action);
void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);

View File

@@ -126,7 +126,7 @@ private:
void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
template <bool is_big_page>
[[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
[[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
if constexpr (is_big_page) {
return (gpu_addr >> big_page_bits) & big_page_table_mask;
} else {