early-access version 2261

parent 4fadc26497
commit ecc126a6a8

8 changed files with 42 additions and 44 deletions

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2260.
+This is the source code for early-access 2261.
 
 ## Legal Notice
 
@@ -21,7 +21,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
     case 0x0:
         switch (command.cmd) {
         case 0x1:
-            return Submit(input, output);
+            return Submit(fd, input, output);
         case 0x2:
             return GetSyncpoint(input, output);
         case 0x3:

@@ -62,11 +62,16 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
     return NvResult::NotImplemented;
 }
 
-void nvhost_nvdec::OnOpen(DeviceFD fd) {}
+void nvhost_nvdec::OnOpen(DeviceFD fd) {
+    static u32 next_id{};
+    fd_to_id[fd] = next_id++;
+}
 
 void nvhost_nvdec::OnClose(DeviceFD fd) {
     LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
-    system.GPU().ClearCdmaInstance();
+    if (fd_to_id.find(fd) != fd_to_id.end()) {
+        system.GPU().ClearCdmaInstance(fd_to_id[fd]);
+    }
 }
 
 } // namespace Service::Nvidia::Devices

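The OnOpen/OnClose change above carries the commit's core idea: each opened NVDEC fd is assigned a monotonically increasing stream id, and teardown now targets only that stream. Below is a minimal standalone sketch of the same registration pattern; the DeviceFD alias and the free-standing ClearCdmaInstance stub are stand-ins for yuzu's types, not the actual codebase.

#include <cstdint>
#include <cstdio>
#include <unordered_map>

using DeviceFD = std::int32_t; // stand-in for the driver's fd handle type

std::unordered_map<DeviceFD, std::uint32_t> fd_to_id;

// Stub for system.GPU().ClearCdmaInstance(id) in the real code.
void ClearCdmaInstance(std::uint32_t id) {
    std::printf("freeing CDMA pusher %u\n", id);
}

void OnOpen(DeviceFD fd) {
    // A function-local static gives every opened fd a fresh, process-wide
    // unique id, even across multiple device instances.
    static std::uint32_t next_id{};
    fd_to_id[fd] = next_id++;
}

void OnClose(DeviceFD fd) {
    // Only clear an instance that was actually registered for this fd.
    // (The erase is sketch hygiene; the diff itself leaves the map entry in place.)
    if (auto it = fd_to_id.find(fd); it != fd_to_id.end()) {
        ClearCdmaInstance(it->second);
        fd_to_id.erase(it);
    }
}

int main() {
    OnOpen(3);
    OnOpen(4);
    OnClose(3); // frees CDMA pusher 0
    OnClose(4); // frees CDMA pusher 1
}
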
@@ -59,7 +59,8 @@ NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
     return NvResult::Success;
 }
 
-NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u8>& output) {
+NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
+                                     std::vector<u8>& output) {
     IoctlSubmit params{};
     std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
     LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);

@@ -93,7 +94,7 @@ NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u
         Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
         system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(),
                                   cmdlist.size() * sizeof(u32));
-        gpu.PushCommandBuffer(cmdlist);
+        gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
     }
     std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
     // Some games expect command_buffers to be written back

@@ -104,13 +104,14 @@ protected:
 
     /// Ioctl command implementations
     NvResult SetNVMAPfd(const std::vector<u8>& input);
-    NvResult Submit(const std::vector<u8>& input, std::vector<u8>& output);
+    NvResult Submit(DeviceFD fd, const std::vector<u8>& input, std::vector<u8>& output);
     NvResult GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult MapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
 
+    std::unordered_map<DeviceFD, u32> fd_to_id{};
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
     std::shared_ptr<nvmap> nvmap_dev;

@@ -21,7 +21,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
     case 0x0:
         switch (command.cmd) {
         case 0x1:
-            return Submit(input, output);
+            return Submit(fd, input, output);
         case 0x2:
             return GetSyncpoint(input, output);
         case 0x3:

@@ -62,10 +62,15 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
     return NvResult::NotImplemented;
 }
 
-void nvhost_vic::OnOpen(DeviceFD fd) {}
+void nvhost_vic::OnOpen(DeviceFD fd) {
+    static u32 next_id{};
+    fd_to_id[fd] = next_id++;
+}
 
 void nvhost_vic::OnClose(DeviceFD fd) {
-    system.GPU().ClearCdmaInstance();
+    if (fd_to_id.find(fd) != fd_to_id.end()) {
+        system.GPU().ClearCdmaInstance(fd_to_id[fd]);
+    }
 }
 
 } // namespace Service::Nvidia::Devices

@@ -185,16 +185,6 @@ struct GPU::Impl {
         return *dma_pusher;
     }
 
-    /// Returns a reference to the GPU CDMA pusher.
-    [[nodiscard]] Tegra::CDmaPusher& CDmaPusher() {
-        return *cdma_pusher;
-    }
-
-    /// Returns a const reference to the GPU CDMA pusher.
-    [[nodiscard]] const Tegra::CDmaPusher& CDmaPusher() const {
-        return *cdma_pusher;
-    }
-
     /// Returns a reference to the underlying renderer.
     [[nodiscard]] VideoCore::RendererBase& Renderer() {
         return *renderer;

@@ -338,25 +328,26 @@ struct GPU::Impl {
     }
 
     /// Push GPU command buffer entries to be processed
-    void PushCommandBuffer(Tegra::ChCommandHeaderList& entries) {
+    void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
         if (!use_nvdec) {
             return;
         }
 
-        if (!cdma_pusher) {
-            cdma_pusher = std::make_unique<Tegra::CDmaPusher>(gpu);
+        if (cdma_pushers.find(id) == cdma_pushers.end()) {
+            cdma_pushers[id] = std::make_unique<Tegra::CDmaPusher>(gpu);
         }
 
         // SubmitCommandBuffer would make the nvdec operations async, this is not currently working
         // TODO(ameerj): RE proper async nvdec operation
         // gpu_thread.SubmitCommandBuffer(std::move(entries));
-
-        cdma_pusher->ProcessEntries(std::move(entries));
+        cdma_pushers[id]->ProcessEntries(std::move(entries));
     }
 
     /// Frees the CDMAPusher instance to free up resources
-    void ClearCdmaInstance() {
-        cdma_pusher.reset();
+    void ClearCdmaInstance(u32 id) {
+        if (cdma_pushers.find(id) != cdma_pushers.end()) {
+            cdma_pushers.erase(id);
+        }
     }
 
     /// Swap buffers (render frame)

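On the GPU side, the single cdma_pusher becomes a map of pushers keyed by the stream id assigned at OnOpen, created lazily on first submission (see the member declaration hunk that follows). Below is a minimal sketch of that find-or-create ownership pattern; Pusher is a hypothetical stand-in for Tegra::CDmaPusher.

#include <cstdint>
#include <cstdio>
#include <map>
#include <memory>
#include <utility>
#include <vector>

// Trivial stand-in for Tegra::CDmaPusher; only the shape matters here.
struct Pusher {
    void ProcessEntries(std::vector<std::uint32_t>&& entries) {
        std::printf("processing %zu entries\n", entries.size());
    }
};

std::map<std::uint32_t, std::unique_ptr<Pusher>> cdma_pushers;

void PushCommandBuffer(std::uint32_t id, std::vector<std::uint32_t>& entries) {
    // Lazily create one pusher per stream id on first use.
    if (cdma_pushers.find(id) == cdma_pushers.end()) {
        cdma_pushers[id] = std::make_unique<Pusher>();
    }
    cdma_pushers[id]->ProcessEntries(std::move(entries));
}

void ClearCdmaInstance(std::uint32_t id) {
    // map::erase on a missing key is already a no-op, so the find() guard
    // is belt-and-braces; it is kept here to mirror the diff.
    if (cdma_pushers.find(id) != cdma_pushers.end()) {
        cdma_pushers.erase(id);
    }
}

int main() {
    std::vector<std::uint32_t> a{1, 2, 3};
    std::vector<std::uint32_t> b{4, 5};
    PushCommandBuffer(0, a); // creates pusher 0, processes 3 entries
    PushCommandBuffer(1, b); // creates pusher 1, processes 2 entries
    ClearCdmaInstance(0);    // frees pusher 0; stream 1 keeps going
}
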
@@ -659,7 +650,7 @@ struct GPU::Impl {
     Core::System& system;
     std::unique_ptr<Tegra::MemoryManager> memory_manager;
     std::unique_ptr<Tegra::DmaPusher> dma_pusher;
-    std::unique_ptr<Tegra::CDmaPusher> cdma_pusher;
+    std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
     std::unique_ptr<VideoCore::RendererBase> renderer;
     VideoCore::RasterizerInterface* rasterizer = nullptr;
     const bool use_nvdec;

@@ -811,14 +802,6 @@ const Tegra::DmaPusher& GPU::DmaPusher() const {
     return impl->DmaPusher();
 }
 
-Tegra::CDmaPusher& GPU::CDmaPusher() {
-    return impl->CDmaPusher();
-}
-
-const Tegra::CDmaPusher& GPU::CDmaPusher() const {
-    return impl->CDmaPusher();
-}
-
 VideoCore::RendererBase& GPU::Renderer() {
     return impl->Renderer();
 }

@@ -887,12 +870,12 @@ void GPU::PushGPUEntries(Tegra::CommandList&& entries) {
     impl->PushGPUEntries(std::move(entries));
 }
 
-void GPU::PushCommandBuffer(Tegra::ChCommandHeaderList& entries) {
-    impl->PushCommandBuffer(entries);
+void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
+    impl->PushCommandBuffer(id, entries);
 }
 
-void GPU::ClearCdmaInstance() {
-    impl->ClearCdmaInstance();
+void GPU::ClearCdmaInstance(u32 id) {
+    impl->ClearCdmaInstance(id);
 }
 
 void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {

@@ -242,10 +242,10 @@ public:
     void PushGPUEntries(Tegra::CommandList&& entries);
 
     /// Push GPU command buffer entries to be processed
-    void PushCommandBuffer(Tegra::ChCommandHeaderList& entries);
+    void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
 
     /// Frees the CDMAPusher instance to free up resources
-    void ClearCdmaInstance();
+    void ClearCdmaInstance(u32 id);
 
     /// Swap buffers (render frame)
     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);

@@ -8,10 +8,13 @@
 #include <QObject>
 
 #include "common/input.h"
-#include "common/settings.h"
-#include "core/hid/emulated_controller.h"
+#include "common/settings_input.h"
 
 namespace Core::HID {
+using ButtonValues = std::array<Common::Input::ButtonStatus, Settings::NativeButton::NumButtons>;
+using SticksValues = std::array<Common::Input::StickStatus, Settings::NativeAnalog::NumAnalogs>;
+enum class ControllerTriggerType;
+class EmulatedController;
 class HIDCore;
 } // namespace Core::HID
 
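The header change above swaps the heavyweight emulated_controller.h include for forward declarations, which suffices while the header only names the types rather than using their definitions. A minimal sketch of why that compiles; ControllerObserver is a hypothetical class invented for illustration.

namespace Core::HID {
enum class ControllerTriggerType; // scoped enum: forward declaration is legal
class EmulatedController;         // defined elsewhere (core/hid/emulated_controller.h)

// A pointer member compiles against the forward declaration alone; the
// full definition is only needed by code that dereferences the pointer.
class ControllerObserver {
public:
    explicit ControllerObserver(EmulatedController* controller_)
        : controller{controller_} {}

private:
    EmulatedController* controller{};
};
} // namespace Core::HID

int main() {
    Core::HID::ControllerObserver observer{nullptr};
}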