early-access version 1341

pineappleEA 2021-01-19 00:14:45 +01:00
parent cd088d656c
commit fab33e730a
14 changed files with 152 additions and 242 deletions

View file

@@ -1,7 +1,7 @@
yuzu emulator early access
=============
This is the source code for early-access 1339.
This is the source code for early-access 1341.
## Legal Notice

View file

@@ -195,9 +195,8 @@ struct Memory::Impl {
switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {}) "
"at PC 0x{:08X}",
current_vaddr, src_addr, size, system.CurrentArmInterface().GetPC());
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, src_addr, size);
std::memset(dest_buffer, 0, copy_amount);
break;
}
@@ -241,9 +240,8 @@ struct Memory::Impl {
switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {}) "
"at PC 0x{:08X}",
current_vaddr, src_addr, size, system.CurrentArmInterface().GetPC());
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, src_addr, size);
std::memset(dest_buffer, 0, copy_amount);
break;
}
@@ -293,9 +291,8 @@ struct Memory::Impl {
switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {}) "
"at PC 0x{:08X}",
current_vaddr, dest_addr, size, system.CurrentArmInterface().GetPC());
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, dest_addr, size);
break;
}
case Common::PageType::Memory: {
@@ -337,9 +334,8 @@ struct Memory::Impl {
switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {}) "
"at PC 0x{:08X}",
current_vaddr, dest_addr, size, system.CurrentArmInterface().GetPC());
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, dest_addr, size);
break;
}
case Common::PageType::Memory: {
@@ -387,9 +383,8 @@ struct Memory::Impl {
switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {}) "
"at PC 0x{:08X}",
current_vaddr, dest_addr, size, system.CurrentArmInterface().GetPC());
"Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, dest_addr, size);
break;
}
case Common::PageType::Memory: {
@@ -434,9 +429,8 @@ struct Memory::Impl {
switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {}) "
"at PC 0x{:08X}",
current_vaddr, src_addr, size, system.CurrentArmInterface().GetPC());
"Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
current_vaddr, src_addr, size);
ZeroBlock(process, dest_addr, copy_amount);
break;
}
@@ -607,8 +601,7 @@ struct Memory::Impl {
}
switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X} at PC 0x{:08X}", sizeof(T) * 8, vaddr,
system.CurrentArmInterface().GetPC());
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
return 0;
case Common::PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
@@ -645,9 +638,8 @@ struct Memory::Impl {
}
switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X} at PC 0x{:08X}",
sizeof(data) * 8, static_cast<u32>(data), vaddr,
system.CurrentArmInterface().GetPC());
LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
static_cast<u32>(data), vaddr);
return;
case Common::PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);

View file

@@ -251,6 +251,16 @@ public:
flags &= ~BufferFlagBits::Picked;
}
/// Increases the likeliness of this being a stream buffer
void IncreaseStreamScore(int score) noexcept {
stream_score += score;
}
/// Returns the likeliness of this being a stream buffer
[[nodiscard]] int StreamScore() const noexcept {
return stream_score;
}
/// Returns true when vaddr -> vaddr+size is fully contained in the buffer
[[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept {
return addr >= cpu_addr && addr + size <= cpu_addr + SizeBytes();
@@ -574,6 +584,7 @@ private:
VAddr cpu_addr = 0;
Words words;
BufferFlagBits flags{};
int stream_score = 0;
};
} // namespace VideoCommon
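The stream-score accessors added above only become meaningful through the buffer cache's overlap-joining logic later in this commit. A minimal sketch of that intent (illustrative only, not code from this change; the helper name is hypothetical):

// When a new buffer absorbs an overlapping old one, it inherits the old
// buffer's stream score plus one; a large accumulated score marks the
// region as a streaming hot spot.
template <class Buffer>
void AccumulateStreamScore(Buffer& new_buffer, const Buffer& overlap) {
    new_buffer.IncreaseStreamScore(overlap.StreamScore() + 1);
}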

View file

@@ -71,6 +71,13 @@ class BufferCache {
struct Empty {};
struct OverlapResult {
std::vector<BufferId> ids;
VAddr begin;
VAddr end;
bool has_stream_leap = false;
};
struct Binding {
VAddr cpu_addr{};
u32 size{};
@@ -84,7 +91,7 @@ class BufferCache {
};
public:
static constexpr size_t SKIP_CACHE_SIZE = 4096;
static constexpr u32 SKIP_CACHE_SIZE = 4096;
explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
Tegra::Engines::Maxwell3D& maxwell3d_,
@@ -220,6 +227,10 @@ private:
[[nodiscard]] BufferId FindBuffer(VAddr cpu_addr, u32 size);
[[nodiscard]] OverlapResult ResolveOverlaps(VAddr cpu_addr, u32 wanted_size);
void JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, bool accumulate_stream_score);
[[nodiscard]] BufferId CreateBuffer(VAddr cpu_addr, u32 wanted_size);
void Register(BufferId buffer_id);
@@ -988,12 +999,15 @@ BufferId BufferCache<P>::FindBuffer(VAddr cpu_addr, u32 size) {
}
template <class P>
BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu_addr,
u32 wanted_size) {
static constexpr int STREAM_LEAP_THRESHOLD = 16;
std::vector<BufferId> overlap_ids;
VAddr cpu_addr_begin = cpu_addr;
VAddr cpu_addr_end = cpu_addr + wanted_size;
for (; cpu_addr >> PAGE_BITS < Common::DivCeil(cpu_addr_end, PAGE_SIZE);
cpu_addr += PAGE_SIZE) {
VAddr begin = cpu_addr;
VAddr end = cpu_addr + wanted_size;
int stream_score = 0;
bool has_stream_leap = false;
for (; cpu_addr >> PAGE_BITS < Common::DivCeil(end, PAGE_SIZE); cpu_addr += PAGE_SIZE) {
const BufferId overlap_id = page_table[cpu_addr >> PAGE_BITS];
if (!overlap_id) {
continue;
@@ -1002,22 +1016,38 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
if (overlap.IsPicked()) {
continue;
}
overlap.Pick();
overlap_ids.push_back(overlap_id);
overlap.Pick();
const VAddr overlap_cpu_addr = overlap.CpuAddr();
if (overlap_cpu_addr < cpu_addr_begin) {
cpu_addr = cpu_addr_begin = overlap_cpu_addr;
if (overlap_cpu_addr < begin) {
cpu_addr = begin = overlap_cpu_addr;
}
cpu_addr_end = std::max(cpu_addr_end, overlap_cpu_addr + overlap.SizeBytes());
end = std::max(end, overlap_cpu_addr + overlap.SizeBytes());
stream_score += overlap.StreamScore();
if (stream_score > STREAM_LEAP_THRESHOLD && !has_stream_leap) {
// When this memory region has been joined a bunch of times, we assume it's being used
// as a stream buffer. Increase the size to skip constantly recreating buffers.
has_stream_leap = true;
end += PAGE_SIZE * 256;
}
const u32 size = static_cast<u32>(cpu_addr_end - cpu_addr_begin);
const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, cpu_addr_begin, size);
}
return OverlapResult{
.ids = std::move(overlap_ids),
.begin = begin,
.end = end,
.has_stream_leap = has_stream_leap,
};
}
template <class P>
void BufferCache<P>::JoinOverlap(BufferId new_buffer_id, BufferId overlap_id,
bool accumulate_stream_score) {
Buffer& new_buffer = slot_buffers[new_buffer_id];
for (const BufferId overlap_id : overlap_ids) {
Buffer& overlap = slot_buffers[overlap_id];
overlap.Unpick();
if (accumulate_stream_score) {
new_buffer.IncreaseStreamScore(overlap.StreamScore() + 1);
}
std::vector<BufferCopy> copies;
const size_t dst_base_offset = overlap.CpuAddr() - new_buffer.CpuAddr();
overlap.ForEachDownloadRange([&](u64 begin, u64 range_size) {
@@ -1034,6 +1064,15 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
}
ReplaceBufferDownloads(overlap_id, new_buffer_id);
DeleteBuffer(overlap_id);
}
template <class P>
BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
const u32 size = static_cast<u32>(overlap.end - overlap.begin);
const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
for (const BufferId overlap_id : overlap.ids) {
JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
}
Register(new_buffer_id);
return new_buffer_id;
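Taken together, ResolveOverlaps, JoinOverlap and CreateBuffer implement a simple scoring loop: each recreation of a region over an existing buffer carries the old stream score forward plus one, and once the summed score of the overlapped buffers exceeds STREAM_LEAP_THRESHOLD the region is treated as a stream buffer, its end is pushed out by 256 pages, and the score is not carried over for that creation (JoinOverlap runs with accumulate_stream_score = false). A standalone sketch of that dynamic, under the simplifying assumption of one overlapping predecessor per recreation:

#include <cstdio>

int main() {
    constexpr int STREAM_LEAP_THRESHOLD = 16; // same constant as ResolveOverlaps
    int score = 0;                            // stream score carried by the live buffer
    for (int recreation = 1; recreation <= 20; ++recreation) {
        const bool leap = score > STREAM_LEAP_THRESHOLD;
        if (leap) {
            std::printf("recreation %d: stream leap, extend end by 256 pages\n", recreation);
        }
        // JoinOverlap accumulates the old score only when no leap fired this time.
        score = leap ? 0 : score + 1;
    }
    return 0;
}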

View file

@@ -7,10 +7,6 @@
#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_instance.h"
#include "video_core/vulkan_common/vulkan_library.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
namespace OpenGL {
namespace {
@@ -36,13 +32,8 @@ Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rast
buffer.Create();
const std::string name = fmt::format("Buffer 0x{:x}", CpuAddr());
glObjectLabel(GL_BUFFER, buffer.handle, static_cast<GLsizei>(name.size()), name.data());
if (runtime.device.UseAssemblyShaders()) {
CreateMemoryObjects(runtime);
glNamedBufferStorageMemEXT(buffer.handle, SizeBytes(), memory_commit.ExportOpenGLHandle(),
memory_commit.Offset());
} else {
glNamedBufferData(buffer.handle, SizeBytes(), nullptr, GL_DYNAMIC_DRAW);
}
if (runtime.has_unified_vertex_buffers) {
glGetNamedBufferParameterui64vNV(buffer.handle, GL_BUFFER_GPU_ADDRESS_NV, &address);
}
@@ -71,61 +62,30 @@ void Buffer::MakeResident(GLenum access) noexcept {
glMakeNamedBufferResidentNV(buffer.handle, access);
}
GLuint Buffer::SubBuffer(u32 offset) {
if (offset == 0) {
return buffer.handle;
}
for (const auto& [sub_buffer, sub_offset] : subs) {
if (sub_offset == offset) {
return sub_buffer.handle;
}
}
OGLBuffer sub_buffer;
sub_buffer.Create();
glNamedBufferStorageMemEXT(sub_buffer.handle, SizeBytes() - offset,
memory_commit.ExportOpenGLHandle(), memory_commit.Offset() + offset);
return subs.emplace_back(std::move(sub_buffer), offset).first.handle;
}
void Buffer::CreateMemoryObjects(BufferCacheRuntime& runtime) {
auto& allocator = runtime.vulkan_memory_allocator;
auto& device = runtime.vulkan_device->GetLogical();
auto vulkan_buffer = device.CreateBuffer(VkBufferCreateInfo{
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = SizeBytes(),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
const VkMemoryRequirements requirements = device.GetBufferMemoryRequirements(*vulkan_buffer);
memory_commit = allocator->Commit(requirements, Vulkan::MemoryUsage::DeviceLocal);
}
BufferCacheRuntime::BufferCacheRuntime(const Device& device_, const Vulkan::Device* vulkan_device_,
Vulkan::MemoryAllocator* vulkan_memory_allocator_)
: device{device_}, vulkan_device{vulkan_device_},
vulkan_memory_allocator{vulkan_memory_allocator_},
stream_buffer{device.HasFastBufferSubData() ? std::nullopt
: std::make_optional<StreamBuffer>()} {
BufferCacheRuntime::BufferCacheRuntime(const Device& device_)
: device{device_}, has_fast_buffer_sub_data{device.HasFastBufferSubData()},
use_assembly_shaders{device.UseAssemblyShaders()},
has_unified_vertex_buffers{device.HasVertexBufferUnifiedMemory()},
stream_buffer{has_fast_buffer_sub_data ? std::nullopt : std::make_optional<StreamBuffer>()} {
GLint gl_max_attributes;
glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &gl_max_attributes);
max_attributes = static_cast<u32>(gl_max_attributes);
use_assembly_shaders = device.UseAssemblyShaders();
has_unified_vertex_buffers = device.HasVertexBufferUnifiedMemory();
for (auto& stage_uniforms : fast_uniforms) {
for (OGLBuffer& buffer : stage_uniforms) {
buffer.Create();
glNamedBufferData(buffer.handle, BufferCache::SKIP_CACHE_SIZE, nullptr, GL_STREAM_DRAW);
}
}
for (auto& stage_uniforms : copy_uniforms) {
for (OGLBuffer& buffer : stage_uniforms) {
buffer.Create();
glNamedBufferData(buffer.handle, 0x10'000, nullptr, GL_STREAM_COPY);
}
}
for (OGLBuffer& buffer : copy_compute_uniforms) {
buffer.Create();
glNamedBufferData(buffer.handle, 0x10'000, nullptr, GL_STREAM_COPY);
}
}
void BufferCacheRuntime::CopyBuffer(Buffer& dst_buffer, Buffer& src_buffer,
@@ -167,8 +127,14 @@ void BufferCacheRuntime::BindVertexBuffer(u32 index, Buffer& buffer, u32 offset,
void BufferCacheRuntime::BindUniformBuffer(size_t stage, u32 binding_index, Buffer& buffer,
u32 offset, u32 size) {
if (use_assembly_shaders) {
const GLuint sub_buffer = buffer.SubBuffer(offset);
glBindBufferRangeNV(PABO_LUT[stage], binding_index, sub_buffer, 0,
GLuint handle;
if (offset != 0) {
handle = copy_uniforms[stage][binding_index].handle;
glCopyNamedBufferSubData(buffer.Handle(), handle, offset, 0, size);
} else {
handle = buffer.Handle();
}
glBindBufferRangeNV(PABO_LUT[stage], binding_index, handle, 0,
static_cast<GLsizeiptr>(size));
} else {
const GLuint base_binding = device.GetBaseBindings(stage).uniform_buffer;
@@ -181,8 +147,15 @@ void BufferCacheRuntime::BindUniformBuffer(size_t stage, u32 binding_index, Buff
void BufferCacheRuntime::BindComputeUniformBuffer(u32 binding_index, Buffer& buffer, u32 offset,
u32 size) {
if (use_assembly_shaders) {
glBindBufferRangeNV(GL_COMPUTE_PROGRAM_PARAMETER_BUFFER_NV, binding_index,
buffer.SubBuffer(offset), 0, static_cast<GLsizeiptr>(size));
GLuint handle;
if (offset != 0) {
handle = copy_compute_uniforms[binding_index].handle;
glCopyNamedBufferSubData(buffer.Handle(), handle, offset, 0, size);
} else {
handle = buffer.Handle();
}
glBindBufferRangeNV(GL_COMPUTE_PROGRAM_PARAMETER_BUFFER_NV, binding_index, handle, 0,
static_cast<GLsizeiptr>(size));
} else {
glBindBufferRange(GL_UNIFORM_BUFFER, binding_index, buffer.Handle(),
static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
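Both uniform-binding paths above now follow the same pattern in place of the removed memory-object sub-buffers: when assembly shaders are active and the bind offset is nonzero, the wanted range is copied into one of the pre-created 0x10'000-byte scratch buffers and that scratch buffer is bound at offset 0. A rough standalone illustration of the pattern (hypothetical helper, not part of this commit; assumes a GL 4.5 context with DSA entry points already loaded):

// Returns a buffer name whose offset 0 exposes [offset, offset + size) of source.
GLuint BindableAtZero(GLuint source, GLuint scratch, GLintptr offset, GLsizeiptr size) {
    if (offset == 0) {
        return source; // fast path: bind the source buffer directly
    }
    // Copy the range to the front of the scratch buffer (DSA copy, OpenGL 4.5).
    glCopyNamedBufferSubData(source, scratch, offset, 0, size);
    return scratch;
}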

View file

@@ -15,13 +15,6 @@
#include "video_core/renderer_opengl/gl_device.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_stream_buffer.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
namespace Vulkan {
class Device;
class MemoryAllocator;
} // namespace Vulkan
namespace OpenGL {
@@ -39,8 +32,6 @@ public:
void MakeResident(GLenum access) noexcept;
[[nodiscard]] GLuint SubBuffer(u32 offset);
[[nodiscard]] GLuint64EXT HostGpuAddr() const noexcept {
return address;
}
@@ -50,13 +41,9 @@ public:
}
private:
void CreateMemoryObjects(BufferCacheRuntime& runtime);
GLuint64EXT address = 0;
Vulkan::MemoryCommit memory_commit;
OGLBuffer buffer;
GLenum current_residency_access = GL_NONE;
std::vector<std::pair<OGLBuffer, u32>> subs;
};
class BufferCacheRuntime {
@@ -65,8 +52,7 @@ class BufferCacheRuntime {
public:
static constexpr u8 INVALID_BINDING = std::numeric_limits<u8>::max();
explicit BufferCacheRuntime(const Device& device_, const Vulkan::Device* vulkan_device_,
Vulkan::MemoryAllocator* vulkan_memory_allocator_);
explicit BufferCacheRuntime(const Device& device_);
void CopyBuffer(Buffer& dst_buffer, Buffer& src_buffer,
std::span<const VideoCommon::BufferCopy> copies);
@@ -127,7 +113,7 @@ public:
}
[[nodiscard]] bool HasFastBufferSubData() const noexcept {
return device.HasFastBufferSubData();
return has_fast_buffer_sub_data;
}
private:
@@ -138,18 +124,22 @@ private:
};
const Device& device;
const Vulkan::Device* vulkan_device;
Vulkan::MemoryAllocator* vulkan_memory_allocator;
std::optional<StreamBuffer> stream_buffer;
bool has_fast_buffer_sub_data = false;
bool use_assembly_shaders = false;
bool has_unified_vertex_buffers = false;
u32 max_attributes = 0;
bool use_assembly_shaders = false;
bool has_unified_vertex_buffers = false;
std::optional<StreamBuffer> stream_buffer;
std::array<std::array<OGLBuffer, VideoCommon::NUM_GRAPHICS_UNIFORM_BUFFERS>,
VideoCommon::NUM_STAGES>
fast_uniforms;
std::array<std::array<OGLBuffer, VideoCommon::NUM_GRAPHICS_UNIFORM_BUFFERS>,
VideoCommon::NUM_STAGES>
copy_uniforms;
std::array<OGLBuffer, VideoCommon::NUM_COMPUTE_UNIFORM_BUFFERS> copy_compute_uniforms;
u32 index_buffer_offset = 0;
};

View file

@@ -197,7 +197,7 @@ bool IsASTCSupported() {
}
} // Anonymous namespace
Device::Device(bool has_vulkan_instance) {
Device::Device() {
if (!GLAD_GL_VERSION_4_3) {
LOG_ERROR(Render_OpenGL, "OpenGL 4.3 is not available");
throw std::runtime_error{"Insufficient version"};
@@ -246,8 +246,7 @@ Device::Device(bool has_vulkan_instance) {
use_assembly_shaders = Settings::values.use_assembly_shaders.GetValue() &&
GLAD_GL_NV_gpu_program5 && GLAD_GL_NV_compute_program5 &&
GLAD_GL_NV_transform_feedback && GLAD_GL_NV_transform_feedback2 &&
has_vulkan_instance;
GLAD_GL_NV_transform_feedback && GLAD_GL_NV_transform_feedback2;
use_asynchronous_shaders = Settings::values.use_asynchronous_shaders.GetValue();

View file

@@ -19,7 +19,7 @@ public:
u32 image{};
};
explicit Device(bool has_vulkan_instance);
explicit Device();
explicit Device(std::nullptr_t);
u32 GetMaxUniformBuffers(Tegra::Engines::ShaderType shader_type) const noexcept {

View file

@@ -170,8 +170,6 @@ ImageViewType ImageViewTypeFromEntry(const ImageEntry& entry) {
RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
Core::Memory::Memory& cpu_memory_, const Device& device_,
const Vulkan::Device* vulkan_device,
Vulkan::MemoryAllocator* vulkan_memory_allocator,
ScreenInfo& screen_info_, ProgramManager& program_manager_,
StateTracker& state_tracker_)
: RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()),
@@ -179,7 +177,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_),
texture_cache_runtime(device, program_manager, state_tracker),
texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
buffer_cache_runtime(device, vulkan_device, vulkan_memory_allocator),
buffer_cache_runtime(device),
buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
shader_cache(*this, emu_window_, gpu, maxwell3d, kepler_compute, gpu_memory, device),
query_cache(*this, maxwell3d, gpu_memory),

View file

@@ -46,11 +46,6 @@ namespace Tegra {
class MemoryManager;
}
namespace Vulkan {
class Device;
class MemoryAllocator;
} // namespace Vulkan
namespace OpenGL {
struct ScreenInfo;
@@ -67,8 +62,6 @@ class RasterizerOpenGL : public VideoCore::RasterizerAccelerated {
public:
explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
Core::Memory::Memory& cpu_memory_, const Device& device_,
const Vulkan::Device* vulkan_device,
Vulkan::MemoryAllocator* vulkan_memory_allocator,
ScreenInfo& screen_info_, ProgramManager& program_manager_,
StateTracker& state_tracker_);
~RasterizerOpenGL() override;

View file

@@ -27,11 +27,6 @@
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/renderer_opengl.h"
#include "video_core/textures/decoders.h"
#include "video_core/vulkan_common/vulkan_debug_callback.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_instance.h"
#include "video_core/vulkan_common/vulkan_library.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
namespace OpenGL {
namespace {
@@ -127,93 +122,16 @@ void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum severit
break;
}
}
Vulkan::vk::PhysicalDevice FindPhysicalDevice(Vulkan::vk::Instance& instance) {
using namespace Vulkan;
using UUID = std::array<GLubyte, GL_UUID_SIZE_EXT>;
GLint num_device_uuids;
glGetIntegerv(GL_NUM_DEVICE_UUIDS_EXT, &num_device_uuids);
std::vector<UUID> device_uuids(num_device_uuids);
for (GLint index = 0; index < num_device_uuids; ++index) {
glGetUnsignedBytei_vEXT(GL_DEVICE_UUID_EXT, 0, device_uuids[index].data());
}
UUID driver_uuid;
glGetUnsignedBytevEXT(GL_DRIVER_UUID_EXT, driver_uuid.data());
for (const VkPhysicalDevice raw_physical_device : instance.EnumeratePhysicalDevices()) {
VkPhysicalDeviceIDProperties device_id_properties{};
device_id_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
VkPhysicalDeviceProperties2KHR properties{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR,
.pNext = &device_id_properties,
.properties{},
};
vk::PhysicalDevice physical_device(raw_physical_device, instance.Dispatch());
physical_device.GetProperties2KHR(properties);
if (!std::ranges::equal(device_id_properties.driverUUID, driver_uuid)) {
continue;
}
const auto it =
std::ranges::find_if(device_uuids, [&device_id_properties, driver_uuid](UUID uuid) {
return std::ranges::equal(device_id_properties.deviceUUID, uuid);
});
if (it != device_uuids.end()) {
return physical_device;
}
}
throw vk::Exception(VK_ERROR_INCOMPATIBLE_DRIVER);
}
} // Anonymous namespace
struct VulkanObjects {
static std::unique_ptr<VulkanObjects> TryCreate() {
if (!GLAD_GL_EXT_memory_object) {
// Interop is not present
return nullptr;
}
const std::string_view vendor{reinterpret_cast<const char*>(glGetString(GL_VENDOR))};
if (vendor == "ATI Technologies Inc.") {
// Avoid using GL_EXT_memory_object on AMD, as it makes the GL driver crash
return nullptr;
}
if (!Settings::values.use_assembly_shaders.GetValue()) {
// We only need interop when assembly shaders are enabled
return nullptr;
}
#ifdef __linux__
LOG_WARNING(Render_OpenGL, "Interop doesn't work on Linux at the moment");
return nullptr;
#endif
try {
return std::make_unique<VulkanObjects>();
} catch (const Vulkan::vk::Exception& exception) {
LOG_ERROR(Render_OpenGL, "Failed to initialize Vulkan objects with error: {}",
exception.what());
return nullptr;
}
}
Common::DynamicLibrary library{Vulkan::OpenLibrary()};
Vulkan::vk::InstanceDispatch dld;
Vulkan::vk::Instance instance{Vulkan::CreateInstance(library, dld, VK_API_VERSION_1_1)};
Vulkan::Device device{*instance, FindPhysicalDevice(instance), nullptr, dld};
Vulkan::MemoryAllocator memory_allocator{device, true};
};
RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
Core::Frontend::EmuWindow& emu_window_,
Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
std::unique_ptr<Core::Frontend::GraphicsContext> context_)
: RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_},
emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_},
vulkan_objects{VulkanObjects::TryCreate()}, device{vulkan_objects != nullptr},
state_tracker{gpu}, program_manager{device},
rasterizer(emu_window, gpu, cpu_memory, device,
vulkan_objects ? &vulkan_objects->device : nullptr,
vulkan_objects ? &vulkan_objects->memory_allocator : nullptr, screen_info,
program_manager, state_tracker) {
emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu},
program_manager{device},
rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) {
if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);

View file

@@ -38,8 +38,6 @@ class GPU;
namespace OpenGL {
struct VulkanObjects;
/// Structure used for storing information about the textures for the Switch screen
struct TextureInfo {
OGLTexture resource;
@@ -101,7 +99,6 @@ private:
Core::Memory::Memory& cpu_memory;
Tegra::GPU& gpu;
std::unique_ptr<VulkanObjects> vulkan_objects;
Device device;
StateTracker state_tracker;
ProgramManager program_manager;

View file

@@ -785,7 +785,7 @@ void Config::ReadRendererValues() {
true);
ReadSettingGlobal(Settings::values.use_vsync, QStringLiteral("use_vsync"), true);
ReadSettingGlobal(Settings::values.use_assembly_shaders, QStringLiteral("use_assembly_shaders"),
true);
false);
ReadSettingGlobal(Settings::values.use_asynchronous_shaders,
QStringLiteral("use_asynchronous_shaders"), false);
ReadSettingGlobal(Settings::values.use_fast_gpu_time, QStringLiteral("use_fast_gpu_time"),