early-access version 3568
Parent: acf7b81b9a
Commit: 7ae07b908c
13 changed files with 39 additions and 51 deletions
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 3567.
+This is the source code for early-access 3568.
 
 ## Legal Notice
 
@@ -2,9 +2,9 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <algorithm>
+#include <set>
 #include <utility>
 #include "core/file_sys/vfs_layered.h"
-#include "core/file_sys/vfs_types.h"
 
 namespace FileSys {
 
@@ -59,13 +59,13 @@ std::string LayeredVfsDirectory::GetFullPath() const {
 
 std::vector<VirtualFile> LayeredVfsDirectory::GetFiles() const {
     std::vector<VirtualFile> out;
-    std::map<std::string, size_t, std::less<>> out_positions;
+    std::set<std::string, std::less<>> out_names;
 
     for (const auto& layer : dirs) {
         for (const auto& file : layer->GetFiles()) {
             auto file_name = file->GetName();
-            if (!out_positions.contains(file_name)) {
-                out_positions.emplace(std::move(file_name), out.size());
+            if (!out_names.contains(file_name)) {
+                out_names.emplace(std::move(file_name));
                 out.push_back(file);
             }
         }
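The hunk above drops the std::map that tracked insertion positions in favor of a plain std::set, since GetFiles only needs name membership to deduplicate files across layers. Below is a minimal standalone sketch of that pattern, using illustrative string data instead of yuzu's VirtualFile/VFS types; it is not part of the patch.

// Standalone sketch (not yuzu code): deduplicate entries across layers with a
// std::set and a transparent comparator, keeping the first layer's entry.
#include <functional>
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
    // Two "layers"; the earlier layer wins for duplicate names.
    const std::vector<std::vector<std::string>> layers{
        {"a.bin", "b.bin"},
        {"b.bin", "c.bin"},
    };

    std::vector<std::string> out;
    std::set<std::string, std::less<>> out_names;
    for (const auto& layer : layers) {
        for (const auto& file_name : layer) {
            if (!out_names.contains(file_name)) {
                out_names.emplace(file_name);
                out.push_back(file_name);
            }
        }
    }

    for (const auto& name : out) {
        std::cout << name << '\n'; // prints a.bin, b.bin, c.bin
    }
}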
@@ -979,7 +979,6 @@ void EmulatedController::SetMotion(const Common::Input::CallbackStatus& callback
     emulated.SetUserGyroThreshold(raw_status.gyro.x.properties.threshold);
     emulated.UpdateRotation(raw_status.delta_timestamp);
     emulated.UpdateOrientation(raw_status.delta_timestamp);
-    force_update_motion = raw_status.force_update;
 
     auto& motion = controller.motion_state[index];
     motion.accel = emulated.GetAcceleration();
@@ -1618,19 +1617,6 @@ NpadGcTriggerState EmulatedController::GetTriggers() const {
 
 MotionState EmulatedController::GetMotions() const {
     std::unique_lock lock{mutex};
-
-    // Some drivers like mouse motion need constant refreshing
-    if (force_update_motion) {
-        for (auto& device : motion_devices) {
-            if (!device) {
-                continue;
-            }
-            lock.unlock();
-            device->ForceUpdate();
-            lock.lock();
-        }
-    }
-
     return controller.motion_state;
 }
 
@@ -1696,8 +1682,21 @@ void EmulatedController::DeleteCallback(int key) {
     callback_list.erase(iterator);
 }
 
-void EmulatedController::TurboButtonUpdate() {
+void EmulatedController::StatusUpdate() {
     turbo_button_state = (turbo_button_state + 1) % (TURBO_BUTTON_DELAY * 2);
+
+    // Some drivers like key motion need constant refreshing
+    for (std::size_t index = 0; index < motion_devices.size(); ++index) {
+        const auto& raw_status = controller.motion_values[index].raw_status;
+        auto& device = motion_devices[index];
+        if (!raw_status.force_update) {
+            continue;
+        }
+        if (!device) {
+            continue;
+        }
+        device->ForceUpdate();
+    }
 }
 
 NpadButton EmulatedController::GetTurboButtonMask() const {
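With force_update_motion gone, the per-device forced refresh now runs from StatusUpdate() instead of GetMotions(), keyed off each motion device's own raw status. The standalone sketch below illustrates that polling pattern with made-up MotionDevice/RawStatus types; it mirrors the loop added above but is not yuzu's actual HID code.

// Standalone sketch (not yuzu code): refresh only the devices whose raw
// status asked for a constant update, skipping empty slots.
#include <array>
#include <cstddef>
#include <iostream>
#include <memory>

struct MotionDevice {
    virtual ~MotionDevice() = default;
    virtual void ForceUpdate() = 0;
};

struct MouseMotion final : MotionDevice {
    void ForceUpdate() override { std::cout << "refreshed mouse motion\n"; }
};

struct RawStatus {
    bool force_update{};
};

int main() {
    std::array<std::unique_ptr<MotionDevice>, 2> motion_devices{};
    std::array<RawStatus, 2> raw_statuses{};

    motion_devices[0] = std::make_unique<MouseMotion>();
    raw_statuses[0].force_update = true; // e.g. mouse-driven motion
    // motion_devices[1] stays empty and raw_statuses[1] requests no refresh.

    for (std::size_t index = 0; index < motion_devices.size(); ++index) {
        if (!raw_statuses[index].force_update) {
            continue;
        }
        if (!motion_devices[index]) {
            continue;
        }
        motion_devices[index]->ForceUpdate();
    }
}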
@@ -415,8 +415,8 @@ public:
      */
     void DeleteCallback(int key);
 
-    /// Swaps the state of the turbo buttons
-    void TurboButtonUpdate();
+    /// Swaps the state of the turbo buttons and updates motion input
+    void StatusUpdate();
 
 private:
     /// creates input devices from params
@@ -528,7 +528,6 @@ private:
     bool is_configuring{false};
     bool system_buttons_enabled{true};
     f32 motion_sensitivity{Core::HID::MotionInput::IsAtRestStandard};
-    bool force_update_motion{false};
    u32 turbo_button_state{0};
 
     // Temporary values to avoid doing changes while the controller is in configuring mode
@@ -86,7 +86,7 @@ Common::Input::MotionStatus TransformToMotion(const Common::Input::CallbackStatu
         .range = 1.0f,
         .offset = 0.0f,
     };
-    status.delta_timestamp = 5000;
+    status.delta_timestamp = 1000;
     status.force_update = true;
     status.accel.x = {
         .value = 0.0f,
@@ -423,8 +423,8 @@ void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
         return;
     }
 
-    // This function is unique to yuzu for the turbo buttons to work properly
-    controller.device->TurboButtonUpdate();
+    // This function is unique to yuzu for the turbo buttons and motion to work properly
+    controller.device->StatusUpdate();
 
     auto& pad_entry = controller.npad_pad_state;
     auto& trigger_entry = controller.npad_trigger_state;
@@ -667,7 +667,7 @@ public:
             .raw_value = input_engine->GetAxis(identifier, axis_z),
             .properties = properties_z,
         };
-        status.delta_timestamp = 5000;
+        status.delta_timestamp = 1000;
         status.force_update = true;
         return status;
     }
@@ -203,11 +203,8 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
         const VAddr new_base_address = *cpu_dest_address + diff;
         const IntervalType add_interval{new_base_address, new_base_address + size};
         tmp_intervals.push_back(add_interval);
-        if (!Settings::values.use_reactive_flushing.GetValue() ||
-            memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
-            uncommitted_ranges.add(add_interval);
-            pending_ranges.add(add_interval);
-        }
+        uncommitted_ranges.add(add_interval);
+        pending_ranges.add(add_interval);
     };
     ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
     // This subtraction in this order is important for overlapping copies.
@@ -1234,10 +1231,6 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
 
     const IntervalType base_interval{cpu_addr, cpu_addr + size};
     common_ranges.add(base_interval);
-    if (Settings::values.use_reactive_flushing.GetValue() &&
-        !memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
-        return;
-    }
     uncommitted_ranges.add(base_interval);
     pending_ranges.add(base_interval);
 }
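Both buffer-cache hunks above remove the use_reactive_flushing / IsRegionPreflushable gate, so written intervals are always added to the uncommitted and pending range sets. The standalone sketch below shows the resulting unconditional bookkeeping using Boost.ICL interval sets, which is what mainline yuzu's IntervalSet/IntervalType aliases are built on; the set names mirror the buffer cache, everything else is illustrative and assumes Boost is available.

// Standalone sketch (not yuzu code): every written range is recorded in all
// three interval sets, with no reactive-flushing early-out.
#include <cstdint>
#include <iostream>
#include <boost/icl/interval_set.hpp>

using VAddr = std::uint64_t;
using IntervalSet = boost::icl::interval_set<VAddr>;
using IntervalType = IntervalSet::interval_type;

int main() {
    IntervalSet common_ranges;
    IntervalSet uncommitted_ranges;
    IntervalSet pending_ranges;

    const VAddr cpu_addr = 0x1000;
    const std::uint32_t size = 0x200;
    const IntervalType base_interval{cpu_addr, cpu_addr + size};

    // MarkWrittenBuffer-style update after this commit.
    common_ranges.add(base_interval);
    uncommitted_ranges.add(base_interval);
    pending_ranges.add(base_interval);

    for (const auto& interval : pending_ranges) {
        std::cout << std::hex << interval.lower() << "-" << interval.upper() << '\n';
    }
}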
@@ -288,7 +288,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
     write_buffer.resize_destructive(dst_size);
 
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
-    memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
+    memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
 
     // If the input is linear and the output is tiled, swizzle the input and copy it over.
     SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
@@ -1304,7 +1304,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
                                        const Tegra::DMA::BufferOperand& buffer_operand,
                                        const Tegra::DMA::ImageOperand& image_operand) {
     std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-    const auto image_id = texture_cache.DmaImageId(image_operand);
+    const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
     if (image_id == VideoCommon::NULL_IMAGE_ID) {
         return false;
     }
@@ -510,13 +510,6 @@ VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64
             return *area;
         }
     }
-    {
-        std::scoped_lock lock{buffer_cache.mutex};
-        auto area = buffer_cache.GetFlushArea(addr, size);
-        if (area) {
-            return *area;
-        }
-    }
     VideoCore::RasterizerDownloadArea new_area{
         .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
         .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
@@ -800,7 +793,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
                                        const Tegra::DMA::BufferOperand& buffer_operand,
                                        const Tegra::DMA::ImageOperand& image_operand) {
     std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-    const auto image_id = texture_cache.DmaImageId(image_operand);
+    const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
     if (image_id == VideoCommon::NULL_IMAGE_ID) {
         return false;
     }
@@ -811,7 +811,7 @@ void TextureCache<P>::PopAsyncFlushes() {
 }
 
 template <class P>
-ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
+ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload) {
     const ImageInfo dst_info(operand);
     const ImageId dst_id = FindDMAImage(dst_info, operand.address);
     if (!dst_id) {
|
@ -822,7 +822,7 @@ ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
|
||||||
// No need to waste time on an image that's synced with guest
|
// No need to waste time on an image that's synced with guest
|
||||||
return NULL_IMAGE_ID;
|
return NULL_IMAGE_ID;
|
||||||
}
|
}
|
||||||
if (!image.info.dma_downloaded) {
|
if (!is_upload && !image.info.dma_downloaded) {
|
||||||
// Force a full sync.
|
// Force a full sync.
|
||||||
image.info.dma_downloaded = true;
|
image.info.dma_downloaded = true;
|
||||||
return NULL_IMAGE_ID;
|
return NULL_IMAGE_ID;
|
||||||
|
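The new is_upload flag means a DMA upload into a cached image no longer forces a full sync just because the image has never been DMA-downloaded; only downloads take that path. A standalone sketch of that gate with made-up types, separate from the patch:

// Standalone sketch (not yuzu code): an image missing its DMA download only
// forces a full sync for downloads; uploads use the cached image directly.
#include <iostream>

struct ImageInfo {
    bool dma_downloaded{false};
};

// Returns true when the caller must fall back to a full sync instead of using
// the cached image (mirrors the !is_upload && !dma_downloaded check above).
bool NeedsFullSync(ImageInfo& info, bool is_upload) {
    if (!is_upload && !info.dma_downloaded) {
        info.dma_downloaded = true;
        return true;
    }
    return false;
}

int main() {
    ImageInfo info{};
    std::cout << NeedsFullSync(info, /*is_upload=*/true) << '\n';  // 0: upload, cache is fine
    std::cout << NeedsFullSync(info, /*is_upload=*/false) << '\n'; // 1: first download syncs
    std::cout << NeedsFullSync(info, /*is_upload=*/false) << '\n'; // 0: already downloaded once
}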
@@ -1323,7 +1323,6 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
             all_siblings.push_back(overlap_id);
         } else {
             bad_overlap_ids.push_back(overlap_id);
-            overlap.flags |= ImageFlagBits::BadOverlap;
         }
     };
     ForEachImageInRegion(cpu_addr, size_bytes, region_check);
@@ -1434,7 +1433,12 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
         ImageBase& aliased = slot_images[aliased_id];
         aliased.overlapping_images.push_back(new_image_id);
         new_image.overlapping_images.push_back(aliased_id);
-        new_image.flags |= ImageFlagBits::BadOverlap;
+        if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
+            aliased.flags |= ImageFlagBits::BadOverlap;
+        }
+        if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
+            new_image.flags |= ImageFlagBits::BadOverlap;
+        }
     }
     RegisterImage(new_image_id);
     return new_image_id;
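JoinImages now flags BadOverlap more selectively: instead of unconditionally tagging the new image (and, in the earlier hunk, every bad overlap), only single-mip images that already alias more than one other image get the flag, on both sides of the alias. A standalone sketch of that predicate with illustrative types, not yuzu's texture cache:

// Standalone sketch (not yuzu code): the relaxed BadOverlap condition only
// flags single-mip images that alias more than one other image.
#include <cstddef>
#include <iostream>
#include <vector>

struct Image {
    std::size_t mip_levels{1};
    std::vector<int> overlapping_images;
    bool bad_overlap{false};
};

void MarkIfBadOverlap(Image& image) {
    if (image.mip_levels == 1 && image.overlapping_images.size() > 1) {
        image.bad_overlap = true;
    }
}

int main() {
    Image single_overlap{.mip_levels = 1, .overlapping_images = {2}};
    Image many_overlaps{.mip_levels = 1, .overlapping_images = {2, 7}};
    Image mipmapped{.mip_levels = 4, .overlapping_images = {2, 7}};

    MarkIfBadOverlap(single_overlap);
    MarkIfBadOverlap(many_overlaps);
    MarkIfBadOverlap(mipmapped);

    std::cout << single_overlap.bad_overlap << ' ' << many_overlaps.bad_overlap
              << ' ' << mipmapped.bad_overlap << '\n'; // prints "0 1 0"
}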
@@ -207,7 +207,7 @@ public:
     /// Pop asynchronous downloads
     void PopAsyncFlushes();
 
-    [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
+    [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload);
 
     [[nodiscard]] std::pair<Image*, BufferImageCopy> DmaBufferImageCopy(
         const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,