Texture cache: Only force flush the dma downloads

Fernando Sahmkow 2023-05-07 23:34:52 +02:00
parent 2df19ef0fd
commit 8014dd8259
5 changed files with 6 additions and 6 deletions


@@ -288,7 +288,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
     write_buffer.resize_destructive(dst_size);
 
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
-    memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
+    memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
 
     // If the input is linear and the output is tiled, swizzle the input and copy it over.
     SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
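
Only the destination prefetch switches to the unsafe read here. Below is a minimal, self-contained sketch of the assumed distinction between the two variants (an illustration of the intent, not yuzu's actual MemoryManager): the safe read is expected to write cached GPU data back to guest memory for the range before copying, while the unsafe read copies guest memory as-is, which is acceptable because the subrect that matters is rewritten by SwizzleSubrect before the buffer is written back.

#include <cstddef>
#include <cstring>
#include <vector>

// Illustrative sketch only; the type and member names are hypothetical.
struct MemoryManagerSketch {
    std::vector<unsigned char> guest_memory;

    // Stand-in for asking the rasterizer to write cached GPU data back to
    // guest memory for [addr, addr + size).
    void FlushRange(std::size_t addr, std::size_t size) { (void)addr; (void)size; }

    // Safe read: synchronize first, then copy.
    void ReadBlock(std::size_t addr, void* dest, std::size_t size) {
        FlushRange(addr, size);
        std::memcpy(dest, guest_memory.data() + addr, size);
    }

    // Unsafe read: copy whatever guest memory currently holds, skipping the
    // flush and therefore avoiding a forced GPU-to-CPU download.
    void ReadBlockUnsafe(std::size_t addr, void* dest, std::size_t size) {
        std::memcpy(dest, guest_memory.data() + addr, size);
    }
};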


@@ -1304,7 +1304,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
                                        const Tegra::DMA::BufferOperand& buffer_operand,
                                        const Tegra::DMA::ImageOperand& image_operand) {
     std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-    const auto image_id = texture_cache.DmaImageId(image_operand);
+    const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
     if (image_id == VideoCommon::NULL_IMAGE_ID) {
         return false;
     }


@@ -793,7 +793,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
                                        const Tegra::DMA::BufferOperand& buffer_operand,
                                        const Tegra::DMA::ImageOperand& image_operand) {
     std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-    const auto image_id = texture_cache.DmaImageId(image_operand);
+    const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
     if (image_id == VideoCommon::NULL_IMAGE_ID) {
         return false;
     }


@@ -811,7 +811,7 @@ void TextureCache<P>::PopAsyncFlushes() {
 }
 
 template <class P>
-ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
+ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload) {
     const ImageInfo dst_info(operand);
     const ImageId dst_id = FindDMAImage(dst_info, operand.address);
     if (!dst_id) {
@@ -822,7 +822,7 @@ ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
         // No need to waste time on an image that's synced with guest
         return NULL_IMAGE_ID;
     }
-    if (!image.info.dma_downloaded) {
+    if (!is_upload && !image.info.dma_downloaded) {
         // Force a full sync.
         image.info.dma_downloaded = true;
         return NULL_IMAGE_ID;
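
Pieced together, the gated logic reads roughly as follows. This sketch is assembled from the two hunks above; the parts the hunks do not show (the image lookup and the "synced with guest" check, written here as slot_images and IsSyncedWithGuest) are placeholders, not the real code.

template <class P>
ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload) {
    const ImageInfo dst_info(operand);
    const ImageId dst_id = FindDMAImage(dst_info, operand.address);
    if (!dst_id) {
        return NULL_IMAGE_ID; // no cached image covers this operand
    }
    auto& image = slot_images[dst_id];   // placeholder for the lookup not shown in the hunks
    if (IsSyncedWithGuest(image)) {      // placeholder for the elided condition
        // No need to waste time on an image that's synced with guest
        return NULL_IMAGE_ID;
    }
    if (!is_upload && !image.info.dma_downloaded) {
        // Force a full sync, but now only for downloads: the first DMA download of a
        // GPU-modified image still takes the slow path once, while uploads no longer do.
        image.info.dma_downloaded = true;
        return NULL_IMAGE_ID;
    }
    return dst_id; // the accelerated DMA path can use this image
}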


@@ -207,7 +207,7 @@ public:
     /// Pop asynchronous downloads
     void PopAsyncFlushes();
 
-    [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
+    [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload);
 
     [[nodiscard]] std::pair<Image*, BufferImageCopy> DmaBufferImageCopy(
         const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,