summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp    45
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h      16
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.cpp 24
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.h    6
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp   248
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h       5
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp 22
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h    6
-rw-r--r--  src/video_core/texture_cache/texture_cache.h        86
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h   10
10 files changed, 208 insertions, 260 deletions
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 7bced675c..33748762f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -63,7 +63,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
63 buffer_cache(*this, cpu_memory_, buffer_cache_runtime), 63 buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
64 shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager, 64 shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
65 state_tracker, gpu.ShaderNotify()), 65 state_tracker, gpu.ShaderNotify()),
66 query_cache(*this), accelerate_dma(buffer_cache), 66 query_cache(*this), accelerate_dma(buffer_cache, texture_cache),
67 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache), 67 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache),
68 blit_image(program_manager_) {} 68 blit_image(program_manager_) {}
69 69
@@ -1262,7 +1262,8 @@ void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
1262 query_cache.EraseChannel(channel_id); 1262 query_cache.EraseChannel(channel_id);
1263} 1263}
1264 1264
1265AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} 1265AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_, TextureCache& texture_cache_)
1266 : buffer_cache{buffer_cache_}, texture_cache{texture_cache_} {}
1266 1267
1267bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { 1268bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
1268 std::scoped_lock lock{buffer_cache.mutex}; 1269 std::scoped_lock lock{buffer_cache.mutex};
@@ -1274,4 +1275,44 @@ bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
1274 return buffer_cache.DMAClear(src_address, amount, value); 1275 return buffer_cache.DMAClear(src_address, amount, value);
1275} 1276}
1276 1277
1278template <bool IS_IMAGE_UPLOAD>
1279bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
1280 const Tegra::DMA::BufferOperand& buffer_operand,
1281 const Tegra::DMA::ImageOperand& image_operand) {
1282 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
1283 const auto image_id = texture_cache.DmaImageId(image_operand);
1284 if (image_id == VideoCommon::NULL_IMAGE_ID) {
1285 return false;
1286 }
1287 const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
1288 static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
1289 const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
1290 : VideoCommon::ObtainBufferOperation::MarkAsWritten;
1291 const auto [buffer, offset] =
1292 buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
1293
1294 const auto [image, copy] = texture_cache.DmaBufferImageCopy(
1295 copy_info, buffer_operand, image_operand, image_id, IS_IMAGE_UPLOAD);
1296 const std::span copy_span{&copy, 1};
1297
1298 if constexpr (IS_IMAGE_UPLOAD) {
1299 image->UploadMemory(buffer->Handle(), offset, copy_span);
1300 } else {
1301 image->DownloadMemory(buffer->Handle(), offset, copy_span);
1302 }
1303 return true;
1304}
1305
1306bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
1307 const Tegra::DMA::ImageOperand& image_operand,
1308 const Tegra::DMA::BufferOperand& buffer_operand) {
1309 return DmaBufferImageCopy<false>(copy_info, buffer_operand, image_operand);
1310}
1311
1312bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
1313 const Tegra::DMA::BufferOperand& buffer_operand,
1314 const Tegra::DMA::ImageOperand& image_operand) {
1315 return DmaBufferImageCopy<true>(copy_info, buffer_operand, image_operand);
1316}
1317
1277} // namespace OpenGL 1318} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 7e21fc43d..1f6562ef8 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -50,24 +50,26 @@ static_assert(sizeof(BindlessSSBO) * CHAR_BIT == 128);
50 50
51class AccelerateDMA : public Tegra::Engines::AccelerateDMAInterface { 51class AccelerateDMA : public Tegra::Engines::AccelerateDMAInterface {
52public: 52public:
53 explicit AccelerateDMA(BufferCache& buffer_cache); 53 explicit AccelerateDMA(BufferCache& buffer_cache, TextureCache& texture_cache);
54 54
55 bool BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) override; 55 bool BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) override;
56 56
57 bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override; 57 bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
58 58
59 bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src, 59 bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src,
60 const Tegra::DMA::BufferOperand& dst) override { 60 const Tegra::DMA::BufferOperand& dst) override;
61 return false;
62 }
63 61
64 bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src, 62 bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src,
65 const Tegra::DMA::ImageOperand& dst) override { 63 const Tegra::DMA::ImageOperand& dst) override;
66 return false;
67 }
68 64
69private: 65private:
66 template <bool IS_IMAGE_UPLOAD>
67 bool DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
68 const Tegra::DMA::BufferOperand& src,
69 const Tegra::DMA::ImageOperand& dst);
70
70 BufferCache& buffer_cache; 71 BufferCache& buffer_cache;
72 TextureCache& texture_cache;
71}; 73};
72 74
73class RasterizerOpenGL : public VideoCore::RasterizerAccelerated, 75class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 9b99125e5..0b9c4a904 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -763,14 +763,14 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas
763 763
764Image::~Image() = default; 764Image::~Image() = default;
765 765
766void Image::UploadMemory(const ImageBufferMap& map, 766void Image::UploadMemory(GLuint buffer_handle, size_t buffer_offset,
767 std::span<const VideoCommon::BufferImageCopy> copies) { 767 std::span<const VideoCommon::BufferImageCopy> copies) {
768 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 768 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
769 if (is_rescaled) { 769 if (is_rescaled) {
770 ScaleDown(true); 770 ScaleDown(true);
771 } 771 }
772 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, map.buffer); 772 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer_handle);
773 glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, map.offset, unswizzled_size_bytes); 773 glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, buffer_offset, unswizzled_size_bytes);
774 774
775 glPixelStorei(GL_UNPACK_ALIGNMENT, 1); 775 glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
776 776
@@ -789,21 +789,26 @@ void Image::UploadMemory(const ImageBufferMap& map,
789 current_image_height = copy.buffer_image_height; 789 current_image_height = copy.buffer_image_height;
790 glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, current_image_height); 790 glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, current_image_height);
791 } 791 }
792 CopyBufferToImage(copy, map.offset); 792 CopyBufferToImage(copy, buffer_offset);
793 } 793 }
794 if (is_rescaled) { 794 if (is_rescaled) {
795 ScaleUp(); 795 ScaleUp();
796 } 796 }
797} 797}
798 798
799void Image::DownloadMemory(ImageBufferMap& map, 799void Image::UploadMemory(const ImageBufferMap& map,
800 std::span<const VideoCommon::BufferImageCopy> copies) {
801 UploadMemory(map.buffer, map.offset, copies);
802}
803
804void Image::DownloadMemory(GLuint buffer_handle, size_t buffer_offset,
800 std::span<const VideoCommon::BufferImageCopy> copies) { 805 std::span<const VideoCommon::BufferImageCopy> copies) {
801 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 806 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
802 if (is_rescaled) { 807 if (is_rescaled) {
803 ScaleDown(); 808 ScaleDown();
804 } 809 }
805 glMemoryBarrier(GL_PIXEL_BUFFER_BARRIER_BIT); // TODO: Move this to its own API 810 glMemoryBarrier(GL_PIXEL_BUFFER_BARRIER_BIT); // TODO: Move this to its own API
806 glBindBuffer(GL_PIXEL_PACK_BUFFER, map.buffer); 811 glBindBuffer(GL_PIXEL_PACK_BUFFER, buffer_handle);
807 glPixelStorei(GL_PACK_ALIGNMENT, 1); 812 glPixelStorei(GL_PACK_ALIGNMENT, 1);
808 813
809 u32 current_row_length = std::numeric_limits<u32>::max(); 814 u32 current_row_length = std::numeric_limits<u32>::max();
@@ -821,13 +826,18 @@ void Image::DownloadMemory(ImageBufferMap& map,
821 current_image_height = copy.buffer_image_height; 826 current_image_height = copy.buffer_image_height;
822 glPixelStorei(GL_PACK_IMAGE_HEIGHT, current_image_height); 827 glPixelStorei(GL_PACK_IMAGE_HEIGHT, current_image_height);
823 } 828 }
824 CopyImageToBuffer(copy, map.offset); 829 CopyImageToBuffer(copy, buffer_offset);
825 } 830 }
826 if (is_rescaled) { 831 if (is_rescaled) {
827 ScaleUp(true); 832 ScaleUp(true);
828 } 833 }
829} 834}
830 835
836void Image::DownloadMemory(ImageBufferMap& map,
837 std::span<const VideoCommon::BufferImageCopy> copies) {
838 DownloadMemory(map.buffer, map.offset, copies);
839}
840
831GLuint Image::StorageHandle() noexcept { 841GLuint Image::StorageHandle() noexcept {
832 switch (info.format) { 842 switch (info.format) {
833 case PixelFormat::A8B8G8R8_SRGB: 843 case PixelFormat::A8B8G8R8_SRGB:
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index e30875496..911e4607a 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -206,9 +206,15 @@ public:
206 Image(Image&&) = default; 206 Image(Image&&) = default;
207 Image& operator=(Image&&) = default; 207 Image& operator=(Image&&) = default;
208 208
209 void UploadMemory(GLuint buffer_handle, size_t buffer_offset,
210 std::span<const VideoCommon::BufferImageCopy> copies);
211
209 void UploadMemory(const ImageBufferMap& map, 212 void UploadMemory(const ImageBufferMap& map,
210 std::span<const VideoCommon::BufferImageCopy> copies); 213 std::span<const VideoCommon::BufferImageCopy> copies);
211 214
215 void DownloadMemory(GLuint buffer_handle, size_t buffer_offset,
216 std::span<const VideoCommon::BufferImageCopy> copies);
217
212 void DownloadMemory(ImageBufferMap& map, std::span<const VideoCommon::BufferImageCopy> copies); 218 void DownloadMemory(ImageBufferMap& map, std::span<const VideoCommon::BufferImageCopy> copies);
213 219
214 GLuint StorageHandle() noexcept; 220 GLuint StorageHandle() noexcept;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 25965b684..855488ead 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -770,232 +770,44 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
770 return buffer_cache.DMACopy(src_address, dest_address, amount); 770 return buffer_cache.DMACopy(src_address, dest_address, amount);
771} 771}
772 772
773bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, 773template <bool IS_IMAGE_UPLOAD>
774 const Tegra::DMA::ImageOperand& src, 774bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
775 const Tegra::DMA::BufferOperand& dst) { 775 const Tegra::DMA::BufferOperand& buffer_operand,
776 const Tegra::DMA::ImageOperand& image_operand) {
776 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 777 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
777 auto query_image = texture_cache.ObtainImage(src, false); 778 const auto image_id = texture_cache.DmaImageId(image_operand);
778 if (!query_image) { 779 if (image_id == VideoCommon::NULL_IMAGE_ID) {
779 return false; 780 return false;
780 } 781 }
781 auto* image = query_image->first; 782 const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
782 auto [level, base] = query_image->second; 783 static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
783 const u32 buffer_size = static_cast<u32>(dst.pitch * dst.height); 784 const auto post_op = IS_IMAGE_UPLOAD ? VideoCommon::ObtainBufferOperation::DoNothing
784 const auto [buffer, offset] = buffer_cache.ObtainBuffer( 785 : VideoCommon::ObtainBufferOperation::MarkAsWritten;
785 dst.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize, 786 const auto [buffer, offset] =
786 VideoCommon::ObtainBufferOperation::MarkAsWritten); 787 buffer_cache.ObtainBuffer(buffer_operand.address, buffer_size, sync_info, post_op);
787 788
788 const bool is_rescaled = image->IsRescaled(); 789 const auto [image, copy] = texture_cache.DmaBufferImageCopy(
789 if (is_rescaled) { 790 copy_info, buffer_operand, image_operand, image_id, IS_IMAGE_UPLOAD);
790 image->ScaleDown(); 791 const std::span copy_span{&copy, 1};
791 } 792
792 VkImageSubresourceLayers subresources{ 793 if constexpr (IS_IMAGE_UPLOAD) {
793 .aspectMask = image->AspectMask(), 794 image->UploadMemory(buffer->Handle(), offset, copy_span);
794 .mipLevel = level, 795 } else {
795 .baseArrayLayer = base, 796 image->DownloadMemory(buffer->Handle(), offset, copy_span);
796 .layerCount = 1,
797 };
798 const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
799 const auto convert = [old_bpp = src.bytes_per_pixel, bpp](u32 value) {
800 return (old_bpp * value) / bpp;
801 };
802 const u32 base_x = convert(src.params.origin.x.Value());
803 const u32 base_y = src.params.origin.y.Value();
804 const u32 length_x = convert(copy_info.length_x);
805 const u32 length_y = copy_info.length_y;
806 VkOffset3D image_offset{
807 .x = static_cast<s32>(base_x),
808 .y = static_cast<s32>(base_y),
809 .z = 0,
810 };
811 VkExtent3D image_extent{
812 .width = length_x,
813 .height = length_y,
814 .depth = 1,
815 };
816 auto buff_info(dst);
817 buff_info.pitch = convert(dst.pitch);
818 scheduler.RequestOutsideRenderPassOperationContext();
819 scheduler.Record([src_image = image->Handle(), dst_buffer = buffer->Handle(),
820 buffer_offset = offset, subresources, image_offset, image_extent,
821 buff_info](vk::CommandBuffer cmdbuf) {
822 const std::array buffer_copy_info{
823 VkBufferImageCopy{
824 .bufferOffset = buffer_offset,
825 .bufferRowLength = buff_info.pitch,
826 .bufferImageHeight = buff_info.height,
827 .imageSubresource = subresources,
828 .imageOffset = image_offset,
829 .imageExtent = image_extent,
830 },
831 };
832 const VkImageSubresourceRange range{
833 .aspectMask = subresources.aspectMask,
834 .baseMipLevel = subresources.mipLevel,
835 .levelCount = 1,
836 .baseArrayLayer = subresources.baseArrayLayer,
837 .layerCount = 1,
838 };
839 static constexpr VkMemoryBarrier WRITE_BARRIER{
840 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
841 .pNext = nullptr,
842 .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
843 .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
844 };
845 const std::array pre_barriers{
846 VkImageMemoryBarrier{
847 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
848 .pNext = nullptr,
849 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
850 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
851 VK_ACCESS_TRANSFER_WRITE_BIT,
852 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
853 .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
854 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
855 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
856 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
857 .image = src_image,
858 .subresourceRange = range,
859 },
860 };
861 const std::array post_barriers{
862 VkImageMemoryBarrier{
863 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
864 .pNext = nullptr,
865 .srcAccessMask = 0,
866 .dstAccessMask = 0,
867 .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
868 .newLayout = VK_IMAGE_LAYOUT_GENERAL,
869 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
870 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
871 .image = src_image,
872 .subresourceRange = range,
873 },
874 };
875 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
876 0, {}, {}, pre_barriers);
877 cmdbuf.CopyImageToBuffer(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer,
878 buffer_copy_info);
879 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
880 0, WRITE_BARRIER, nullptr, post_barriers);
881 });
882 if (is_rescaled) {
883 image->ScaleUp(true);
884 } 797 }
885 return true; 798 return true;
886} 799}
887 800
801bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
802 const Tegra::DMA::ImageOperand& image_operand,
803 const Tegra::DMA::BufferOperand& buffer_operand) {
804 return DmaBufferImageCopy<false>(copy_info, buffer_operand, image_operand);
805}
806
888bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info, 807bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
889 const Tegra::DMA::BufferOperand& src, 808 const Tegra::DMA::BufferOperand& buffer_operand,
890 const Tegra::DMA::ImageOperand& dst) { 809 const Tegra::DMA::ImageOperand& image_operand) {
891 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 810 return DmaBufferImageCopy<true>(copy_info, buffer_operand, image_operand);
892 auto query_image = texture_cache.ObtainImage(dst, true);
893 if (!query_image) {
894 return false;
895 }
896 auto* image = query_image->first;
897 auto [level, base] = query_image->second;
898 const u32 buffer_size = static_cast<u32>(src.pitch * src.height);
899 const auto [buffer, offset] = buffer_cache.ObtainBuffer(
900 src.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
901 VideoCommon::ObtainBufferOperation::DoNothing);
902 const bool is_rescaled = image->IsRescaled();
903 if (is_rescaled) {
904 image->ScaleDown(true);
905 }
906 VkImageSubresourceLayers subresources{
907 .aspectMask = image->AspectMask(),
908 .mipLevel = level,
909 .baseArrayLayer = base,
910 .layerCount = 1,
911 };
912 const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
913 const auto convert = [old_bpp = dst.bytes_per_pixel, bpp](u32 value) {
914 return (old_bpp * value) / bpp;
915 };
916 const u32 base_x = convert(dst.params.origin.x.Value());
917 const u32 base_y = dst.params.origin.y.Value();
918 const u32 length_x = convert(copy_info.length_x);
919 const u32 length_y = copy_info.length_y;
920 VkOffset3D image_offset{
921 .x = static_cast<s32>(base_x),
922 .y = static_cast<s32>(base_y),
923 .z = 0,
924 };
925 VkExtent3D image_extent{
926 .width = length_x,
927 .height = length_y,
928 .depth = 1,
929 };
930 auto buff_info(src);
931 buff_info.pitch = convert(src.pitch);
932 scheduler.RequestOutsideRenderPassOperationContext();
933 scheduler.Record([dst_image = image->Handle(), src_buffer = buffer->Handle(),
934 buffer_offset = offset, subresources, image_offset, image_extent,
935 buff_info](vk::CommandBuffer cmdbuf) {
936 const std::array buffer_copy_info{
937 VkBufferImageCopy{
938 .bufferOffset = buffer_offset,
939 .bufferRowLength = buff_info.pitch,
940 .bufferImageHeight = buff_info.height,
941 .imageSubresource = subresources,
942 .imageOffset = image_offset,
943 .imageExtent = image_extent,
944 },
945 };
946 const VkImageSubresourceRange range{
947 .aspectMask = subresources.aspectMask,
948 .baseMipLevel = subresources.mipLevel,
949 .levelCount = 1,
950 .baseArrayLayer = subresources.baseArrayLayer,
951 .layerCount = 1,
952 };
953 static constexpr VkMemoryBarrier READ_BARRIER{
954 .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
955 .pNext = nullptr,
956 .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
957 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
958 };
959 const std::array pre_barriers{
960 VkImageMemoryBarrier{
961 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
962 .pNext = nullptr,
963 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
964 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
965 VK_ACCESS_TRANSFER_WRITE_BIT,
966 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
967 .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
968 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
969 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
970 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
971 .image = dst_image,
972 .subresourceRange = range,
973 },
974 };
975 const std::array post_barriers{
976 VkImageMemoryBarrier{
977 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
978 .pNext = nullptr,
979 .srcAccessMask = 0,
980 .dstAccessMask = 0,
981 .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
982 .newLayout = VK_IMAGE_LAYOUT_GENERAL,
983 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
984 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
985 .image = dst_image,
986 .subresourceRange = range,
987 },
988 };
989 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
990 0, READ_BARRIER, {}, pre_barriers);
991 cmdbuf.CopyBufferToImage(src_buffer, dst_image, VK_IMAGE_LAYOUT_GENERAL, buffer_copy_info);
992 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
993 0, nullptr, nullptr, post_barriers);
994 });
995 if (is_rescaled) {
996 image->ScaleUp();
997 }
998 return true;
999} 811}
1000 812
1001void RasterizerVulkan::UpdateDynamicStates() { 813void RasterizerVulkan::UpdateDynamicStates() {
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 7746c5434..1659fbc13 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -59,6 +59,11 @@ public:
59 const Tegra::DMA::ImageOperand& dst) override; 59 const Tegra::DMA::ImageOperand& dst) override;
60 60
61private: 61private:
62 template <bool IS_IMAGE_UPLOAD>
63 bool DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
64 const Tegra::DMA::BufferOperand& src,
65 const Tegra::DMA::ImageOperand& dst);
66
62 BufferCache& buffer_cache; 67 BufferCache& buffer_cache;
63 TextureCache& texture_cache; 68 TextureCache& texture_cache;
64 Scheduler& scheduler; 69 Scheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index e013d1c60..ae15f6976 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1315,15 +1315,16 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas
1315 1315
1316Image::~Image() = default; 1316Image::~Image() = default;
1317 1317
1318void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) { 1318void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
1319 std::span<const VideoCommon::BufferImageCopy> copies) {
1319 // TODO: Move this to another API 1320 // TODO: Move this to another API
1320 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 1321 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
1321 if (is_rescaled) { 1322 if (is_rescaled) {
1322 ScaleDown(true); 1323 ScaleDown(true);
1323 } 1324 }
1324 scheduler->RequestOutsideRenderPassOperationContext(); 1325 scheduler->RequestOutsideRenderPassOperationContext();
1325 std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask); 1326 std::vector vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
1326 const VkBuffer src_buffer = map.buffer; 1327 const VkBuffer src_buffer = buffer;
1327 const VkImage vk_image = *original_image; 1328 const VkImage vk_image = *original_image;
1328 const VkImageAspectFlags vk_aspect_mask = aspect_mask; 1329 const VkImageAspectFlags vk_aspect_mask = aspect_mask;
1329 const bool is_initialized = std::exchange(initialized, true); 1330 const bool is_initialized = std::exchange(initialized, true);
@@ -1336,14 +1337,19 @@ void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImag
1336 } 1337 }
1337} 1338}
1338 1339
1339void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) { 1340void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
1341 UploadMemory(map.buffer, map.offset, copies);
1342}
1343
1344void Image::DownloadMemory(VkBuffer buffer, VkDeviceSize offset,
1345 std::span<const VideoCommon::BufferImageCopy> copies) {
1340 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); 1346 const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
1341 if (is_rescaled) { 1347 if (is_rescaled) {
1342 ScaleDown(); 1348 ScaleDown();
1343 } 1349 }
1344 std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask); 1350 std::vector vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
1345 scheduler->RequestOutsideRenderPassOperationContext(); 1351 scheduler->RequestOutsideRenderPassOperationContext();
1346 scheduler->Record([buffer = map.buffer, image = *original_image, aspect_mask = aspect_mask, 1352 scheduler->Record([buffer, image = *original_image, aspect_mask = aspect_mask,
1347 vk_copies](vk::CommandBuffer cmdbuf) { 1353 vk_copies](vk::CommandBuffer cmdbuf) {
1348 const VkImageMemoryBarrier read_barrier{ 1354 const VkImageMemoryBarrier read_barrier{
1349 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, 1355 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
@@ -1398,6 +1404,10 @@ void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferIm
1398 } 1404 }
1399} 1405}
1400 1406
1407void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
1408 DownloadMemory(map.buffer, map.offset, copies);
1409}
1410
1401bool Image::IsRescaled() const noexcept { 1411bool Image::IsRescaled() const noexcept {
1402 return True(flags & ImageFlagBits::Rescaled); 1412 return True(flags & ImageFlagBits::Rescaled);
1403} 1413}
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 0ce39616f..d5ee23f8d 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -132,9 +132,15 @@ public:
132 Image(Image&&) = default; 132 Image(Image&&) = default;
133 Image& operator=(Image&&) = default; 133 Image& operator=(Image&&) = default;
134 134
135 void UploadMemory(VkBuffer buffer, VkDeviceSize offset,
136 std::span<const VideoCommon::BufferImageCopy> copies);
137
135 void UploadMemory(const StagingBufferRef& map, 138 void UploadMemory(const StagingBufferRef& map,
136 std::span<const VideoCommon::BufferImageCopy> copies); 139 std::span<const VideoCommon::BufferImageCopy> copies);
137 140
141 void DownloadMemory(VkBuffer buffer, VkDeviceSize offset,
142 std::span<const VideoCommon::BufferImageCopy> copies);
143
138 void DownloadMemory(const StagingBufferRef& map, 144 void DownloadMemory(const StagingBufferRef& map,
139 std::span<const VideoCommon::BufferImageCopy> copies); 145 std::span<const VideoCommon::BufferImageCopy> copies);
140 146
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 335338434..8e8b9a5e6 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -745,6 +745,25 @@ void TextureCache<P>::PopAsyncFlushes() {
745} 745}
746 746
747template <class P> 747template <class P>
748ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
749 const ImageInfo dst_info(operand);
750 const ImageId dst_id = FindDMAImage(dst_info, operand.address);
751 if (!dst_id) {
752 return NULL_IMAGE_ID;
753 }
754 const auto& image = slot_images[dst_id];
755 if (False(image.flags & ImageFlagBits::GpuModified)) {
756 // No need to waste time on an image that's synced with guest
757 return NULL_IMAGE_ID;
758 }
759 const auto base = image.TryFindBase(operand.address);
760 if (!base) {
761 return NULL_IMAGE_ID;
762 }
763 return dst_id;
764}
765
766template <class P>
748bool TextureCache<P>::IsRescaling() const noexcept { 767bool TextureCache<P>::IsRescaling() const noexcept {
749 return is_rescaling; 768 return is_rescaling;
750} 769}
@@ -772,6 +791,49 @@ bool TextureCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
772} 791}
773 792
774template <class P> 793template <class P>
794std::pair<typename TextureCache<P>::Image*, BufferImageCopy> TextureCache<P>::DmaBufferImageCopy(
795 const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
796 const Tegra::DMA::ImageOperand& image_operand, ImageId image_id, bool modifies_image) {
797 const auto [level, base] = PrepareDmaImage(image_id, image_operand.address, modifies_image);
798 auto* image = &slot_images[image_id];
799 const u32 buffer_size = static_cast<u32>(buffer_operand.pitch * buffer_operand.height);
800 const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
801 const auto convert = [old_bpp = image_operand.bytes_per_pixel, bpp](u32 value) {
802 return (old_bpp * value) / bpp;
803 };
804 const u32 base_x = convert(image_operand.params.origin.x.Value());
805 const u32 base_y = image_operand.params.origin.y.Value();
806 const u32 length_x = convert(copy_info.length_x);
807 const u32 length_y = copy_info.length_y;
808
809 const BufferImageCopy copy{
810 .buffer_offset = 0,
811 .buffer_size = buffer_size,
812 .buffer_row_length = convert(buffer_operand.pitch),
813 .buffer_image_height = buffer_operand.height,
814 .image_subresource =
815 {
816 .base_level = static_cast<s32>(level),
817 .base_layer = static_cast<s32>(base),
818 .num_layers = 1,
819 },
820 .image_offset =
821 {
822 .x = static_cast<s32>(base_x),
823 .y = static_cast<s32>(base_y),
824 .z = 0,
825 },
826 .image_extent =
827 {
828 .width = length_x,
829 .height = length_y,
830 .depth = 1,
831 },
832 };
833 return {image, copy};
834}
835
836template <class P>
775void TextureCache<P>::RefreshContents(Image& image, ImageId image_id) { 837void TextureCache<P>::RefreshContents(Image& image, ImageId image_id) {
776 if (False(image.flags & ImageFlagBits::CpuModified)) { 838 if (False(image.flags & ImageFlagBits::CpuModified)) {
777 // Only upload modified images 839 // Only upload modified images
@@ -1405,26 +1467,14 @@ ImageId TextureCache<P>::FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr)
1405} 1467}
1406 1468
1407template <class P> 1469template <class P>
1408std::optional<std::pair<typename TextureCache<P>::Image*, std::pair<u32, u32>>> 1470std::pair<u32, u32> TextureCache<P>::PrepareDmaImage(ImageId dst_id, GPUVAddr base_addr,
1409TextureCache<P>::ObtainImage(const Tegra::DMA::ImageOperand& operand, bool mark_as_modified) { 1471 bool mark_as_modified) {
1410 ImageInfo dst_info(operand); 1472 const auto& image = slot_images[dst_id];
1411 ImageId dst_id = FindDMAImage(dst_info, operand.address); 1473 const auto base = image.TryFindBase(base_addr);
1412 if (!dst_id) {
1413 return std::nullopt;
1414 }
1415 auto& image = slot_images[dst_id];
1416 auto base = image.TryFindBase(operand.address);
1417 if (!base) {
1418 return std::nullopt;
1419 }
1420 if (False(image.flags & ImageFlagBits::GpuModified)) {
1421 // No need to waste time on an image that's synced with guest
1422 return std::nullopt;
1423 }
1424 PrepareImage(dst_id, mark_as_modified, false); 1474 PrepareImage(dst_id, mark_as_modified, false);
1425 auto& new_image = slot_images[dst_id]; 1475 const auto& new_image = slot_images[dst_id];
1426 lru_cache.Touch(new_image.lru_index, frame_tick); 1476 lru_cache.Touch(new_image.lru_index, frame_tick);
1427 return std::make_pair(&new_image, std::make_pair(base->level, base->layer)); 1477 return std::make_pair(base->level, base->layer);
1428} 1478}
1429 1479
1430template <class P> 1480template <class P>
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 848a5d9ea..5a5b4179c 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -209,8 +209,11 @@ public:
209 /// Pop asynchronous downloads 209 /// Pop asynchronous downloads
210 void PopAsyncFlushes(); 210 void PopAsyncFlushes();
211 211
212 [[nodiscard]] std::optional<std::pair<Image*, std::pair<u32, u32>>> ObtainImage( 212 [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
213 const Tegra::DMA::ImageOperand& operand, bool mark_as_modified); 213
214 [[nodiscard]] std::pair<Image*, BufferImageCopy> DmaBufferImageCopy(
215 const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
216 const Tegra::DMA::ImageOperand& image_operand, ImageId image_id, bool modifies_image);
214 217
215 /// Return true when a CPU region is modified from the GPU 218 /// Return true when a CPU region is modified from the GPU
216 [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size); 219 [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
@@ -386,6 +389,9 @@ private:
386 /// Returns true if the current clear parameters clear the whole image of a given image view 389 /// Returns true if the current clear parameters clear the whole image of a given image view
387 [[nodiscard]] bool IsFullClear(ImageViewId id); 390 [[nodiscard]] bool IsFullClear(ImageViewId id);
388 391
392 [[nodiscard]] std::pair<u32, u32> PrepareDmaImage(ImageId dst_id, GPUVAddr base_addr,
393 bool mark_as_modified);
394
389 bool ImageCanRescale(ImageBase& image); 395 bool ImageCanRescale(ImageBase& image);
390 void InvalidateScale(Image& image); 396 void InvalidateScale(Image& image);
391 bool ScaleUp(Image& image); 397 bool ScaleUp(Image& image);