commit     8a3411b417f76db786b1d3cfffbd90926abb20ca (patch)
author     2022-03-27 05:05:57 +0200
committer  2023-03-05 12:18:00 +0100
tree       383070bd0d4a33189f38423ceea9a5692d38ba09 /src/video_core/renderer_vulkan
parent     Merge pull request #9884 from liamwhite/service-cleanup (diff)
Engines: Implement Accelerate DMA Texture.
Diffstat (limited to 'src/video_core/renderer_vulkan')
 src/video_core/renderer_vulkan/vk_rasterizer.cpp    | 234 +++++++++++++++++++-
 src/video_core/renderer_vulkan/vk_rasterizer.h      |  11 +-
 src/video_core/renderer_vulkan/vk_texture_cache.cpp |  14 ++++-
 3 files changed, 252 insertions(+), 7 deletions(-)
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 719edbcfb..f085d53a1 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -172,7 +172,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
       pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
                      render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
-      query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache},
+      query_cache{*this, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
       wfi_event(device.GetLogical().CreateEvent()) {
     scheduler.SetQueryCache(query_cache);
@@ -756,7 +756,9 @@ void RasterizerVulkan::FlushWork() {
     draw_counter = 0;
 }
 
-AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
+AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_, TextureCache& texture_cache_,
+                             Scheduler& scheduler_)
+    : buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, scheduler{scheduler_} {}
 
 bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) {
     std::scoped_lock lock{buffer_cache.mutex};
@@ -768,6 +770,234 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
     return buffer_cache.DMACopy(src_address, dest_address, amount);
 }
 
+bool AccelerateDMA::ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info,
+                                  const Tegra::DMA::ImageOperand& src,
+                                  const Tegra::DMA::BufferOperand& dst) {
+    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    auto query_image = texture_cache.ObtainImage(src, false);
+    if (!query_image) {
+        return false;
+    }
+    auto* image = query_image->first;
+    auto [level, base] = query_image->second;
+    const u32 buffer_size = static_cast<u32>(dst.pitch * dst.height);
+    const auto [buffer, offset] = buffer_cache.ObtainBuffer(
+        dst.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
+        VideoCommon::ObtainBufferOperation::MarkAsWritten);
+
+    const bool is_rescaled = image->IsRescaled();
+    if (is_rescaled) {
+        image->ScaleDown();
+    }
+    VkImageSubresourceLayers subresources{
+        .aspectMask = image->AspectMask(),
+        .mipLevel = level,
+        .baseArrayLayer = base,
+        .layerCount = 1,
+    };
+    const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
+    const auto convert = [old_bpp = src.bytes_per_pixel, bpp](u32 value) {
+        return (old_bpp * value) / bpp;
+    };
+    const u32 base_x = convert(src.params.origin.x.Value());
+    const u32 base_y = src.params.origin.y.Value();
+    const u32 length_x = convert(copy_info.length_x);
+    const u32 length_y = copy_info.length_y;
+    VkOffset3D image_offset{
+        .x = static_cast<s32>(base_x),
+        .y = static_cast<s32>(base_y),
+        .z = 0,
+    };
+    VkExtent3D image_extent{
+        .width = length_x,
+        .height = length_y,
+        .depth = 1,
+    };
+    auto buff_info(dst);
+    buff_info.pitch = convert(dst.pitch);
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([src_image = image->Handle(), dst_buffer = buffer->Handle(),
+                      buffer_offset = offset, subresources, image_offset, image_extent,
+                      buff_info](vk::CommandBuffer cmdbuf) {
+        const std::array buffer_copy_info{
+            VkBufferImageCopy{
+                .bufferOffset = buffer_offset,
+                .bufferRowLength = buff_info.pitch,
+                .bufferImageHeight = buff_info.height,
+                .imageSubresource = subresources,
+                .imageOffset = image_offset,
+                .imageExtent = image_extent,
+            },
+        };
+        const VkImageSubresourceRange range{
+            .aspectMask = subresources.aspectMask,
+            .baseMipLevel = subresources.mipLevel,
+            .levelCount = 1,
+            .baseArrayLayer = subresources.baseArrayLayer,
+            .layerCount = 1,
+        };
+        static constexpr VkMemoryBarrier WRITE_BARRIER{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
+        };
+        const std::array pre_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_TRANSFER_WRITE_BIT,
+                .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+                .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = src_image,
+                .subresourceRange = range,
+            },
+        };
+        const std::array post_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = 0,
+                .dstAccessMask = 0,
+                .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = src_image,
+                .subresourceRange = range,
+            },
+        };
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               0, {}, {}, pre_barriers);
+        cmdbuf.CopyImageToBuffer(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer,
+                                 buffer_copy_info);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               0, WRITE_BARRIER, nullptr, post_barriers);
+    });
+    if (is_rescaled) {
+        image->ScaleUp(true);
+    }
+    return true;
+}
+
+bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
+                                  const Tegra::DMA::BufferOperand& src,
+                                  const Tegra::DMA::ImageOperand& dst) {
+    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    auto query_image = texture_cache.ObtainImage(dst, true);
+    if (!query_image) {
+        return false;
+    }
+    auto* image = query_image->first;
+    auto [level, base] = query_image->second;
+    const u32 buffer_size = static_cast<u32>(src.pitch * src.height);
+    const auto [buffer, offset] = buffer_cache.ObtainBuffer(
+        src.address, buffer_size, VideoCommon::ObtainBufferSynchronize::FullSynchronize,
+        VideoCommon::ObtainBufferOperation::DoNothing);
+    const bool is_rescaled = image->IsRescaled();
+    if (is_rescaled) {
+        image->ScaleDown(true);
+    }
+    VkImageSubresourceLayers subresources{
+        .aspectMask = image->AspectMask(),
+        .mipLevel = level,
+        .baseArrayLayer = base,
+        .layerCount = 1,
+    };
+    const u32 bpp = VideoCore::Surface::BytesPerBlock(image->info.format);
+    const auto convert = [old_bpp = dst.bytes_per_pixel, bpp](u32 value) {
+        return (old_bpp * value) / bpp;
+    };
+    const u32 base_x = convert(dst.params.origin.x.Value());
+    const u32 base_y = dst.params.origin.y.Value();
+    const u32 length_x = convert(copy_info.length_x);
+    const u32 length_y = copy_info.length_y;
+    VkOffset3D image_offset{
+        .x = static_cast<s32>(base_x),
+        .y = static_cast<s32>(base_y),
+        .z = 0,
+    };
+    VkExtent3D image_extent{
+        .width = length_x,
+        .height = length_y,
+        .depth = 1,
+    };
+    auto buff_info(src);
+    buff_info.pitch = convert(src.pitch);
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([dst_image = image->Handle(), src_buffer = buffer->Handle(),
+                      buffer_offset = offset, subresources, image_offset, image_extent,
+                      buff_info](vk::CommandBuffer cmdbuf) {
+        const std::array buffer_copy_info{
+            VkBufferImageCopy{
+                .bufferOffset = buffer_offset,
+                .bufferRowLength = buff_info.pitch,
+                .bufferImageHeight = buff_info.height,
+                .imageSubresource = subresources,
+                .imageOffset = image_offset,
+                .imageExtent = image_extent,
+            },
+        };
+        const VkImageSubresourceRange range{
+            .aspectMask = subresources.aspectMask,
+            .baseMipLevel = subresources.mipLevel,
+            .levelCount = 1,
+            .baseArrayLayer = subresources.baseArrayLayer,
+            .layerCount = 1,
+        };
+        static constexpr VkMemoryBarrier READ_BARRIER{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
+        };
+        const std::array pre_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                                 VK_ACCESS_TRANSFER_WRITE_BIT,
+                .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+                .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = dst_image,
+                .subresourceRange = range,
+            },
+        };
+        const std::array post_barriers{
+            VkImageMemoryBarrier{
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = 0,
+                .dstAccessMask = 0,
+                .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = dst_image,
+                .subresourceRange = range,
+            },
+        };
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               0, READ_BARRIER, {}, pre_barriers);
+        cmdbuf.CopyBufferToImage(src_buffer, dst_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, buffer_copy_info);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               0, nullptr, nullptr, post_barriers);
+    });
+    if (is_rescaled) {
+        image->ScaleUp();
+    }
+    return true;
+}
+
 void RasterizerVulkan::UpdateDynamicStates() {
     auto& regs = maxwell3d->regs;
     UpdateViewportsState(regs);
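Note: both new functions record the same barrier/copy/barrier pattern through yuzu's scheduler and vk::CommandBuffer wrapper. As a rough standalone restatement against the plain Vulkan C API (the helper name is hypothetical and the pre-barrier source access mask is simplified to VK_ACCESS_MEMORY_WRITE_BIT; this is a sketch, not the commit's code):

#include <vulkan/vulkan.h>

// Sketch: transition the image GENERAL -> TRANSFER_SRC_OPTIMAL, copy it into the
// buffer, then restore GENERAL and make the transfer write visible to later work.
void RecordImageToBufferCopy(VkCommandBuffer cmdbuf, VkImage image, VkBuffer buffer,
                             const VkBufferImageCopy& region,
                             const VkImageSubresourceRange& range) {
    VkImageMemoryBarrier pre{};
    pre.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    pre.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT; // simplified vs. the commit's mask
    pre.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    pre.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
    pre.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    pre.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    pre.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    pre.image = image;
    pre.subresourceRange = range;
    vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                         VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &pre);

    vkCmdCopyImageToBuffer(cmdbuf, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1,
                           &region);

    // Flush the transfer write and return the image to its steady-state layout.
    VkMemoryBarrier write{};
    write.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
    write.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    write.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;

    VkImageMemoryBarrier post = pre;
    post.srcAccessMask = 0;
    post.dstAccessMask = 0;
    post.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    post.newLayout = VK_IMAGE_LAYOUT_GENERAL;
    vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &write, 0, nullptr, 1, &post);
}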
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index a0508b57c..7746c5434 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -45,14 +45,23 @@ class StateTracker;
 
 class AccelerateDMA : public Tegra::Engines::AccelerateDMAInterface {
 public:
-    explicit AccelerateDMA(BufferCache& buffer_cache);
+    explicit AccelerateDMA(BufferCache& buffer_cache, TextureCache& texture_cache,
+                           Scheduler& scheduler);
 
     bool BufferCopy(GPUVAddr start_address, GPUVAddr end_address, u64 amount) override;
 
     bool BufferClear(GPUVAddr src_address, u64 amount, u32 value) override;
 
+    bool ImageToBuffer(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::ImageOperand& src,
+                       const Tegra::DMA::BufferOperand& dst) override;
+
+    bool BufferToImage(const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& src,
+                       const Tegra::DMA::ImageOperand& dst) override;
+
 private:
     BufferCache& buffer_cache;
+    TextureCache& texture_cache;
+    Scheduler& scheduler;
 };
 
 class RasterizerVulkan final : public VideoCore::RasterizerAccelerated,
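Note: the two new overrides follow the existing BufferCopy/BufferClear contract: they return false when the operand is not resident in the caches, which presumably lets the DMA engine fall back to its software copy path. A hypothetical caller sketch, with illustrative names that are not part of this commit:

// DispatchImageToBuffer and SoftwareImageToBuffer are hypothetical; the latter
// stands in for whatever CPU deswizzle path the DMA engine already has.
void DispatchImageToBuffer(Tegra::Engines::AccelerateDMAInterface& accelerate,
                           const Tegra::DMA::ImageCopy& info,
                           const Tegra::DMA::ImageOperand& src,
                           const Tegra::DMA::BufferOperand& dst) {
    if (accelerate.ImageToBuffer(info, src, dst)) {
        return; // GPU-side copy was recorded through the scheduler
    }
    SoftwareImageToBuffer(info, src, dst); // fall back to the CPU copy
}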
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 80adb70eb..8a204f93f 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -864,13 +864,19 @@ void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
     const VkImageAspectFlags src_aspect_mask = src.AspectMask();
     const VkImageAspectFlags dst_aspect_mask = dst.AspectMask();
 
-    std::ranges::transform(copies, vk_in_copies.begin(), [src_aspect_mask](const auto& copy) {
-        return MakeBufferImageCopy(copy, true, src_aspect_mask);
-    });
+    const auto bpp_in = BytesPerBlock(src.info.format) / DefaultBlockWidth(src.info.format);
+    const auto bpp_out = BytesPerBlock(dst.info.format) / DefaultBlockWidth(dst.info.format);
+    std::ranges::transform(copies, vk_in_copies.begin(),
+                           [src_aspect_mask, bpp_in, bpp_out](const auto& copy) {
+                               auto copy2 = copy;
+                               copy2.src_offset.x = (bpp_out * copy.src_offset.x) / bpp_in;
+                               copy2.extent.width = (bpp_out * copy.extent.width) / bpp_in;
+                               return MakeBufferImageCopy(copy2, true, src_aspect_mask);
+                           });
     std::ranges::transform(copies, vk_out_copies.begin(), [dst_aspect_mask](const auto& copy) {
         return MakeBufferImageCopy(copy, false, dst_aspect_mask);
     });
-    const u32 img_bpp = BytesPerBlock(src.info.format);
+    const u32 img_bpp = BytesPerBlock(dst.info.format);
     size_t total_size = 0;
     for (const auto& copy : copies) {
         total_size += copy.extent.width * copy.extent.height * copy.extent.depth * img_bpp;
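Note: the convert lambda in vk_rasterizer.cpp and the ReinterpretImage change above apply the same idea: X offsets and widths measured in one element size are re-expressed in another by scaling with the ratio of bytes per element. A self-contained worked example with illustrative sizes (not formats taken from the commit):

#include <cassert>
#include <cstdint>

// Re-express `value`, measured in old_bpp-byte elements, in bpp-byte elements.
// This mirrors (old_bpp * value) / bpp as used by both call sites.
constexpr std::uint32_t Rescale(std::uint32_t old_bpp, std::uint32_t bpp,
                                std::uint32_t value) {
    return (old_bpp * value) / bpp;
}

int main() {
    // Reinterpreting a 4-byte-per-texel view as an 8-byte-per-block format:
    // a 64-texel-wide copy spans 32 destination blocks.
    assert(Rescale(4, 8, 64) == 32);
    // Matching element sizes leave coordinates unchanged.
    assert(Rescale(4, 4, 64) == 64);
    return 0;
}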