Diffstat (limited to 'src')
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.cpp | 194
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 34
-rw-r--r--  src/video_core/surface.h | 97
-rw-r--r--  src/video_core/texture_cache/surface_base.cpp | 39
-rw-r--r--  src/video_core/texture_cache/surface_base.h | 15
-rw-r--r--  src/video_core/texture_cache/surface_params.cpp | 28
-rw-r--r--  src/video_core/texture_cache/surface_params.h | 36
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 6
10 files changed, 176 insertions, 281 deletions
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index f424e3000..b64027f31 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -24,7 +24,6 @@ using Tegra::Texture::SwizzleSource;
 using VideoCore::MortonSwizzleMode;
 
 using VideoCore::Surface::PixelFormat;
-using VideoCore::Surface::SurfaceCompression;
 using VideoCore::Surface::SurfaceTarget;
 using VideoCore::Surface::SurfaceType;
 
@@ -37,96 +36,95 @@ namespace {
 
 struct FormatTuple {
     GLint internal_format;
-    GLenum format;
-    GLenum type;
-    bool compressed;
+    GLenum format = GL_NONE;
+    GLenum type = GL_NONE;
 };
 
 constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // ABGR8U
-    {GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, false}, // ABGR8S
-    {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, false}, // ABGR8UI
-    {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false}, // B5G6R5U
-    {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false}, // A2B10G10R10U
-    {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, false}, // A1B5G5R5U
-    {GL_R8, GL_RED, GL_UNSIGNED_BYTE, false}, // R8U
-    {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, false}, // R8UI
-    {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, false}, // RGBA16F
-    {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, false}, // RGBA16U
-    {GL_RGBA16_SNORM, GL_RGBA, GL_SHORT, false}, // RGBA16S
-    {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, false}, // RGBA16UI
-    {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, false}, // R11FG11FB10F
-    {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, false}, // RGBA32UI
-    {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1
-    {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23
-    {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45
-    {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN1
-    {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN2UNORM
-    {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, true}, // DXN2SNORM
-    {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U
-    {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_UF16
-    {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_SF16
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4
-    {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8
-    {GL_RGBA32F, GL_RGBA, GL_FLOAT, false}, // RGBA32F
-    {GL_RG32F, GL_RG, GL_FLOAT, false}, // RG32F
-    {GL_R32F, GL_RED, GL_FLOAT, false}, // R32F
-    {GL_R16F, GL_RED, GL_HALF_FLOAT, false}, // R16F
-    {GL_R16, GL_RED, GL_UNSIGNED_SHORT, false}, // R16U
-    {GL_R16_SNORM, GL_RED, GL_SHORT, false}, // R16S
-    {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, false}, // R16UI
-    {GL_R16I, GL_RED_INTEGER, GL_SHORT, false}, // R16I
-    {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, false}, // RG16
-    {GL_RG16F, GL_RG, GL_HALF_FLOAT, false}, // RG16F
-    {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, false}, // RG16UI
-    {GL_RG16I, GL_RG_INTEGER, GL_SHORT, false}, // RG16I
-    {GL_RG16_SNORM, GL_RG, GL_SHORT, false}, // RG16S
-    {GL_RGB32F, GL_RGB, GL_FLOAT, false}, // RGB32F
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // RGBA8_SRGB
-    {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, false}, // RG8U
-    {GL_RG8_SNORM, GL_RG, GL_BYTE, false}, // RG8S
-    {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, false}, // RG32UI
-    {GL_RGB16F, GL_RGBA, GL_HALF_FLOAT, false}, // RGBX16F
-    {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, false}, // R32UI
-    {GL_R32I, GL_RED_INTEGER, GL_INT, false}, // R32I
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4
-    {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8
+    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // ABGR8U
+    {GL_RGBA8_SNORM, GL_RGBA, GL_BYTE}, // ABGR8S
+    {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE}, // ABGR8UI
+    {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV}, // B5G6R5U
+    {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV}, // A2B10G10R10U
+    {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // A1B5G5R5U
+    {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R8U
+    {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE}, // R8UI
+    {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT}, // RGBA16F
+    {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT}, // RGBA16U
+    {GL_RGBA16_SNORM, GL_RGBA, GL_SHORT}, // RGBA16S
+    {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT}, // RGBA16UI
+    {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV}, // R11FG11FB10F
+    {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT}, // RGBA32UI
+    {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT}, // DXT1
+    {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT}, // DXT23
+    {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT}, // DXT45
+    {GL_COMPRESSED_RED_RGTC1}, // DXN1
+    {GL_COMPRESSED_RG_RGTC2}, // DXN2UNORM
+    {GL_COMPRESSED_SIGNED_RG_RGTC2}, // DXN2SNORM
+    {GL_COMPRESSED_RGBA_BPTC_UNORM}, // BC7U
+    {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT}, // BC6H_UF16
+    {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT}, // BC6H_SF16
+    {GL_COMPRESSED_RGBA_ASTC_4x4_KHR}, // ASTC_2D_4X4
+    {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE}, // BGRA8
+    {GL_RGBA32F, GL_RGBA, GL_FLOAT}, // RGBA32F
+    {GL_RG32F, GL_RG, GL_FLOAT}, // RG32F
+    {GL_R32F, GL_RED, GL_FLOAT}, // R32F
+    {GL_R16F, GL_RED, GL_HALF_FLOAT}, // R16F
+    {GL_R16, GL_RED, GL_UNSIGNED_SHORT}, // R16U
+    {GL_R16_SNORM, GL_RED, GL_SHORT}, // R16S
+    {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT}, // R16UI
+    {GL_R16I, GL_RED_INTEGER, GL_SHORT}, // R16I
+    {GL_RG16, GL_RG, GL_UNSIGNED_SHORT}, // RG16
+    {GL_RG16F, GL_RG, GL_HALF_FLOAT}, // RG16F
+    {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT}, // RG16UI
+    {GL_RG16I, GL_RG_INTEGER, GL_SHORT}, // RG16I
+    {GL_RG16_SNORM, GL_RG, GL_SHORT}, // RG16S
+    {GL_RGB32F, GL_RGB, GL_FLOAT}, // RGB32F
+    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // RGBA8_SRGB
+    {GL_RG8, GL_RG, GL_UNSIGNED_BYTE}, // RG8U
+    {GL_RG8_SNORM, GL_RG, GL_BYTE}, // RG8S
+    {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT}, // RG32UI
+    {GL_RGB16F, GL_RGBA, GL_HALF_FLOAT}, // RGBX16F
+    {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT}, // R32UI
+    {GL_R32I, GL_RED_INTEGER, GL_INT}, // R32I
+    {GL_COMPRESSED_RGBA_ASTC_8x8_KHR}, // ASTC_2D_8X8
+    {GL_COMPRESSED_RGBA_ASTC_8x5_KHR}, // ASTC_2D_8X5
+    {GL_COMPRESSED_RGBA_ASTC_5x4_KHR}, // ASTC_2D_5X4
+    {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE}, // BGRA8
     // Compressed sRGB formats
-    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1_SRGB
-    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23_SRGB
-    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45_SRGB
-    {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U_SRGB
-    {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV, false}, // R4G4B4A4U
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4_SRGB
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8_SRGB
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5_SRGB
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5_SRGB
-    {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, false}, // E5B9G9R9F
+    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT}, // DXT1_SRGB
+    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT}, // DXT23_SRGB
+    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // DXT45_SRGB
+    {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7U_SRGB
+    {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // R4G4B4A4U
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR}, // ASTC_2D_5X4_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_5x5_KHR}, // ASTC_2D_5X5
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR}, // ASTC_2D_5X5_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_10x8_KHR}, // ASTC_2D_10X8
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR}, // ASTC_2D_10X8_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_6x6_KHR}, // ASTC_2D_6X6
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR}, // ASTC_2D_6X6_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_10x10_KHR}, // ASTC_2D_10X10
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR}, // ASTC_2D_10X10_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_12x12_KHR}, // ASTC_2D_12X12
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR}, // ASTC_2D_12X12_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_8x6_KHR}, // ASTC_2D_8X6
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR}, // ASTC_2D_8X6_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_6x5_KHR}, // ASTC_2D_6X5
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR}, // ASTC_2D_6X5_SRGB
+    {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV}, // E5B9G9R9F
 
     // Depth formats
-    {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, false}, // Z32F
-    {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, false}, // Z16
+    {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT}, // Z32F
+    {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // Z16
 
     // DepthStencil formats
-    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // Z24S8
-    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // S8Z24
-    {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, false}, // Z32FS8
+    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // Z24S8
+    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // S8Z24
+    {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV}, // Z32FS8
 }};
 
 const FormatTuple& GetFormatTuple(PixelFormat pixel_format) {
@@ -242,13 +240,14 @@ OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum inte
 
 } // Anonymous namespace
 
-CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params)
-    : VideoCommon::SurfaceBase<View>(gpu_addr, params) {
+CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
+                             bool is_astc_supported)
+    : VideoCommon::SurfaceBase<View>(gpu_addr, params, is_astc_supported) {
     const auto& tuple{GetFormatTuple(params.pixel_format)};
     internal_format = tuple.internal_format;
    format = tuple.format;
     type = tuple.type;
-    is_compressed = tuple.compressed;
+    is_compressed = !is_converted && params.IsCompressed();
     target = GetTextureTarget(params.target);
     texture = CreateTexture(params, target, internal_format, texture_buffer);
     DecorateSurfaceName();
@@ -264,7 +263,7 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
 
     if (params.IsBuffer()) {
         glGetNamedBufferSubData(texture_buffer.handle, 0,
-                                static_cast<GLsizeiptr>(params.GetHostSizeInBytes()),
+                                static_cast<GLsizeiptr>(params.GetHostSizeInBytes(false)),
                                 staging_buffer.data());
         return;
     }
@@ -272,9 +271,10 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
     SCOPE_EXIT({ glPixelStorei(GL_PACK_ROW_LENGTH, 0); });
 
     for (u32 level = 0; level < params.emulated_levels; ++level) {
-        glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+        glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level, is_converted)));
         glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
-        const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level);
+        const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
+
         u8* const mip_data = staging_buffer.data() + mip_offset;
         const GLsizei size = static_cast<GLsizei>(params.GetHostMipmapSize(level));
         if (is_compressed) {
@@ -294,14 +294,10 @@ void CachedSurface::UploadTexture(const std::vector<u8>& staging_buffer) {
 }
 
 void CachedSurface::UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer) {
-    glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+    glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level, is_converted)));
     glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
 
-    auto compression_type = params.GetCompressionType();
-
-    const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
-                                       ? params.GetConvertedMipmapOffset(level)
-                                       : params.GetHostMipmapLevelOffset(level);
+    const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
     const u8* buffer{staging_buffer.data() + mip_offset};
     if (is_compressed) {
         const auto image_size{static_cast<GLsizei>(params.GetHostMipmapSize(level))};
@@ -482,7 +478,7 @@ OGLTextureView CachedSurfaceView::CreateTextureView() const {
 TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
                                        VideoCore::RasterizerInterface& rasterizer,
                                        const Device& device, StateTracker& state_tracker)
-    : TextureCacheBase{system, rasterizer}, state_tracker{state_tracker} {
+    : TextureCacheBase{system, rasterizer, device.HasASTC()}, state_tracker{state_tracker} {
     src_framebuffer.Create();
     dst_framebuffer.Create();
 }
@@ -490,7 +486,7 @@ TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
 TextureCacheOpenGL::~TextureCacheOpenGL() = default;
 
 Surface TextureCacheOpenGL::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
-    return std::make_shared<CachedSurface>(gpu_addr, params);
+    return std::make_shared<CachedSurface>(gpu_addr, params, is_astc_supported);
 }
 
 void TextureCacheOpenGL::ImageCopy(Surface& src_surface, Surface& dst_surface,
@@ -596,7 +592,7 @@ void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface)
 
     glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
 
-    if (source_format.compressed) {
+    if (src_surface->IsCompressed()) {
        glGetCompressedTextureImage(src_surface->GetTexture(), 0, static_cast<GLsizei>(source_size),
                                     nullptr);
     } else {
@@ -610,7 +606,7 @@ void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface)
     const GLsizei width = static_cast<GLsizei>(dst_params.width);
     const GLsizei height = static_cast<GLsizei>(dst_params.height);
     const GLsizei depth = static_cast<GLsizei>(dst_params.depth);
-    if (dest_format.compressed) {
+    if (dst_surface->IsCompressed()) {
         LOG_CRITICAL(HW_GPU, "Compressed buffer copy is unimplemented!");
         UNREACHABLE();
     } else {
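The trimmed FormatTuple above relies on C++ aggregate initialization with default member initializers: compressed entries now list only their internal format, and the unused format/type members fall back to GL_NONE. A minimal, self-contained illustration of that pattern follows; it uses plain stand-in types and values, not the actual OpenGL enums or the yuzu table.

#include <array>

// Stand-in for the real FormatTuple; the sentinel 0 plays the role of GL_NONE.
struct Tuple {
    int internal_format;
    unsigned format = 0; // defaulted, so compressed entries may omit it
    unsigned type = 0;   // defaulted as well
};

constexpr std::array<Tuple, 2> table{{
    {1, 2, 3}, // uncompressed entry: all three fields given explicitly
    {4},       // compressed entry: only internal_format, the rest default to the sentinel
}};
static_assert(table[1].format == 0 && table[1].type == 0);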
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index 6658c6ffd..02d9981a1 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -37,7 +37,7 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
     friend CachedSurfaceView;
 
 public:
-    explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params);
+    explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params, bool is_astc_supported);
     ~CachedSurface();
 
     void UploadTexture(const std::vector<u8>& staging_buffer) override;
@@ -51,6 +51,10 @@ public:
         return texture.handle;
     }
 
+    bool IsCompressed() const {
+        return is_compressed;
+    }
+
 protected:
     void DecorateSurfaceName() override;
 
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 28d2fbc4f..0f6f68a49 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -237,8 +237,6 @@ void VKDevice::ReportLoss() const {
 
 bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features,
                                       const vk::DispatchLoaderDynamic& dldi) const {
-    // Disable for now to avoid converting ASTC twice.
-    return false;
     static constexpr std::array astc_formats = {
         vk::Format::eAstc4x4SrgbBlock, vk::Format::eAstc8x8SrgbBlock,
         vk::Format::eAstc8x5SrgbBlock, vk::Format::eAstc5x4SrgbBlock,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 26175921b..5b9b39670 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -35,7 +35,6 @@ using VideoCore::MortonSwizzleMode;
 
 using Tegra::Texture::SwizzleSource;
 using VideoCore::Surface::PixelFormat;
-using VideoCore::Surface::SurfaceCompression;
 using VideoCore::Surface::SurfaceTarget;
 
 namespace {
@@ -96,9 +95,10 @@ vk::ImageViewType GetImageViewType(SurfaceTarget target) {
     return {};
 }
 
-UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params) {
+UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
+                          std::size_t host_memory_size) {
     // TODO(Rodrigo): Move texture buffer creation to the buffer cache
-    const vk::BufferCreateInfo buffer_ci({}, params.GetHostSizeInBytes(),
+    const vk::BufferCreateInfo buffer_ci({}, host_memory_size,
                                          vk::BufferUsageFlagBits::eUniformTexelBuffer |
                                              vk::BufferUsageFlagBits::eTransferSrc |
                                              vk::BufferUsageFlagBits::eTransferDst,
@@ -110,12 +110,13 @@ UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params) {
 
 vk::BufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
                                                       const SurfaceParams& params,
-                                                      vk::Buffer buffer) {
+                                                      vk::Buffer buffer,
+                                                      std::size_t host_memory_size) {
     ASSERT(params.IsBuffer());
 
     const auto format =
         MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
-    return vk::BufferViewCreateInfo({}, buffer, format, 0, params.GetHostSizeInBytes());
+    return vk::BufferViewCreateInfo({}, buffer, format, 0, host_memory_size);
 }
 
 vk::ImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
@@ -169,14 +170,15 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
                              VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
                              VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
                             GPUVAddr gpu_addr, const SurfaceParams& params)
-    : SurfaceBase<View>{gpu_addr, params}, system{system}, device{device},
-      resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
-      staging_pool{staging_pool} {
+    : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, system{system},
+      device{device}, resource_manager{resource_manager},
+      memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
     if (params.IsBuffer()) {
-        buffer = CreateBuffer(device, params);
+        buffer = CreateBuffer(device, params, host_memory_size);
         commit = memory_manager.Commit(*buffer, false);
 
-        const auto buffer_view_ci = GenerateBufferViewCreateInfo(device, params, *buffer);
+        const auto buffer_view_ci =
+            GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size);
         format = buffer_view_ci.format;
 
         const auto dev = device.GetLogical();
@@ -255,7 +257,7 @@ void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
     std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
 
     scheduler.Record([src_buffer = *src_buffer.handle, dst_buffer = *buffer,
                       size = params.GetHostSizeInBytes()](auto cmdbuf, auto& dld) {
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index ae8817465..e0acd44d3 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -504,103 +504,6 @@ static constexpr u32 GetBytesPerPixel(PixelFormat pixel_format) {
     return GetFormatBpp(pixel_format) / CHAR_BIT;
 }
 
-enum class SurfaceCompression {
-    None,       // Not compressed
-    Compressed, // Texture is compressed
-    Converted,  // Texture is converted before upload or after download
-    Rearranged, // Texture is swizzled before upload or after download
-};
-
-constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table = {{
-    SurfaceCompression::None,       // ABGR8U
-    SurfaceCompression::None,       // ABGR8S
-    SurfaceCompression::None,       // ABGR8UI
-    SurfaceCompression::None,       // B5G6R5U
-    SurfaceCompression::None,       // A2B10G10R10U
-    SurfaceCompression::None,       // A1B5G5R5U
-    SurfaceCompression::None,       // R8U
-    SurfaceCompression::None,       // R8UI
-    SurfaceCompression::None,       // RGBA16F
-    SurfaceCompression::None,       // RGBA16U
-    SurfaceCompression::None,       // RGBA16S
-    SurfaceCompression::None,       // RGBA16UI
-    SurfaceCompression::None,       // R11FG11FB10F
-    SurfaceCompression::None,       // RGBA32UI
-    SurfaceCompression::Compressed, // DXT1
-    SurfaceCompression::Compressed, // DXT23
-    SurfaceCompression::Compressed, // DXT45
-    SurfaceCompression::Compressed, // DXN1
-    SurfaceCompression::Compressed, // DXN2UNORM
-    SurfaceCompression::Compressed, // DXN2SNORM
-    SurfaceCompression::Compressed, // BC7U
-    SurfaceCompression::Compressed, // BC6H_UF16
-    SurfaceCompression::Compressed, // BC6H_SF16
-    SurfaceCompression::Converted,  // ASTC_2D_4X4
-    SurfaceCompression::None,       // BGRA8
-    SurfaceCompression::None,       // RGBA32F
-    SurfaceCompression::None,       // RG32F
-    SurfaceCompression::None,       // R32F
-    SurfaceCompression::None,       // R16F
-    SurfaceCompression::None,       // R16U
-    SurfaceCompression::None,       // R16S
-    SurfaceCompression::None,       // R16UI
-    SurfaceCompression::None,       // R16I
-    SurfaceCompression::None,       // RG16
-    SurfaceCompression::None,       // RG16F
-    SurfaceCompression::None,       // RG16UI
-    SurfaceCompression::None,       // RG16I
-    SurfaceCompression::None,       // RG16S
-    SurfaceCompression::None,       // RGB32F
-    SurfaceCompression::None,       // RGBA8_SRGB
-    SurfaceCompression::None,       // RG8U
-    SurfaceCompression::None,       // RG8S
-    SurfaceCompression::None,       // RG32UI
-    SurfaceCompression::None,       // RGBX16F
-    SurfaceCompression::None,       // R32UI
-    SurfaceCompression::None,       // R32I
-    SurfaceCompression::Converted,  // ASTC_2D_8X8
-    SurfaceCompression::Converted,  // ASTC_2D_8X5
-    SurfaceCompression::Converted,  // ASTC_2D_5X4
-    SurfaceCompression::None,       // BGRA8_SRGB
-    SurfaceCompression::Compressed, // DXT1_SRGB
-    SurfaceCompression::Compressed, // DXT23_SRGB
-    SurfaceCompression::Compressed, // DXT45_SRGB
-    SurfaceCompression::Compressed, // BC7U_SRGB
-    SurfaceCompression::None,       // R4G4B4A4U
-    SurfaceCompression::Converted,  // ASTC_2D_4X4_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_8X8_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_8X5_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_5X4_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_5X5
-    SurfaceCompression::Converted,  // ASTC_2D_5X5_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_10X8
-    SurfaceCompression::Converted,  // ASTC_2D_10X8_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_6X6
-    SurfaceCompression::Converted,  // ASTC_2D_6X6_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_10X10
-    SurfaceCompression::Converted,  // ASTC_2D_10X10_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_12X12
-    SurfaceCompression::Converted,  // ASTC_2D_12X12_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_8X6
-    SurfaceCompression::Converted,  // ASTC_2D_8X6_SRGB
-    SurfaceCompression::Converted,  // ASTC_2D_6X5
-    SurfaceCompression::Converted,  // ASTC_2D_6X5_SRGB
-    SurfaceCompression::None,       // E5B9G9R9F
-    SurfaceCompression::None,       // Z32F
-    SurfaceCompression::None,       // Z16
-    SurfaceCompression::None,       // Z24S8
-    SurfaceCompression::Rearranged, // S8Z24
-    SurfaceCompression::None,       // Z32FS8
-}};
-
-constexpr SurfaceCompression GetFormatCompressionType(PixelFormat format) {
-    if (format == PixelFormat::Invalid) {
-        return SurfaceCompression::None;
-    }
-    DEBUG_ASSERT(static_cast<std::size_t>(format) < compression_type_table.size());
-    return compression_type_table[static_cast<std::size_t>(format)];
-}
-
 SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_type);
 
 bool SurfaceTargetIsLayered(SurfaceTarget target);
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 002df414f..6fe815135 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -18,15 +18,20 @@ MICROPROFILE_DEFINE(GPU_Flush_Texture, "GPU", "Texture Flush", MP_RGB(128, 192,
 
 using Tegra::Texture::ConvertFromGuestToHost;
 using VideoCore::MortonSwizzleMode;
-using VideoCore::Surface::SurfaceCompression;
+using VideoCore::Surface::IsPixelFormatASTC;
+using VideoCore::Surface::PixelFormat;
 
 StagingCache::StagingCache() = default;
 
 StagingCache::~StagingCache() = default;
 
-SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
-    : params{params}, host_memory_size{params.GetHostSizeInBytes()}, gpu_addr{gpu_addr},
-      mipmap_sizes(params.num_levels), mipmap_offsets(params.num_levels) {
+SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params,
+                                 bool is_astc_supported)
+    : params{params}, gpu_addr{gpu_addr}, mipmap_sizes(params.num_levels),
+      mipmap_offsets(params.num_levels) {
+    is_converted = IsPixelFormatASTC(params.pixel_format) && !is_astc_supported;
+    host_memory_size = params.GetHostSizeInBytes(is_converted);
+
     std::size_t offset = 0;
     for (u32 level = 0; level < params.num_levels; ++level) {
         const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
@@ -164,7 +169,7 @@ void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const Surf
 
     std::size_t guest_offset{mipmap_offsets[level]};
     if (params.is_layered) {
-        std::size_t host_offset{0};
+        std::size_t host_offset = 0;
         const std::size_t guest_stride = layer_size;
         const std::size_t host_stride = params.GetHostLayerSize(level);
         for (u32 layer = 0; layer < params.depth; ++layer) {
@@ -206,7 +211,7 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
         ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
                    params.block_width, static_cast<u32>(params.target));
         for (u32 level = 0; level < params.num_levels; ++level) {
-            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level, false)};
             SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
                         staging_buffer.data() + host_offset, level);
         }
@@ -219,7 +224,7 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
         const u32 height{(params.height + block_height - 1) / block_height};
         const u32 copy_size{width * bpp};
         if (params.pitch == copy_size) {
-            std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes());
+            std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes(false));
         } else {
             const u8* start{host_ptr};
             u8* write_to{staging_buffer.data()};
@@ -231,19 +236,15 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
             }
         }
 
-    auto compression_type = params.GetCompressionType();
-    if (compression_type == SurfaceCompression::None ||
-        compression_type == SurfaceCompression::Compressed)
+    if (!is_converted && params.pixel_format != PixelFormat::S8Z24) {
         return;
+    }
 
-    for (u32 level_up = params.num_levels; level_up > 0; --level_up) {
-        const u32 level = level_up - 1;
-        const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level)};
-        const std::size_t out_host_offset = compression_type == SurfaceCompression::Rearranged
-                                                ? in_host_offset
-                                                : params.GetConvertedMipmapOffset(level);
-        u8* in_buffer = staging_buffer.data() + in_host_offset;
-        u8* out_buffer = staging_buffer.data() + out_host_offset;
+    for (u32 level = params.num_levels; level--;) {
+        const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level, false)};
+        const std::size_t out_host_offset{params.GetHostMipmapLevelOffset(level, is_converted)};
+        u8* const in_buffer = staging_buffer.data() + in_host_offset;
+        u8* const out_buffer = staging_buffer.data() + out_host_offset;
         ConvertFromGuestToHost(in_buffer, out_buffer, params.pixel_format,
                                params.GetMipWidth(level), params.GetMipHeight(level),
                                params.GetMipDepth(level), true, true);
@@ -273,7 +274,7 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
     if (params.is_tiled) {
         ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
         for (u32 level = 0; level < params.num_levels; ++level) {
-            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level, false)};
             SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
                         staging_buffer.data() + host_offset, level);
         }
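The net effect of the surface_base.cpp changes is that conversion is now a per-surface decision made once in the constructor, instead of a per-format table lookup. A rough, free-standing sketch of that decision is shown below; it takes simple value parameters instead of SurfaceParams, and all names are invented for illustration.

#include <cstddef>

// Hypothetical sketch mirroring "is_converted = IsPixelFormatASTC(pixel_format) && !is_astc_supported;"
// and "host_memory_size = params.GetHostSizeInBytes(is_converted);" from the constructor above.
struct ConversionInfo {
    bool is_converted;           // true -> texture is decoded to RGBA8 on the CPU before upload
    std::size_t host_size_bytes; // size of the host staging data for the whole surface
};

ConversionInfo DecideConversion(bool is_astc_format, bool device_supports_astc,
                                std::size_t native_host_size,
                                std::size_t converted_rgba8_size) {
    // Only convert ASTC data when the host GPU cannot sample ASTC natively.
    const bool is_converted = is_astc_format && !device_supports_astc;
    // Converted surfaces are stored as RGBA8, so their host size differs from the native size.
    return {is_converted, is_converted ? converted_rgba8_size : native_host_size};
}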
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index 5f79bb0aa..d7882a031 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -131,6 +131,10 @@ public:
         return !params.is_tiled;
     }
 
+    bool IsConverted() const {
+        return is_converted;
+    }
+
     bool MatchFormat(VideoCore::Surface::PixelFormat pixel_format) const {
         return params.pixel_format == pixel_format;
     }
@@ -160,7 +164,8 @@ public:
     }
 
 protected:
-    explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params);
+    explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params,
+                             bool is_astc_supported);
     ~SurfaceBaseImpl() = default;
 
     virtual void DecorateSurfaceName() = 0;
@@ -168,12 +173,13 @@ protected:
     const SurfaceParams params;
     std::size_t layer_size;
     std::size_t guest_memory_size;
-    const std::size_t host_memory_size;
+    std::size_t host_memory_size;
     GPUVAddr gpu_addr{};
     CacheAddr cache_addr{};
     CacheAddr cache_addr_end{};
     VAddr cpu_addr{};
     bool is_continuous{};
+    bool is_converted{};
 
     std::vector<std::size_t> mipmap_sizes;
     std::vector<std::size_t> mipmap_offsets;
@@ -288,8 +294,9 @@ public:
     }
 
 protected:
-    explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params)
-        : SurfaceBaseImpl(gpu_addr, params) {}
+    explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params,
+                         bool is_astc_supported)
+        : SurfaceBaseImpl(gpu_addr, params, is_astc_supported) {}
 
     ~SurfaceBase() = default;
 
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 9931c5ef7..47b2aafbd 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -309,28 +309,26 @@ std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
     return offset;
 }
 
-std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
+std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level, bool is_converted) const {
     std::size_t offset = 0;
-    for (u32 i = 0; i < level; i++) {
-        offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
-    }
-    return offset;
-}
-
-std::size_t SurfaceParams::GetConvertedMipmapOffset(u32 level) const {
-    std::size_t offset = 0;
-    for (u32 i = 0; i < level; i++) {
-        offset += GetConvertedMipmapSize(i);
+    if (is_converted) {
+        for (u32 i = 0; i < level; ++i) {
+            offset += GetConvertedMipmapSize(i) * GetNumLayers();
+        }
+    } else {
+        for (u32 i = 0; i < level; ++i) {
+            offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
+        }
     }
     return offset;
 }
 
 std::size_t SurfaceParams::GetConvertedMipmapSize(u32 level) const {
     constexpr std::size_t rgba8_bpp = 4ULL;
-    const std::size_t width_t = GetMipWidth(level);
-    const std::size_t height_t = GetMipHeight(level);
-    const std::size_t depth_t = is_layered ? depth : GetMipDepth(level);
-    return width_t * height_t * depth_t * rgba8_bpp;
+    const std::size_t mip_width = GetMipWidth(level);
+    const std::size_t mip_height = GetMipHeight(level);
+    const std::size_t mip_depth = is_layered ? 1 : GetMipDepth(level);
+    return mip_width * mip_height * mip_depth * rgba8_bpp;
 }
 
 std::size_t SurfaceParams::GetLayerSize(bool as_host_size, bool uncompressed) const {
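Since converted surfaces are always stored as RGBA8 (4 bytes per texel), the host offset of a level is just a running sum of width * height * depth * 4 over the lower levels, scaled by the layer count. A small stand-alone sketch of that arithmetic follows; mip extents are passed in directly, and the helper names are invented rather than taken from SurfaceParams.

#include <cstddef>
#include <vector>

struct MipExtent {
    std::size_t width, height, depth;
};

// RGBA8 size of one layer of one level, analogous to GetConvertedMipmapSize().
std::size_t ConvertedMipSize(const MipExtent& e) {
    constexpr std::size_t rgba8_bpp = 4;
    return e.width * e.height * e.depth * rgba8_bpp;
}

// Offset of `level` in the converted staging buffer, analogous to the is_converted
// branch of GetHostMipmapLevelOffset().
std::size_t ConvertedMipOffset(const std::vector<MipExtent>& mips, std::size_t level,
                               std::size_t num_layers) {
    std::size_t offset = 0;
    for (std::size_t i = 0; i < level; ++i) {
        offset += ConvertedMipSize(mips[i]) * num_layers;
    }
    return offset;
}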
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
index 995cc3818..24957df8d 100644
--- a/src/video_core/texture_cache/surface_params.h
+++ b/src/video_core/texture_cache/surface_params.h
@@ -20,8 +20,6 @@ namespace VideoCommon {
 
 class FormatLookupTable;
 
-using VideoCore::Surface::SurfaceCompression;
-
 class SurfaceParams {
 public:
     /// Creates SurfaceCachedParams from a texture configuration.
@@ -67,16 +65,14 @@ public:
         return GetInnerMemorySize(false, false, false);
     }
 
-    std::size_t GetHostSizeInBytes() const {
-        std::size_t host_size_in_bytes;
-        if (GetCompressionType() == SurfaceCompression::Converted) {
-            // ASTC is uncompressed in software, in emulated as RGBA8
-            host_size_in_bytes = 0;
-            for (u32 level = 0; level < num_levels; ++level) {
-                host_size_in_bytes += GetConvertedMipmapSize(level);
-            }
-        } else {
-            host_size_in_bytes = GetInnerMemorySize(true, false, false);
+    std::size_t GetHostSizeInBytes(bool is_converted) const {
+        if (!is_converted) {
+            return GetInnerMemorySize(true, false, false);
+        }
+        // ASTC is uncompressed in software, in emulated as RGBA8
+        std::size_t host_size_in_bytes = 0;
+        for (u32 level = 0; level < num_levels; ++level) {
+            host_size_in_bytes += GetConvertedMipmapSize(level) * GetNumLayers();
         }
         return host_size_in_bytes;
     }
@@ -107,9 +103,8 @@ public:
     u32 GetMipBlockDepth(u32 level) const;
 
     /// Returns the best possible row/pitch alignment for the surface.
-    u32 GetRowAlignment(u32 level) const {
-        const u32 bpp =
-            GetCompressionType() == SurfaceCompression::Converted ? 4 : GetBytesPerPixel();
+    u32 GetRowAlignment(u32 level, bool is_converted) const {
+        const u32 bpp = is_converted ? 4 : GetBytesPerPixel();
         return 1U << Common::CountTrailingZeroes32(GetMipWidth(level) * bpp);
     }
 
@@ -117,11 +112,7 @@ public:
     std::size_t GetGuestMipmapLevelOffset(u32 level) const;
 
     /// Returns the offset in bytes in host memory (linear) of a given mipmap level.
-    std::size_t GetHostMipmapLevelOffset(u32 level) const;
-
-    /// Returns the offset in bytes in host memory (linear) of a given mipmap level
-    /// for a texture that is converted in host gpu.
-    std::size_t GetConvertedMipmapOffset(u32 level) const;
+    std::size_t GetHostMipmapLevelOffset(u32 level, bool is_converted) const;
 
     /// Returns the size in bytes in guest memory of a given mipmap level.
     std::size_t GetGuestMipmapSize(u32 level) const {
@@ -196,11 +187,6 @@ public:
                pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
     }
 
-    /// Returns how the compression should be handled for this texture.
-    SurfaceCompression GetCompressionType() const {
-        return VideoCore::Surface::GetFormatCompressionType(pixel_format);
-    }
-
     /// Returns is the surface is a TextureBuffer type of surface.
     bool IsBuffer() const {
         return target == VideoCore::Surface::SurfaceTarget::TextureBuffer;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 6cdbe63d0..c8f8d659d 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -289,8 +289,9 @@ public:
     }
 
 protected:
-    TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
-        : system{system}, rasterizer{rasterizer} {
+    explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+                          bool is_astc_supported)
+        : system{system}, is_astc_supported{is_astc_supported}, rasterizer{rasterizer} {
         for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
             SetEmptyColorBuffer(i);
         }
@@ -381,6 +382,7 @@ protected:
     }
 
     Core::System& system;
+    const bool is_astc_supported;
 
 private:
     enum class RecycleStrategy : u32 {