 externals/CMakeLists.txt                                                                     |  15
 externals/sirit                                                                              |   0
 src/core/hle/kernel/process.cpp                                                              |   6
 src/core/hle/kernel/readable_event.cpp                                                       |   2
 src/core/hle/kernel/resource_limit.cpp                                                       |   6
 src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp                                       |  25
 src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h                                         |  18
 src/core/settings.h                                                                          |   1
 src/video_core/CMakeLists.txt                                                                |  11
 src/video_core/buffer_cache/buffer_block.h                                                   |  27
 src/video_core/buffer_cache/buffer_cache.h                                                   | 215
 src/video_core/engines/const_buffer_engine_interface.h                                       |   1
 src/video_core/engines/kepler_compute.cpp                                                    |   5
 src/video_core/engines/kepler_compute.h                                                      |   2
 src/video_core/engines/maxwell_3d.cpp                                                        |  22
 src/video_core/engines/maxwell_3d.h                                                          |  21
 src/video_core/macro/macro.cpp                                                               |  45
 src/video_core/macro/macro.h                                                                 | 128
 src/video_core/macro/macro_interpreter.cpp (renamed from src/video_core/macro_interpreter.cpp) | 198
 src/video_core/macro/macro_interpreter.h (renamed from src/video_core/macro_interpreter.h)   |  51
 src/video_core/macro/macro_jit_x64.cpp                                                       | 640
 src/video_core/macro/macro_jit_x64.h                                                         | 100
 src/video_core/rasterizer_cache.cpp                                                          |   7
 src/video_core/rasterizer_cache.h                                                            | 253
 src/video_core/renderer_opengl/gl_buffer_cache.cpp                                           |  21
 src/video_core/renderer_opengl/gl_buffer_cache.h                                             |  19
 src/video_core/renderer_opengl/gl_device.cpp                                                 |  15
 src/video_core/renderer_opengl/gl_rasterizer.cpp                                             | 136
 src/video_core/renderer_opengl/gl_rasterizer.h                                               |  21
 src/video_core/renderer_opengl/gl_shader_cache.cpp                                           |  87
 src/video_core/renderer_opengl/gl_shader_cache.h                                             |  51
 src/video_core/renderer_opengl/gl_shader_decompiler.cpp                                      |  14
 src/video_core/renderer_opengl/gl_shader_disk_cache.cpp                                      |  64
 src/video_core/renderer_opengl/gl_shader_disk_cache.h                                        |   1
 src/video_core/renderer_opengl/gl_stream_buffer.cpp                                          |   8
 src/video_core/renderer_opengl/gl_stream_buffer.h                                            |  11
 src/video_core/renderer_opengl/gl_texture_cache.cpp                                          |  53
 src/video_core/renderer_opengl/gl_texture_cache.h                                            |   6
 src/video_core/renderer_opengl/renderer_opengl.cpp                                           |   6
 src/video_core/renderer_vulkan/vk_buffer_cache.cpp                                           |  22
 src/video_core/renderer_vulkan/vk_buffer_cache.h                                             |  17
 src/video_core/renderer_vulkan/vk_compute_pipeline.cpp                                       |   3
 src/video_core/renderer_vulkan/vk_descriptor_pool.cpp                                        |   1
 src/video_core/renderer_vulkan/vk_pipeline_cache.cpp                                         |  88
 src/video_core/renderer_vulkan/vk_pipeline_cache.h                                           |  33
 src/video_core/renderer_vulkan/vk_rasterizer.cpp                                             |  82
 src/video_core/renderer_vulkan/vk_rasterizer.h                                               |  18
 src/video_core/renderer_vulkan/vk_shader_decompiler.cpp                                      | 137
 src/video_core/renderer_vulkan/vk_shader_decompiler.h                                        |   9
 src/video_core/renderer_vulkan/vk_stream_buffer.h                                            |   2
 src/video_core/renderer_vulkan/vk_texture_cache.cpp                                          |  80
 src/video_core/renderer_vulkan/vk_texture_cache.h                                            |  33
 src/video_core/shader/decode/texture.cpp                                                     |  55
 src/video_core/shader/node.h                                                                 |  75
 src/video_core/shader/node_helper.h                                                          |   2
 src/video_core/shader/registry.cpp                                                           |  20
 src/video_core/shader/registry.h                                                             |  35
 src/video_core/shader/shader_ir.h                                                            |  14
 src/video_core/shader/track.cpp                                                              |  78
 src/video_core/shader_cache.h                                                                | 228
 src/video_core/texture_cache/surface_base.cpp                                                |   7
 src/video_core/texture_cache/surface_base.h                                                  |  13
 src/video_core/texture_cache/surface_params.cpp                                              |  19
 src/video_core/texture_cache/texture_cache.h                                                 | 119
 src/yuzu/configuration/config.cpp                                                            |   3
 src/yuzu/configuration/configure_debug.cpp                                                   |   3
 src/yuzu/configuration/configure_debug.ui                                                    |  13
 src/yuzu_cmd/config.cpp                                                                      |   2
 src/yuzu_cmd/default_ini.h                                                                   |   2
 69 files changed, 2359 insertions(+), 1166 deletions(-)
diff --git a/externals/CMakeLists.txt b/externals/CMakeLists.txt
index df7a5e0a9..9be5b2780 100644
--- a/externals/CMakeLists.txt
+++ b/externals/CMakeLists.txt
@@ -4,6 +4,13 @@ list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/CMakeModules")
 list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/externals/find-modules")
 include(DownloadExternals)
 
+# xbyak
+if (ARCHITECTURE_x86 OR ARCHITECTURE_x86_64)
+    add_library(xbyak INTERFACE)
+    target_include_directories(xbyak SYSTEM INTERFACE ./xbyak/xbyak)
+    target_compile_definitions(xbyak INTERFACE XBYAK_NO_OP_NAMES)
+endif()
+
 # Catch
 add_library(catch-single-include INTERFACE)
 target_include_directories(catch-single-include INTERFACE catch/single_include)
@@ -75,11 +82,3 @@ if (ENABLE_WEB_SERVICE)
     target_compile_definitions(httplib INTERFACE -DCPPHTTPLIB_OPENSSL_SUPPORT)
     target_link_libraries(httplib INTERFACE OpenSSL::SSL OpenSSL::Crypto)
 endif()
-
-if (NOT TARGET xbyak)
-    if (ARCHITECTURE_x86 OR ARCHITECTURE_x86_64)
-        add_library(xbyak INTERFACE)
-        target_include_directories(xbyak SYSTEM INTERFACE ./xbyak/xbyak)
-        target_compile_definitions(xbyak INTERFACE XBYAK_NO_OP_NAMES)
-    endif()
-endif()
diff --git a/externals/sirit b/externals/sirit
-Subproject a62c5bbc100a5e5a31ea0ccc4a78d8fa6a4167c
+Subproject eefca56afd49379bdebc97ded8b480839f93088
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 36724569f..c4c5199b1 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -132,7 +132,8 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
 
 u64 Process::GetTotalPhysicalMemoryAvailable() const {
     const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
-                       page_table->GetTotalHeapSize() + image_size + main_thread_stack_size};
+                       page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
+                       main_thread_stack_size};
 
     if (capacity < memory_usage_capacity) {
         return capacity;
@@ -146,7 +147,8 @@ u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
 }
 
 u64 Process::GetTotalPhysicalMemoryUsed() const {
-    return image_size + main_thread_stack_size + page_table->GetTotalHeapSize();
+    return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
+           GetSystemResourceSize();
 }
 
 u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 00860fcbd..ef5e19e63 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -38,7 +38,7 @@ void ReadableEvent::Clear() {
 
 ResultCode ReadableEvent::Reset() {
     if (!is_signaled) {
-        LOG_ERROR(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
+        LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
                   GetObjectId(), GetTypeName(), GetName());
         return ERR_INVALID_STATE;
     }
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index d9beaa3a4..212e442f4 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -24,13 +24,9 @@ bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
     const std::size_t index{ResourceTypeToIndex(resource)};
 
     s64 new_value = current[index] + amount;
-    while (new_value > limit[index] && available[index] + amount <= limit[index]) {
+    if (new_value > limit[index] && available[index] + amount <= limit[index]) {
         // TODO(bunnei): This is wrong for multicore, we should wait the calling thread for timeout
         new_value = current[index] + amount;
-
-        if (timeout >= 0) {
-            break;
-        }
     }
 
     if (new_value <= limit[index]) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index cc2192e5c..0d913334e 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -25,7 +25,7 @@ u32 nvhost_ctrl_gpu::ioctl(Ioctl command, const std::vector<u8>& input,
     case IoctlCommand::IocGetCharacteristicsCommand:
         return GetCharacteristics(input, output, output2, version);
     case IoctlCommand::IocGetTPCMasksCommand:
-        return GetTPCMasks(input, output);
+        return GetTPCMasks(input, output, output2, version);
     case IoctlCommand::IocGetActiveSlotMaskCommand:
         return GetActiveSlotMask(input, output);
     case IoctlCommand::IocZcullGetCtxSizeCommand:
@@ -98,17 +98,22 @@ u32 nvhost_ctrl_gpu::GetCharacteristics(const std::vector<u8>& input, std::vecto
     return 0;
 }
 
-u32 nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output) {
+u32 nvhost_ctrl_gpu::GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output,
+                                 std::vector<u8>& output2, IoctlVersion version) {
     IoctlGpuGetTpcMasksArgs params{};
     std::memcpy(&params, input.data(), input.size());
-    LOG_INFO(Service_NVDRV, "called, mask=0x{:X}, mask_buf_addr=0x{:X}", params.mask_buf_size,
-             params.mask_buf_addr);
-    // TODO(ogniK): Confirm value on hardware
-    if (params.mask_buf_size)
-        params.tpc_mask_size = 4 * 1; // 4 * num_gpc
-    else
-        params.tpc_mask_size = 0;
-    std::memcpy(output.data(), &params, sizeof(params));
+    LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
+    if (params.mask_buffer_size != 0) {
+        params.tcp_mask = 3;
+    }
+
+    if (version == IoctlVersion::Version3) {
+        std::memcpy(output.data(), input.data(), output.size());
+        std::memcpy(output2.data(), &params.tcp_mask, output2.size());
+    } else {
+        std::memcpy(output.data(), &params, output.size());
+    }
+
     return 0;
 }
 
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 07b644ec5..ef60f72ce 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -92,16 +92,11 @@ private:
92 "IoctlCharacteristics is incorrect size"); 92 "IoctlCharacteristics is incorrect size");
93 93
94 struct IoctlGpuGetTpcMasksArgs { 94 struct IoctlGpuGetTpcMasksArgs {
95 /// [in] TPC mask buffer size reserved by userspace. Should be at least 95 u32_le mask_buffer_size{};
96 /// sizeof(__u32) * fls(gpc_mask) to receive TPC mask for each GPC. 96 INSERT_PADDING_WORDS(1);
97 /// [out] full kernel buffer size 97 u64_le mask_buffer_address{};
98 u32_le mask_buf_size; 98 u32_le tcp_mask{};
99 u32_le reserved; 99 INSERT_PADDING_WORDS(1);
100
101 /// [in] pointer to TPC mask buffer. It will receive one 32-bit TPC mask per GPC or 0 if
102 /// GPC is not enabled or not present. This parameter is ignored if mask_buf_size is 0.
103 u64_le mask_buf_addr;
104 u64_le tpc_mask_size; // Nintendo add this?
105 }; 100 };
106 static_assert(sizeof(IoctlGpuGetTpcMasksArgs) == 24, 101 static_assert(sizeof(IoctlGpuGetTpcMasksArgs) == 24,
107 "IoctlGpuGetTpcMasksArgs is incorrect size"); 102 "IoctlGpuGetTpcMasksArgs is incorrect size");
@@ -166,7 +161,8 @@ private:
166 161
167 u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output, 162 u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
168 std::vector<u8>& output2, IoctlVersion version); 163 std::vector<u8>& output2, IoctlVersion version);
169 u32 GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output); 164 u32 GetTPCMasks(const std::vector<u8>& input, std::vector<u8>& output, std::vector<u8>& output2,
165 IoctlVersion version);
170 u32 GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output); 166 u32 GetActiveSlotMask(const std::vector<u8>& input, std::vector<u8>& output);
171 u32 ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output); 167 u32 ZCullGetCtxSize(const std::vector<u8>& input, std::vector<u8>& output);
172 u32 ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output); 168 u32 ZCullGetInfo(const std::vector<u8>& input, std::vector<u8>& output);
diff --git a/src/core/settings.h b/src/core/settings.h
index 9d916d5cb..33e1e06cd 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -474,6 +474,7 @@ struct Values {
     bool reporting_services;
     bool quest_flag;
     bool disable_cpu_opt;
+    bool disable_macro_jit;
 
     // BCAT
     std::string bcat_backend;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index d6ee82836..39d5d8401 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -25,6 +25,12 @@ add_library(video_core STATIC
     engines/shader_bytecode.h
     engines/shader_header.h
     engines/shader_type.h
+    macro/macro.cpp
+    macro/macro.h
+    macro/macro_interpreter.cpp
+    macro/macro_interpreter.h
+    macro/macro_jit_x64.cpp
+    macro/macro_jit_x64.h
     fence_manager.h
     gpu.cpp
     gpu.h
@@ -36,8 +42,6 @@ add_library(video_core STATIC
     gpu_thread.h
     guest_driver.cpp
     guest_driver.h
-    macro_interpreter.cpp
-    macro_interpreter.h
     memory_manager.cpp
     memory_manager.h
     morton.cpp
@@ -45,8 +49,6 @@ add_library(video_core STATIC
     query_cache.h
     rasterizer_accelerated.cpp
     rasterizer_accelerated.h
-    rasterizer_cache.cpp
-    rasterizer_cache.h
    rasterizer_interface.h
     renderer_base.cpp
     renderer_base.h
@@ -89,6 +91,7 @@ add_library(video_core STATIC
     renderer_opengl/utils.h
     sampler_cache.cpp
     sampler_cache.h
+    shader_cache.h
     shader/decode/arithmetic.cpp
     shader/decode/arithmetic_immediate.cpp
     shader/decode/bfe.cpp
diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h
index e35ee0b67..e64170e66 100644
--- a/src/video_core/buffer_cache/buffer_block.h
+++ b/src/video_core/buffer_cache/buffer_block.h
@@ -15,48 +15,47 @@ namespace VideoCommon {
 
 class BufferBlock {
 public:
-    bool Overlaps(const VAddr start, const VAddr end) const {
+    bool Overlaps(VAddr start, VAddr end) const {
         return (cpu_addr < end) && (cpu_addr_end > start);
     }
 
-    bool IsInside(const VAddr other_start, const VAddr other_end) const {
+    bool IsInside(VAddr other_start, VAddr other_end) const {
         return cpu_addr <= other_start && other_end <= cpu_addr_end;
     }
 
-    std::size_t GetOffset(const VAddr in_addr) {
+    std::size_t Offset(VAddr in_addr) const {
         return static_cast<std::size_t>(in_addr - cpu_addr);
     }
 
-    VAddr GetCpuAddr() const {
+    VAddr CpuAddr() const {
         return cpu_addr;
     }
 
-    VAddr GetCpuAddrEnd() const {
+    VAddr CpuAddrEnd() const {
         return cpu_addr_end;
     }
 
-    void SetCpuAddr(const VAddr new_addr) {
+    void SetCpuAddr(VAddr new_addr) {
         cpu_addr = new_addr;
         cpu_addr_end = new_addr + size;
     }
 
-    std::size_t GetSize() const {
+    std::size_t Size() const {
         return size;
     }
 
-    void SetEpoch(u64 new_epoch) {
-        epoch = new_epoch;
+    u64 Epoch() const {
+        return epoch;
     }
 
-    u64 GetEpoch() {
-        return epoch;
+    void SetEpoch(u64 new_epoch) {
+        epoch = new_epoch;
     }
 
 protected:
-    explicit BufferBlock(VAddr cpu_addr, const std::size_t size) : size{size} {
-        SetCpuAddr(cpu_addr);
+    explicit BufferBlock(VAddr cpu_addr_, std::size_t size_) : size{size_} {
+        SetCpuAddr(cpu_addr_);
     }
-    ~BufferBlock() = default;
 
 private:
     VAddr cpu_addr{};
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index b88fce2cd..308d8b55f 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -30,12 +30,16 @@
30 30
31namespace VideoCommon { 31namespace VideoCommon {
32 32
33template <typename OwnerBuffer, typename BufferType, typename StreamBuffer> 33template <typename Buffer, typename BufferType, typename StreamBuffer>
34class BufferCache { 34class BufferCache {
35 using IntervalSet = boost::icl::interval_set<VAddr>; 35 using IntervalSet = boost::icl::interval_set<VAddr>;
36 using IntervalType = typename IntervalSet::interval_type; 36 using IntervalType = typename IntervalSet::interval_type;
37 using VectorMapInterval = boost::container::small_vector<MapInterval*, 1>; 37 using VectorMapInterval = boost::container::small_vector<MapInterval*, 1>;
38 38
39 static constexpr u64 WRITE_PAGE_BIT = 11;
40 static constexpr u64 BLOCK_PAGE_BITS = 21;
41 static constexpr u64 BLOCK_PAGE_SIZE = 1ULL << BLOCK_PAGE_BITS;
42
39public: 43public:
40 using BufferInfo = std::pair<BufferType, u64>; 44 using BufferInfo = std::pair<BufferType, u64>;
41 45
@@ -82,7 +86,7 @@ public:
82 } 86 }
83 } 87 }
84 88
85 OwnerBuffer block = GetBlock(cpu_addr, size); 89 Buffer* const block = GetBlock(cpu_addr, size);
86 MapInterval* const map = MapAddress(block, gpu_addr, cpu_addr, size); 90 MapInterval* const map = MapAddress(block, gpu_addr, cpu_addr, size);
87 if (!map) { 91 if (!map) {
88 return {GetEmptyBuffer(size), 0}; 92 return {GetEmptyBuffer(size), 0};
@@ -98,7 +102,7 @@ public:
98 } 102 }
99 } 103 }
100 104
101 return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))}; 105 return {block->Handle(), static_cast<u64>(block->Offset(cpu_addr))};
102 } 106 }
103 107
104 /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset. 108 /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset.
@@ -110,31 +114,37 @@ public:
         });
     }
 
-    void Map(std::size_t max_size) {
+    /// Prepares the buffer cache for data uploading
+    /// @param max_size Maximum number of bytes that will be uploaded
+    /// @return True when a stream buffer invalidation was required, false otherwise
+    bool Map(std::size_t max_size) {
         std::lock_guard lock{mutex};
 
+        bool invalidated;
         std::tie(buffer_ptr, buffer_offset_base, invalidated) = stream_buffer->Map(max_size, 4);
         buffer_offset = buffer_offset_base;
+
+        return invalidated;
     }
 
-    /// Finishes the upload stream, returns true on bindings invalidation.
-    bool Unmap() {
+    /// Finishes the upload stream
+    void Unmap() {
         std::lock_guard lock{mutex};
-
         stream_buffer->Unmap(buffer_offset - buffer_offset_base);
-        return std::exchange(invalidated, false);
     }
 
+    /// Function called at the end of each frame, intended for deferred operations
     void TickFrame() {
         ++epoch;
+
         while (!pending_destruction.empty()) {
             // Delay at least 4 frames before destruction.
             // This is due to triple buffering happening on some drivers.
             static constexpr u64 epochs_to_destroy = 5;
-            if (pending_destruction.front()->GetEpoch() + epochs_to_destroy > epoch) {
+            if (pending_destruction.front()->Epoch() + epochs_to_destroy > epoch) {
                 break;
             }
-            pending_destruction.pop_front();
+            pending_destruction.pop();
         }
     }
 
@@ -249,23 +259,21 @@ public:
 
 protected:
     explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
-                         std::unique_ptr<StreamBuffer> stream_buffer)
-        : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)},
-          stream_buffer_handle{this->stream_buffer->GetHandle()} {}
+                         std::unique_ptr<StreamBuffer> stream_buffer_)
+        : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer_)},
+          stream_buffer_handle{stream_buffer->Handle()} {}
 
     ~BufferCache() = default;
 
-    virtual BufferType ToHandle(const OwnerBuffer& storage) = 0;
-
-    virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
+    virtual std::shared_ptr<Buffer> CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
 
-    virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
+    virtual void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                                  const u8* data) = 0;
 
-    virtual void DownloadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
+    virtual void DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                                    u8* data) = 0;
 
-    virtual void CopyBlock(const OwnerBuffer& src, const OwnerBuffer& dst, std::size_t src_offset,
+    virtual void CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
                            std::size_t dst_offset, std::size_t size) = 0;
 
     virtual BufferInfo ConstBufferUpload(const void* raw_pointer, std::size_t size) {
@@ -321,7 +329,7 @@ protected:
     }
 
 private:
-    MapInterval* MapAddress(const OwnerBuffer& block, GPUVAddr gpu_addr, VAddr cpu_addr,
+    MapInterval* MapAddress(const Buffer* block, GPUVAddr gpu_addr, VAddr cpu_addr,
                             std::size_t size) {
         const VectorMapInterval overlaps = GetMapsInRange(cpu_addr, size);
         if (overlaps.empty()) {
@@ -329,11 +337,11 @@ private:
             const VAddr cpu_addr_end = cpu_addr + size;
             if (memory_manager.IsGranularRange(gpu_addr, size)) {
                 u8* host_ptr = memory_manager.GetPointer(gpu_addr);
-                UploadBlockData(block, block->GetOffset(cpu_addr), size, host_ptr);
+                UploadBlockData(*block, block->Offset(cpu_addr), size, host_ptr);
             } else {
                 staging_buffer.resize(size);
                 memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
-                UploadBlockData(block, block->GetOffset(cpu_addr), size, staging_buffer.data());
+                UploadBlockData(*block, block->Offset(cpu_addr), size, staging_buffer.data());
             }
             return Register(MapInterval(cpu_addr, cpu_addr_end, gpu_addr));
         }
@@ -376,7 +384,7 @@ private:
         return map;
     }
 
-    void UpdateBlock(const OwnerBuffer& block, VAddr start, VAddr end,
+    void UpdateBlock(const Buffer* block, VAddr start, VAddr end,
                      const VectorMapInterval& overlaps) {
         const IntervalType base_interval{start, end};
         IntervalSet interval_set{};
@@ -386,13 +394,13 @@ private:
             interval_set.subtract(subtract);
         }
         for (auto& interval : interval_set) {
-            std::size_t size = interval.upper() - interval.lower();
-            if (size > 0) {
-                staging_buffer.resize(size);
-                system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
-                UploadBlockData(block, block->GetOffset(interval.lower()), size,
-                                staging_buffer.data());
+            const std::size_t size = interval.upper() - interval.lower();
+            if (size == 0) {
+                continue;
             }
+            staging_buffer.resize(size);
+            system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
+            UploadBlockData(*block, block->Offset(interval.lower()), size, staging_buffer.data());
         }
     }
 
@@ -422,10 +430,14 @@ private:
     }
 
     void FlushMap(MapInterval* map) {
+        const auto it = blocks.find(map->start >> BLOCK_PAGE_BITS);
+        ASSERT_OR_EXECUTE(it != blocks.end(), return;);
+
+        std::shared_ptr<Buffer> block = it->second;
+
         const std::size_t size = map->end - map->start;
-        OwnerBuffer block = blocks[map->start >> block_page_bits];
         staging_buffer.resize(size);
-        DownloadBlockData(block, block->GetOffset(map->start), size, staging_buffer.data());
+        DownloadBlockData(*block, block->Offset(map->start), size, staging_buffer.data());
         system.Memory().WriteBlockUnsafe(map->start, staging_buffer.data(), size);
         map->MarkAsModified(false, 0);
     }
@@ -448,97 +460,89 @@ private:
         buffer_offset = offset_aligned;
     }
 
-    OwnerBuffer EnlargeBlock(OwnerBuffer buffer) {
-        const std::size_t old_size = buffer->GetSize();
-        const std::size_t new_size = old_size + block_page_size;
-        const VAddr cpu_addr = buffer->GetCpuAddr();
-        OwnerBuffer new_buffer = CreateBlock(cpu_addr, new_size);
-        CopyBlock(buffer, new_buffer, 0, 0, old_size);
-        buffer->SetEpoch(epoch);
-        pending_destruction.push_back(buffer);
+    std::shared_ptr<Buffer> EnlargeBlock(std::shared_ptr<Buffer> buffer) {
+        const std::size_t old_size = buffer->Size();
+        const std::size_t new_size = old_size + BLOCK_PAGE_SIZE;
+        const VAddr cpu_addr = buffer->CpuAddr();
+        std::shared_ptr<Buffer> new_buffer = CreateBlock(cpu_addr, new_size);
+        CopyBlock(*buffer, *new_buffer, 0, 0, old_size);
+        QueueDestruction(std::move(buffer));
+
         const VAddr cpu_addr_end = cpu_addr + new_size - 1;
-        u64 page_start = cpu_addr >> block_page_bits;
-        const u64 page_end = cpu_addr_end >> block_page_bits;
-        while (page_start <= page_end) {
-            blocks[page_start] = new_buffer;
-            ++page_start;
+        const u64 page_end = cpu_addr_end >> BLOCK_PAGE_BITS;
+        for (u64 page_start = cpu_addr >> BLOCK_PAGE_BITS; page_start <= page_end; ++page_start) {
+            blocks.insert_or_assign(page_start, new_buffer);
         }
+
         return new_buffer;
     }
 
-    OwnerBuffer MergeBlocks(OwnerBuffer first, OwnerBuffer second) {
-        const std::size_t size_1 = first->GetSize();
-        const std::size_t size_2 = second->GetSize();
-        const VAddr first_addr = first->GetCpuAddr();
-        const VAddr second_addr = second->GetCpuAddr();
+    std::shared_ptr<Buffer> MergeBlocks(std::shared_ptr<Buffer> first,
+                                        std::shared_ptr<Buffer> second) {
+        const std::size_t size_1 = first->Size();
+        const std::size_t size_2 = second->Size();
+        const VAddr first_addr = first->CpuAddr();
+        const VAddr second_addr = second->CpuAddr();
         const VAddr new_addr = std::min(first_addr, second_addr);
         const std::size_t new_size = size_1 + size_2;
-        OwnerBuffer new_buffer = CreateBlock(new_addr, new_size);
-        CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
-        CopyBlock(second, new_buffer, 0, new_buffer->GetOffset(second_addr), size_2);
-        first->SetEpoch(epoch);
-        second->SetEpoch(epoch);
-        pending_destruction.push_back(first);
-        pending_destruction.push_back(second);
+
+        std::shared_ptr<Buffer> new_buffer = CreateBlock(new_addr, new_size);
+        CopyBlock(*first, *new_buffer, 0, new_buffer->Offset(first_addr), size_1);
+        CopyBlock(*second, *new_buffer, 0, new_buffer->Offset(second_addr), size_2);
+        QueueDestruction(std::move(first));
+        QueueDestruction(std::move(second));
+
         const VAddr cpu_addr_end = new_addr + new_size - 1;
-        u64 page_start = new_addr >> block_page_bits;
-        const u64 page_end = cpu_addr_end >> block_page_bits;
-        while (page_start <= page_end) {
-            blocks[page_start] = new_buffer;
-            ++page_start;
+        const u64 page_end = cpu_addr_end >> BLOCK_PAGE_BITS;
+        for (u64 page_start = new_addr >> BLOCK_PAGE_BITS; page_start <= page_end; ++page_start) {
+            blocks.insert_or_assign(page_start, new_buffer);
         }
         return new_buffer;
     }
 
-    OwnerBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) {
-        OwnerBuffer found;
+    Buffer* GetBlock(VAddr cpu_addr, std::size_t size) {
+        std::shared_ptr<Buffer> found;
+
         const VAddr cpu_addr_end = cpu_addr + size - 1;
-        u64 page_start = cpu_addr >> block_page_bits;
-        const u64 page_end = cpu_addr_end >> block_page_bits;
-        while (page_start <= page_end) {
+        const u64 page_end = cpu_addr_end >> BLOCK_PAGE_BITS;
+        for (u64 page_start = cpu_addr >> BLOCK_PAGE_BITS; page_start <= page_end; ++page_start) {
             auto it = blocks.find(page_start);
             if (it == blocks.end()) {
                 if (found) {
                     found = EnlargeBlock(found);
-                } else {
-                    const VAddr start_addr = (page_start << block_page_bits);
-                    found = CreateBlock(start_addr, block_page_size);
-                    blocks[page_start] = found;
-                }
-            } else {
-                if (found) {
-                    if (found == it->second) {
-                        ++page_start;
-                        continue;
-                    }
-                    found = MergeBlocks(found, it->second);
-                } else {
-                    found = it->second;
-                }
+                    continue;
+                }
+                const VAddr start_addr = page_start << BLOCK_PAGE_BITS;
+                found = CreateBlock(start_addr, BLOCK_PAGE_SIZE);
+                blocks.insert_or_assign(page_start, found);
+                continue;
+            }
+            if (!found) {
+                found = it->second;
+                continue;
+            }
+            if (found != it->second) {
+                found = MergeBlocks(std::move(found), it->second);
             }
-            ++page_start;
         }
-        return found;
+        return found.get();
     }
 
-    void MarkRegionAsWritten(const VAddr start, const VAddr end) {
-        u64 page_start = start >> write_page_bit;
-        const u64 page_end = end >> write_page_bit;
-        while (page_start <= page_end) {
+    void MarkRegionAsWritten(VAddr start, VAddr end) {
+        const u64 page_end = end >> WRITE_PAGE_BIT;
+        for (u64 page_start = start >> WRITE_PAGE_BIT; page_start <= page_end; ++page_start) {
             auto it = written_pages.find(page_start);
             if (it != written_pages.end()) {
                 it->second = it->second + 1;
             } else {
-                written_pages[page_start] = 1;
+                written_pages.insert_or_assign(page_start, 1);
             }
-            ++page_start;
         }
     }
 
-    void UnmarkRegionAsWritten(const VAddr start, const VAddr end) {
-        u64 page_start = start >> write_page_bit;
-        const u64 page_end = end >> write_page_bit;
-        while (page_start <= page_end) {
+    void UnmarkRegionAsWritten(VAddr start, VAddr end) {
+        const u64 page_end = end >> WRITE_PAGE_BIT;
+        for (u64 page_start = start >> WRITE_PAGE_BIT; page_start <= page_end; ++page_start) {
             auto it = written_pages.find(page_start);
             if (it != written_pages.end()) {
                 if (it->second > 1) {
@@ -547,22 +551,24 @@ private:
                     written_pages.erase(it);
                 }
             }
-            ++page_start;
         }
     }
 
-    bool IsRegionWritten(const VAddr start, const VAddr end) const {
-        u64 page_start = start >> write_page_bit;
-        const u64 page_end = end >> write_page_bit;
-        while (page_start <= page_end) {
+    bool IsRegionWritten(VAddr start, VAddr end) const {
+        const u64 page_end = end >> WRITE_PAGE_BIT;
+        for (u64 page_start = start >> WRITE_PAGE_BIT; page_start <= page_end; ++page_start) {
             if (written_pages.count(page_start) > 0) {
                 return true;
             }
-            ++page_start;
         }
         return false;
     }
 
+    void QueueDestruction(std::shared_ptr<Buffer> buffer) {
+        buffer->SetEpoch(epoch);
+        pending_destruction.push(std::move(buffer));
+    }
+
     void MarkForAsyncFlush(MapInterval* map) {
         if (!uncommitted_flushes) {
             uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval*>>();
@@ -574,9 +580,7 @@ private:
     Core::System& system;
 
     std::unique_ptr<StreamBuffer> stream_buffer;
-    BufferType stream_buffer_handle{};
-
-    bool invalidated = false;
+    BufferType stream_buffer_handle;
 
     u8* buffer_ptr = nullptr;
     u64 buffer_offset = 0;
@@ -586,18 +590,15 @@ private:
     boost::intrusive::set<MapInterval, boost::intrusive::compare<MapIntervalCompare>>
         mapped_addresses;
 
-    static constexpr u64 write_page_bit = 11;
     std::unordered_map<u64, u32> written_pages;
+    std::unordered_map<u64, std::shared_ptr<Buffer>> blocks;
 
-    static constexpr u64 block_page_bits = 21;
-    static constexpr u64 block_page_size = 1ULL << block_page_bits;
-    std::unordered_map<u64, OwnerBuffer> blocks;
-
-    std::list<OwnerBuffer> pending_destruction;
+    std::queue<std::shared_ptr<Buffer>> pending_destruction;
     u64 epoch = 0;
     u64 modified_ticks = 0;
 
     std::vector<u8> staging_buffer;
+
     std::list<MapInterval*> marked_for_unregister;
 
     std::shared_ptr<std::unordered_set<MapInterval*>> uncommitted_flushes;
diff --git a/src/video_core/engines/const_buffer_engine_interface.h b/src/video_core/engines/const_buffer_engine_interface.h
index ebe139504..f46e81bb7 100644
--- a/src/video_core/engines/const_buffer_engine_interface.h
+++ b/src/video_core/engines/const_buffer_engine_interface.h
@@ -93,6 +93,7 @@ public:
     virtual SamplerDescriptor AccessBoundSampler(ShaderType stage, u64 offset) const = 0;
     virtual SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer,
                                                     u64 offset) const = 0;
+    virtual SamplerDescriptor AccessSampler(u32 handle) const = 0;
     virtual u32 GetBoundBuffer() const = 0;
 
     virtual VideoCore::GuestDriverProfile& AccessGuestDriverProfile() = 0;
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index f6237fc6a..a82b06a38 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -92,8 +92,11 @@ SamplerDescriptor KeplerCompute::AccessBindlessSampler(ShaderType stage, u64 con
     ASSERT(stage == ShaderType::Compute);
     const auto& tex_info_buffer = launch_description.const_buffer_config[const_buffer];
     const GPUVAddr tex_info_address = tex_info_buffer.Address() + offset;
+    return AccessSampler(memory_manager.Read<u32>(tex_info_address));
+}
 
-    const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(tex_info_address)};
+SamplerDescriptor KeplerCompute::AccessSampler(u32 handle) const {
+    const Texture::TextureHandle tex_handle{handle};
     const Texture::FullTextureInfo tex_info = GetTextureInfo(tex_handle);
     SamplerDescriptor result = SamplerDescriptor::FromTIC(tex_info.tic);
     result.is_shadow.Assign(tex_info.tsc.depth_compare_enabled.Value());
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index 18ceedfaf..b7f668d88 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -219,6 +219,8 @@ public:
     SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer,
                                             u64 offset) const override;
 
+    SamplerDescriptor AccessSampler(u32 handle) const override;
+
     u32 GetBoundBuffer() const override {
         return regs.tex_cb_index;
     }
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 13ef2e42d..ea3c8a963 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -25,9 +25,8 @@ constexpr u32 MacroRegistersStart = 0xE00;
 Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
                      MemoryManager& memory_manager)
     : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager},
-      macro_interpreter{*this}, upload_state{memory_manager, regs.upload} {
+      macro_engine{GetMacroEngine(*this)}, upload_state{memory_manager, regs.upload} {
     dirty.flags.flip();
-
     InitializeRegisterDefaults();
 }
 
@@ -120,7 +119,7 @@ void Maxwell3D::InitializeRegisterDefaults() {
     mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true;
 }
 
-void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters) {
+void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters) {
     // Reset the current macro.
     executing_macro = 0;
 
@@ -129,7 +128,7 @@ void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u3
         ((method - MacroRegistersStart) >> 1) % static_cast<u32>(macro_positions.size());
 
     // Execute the current macro.
-    macro_interpreter.Execute(macro_positions[entry], num_parameters, parameters);
+    macro_engine->Execute(macro_positions[entry], parameters);
     if (mme_draw.current_mode != MMEDrawMode::Undefined) {
         FlushMMEInlineDraw();
     }
@@ -165,7 +164,7 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
 
     // Call the macro when there are no more parameters in the command buffer
     if (is_last_call) {
-        CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+        CallMacroMethod(executing_macro, macro_params);
         macro_params.clear();
     }
     return;
@@ -201,7 +200,7 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
         break;
     }
     case MAXWELL3D_REG_INDEX(macros.data): {
-        ProcessMacroUpload(arg);
+        macro_engine->AddCode(regs.macros.upload_address, arg);
         break;
     }
     case MAXWELL3D_REG_INDEX(macros.bind): {
@@ -310,7 +309,7 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
 
     // Call the macro when there are no more parameters in the command buffer
     if (amount == methods_pending) {
-        CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+        CallMacroMethod(executing_macro, macro_params);
         macro_params.clear();
     }
     return;
@@ -424,9 +423,7 @@ void Maxwell3D::FlushMMEInlineDraw() {
 }
 
 void Maxwell3D::ProcessMacroUpload(u32 data) {
-    ASSERT_MSG(regs.macros.upload_address < macro_memory.size(),
-               "upload_address exceeded macro_memory size!");
-    macro_memory[regs.macros.upload_address++] = data;
+    macro_engine->AddCode(regs.macros.upload_address++, data);
 }
 
 void Maxwell3D::ProcessMacroBind(u32 data) {
@@ -743,8 +740,11 @@ SamplerDescriptor Maxwell3D::AccessBindlessSampler(ShaderType stage, u64 const_b
     const auto& shader = state.shader_stages[static_cast<std::size_t>(stage)];
     const auto& tex_info_buffer = shader.const_buffers[const_buffer];
     const GPUVAddr tex_info_address = tex_info_buffer.address + offset;
+    return AccessSampler(memory_manager.Read<u32>(tex_info_address));
+}
 
-    const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(tex_info_address)};
+SamplerDescriptor Maxwell3D::AccessSampler(u32 handle) const {
+    const Texture::TextureHandle tex_handle{handle};
     const Texture::FullTextureInfo tex_info = GetTextureInfo(tex_handle);
     SamplerDescriptor result = SamplerDescriptor::FromTIC(tex_info.tic);
     result.is_shadow.Assign(tex_info.tsc.depth_compare_enabled.Value());
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 05dd6b39b..d5fe25065 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -23,7 +23,7 @@
23#include "video_core/engines/engine_upload.h" 23#include "video_core/engines/engine_upload.h"
24#include "video_core/engines/shader_type.h" 24#include "video_core/engines/shader_type.h"
25#include "video_core/gpu.h" 25#include "video_core/gpu.h"
26#include "video_core/macro_interpreter.h" 26#include "video_core/macro/macro.h"
27#include "video_core/textures/texture.h" 27#include "video_core/textures/texture.h"
28 28
29namespace Core { 29namespace Core {
@@ -598,6 +598,7 @@ public:
598 BitField<4, 3, u32> block_height; 598 BitField<4, 3, u32> block_height;
599 BitField<8, 3, u32> block_depth; 599 BitField<8, 3, u32> block_depth;
600 BitField<12, 1, InvMemoryLayout> type; 600 BitField<12, 1, InvMemoryLayout> type;
601 BitField<16, 1, u32> is_3d;
601 } memory_layout; 602 } memory_layout;
602 union { 603 union {
603 BitField<0, 16, u32> layers; 604 BitField<0, 16, u32> layers;
@@ -1403,6 +1404,8 @@ public:
1403 SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer, 1404 SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer,
1404 u64 offset) const override; 1405 u64 offset) const override;
1405 1406
1407 SamplerDescriptor AccessSampler(u32 handle) const override;
1408
1406 u32 GetBoundBuffer() const override { 1409 u32 GetBoundBuffer() const override {
1407 return regs.tex_cb_index; 1410 return regs.tex_cb_index;
1408 } 1411 }
@@ -1411,15 +1414,6 @@ public:
1411 1414
1412 const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const override; 1415 const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const override;
1413 1416
1414 /// Memory for macro code - it's undetermined how big this is, however 1MB is much larger than
1415 /// we've seen used.
1416 using MacroMemory = std::array<u32, 0x40000>;
1417
1418 /// Gets a reference to macro memory.
1419 const MacroMemory& GetMacroMemory() const {
1420 return macro_memory;
1421 }
1422
1423 bool ShouldExecute() const { 1417 bool ShouldExecute() const {
1424 return execute_on; 1418 return execute_on;
1425 } 1419 }
@@ -1468,16 +1462,13 @@ private:
1468 1462
1469 std::array<bool, Regs::NUM_REGS> mme_inline{}; 1463 std::array<bool, Regs::NUM_REGS> mme_inline{};
1470 1464
1471 /// Memory for macro code
1472 MacroMemory macro_memory;
1473
1474 /// Macro method that is currently being executed / being fed parameters. 1465 /// Macro method that is currently being executed / being fed parameters.
1475 u32 executing_macro = 0; 1466 u32 executing_macro = 0;
1476 /// Parameters that have been submitted to the macro call so far. 1467 /// Parameters that have been submitted to the macro call so far.
1477 std::vector<u32> macro_params; 1468 std::vector<u32> macro_params;
1478 1469
1479 /// Interpreter for the macro codes uploaded to the GPU. 1470 /// Interpreter for the macro codes uploaded to the GPU.
1480 MacroInterpreter macro_interpreter; 1471 std::unique_ptr<MacroEngine> macro_engine;
1481 1472
1482 static constexpr u32 null_cb_data = 0xFFFFFFFF; 1473 static constexpr u32 null_cb_data = 0xFFFFFFFF;
1483 struct { 1474 struct {
@@ -1506,7 +1497,7 @@ private:
1506 * @param num_parameters Number of arguments 1497 * @param num_parameters Number of arguments
1507 * @param parameters Arguments to the method call 1498 * @param parameters Arguments to the method call
1508 */ 1499 */
1509 void CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters); 1500 void CallMacroMethod(u32 method, const std::vector<u32>& parameters);
1510 1501
1511 /// Handles writes to the macro uploading register. 1502 /// Handles writes to the macro uploading register.
1512 void ProcessMacroUpload(u32 data); 1503 void ProcessMacroUpload(u32 data);
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
new file mode 100644
index 000000000..89077a2d8
--- /dev/null
+++ b/src/video_core/macro/macro.cpp
@@ -0,0 +1,45 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "core/settings.h"
+#include "video_core/macro/macro.h"
+#include "video_core/macro/macro_interpreter.h"
+#include "video_core/macro/macro_jit_x64.h"
+
+namespace Tegra {
+
+void MacroEngine::AddCode(u32 method, u32 data) {
+    uploaded_macro_code[method].push_back(data);
+}
+
+void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
+    auto compiled_macro = macro_cache.find(method);
+    if (compiled_macro != macro_cache.end()) {
+        compiled_macro->second->Execute(parameters, method);
+    } else {
+        // Macro not compiled, check if it's uploaded and if so, compile it
+        auto macro_code = uploaded_macro_code.find(method);
+        if (macro_code == uploaded_macro_code.end()) {
+            UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
+            return;
+        }
+        macro_cache[method] = Compile(macro_code->second);
+        macro_cache[method]->Execute(parameters, method);
+    }
+}
+
+std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
+    if (Settings::values.disable_macro_jit) {
+        return std::make_unique<MacroInterpreter>(maxwell3d);
+    }
+#ifdef ARCHITECTURE_x86_64
+    return std::make_unique<MacroJITx64>(maxwell3d);
+#else
+    return std::make_unique<MacroInterpreter>(maxwell3d);
+#endif
+}
+
+} // namespace Tegra
diff --git a/src/video_core/macro/macro.h b/src/video_core/macro/macro.h
new file mode 100644
index 000000000..b76ed891f
--- /dev/null
+++ b/src/video_core/macro/macro.h
@@ -0,0 +1,128 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+#include "common/bit_field.h"
+#include "common/common_types.h"
+
+namespace Tegra {
+namespace Engines {
+class Maxwell3D;
+}
+namespace Macro {
+constexpr std::size_t NUM_MACRO_REGISTERS = 8;
+enum class Operation : u32 {
+    ALU = 0,
+    AddImmediate = 1,
+    ExtractInsert = 2,
+    ExtractShiftLeftImmediate = 3,
+    ExtractShiftLeftRegister = 4,
+    Read = 5,
+    Unused = 6, // This operation doesn't seem to be a valid encoding.
+    Branch = 7,
+};
+
+enum class ALUOperation : u32 {
+    Add = 0,
+    AddWithCarry = 1,
+    Subtract = 2,
+    SubtractWithBorrow = 3,
+    // Operations 4-7 don't seem to be valid encodings.
+    Xor = 8,
+    Or = 9,
+    And = 10,
+    AndNot = 11,
+    Nand = 12
+};
+
+enum class ResultOperation : u32 {
+    IgnoreAndFetch = 0,
+    Move = 1,
+    MoveAndSetMethod = 2,
+    FetchAndSend = 3,
+    MoveAndSend = 4,
+    FetchAndSetMethod = 5,
+    MoveAndSetMethodFetchAndSend = 6,
+    MoveAndSetMethodSend = 7
+};
+
+enum class BranchCondition : u32 {
+    Zero = 0,
+    NotZero = 1,
+};
+
+union Opcode {
+    u32 raw;
+    BitField<0, 3, Operation> operation;
+    BitField<4, 3, ResultOperation> result_operation;
+    BitField<4, 1, BranchCondition> branch_condition;
+    // If set on a branch, then the branch doesn't have a delay slot.
+    BitField<5, 1, u32> branch_annul;
+    BitField<7, 1, u32> is_exit;
+    BitField<8, 3, u32> dst;
+    BitField<11, 3, u32> src_a;
+    BitField<14, 3, u32> src_b;
+    // The signed immediate overlaps the second source operand and the alu operation.
+    BitField<14, 18, s32> immediate;
+
+    BitField<17, 5, ALUOperation> alu_operation;
+
+    // Bitfield instructions data
+    BitField<17, 5, u32> bf_src_bit;
+    BitField<22, 5, u32> bf_size;
+    BitField<27, 5, u32> bf_dst_bit;
+
+    u32 GetBitfieldMask() const {
+        return (1 << bf_size) - 1;
+    }
+
+    s32 GetBranchTarget() const {
+        return static_cast<s32>(immediate * sizeof(u32));
+    }
+};
+
+union MethodAddress {
+    u32 raw;
+    BitField<0, 12, u32> address;
+    BitField<12, 6, u32> increment;
+};
+
+} // namespace Macro
+
+class CachedMacro {
+public:
+    virtual ~CachedMacro() = default;
+    /**
+     * Executes the macro code with the specified input parameters.
+     * @param parameters The parameters of the macro
+     * @param method The macro method being executed
+     */
+    virtual void Execute(const std::vector<u32>& parameters, u32 method) = 0;
+};
+
+class MacroEngine {
+public:
+    virtual ~MacroEngine() = default;
+
+    // Stores the uploaded macro code so it can be compiled when called.
+    void AddCode(u32 method, u32 data);
+
+    // Compiles the macro if it's not in the cache, and executes the compiled macro
+    void Execute(u32 method, const std::vector<u32>& parameters);
+
+protected:
+    virtual std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) = 0;
+
+private:
+    std::unordered_map<u32, std::unique_ptr<CachedMacro>> macro_cache;
+    std::unordered_map<u32, std::vector<u32>> uploaded_macro_code;
+};
+
+std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d);
+
+} // namespace Tegra
diff --git a/src/video_core/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp
index 947364928..5edff27aa 100644
--- a/src/video_core/macro_interpreter.cpp
+++ b/src/video_core/macro/macro_interpreter.cpp
@@ -1,4 +1,4 @@
1// Copyright 2018 yuzu Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
@@ -6,109 +6,46 @@
6#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "common/microprofile.h" 7#include "common/microprofile.h"
8#include "video_core/engines/maxwell_3d.h" 8#include "video_core/engines/maxwell_3d.h"
9#include "video_core/macro_interpreter.h" 9#include "video_core/macro/macro_interpreter.h"
10 10
11MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192)); 11MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192));
12 12
13namespace Tegra { 13namespace Tegra {
14namespace { 14MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
15enum class Operation : u32 {
16 ALU = 0,
17 AddImmediate = 1,
18 ExtractInsert = 2,
19 ExtractShiftLeftImmediate = 3,
20 ExtractShiftLeftRegister = 4,
21 Read = 5,
22 Unused = 6, // This operation doesn't seem to be a valid encoding.
23 Branch = 7,
24};
25} // Anonymous namespace
26
27enum class MacroInterpreter::ALUOperation : u32 {
28 Add = 0,
29 AddWithCarry = 1,
30 Subtract = 2,
31 SubtractWithBorrow = 3,
32 // Operations 4-7 don't seem to be valid encodings.
33 Xor = 8,
34 Or = 9,
35 And = 10,
36 AndNot = 11,
37 Nand = 12
38};
39
40enum class MacroInterpreter::ResultOperation : u32 {
41 IgnoreAndFetch = 0,
42 Move = 1,
43 MoveAndSetMethod = 2,
44 FetchAndSend = 3,
45 MoveAndSend = 4,
46 FetchAndSetMethod = 5,
47 MoveAndSetMethodFetchAndSend = 6,
48 MoveAndSetMethodSend = 7
49};
50
51enum class MacroInterpreter::BranchCondition : u32 {
52 Zero = 0,
53 NotZero = 1,
54};
55
56union MacroInterpreter::Opcode {
57 u32 raw;
58 BitField<0, 3, Operation> operation;
59 BitField<4, 3, ResultOperation> result_operation;
60 BitField<4, 1, BranchCondition> branch_condition;
61 // If set on a branch, then the branch doesn't have a delay slot.
62 BitField<5, 1, u32> branch_annul;
63 BitField<7, 1, u32> is_exit;
64 BitField<8, 3, u32> dst;
65 BitField<11, 3, u32> src_a;
66 BitField<14, 3, u32> src_b;
67 // The signed immediate overlaps the second source operand and the alu operation.
68 BitField<14, 18, s32> immediate;
69
70 BitField<17, 5, ALUOperation> alu_operation;
71
72 // Bitfield instructions data
73 BitField<17, 5, u32> bf_src_bit;
74 BitField<22, 5, u32> bf_size;
75 BitField<27, 5, u32> bf_dst_bit;
76
77 u32 GetBitfieldMask() const {
78 return (1 << bf_size) - 1;
79 }
80 15
81 s32 GetBranchTarget() const { 16std::unique_ptr<CachedMacro> MacroInterpreter::Compile(const std::vector<u32>& code) {
82 return static_cast<s32>(immediate * sizeof(u32)); 17 return std::make_unique<MacroInterpreterImpl>(maxwell3d, code);
83 } 18}
84};
85 19
86MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {} 20MacroInterpreterImpl::MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d,
21 const std::vector<u32>& code)
22 : maxwell3d(maxwell3d), code(code) {}
87 23
88void MacroInterpreter::Execute(u32 offset, std::size_t num_parameters, const u32* parameters) { 24void MacroInterpreterImpl::Execute(const std::vector<u32>& parameters, u32 method) {
89 MICROPROFILE_SCOPE(MacroInterp); 25 MICROPROFILE_SCOPE(MacroInterp);
90 Reset(); 26 Reset();
91 27
92 registers[1] = parameters[0]; 28 registers[1] = parameters[0];
29 num_parameters = parameters.size();
93 30
94 if (num_parameters > parameters_capacity) { 31 if (num_parameters > parameters_capacity) {
95 parameters_capacity = num_parameters; 32 parameters_capacity = num_parameters;
96 this->parameters = std::make_unique<u32[]>(num_parameters); 33 this->parameters = std::make_unique<u32[]>(num_parameters);
97 } 34 }
98 std::memcpy(this->parameters.get(), parameters, num_parameters * sizeof(u32)); 35 std::memcpy(this->parameters.get(), parameters.data(), num_parameters * sizeof(u32));
99 this->num_parameters = num_parameters; 36 this->num_parameters = num_parameters;
100 37
101 // Execute the code until we hit an exit condition. 38 // Execute the code until we hit an exit condition.
102 bool keep_executing = true; 39 bool keep_executing = true;
103 while (keep_executing) { 40 while (keep_executing) {
104 keep_executing = Step(offset, false); 41 keep_executing = Step(false);
105 } 42 }
106 43
107 // Assert that the macro used all the input parameters 44 // Assert that the macro used all the input parameters
108 ASSERT(next_parameter_index == num_parameters); 45 ASSERT(next_parameter_index == num_parameters);
109} 46}
110 47
111void MacroInterpreter::Reset() { 48void MacroInterpreterImpl::Reset() {
112 registers = {}; 49 registers = {};
113 pc = 0; 50 pc = 0;
114 delayed_pc = {}; 51 delayed_pc = {};
@@ -120,10 +57,10 @@ void MacroInterpreter::Reset() {
120 carry_flag = false; 57 carry_flag = false;
121} 58}
122 59
123bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) { 60bool MacroInterpreterImpl::Step(bool is_delay_slot) {
124 u32 base_address = pc; 61 u32 base_address = pc;
125 62
126 Opcode opcode = GetOpcode(offset); 63 Macro::Opcode opcode = GetOpcode();
127 pc += 4; 64 pc += 4;
128 65
129 // Update the program counter if we were delayed 66 // Update the program counter if we were delayed
@@ -134,18 +71,18 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
134 } 71 }
135 72
136 switch (opcode.operation) { 73 switch (opcode.operation) {
137 case Operation::ALU: { 74 case Macro::Operation::ALU: {
138 u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a), 75 u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a),
139 GetRegister(opcode.src_b)); 76 GetRegister(opcode.src_b));
140 ProcessResult(opcode.result_operation, opcode.dst, result); 77 ProcessResult(opcode.result_operation, opcode.dst, result);
141 break; 78 break;
142 } 79 }
143 case Operation::AddImmediate: { 80 case Macro::Operation::AddImmediate: {
144 ProcessResult(opcode.result_operation, opcode.dst, 81 ProcessResult(opcode.result_operation, opcode.dst,
145 GetRegister(opcode.src_a) + opcode.immediate); 82 GetRegister(opcode.src_a) + opcode.immediate);
146 break; 83 break;
147 } 84 }
148 case Operation::ExtractInsert: { 85 case Macro::Operation::ExtractInsert: {
149 u32 dst = GetRegister(opcode.src_a); 86 u32 dst = GetRegister(opcode.src_a);
150 u32 src = GetRegister(opcode.src_b); 87 u32 src = GetRegister(opcode.src_b);
151 88
@@ -155,7 +92,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
155 ProcessResult(opcode.result_operation, opcode.dst, dst); 92 ProcessResult(opcode.result_operation, opcode.dst, dst);
156 break; 93 break;
157 } 94 }
158 case Operation::ExtractShiftLeftImmediate: { 95 case Macro::Operation::ExtractShiftLeftImmediate: {
159 u32 dst = GetRegister(opcode.src_a); 96 u32 dst = GetRegister(opcode.src_a);
160 u32 src = GetRegister(opcode.src_b); 97 u32 src = GetRegister(opcode.src_b);
161 98
@@ -164,7 +101,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
164 ProcessResult(opcode.result_operation, opcode.dst, result); 101 ProcessResult(opcode.result_operation, opcode.dst, result);
165 break; 102 break;
166 } 103 }
167 case Operation::ExtractShiftLeftRegister: { 104 case Macro::Operation::ExtractShiftLeftRegister: {
168 u32 dst = GetRegister(opcode.src_a); 105 u32 dst = GetRegister(opcode.src_a);
169 u32 src = GetRegister(opcode.src_b); 106 u32 src = GetRegister(opcode.src_b);
170 107
@@ -173,12 +110,12 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
173 ProcessResult(opcode.result_operation, opcode.dst, result); 110 ProcessResult(opcode.result_operation, opcode.dst, result);
174 break; 111 break;
175 } 112 }
176 case Operation::Read: { 113 case Macro::Operation::Read: {
177 u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate); 114 u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate);
178 ProcessResult(opcode.result_operation, opcode.dst, result); 115 ProcessResult(opcode.result_operation, opcode.dst, result);
179 break; 116 break;
180 } 117 }
181 case Operation::Branch: { 118 case Macro::Operation::Branch: {
182 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid"); 119 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
183 u32 value = GetRegister(opcode.src_a); 120 u32 value = GetRegister(opcode.src_a);
184 bool taken = EvaluateBranchCondition(opcode.branch_condition, value); 121 bool taken = EvaluateBranchCondition(opcode.branch_condition, value);
@@ -191,7 +128,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
191 128
192 delayed_pc = base_address + opcode.GetBranchTarget(); 129 delayed_pc = base_address + opcode.GetBranchTarget();
193 // Execute one more instruction due to the delay slot. 130 // Execute one more instruction due to the delay slot.
194 return Step(offset, true); 131 return Step(true);
195 } 132 }
196 break; 133 break;
197 } 134 }
@@ -204,51 +141,44 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
204 // cause an exit if it's executed inside a delay slot. 141 // cause an exit if it's executed inside a delay slot.
205 if (opcode.is_exit && !is_delay_slot) { 142 if (opcode.is_exit && !is_delay_slot) {
206 // Exit has a delay slot, execute the next instruction 143 // Exit has a delay slot, execute the next instruction
207 Step(offset, true); 144 Step(true);
208 return false; 145 return false;
209 } 146 }
210 147
211 return true; 148 return true;
212} 149}
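
The Branch case above returns Step(true) so the instruction sitting in the branch's delay slot executes before control reaches the target, MIPS-style, while delayed_pc is consumed at the top of the next Step. A hypothetical trace (addresses and mnemonics invented for illustration):

    // pc 0x00: BRANCH taken, target 0x0C  -> delayed_pc = 0x0C, then Step(true) runs 0x04
    // pc 0x04: ADDI r2, r1, 1 (delay slot) -> executes before the jump lands
    // pc 0x0C: branch target               -> delayed_pc applied, execution resumes here
    // With branch_annul set, pc jumps straight to 0x0C and 0x04 never executes.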
213 150
214MacroInterpreter::Opcode MacroInterpreter::GetOpcode(u32 offset) const { 151u32 MacroInterpreterImpl::GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b) {
215 const auto& macro_memory{maxwell3d.GetMacroMemory()};
216 ASSERT((pc % sizeof(u32)) == 0);
217 ASSERT((pc + offset) < macro_memory.size() * sizeof(u32));
218 return {macro_memory[offset + pc / sizeof(u32)]};
219}
220
221u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b) {
222 switch (operation) { 152 switch (operation) {
223 case ALUOperation::Add: { 153 case Macro::ALUOperation::Add: {
224 const u64 result{static_cast<u64>(src_a) + src_b}; 154 const u64 result{static_cast<u64>(src_a) + src_b};
225 carry_flag = result > 0xffffffff; 155 carry_flag = result > 0xffffffff;
226 return static_cast<u32>(result); 156 return static_cast<u32>(result);
227 } 157 }
228 case ALUOperation::AddWithCarry: { 158 case Macro::ALUOperation::AddWithCarry: {
229 const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 1ULL : 0ULL)}; 159 const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 1ULL : 0ULL)};
230 carry_flag = result > 0xffffffff; 160 carry_flag = result > 0xffffffff;
231 return static_cast<u32>(result); 161 return static_cast<u32>(result);
232 } 162 }
233 case ALUOperation::Subtract: { 163 case Macro::ALUOperation::Subtract: {
234 const u64 result{static_cast<u64>(src_a) - src_b}; 164 const u64 result{static_cast<u64>(src_a) - src_b};
235 carry_flag = result < 0x100000000; 165 carry_flag = result < 0x100000000;
236 return static_cast<u32>(result); 166 return static_cast<u32>(result);
237 } 167 }
238 case ALUOperation::SubtractWithBorrow: { 168 case Macro::ALUOperation::SubtractWithBorrow: {
239 const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)}; 169 const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)};
240 carry_flag = result < 0x100000000; 170 carry_flag = result < 0x100000000;
241 return static_cast<u32>(result); 171 return static_cast<u32>(result);
242 } 172 }
243 case ALUOperation::Xor: 173 case Macro::ALUOperation::Xor:
244 return src_a ^ src_b; 174 return src_a ^ src_b;
245 case ALUOperation::Or: 175 case Macro::ALUOperation::Or:
246 return src_a | src_b; 176 return src_a | src_b;
247 case ALUOperation::And: 177 case Macro::ALUOperation::And:
248 return src_a & src_b; 178 return src_a & src_b;
249 case ALUOperation::AndNot: 179 case Macro::ALUOperation::AndNot:
250 return src_a & ~src_b; 180 return src_a & ~src_b;
251 case ALUOperation::Nand: 181 case Macro::ALUOperation::Nand:
252 return ~(src_a & src_b); 182 return ~(src_a & src_b);
253 183
254 default: 184 default:
@@ -257,43 +187,43 @@ u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b)
257 } 187 }
258} 188}
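
The carry convention above deserves a note: for additions the flag is a true carry out of bit 31, while for subtractions it is x86-style "no borrow", which is why SubtractWithBorrow subtracts (carry_flag ? 0 : 1). Two worked examples with arbitrary operands:

    // Add:      0xFFFFFFFF + 0x00000002 = 0x1'00000001 as u64 -> carry_flag = true
    // Subtract: 0x00000001 - 0x00000002 wraps past 2^32 as u64 -> carry_flag = false
    //           (false means a borrow occurred, so SubtractWithBorrow takes one more off)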
259 189
260void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 result) { 190void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result) {
261 switch (operation) { 191 switch (operation) {
262 case ResultOperation::IgnoreAndFetch: 192 case Macro::ResultOperation::IgnoreAndFetch:
263 // Fetch parameter and ignore result. 193 // Fetch parameter and ignore result.
264 SetRegister(reg, FetchParameter()); 194 SetRegister(reg, FetchParameter());
265 break; 195 break;
266 case ResultOperation::Move: 196 case Macro::ResultOperation::Move:
267 // Move result. 197 // Move result.
268 SetRegister(reg, result); 198 SetRegister(reg, result);
269 break; 199 break;
270 case ResultOperation::MoveAndSetMethod: 200 case Macro::ResultOperation::MoveAndSetMethod:
271 // Move result and use as Method Address. 201 // Move result and use as Method Address.
272 SetRegister(reg, result); 202 SetRegister(reg, result);
273 SetMethodAddress(result); 203 SetMethodAddress(result);
274 break; 204 break;
275 case ResultOperation::FetchAndSend: 205 case Macro::ResultOperation::FetchAndSend:
276 // Fetch parameter and send result. 206 // Fetch parameter and send result.
277 SetRegister(reg, FetchParameter()); 207 SetRegister(reg, FetchParameter());
278 Send(result); 208 Send(result);
279 break; 209 break;
280 case ResultOperation::MoveAndSend: 210 case Macro::ResultOperation::MoveAndSend:
281 // Move and send result. 211 // Move and send result.
282 SetRegister(reg, result); 212 SetRegister(reg, result);
283 Send(result); 213 Send(result);
284 break; 214 break;
285 case ResultOperation::FetchAndSetMethod: 215 case Macro::ResultOperation::FetchAndSetMethod:
286 // Fetch parameter and use result as Method Address. 216 // Fetch parameter and use result as Method Address.
287 SetRegister(reg, FetchParameter()); 217 SetRegister(reg, FetchParameter());
288 SetMethodAddress(result); 218 SetMethodAddress(result);
289 break; 219 break;
290 case ResultOperation::MoveAndSetMethodFetchAndSend: 220 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
291 // Move result and use as Method Address, then fetch and send parameter. 221 // Move result and use as Method Address, then fetch and send parameter.
292 SetRegister(reg, result); 222 SetRegister(reg, result);
293 SetMethodAddress(result); 223 SetMethodAddress(result);
294 Send(FetchParameter()); 224 Send(FetchParameter());
295 break; 225 break;
296 case ResultOperation::MoveAndSetMethodSend: 226 case Macro::ResultOperation::MoveAndSetMethodSend:
297 // Move result and use as Method Address, then send bits 12:17 of result. 227 // Move result and use as Method Address, then send bits 12:17 of result.
298 SetRegister(reg, result); 228 SetRegister(reg, result);
299 SetMethodAddress(result); 229 SetMethodAddress(result);
@@ -304,16 +234,28 @@ void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 res
304 } 234 }
305} 235}
306 236
307u32 MacroInterpreter::FetchParameter() { 237bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const {
308 ASSERT(next_parameter_index < num_parameters); 238 switch (cond) {
309 return parameters[next_parameter_index++]; 239 case Macro::BranchCondition::Zero:
240 return value == 0;
241 case Macro::BranchCondition::NotZero:
242 return value != 0;
243 }
244 UNREACHABLE();
245 return true;
310} 246}
311 247
312u32 MacroInterpreter::GetRegister(u32 register_id) const { 248Macro::Opcode MacroInterpreterImpl::GetOpcode() const {
249 ASSERT((pc % sizeof(u32)) == 0);
250 ASSERT(pc < code.size() * sizeof(u32));
251 return {code[pc / sizeof(u32)]};
252}
253
254u32 MacroInterpreterImpl::GetRegister(u32 register_id) const {
313 return registers.at(register_id); 255 return registers.at(register_id);
314} 256}
315 257
316void MacroInterpreter::SetRegister(u32 register_id, u32 value) { 258void MacroInterpreterImpl::SetRegister(u32 register_id, u32 value) {
317 // Register 0 is hardwired as the zero register. 259 // Register 0 is hardwired as the zero register.
318 // Ensure no writes to it actually occur. 260 // Ensure no writes to it actually occur.
319 if (register_id == 0) { 261 if (register_id == 0) {
@@ -323,30 +265,24 @@ void MacroInterpreter::SetRegister(u32 register_id, u32 value) {
323 registers.at(register_id) = value; 265 registers.at(register_id) = value;
324} 266}
325 267
326void MacroInterpreter::SetMethodAddress(u32 address) { 268void MacroInterpreterImpl::SetMethodAddress(u32 address) {
327 method_address.raw = address; 269 method_address.raw = address;
328} 270}
329 271
330void MacroInterpreter::Send(u32 value) { 272void MacroInterpreterImpl::Send(u32 value) {
331 maxwell3d.CallMethodFromMME(method_address.address, value); 273 maxwell3d.CallMethodFromMME(method_address.address, value);
332 // Increment the method address by the method increment. 274 // Increment the method address by the method increment.
333 method_address.address.Assign(method_address.address.Value() + 275 method_address.address.Assign(method_address.address.Value() +
334 method_address.increment.Value()); 276 method_address.increment.Value());
335} 277}
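
Send() depends on the Macro::MethodAddress packing declared in macro.h (bits 0-11 address, bits 12-17 increment). A short worked example with made-up values:

    Macro::MethodAddress addr{};
    addr.raw = (4 << 12) | 0x6C0; // increment = 4, starting method = 0x6C0
    // Send(value) calls CallMethodFromMME(0x6C0, value), then advances the address:
    addr.address.Assign(addr.address.Value() + addr.increment.Value()); // -> 0x6C4
    // so repeated Sends walk consecutive (or strided) Maxwell3D registers.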
336 278
337u32 MacroInterpreter::Read(u32 method) const { 279u32 MacroInterpreterImpl::Read(u32 method) const {
338 return maxwell3d.GetRegisterValue(method); 280 return maxwell3d.GetRegisterValue(method);
339} 281}
340 282
341bool MacroInterpreter::EvaluateBranchCondition(BranchCondition cond, u32 value) const { 283u32 MacroInterpreterImpl::FetchParameter() {
342 switch (cond) { 284 ASSERT(next_parameter_index < num_parameters);
343 case BranchCondition::Zero: 285 return parameters[next_parameter_index++];
344 return value == 0;
345 case BranchCondition::NotZero:
346 return value != 0;
347 }
348 UNREACHABLE();
349 return true;
350} 286}
351 287
352} // namespace Tegra 288} // namespace Tegra
diff --git a/src/video_core/macro_interpreter.h b/src/video_core/macro/macro_interpreter.h
index 631146d89..90217fc89 100644
--- a/src/video_core/macro_interpreter.h
+++ b/src/video_core/macro/macro_interpreter.h
@@ -1,44 +1,37 @@
1// Copyright 2018 yuzu Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#pragma once 5#pragma once
6
7#include <array> 6#include <array>
8#include <optional> 7#include <optional>
9 8#include <vector>
10#include "common/bit_field.h" 9#include "common/bit_field.h"
11#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/macro/macro.h"
12 12
13namespace Tegra { 13namespace Tegra {
14namespace Engines { 14namespace Engines {
15class Maxwell3D; 15class Maxwell3D;
16} 16}
17 17
18class MacroInterpreter final { 18class MacroInterpreter final : public MacroEngine {
19public: 19public:
20 explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d); 20 explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d);
21 21
22 /** 22protected:
23 * Executes the macro code with the specified input parameters. 23 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
24 * @param offset Offset to start execution at.
25 * @param parameters The parameters of the macro.
26 */
27 void Execute(u32 offset, std::size_t num_parameters, const u32* parameters);
28 24
29private: 25private:
30 enum class ALUOperation : u32; 26 Engines::Maxwell3D& maxwell3d;
31 enum class BranchCondition : u32; 27};
32 enum class ResultOperation : u32;
33
34 union Opcode;
35 28
36 union MethodAddress { 29class MacroInterpreterImpl : public CachedMacro {
37 u32 raw; 30public:
38 BitField<0, 12, u32> address; 31 MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
39 BitField<12, 6, u32> increment; 32 void Execute(const std::vector<u32>& parameters, u32 method) override;
40 };
41 33
34private:
42 /// Resets the execution engine state, zeroing registers, etc. 35 /// Resets the execution engine state, zeroing registers, etc.
43 void Reset(); 36 void Reset();
44 37
@@ -49,20 +42,20 @@ private:
49 * @param is_delay_slot Whether the current step is being executed due to a delay slot in a 42 * @param is_delay_slot Whether the current step is being executed due to a delay slot in a
50 * previous instruction. 43 * previous instruction.
51 */ 44 */
52 bool Step(u32 offset, bool is_delay_slot); 45 bool Step(bool is_delay_slot);
53 46
54 /// Calculates the result of an ALU operation. src_a OP src_b; 47 /// Calculates the result of an ALU operation. src_a OP src_b;
55 u32 GetALUResult(ALUOperation operation, u32 src_a, u32 src_b); 48 u32 GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b);
56 49
57 /// Performs the result operation on the input result and stores it in the specified register 50 /// Performs the result operation on the input result and stores it in the specified register
58 /// (if necessary). 51 /// (if necessary).
59 void ProcessResult(ResultOperation operation, u32 reg, u32 result); 52 void ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result);
60 53
61 /// Evaluates the branch condition and returns whether the branch should be taken or not. 54 /// Evaluates the branch condition and returns whether the branch should be taken or not.
62 bool EvaluateBranchCondition(BranchCondition cond, u32 value) const; 55 bool EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const;
63 56
64 /// Reads an opcode at the current program counter location. 57 /// Reads an opcode at the current program counter location.
65 Opcode GetOpcode(u32 offset) const; 58 Macro::Opcode GetOpcode() const;
66 59
67 /// Returns the specified register's value. Register 0 is hardcoded to always return 0. 60 /// Returns the specified register's value. Register 0 is hardcoded to always return 0.
68 u32 GetRegister(u32 register_id) const; 61 u32 GetRegister(u32 register_id) const;
@@ -89,13 +82,11 @@ private:
89 /// Program counter to execute at after the delay slot is executed. 82 /// Program counter to execute at after the delay slot is executed.
90 std::optional<u32> delayed_pc; 83 std::optional<u32> delayed_pc;
91 84
92 static constexpr std::size_t NumMacroRegisters = 8;
93
94 /// General purpose macro registers. 85 /// General purpose macro registers.
95 std::array<u32, NumMacroRegisters> registers = {}; 86 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers = {};
96 87
97 /// Method address to use for the next Send instruction. 88 /// Method address to use for the next Send instruction.
98 MethodAddress method_address = {}; 89 Macro::MethodAddress method_address = {};
99 90
100 /// Input parameters of the current macro. 91 /// Input parameters of the current macro.
101 std::unique_ptr<u32[]> parameters; 92 std::unique_ptr<u32[]> parameters;
@@ -105,5 +96,7 @@ private:
105 u32 next_parameter_index = 0; 96 u32 next_parameter_index = 0;
106 97
107 bool carry_flag = false; 98 bool carry_flag = false;
99 const std::vector<u32>& code;
108}; 100};
101
109} // namespace Tegra 102} // namespace Tegra
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
new file mode 100644
index 000000000..11c1cc3be
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -0,0 +1,640 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/microprofile.h"
8#include "common/x64/xbyak_util.h"
9#include "video_core/engines/maxwell_3d.h"
10#include "video_core/macro/macro_interpreter.h"
11#include "video_core/macro/macro_jit_x64.h"
12
13MICROPROFILE_DEFINE(MacroJitCompile, "GPU", "Compile macro JIT", MP_RGB(173, 255, 47));
14MICROPROFILE_DEFINE(MacroJitExecute, "GPU", "Execute macro JIT", MP_RGB(255, 255, 0));
15
16namespace Tegra {
17static const Xbyak::Reg64 PARAMETERS = Xbyak::util::r9;
18static const Xbyak::Reg64 REGISTERS = Xbyak::util::r10;
19static const Xbyak::Reg64 STATE = Xbyak::util::r11;
20static const Xbyak::Reg64 NEXT_PARAMETER = Xbyak::util::r12;
21static const Xbyak::Reg32 RESULT = Xbyak::util::r13d;
22static const Xbyak::Reg64 RESULT_64 = Xbyak::util::r13;
23static const Xbyak::Reg32 METHOD_ADDRESS = Xbyak::util::r14d;
24static const Xbyak::Reg64 METHOD_ADDRESS_64 = Xbyak::util::r14;
25static const Xbyak::Reg64 BRANCH_HOLDER = Xbyak::util::r15;
26
27static const std::bitset<32> PERSISTENT_REGISTERS = Common::X64::BuildRegSet({
28 PARAMETERS,
29 REGISTERS,
30 STATE,
31 NEXT_PARAMETER,
32 RESULT,
33 METHOD_ADDRESS,
34 BRANCH_HOLDER,
35});
36
37MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
38
39std::unique_ptr<CachedMacro> MacroJITx64::Compile(const std::vector<u32>& code) {
40 return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
41}
42
43MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code)
44 : Xbyak::CodeGenerator(MAX_CODE_SIZE), code(code), maxwell3d(maxwell3d) {
45 Compile();
46}
47
48MacroJITx64Impl::~MacroJITx64Impl() = default;
49
50void MacroJITx64Impl::Execute(const std::vector<u32>& parameters, u32 method) {
51 MICROPROFILE_SCOPE(MacroJitExecute);
52 ASSERT_OR_EXECUTE(program != nullptr, { return; });
53 JITState state{};
54 state.maxwell3d = &maxwell3d;
55 state.registers = {};
56 state.parameters = parameters.data();
57 program(&state);
58}
59
60void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) {
61 const bool is_a_zero = opcode.src_a == 0;
62 const bool is_b_zero = opcode.src_b == 0;
63 const bool valid_operation = !is_a_zero && !is_b_zero;
64 const bool is_move_operation = !is_a_zero && is_b_zero;
65 const bool has_zero_register = is_a_zero || is_b_zero;
66
67 Xbyak::Reg64 src_a;
68 Xbyak::Reg32 src_b;
69
70 if (!optimizer.zero_reg_skip) {
71 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
72 src_b = Compile_GetRegister(opcode.src_b, ebx);
73 } else {
74 if (!is_a_zero) {
75 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
76 }
77 if (!is_b_zero) {
78 src_b = Compile_GetRegister(opcode.src_b, ebx);
79 }
80 }
81 Xbyak::Label skip_carry{};
82
83 bool has_emitted = false;
84
85 switch (opcode.alu_operation) {
86 case Macro::ALUOperation::Add:
87 if (optimizer.zero_reg_skip) {
88 if (valid_operation) {
89 add(src_a, src_b);
90 }
91 } else {
92 add(src_a, src_b);
93 }
94
95 if (!optimizer.can_skip_carry) {
96 setc(byte[STATE + offsetof(JITState, carry_flag)]);
97 }
98 break;
99 case Macro::ALUOperation::AddWithCarry:
100 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
101 adc(src_a, src_b);
102 setc(byte[STATE + offsetof(JITState, carry_flag)]);
103 break;
104 case Macro::ALUOperation::Subtract:
105 if (optimizer.zero_reg_skip) {
106 if (valid_operation) {
107 sub(src_a, src_b);
108 has_emitted = true;
109 }
110 } else {
111 sub(src_a, src_b);
112 has_emitted = true;
113 }
114 if (!optimizer.can_skip_carry && has_emitted) {
115 setc(byte[STATE + offsetof(JITState, carry_flag)]);
116 }
117 break;
118 case Macro::ALUOperation::SubtractWithBorrow:
119 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
120 sbb(src_a, src_b);
121 setc(byte[STATE + offsetof(JITState, carry_flag)]);
122 break;
123 case Macro::ALUOperation::Xor:
124 if (optimizer.zero_reg_skip) {
125 if (valid_operation) {
126 xor_(src_a, src_b);
127 }
128 } else {
129 xor_(src_a, src_b);
130 }
131 break;
132 case Macro::ALUOperation::Or:
133 if (optimizer.zero_reg_skip) {
134 if (valid_operation) {
135 or_(src_a, src_b);
136 }
137 } else {
138 or_(src_a, src_b);
139 }
140 break;
141 case Macro::ALUOperation::And:
142 if (optimizer.zero_reg_skip) {
143 if (!has_zero_register) {
144 and_(src_a, src_b);
145 }
146 } else {
147 and_(src_a, src_b);
148 }
149 break;
150 case Macro::ALUOperation::AndNot:
151 if (optimizer.zero_reg_skip) {
152 if (!is_a_zero) {
153 not_(src_b);
154 and_(src_a, src_b);
155 }
156 } else {
157 not_(src_b);
158 and_(src_a, src_b);
159 }
160 break;
161 case Macro::ALUOperation::Nand:
162 if (optimizer.zero_reg_skip) {
163 if (!is_a_zero) {
164 and_(src_a, src_b);
165 not_(src_a);
166 }
167 } else {
168 and_(src_a, src_b);
169 not_(src_a);
170 }
171 break;
172 default:
173 UNIMPLEMENTED_MSG("Unimplemented ALU operation {}",
174 static_cast<std::size_t>(opcode.alu_operation.Value()));
175 break;
176 }
177 Compile_ProcessResult(opcode.result_operation, opcode.dst);
178}
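
Because register 0 always reads as zero, many of the operations above are algebraically dead, which is what zero_reg_skip exploits. For a hypothetical opcode "r3 = r4 | r0":

    // is_b_zero is true, so valid_operation is false and the or_ is never emitted;
    // only the load of src_a into RESULT remains, since x | 0 == x.
    // With zero_reg_skip disabled, both register loads and the or_ always appear.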
179
180void MacroJITx64Impl::Compile_AddImmediate(Macro::Opcode opcode) {
181 if (optimizer.skip_dummy_addimmediate) {
182 // Games tend to use this as an exit instruction placeholder. It encodes an
183 // instruction without doing anything. In our case we can simply not emit anything.
184 if (opcode.result_operation == Macro::ResultOperation::Move && opcode.dst == 0) {
185 return;
186 }
187 }
188 // Check for redundant moves
189 if (optimizer.optimize_for_method_move &&
190 opcode.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
191 if (next_opcode.has_value()) {
192 const auto next = *next_opcode;
193 if (next.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
194 return;
195 }
196 }
197 }
198 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
199 if (opcode.immediate == 0) {
200 xor_(RESULT, RESULT);
201 } else {
202 mov(RESULT, opcode.immediate);
203 }
204 } else {
205 auto result = Compile_GetRegister(opcode.src_a, RESULT);
206 if (opcode.immediate > 1) {
207 add(result, opcode.immediate);
208 } else if (opcode.immediate == 1) {
209 inc(result);
210 } else if (opcode.immediate < 0) {
211 sub(result, opcode.immediate * -1);
212 }
213 }
214 Compile_ProcessResult(opcode.result_operation, opcode.dst);
215}
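
With immediate == 0 intentionally falling through as a no-op, the emitter above amounts to a small strength reduction:

    // imm == 0 -> nothing emitted; the register already holds the result
    // imm == 1 -> inc result           (shorter encoding than add)
    // imm >  1 -> add result, imm
    // imm <  0 -> sub result, -imm     (avoids encoding a negative immediate)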
216
217void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
218 auto dst = Compile_GetRegister(opcode.src_a, RESULT);
219 auto src = Compile_GetRegister(opcode.src_b, eax);
220
221 if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
222 shr(src, opcode.bf_src_bit);
223 } else if (opcode.bf_src_bit == 31) {
224 xor_(src, src);
225 }
226 // Don't bother masking the whole register since we're using a 32-bit register
227 if (opcode.bf_size != 31 && opcode.bf_size != 0) {
228 and_(src, opcode.GetBitfieldMask());
229 } else if (opcode.bf_size == 0) {
230 xor_(src, src);
231 }
232 if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
233 shl(src, opcode.bf_dst_bit);
234 } else if (opcode.bf_dst_bit == 31) {
235 xor_(src, src);
236 }
237
238 const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
239 if (mask != 0xffffffff) {
240 and_(dst, mask);
241 }
242 or_(dst, src);
243 Compile_ProcessResult(opcode.result_operation, opcode.dst);
244}
245
246void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
247 auto dst = Compile_GetRegister(opcode.src_a, ecx);
248 auto src = Compile_GetRegister(opcode.src_b, RESULT);
249
250 shr(src, cl); // x86 variable shifts take the count in cl (low byte of dst)
251 if (opcode.bf_size != 0 && opcode.bf_size != 31) {
252 and_(src, opcode.GetBitfieldMask());
253 } else if (opcode.bf_size == 0) {
254 xor_(src, src);
255 }
256
257 if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
258 shl(src, opcode.bf_dst_bit);
259 } else if (opcode.bf_dst_bit == 31) {
260 xor_(src, src);
261 }
262 Compile_ProcessResult(opcode.result_operation, opcode.dst);
263}
264
265void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
266 auto dst = Compile_GetRegister(opcode.src_a, ecx);
267 auto src = Compile_GetRegister(opcode.src_b, RESULT);
268
269 if (opcode.bf_src_bit != 0) {
270 shr(src, opcode.bf_src_bit);
271 }
272
273 if (opcode.bf_size != 31) {
274 and_(src, opcode.GetBitfieldMask());
275 }
276 shl(src, cl); // x86 variable shifts take the count in cl (low byte of dst)
277 Compile_ProcessResult(opcode.result_operation, opcode.dst);
278}
279
280static u32 Read(Engines::Maxwell3D* maxwell3d, u32 method) {
281 return maxwell3d->GetRegisterValue(method);
282}
283
284static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
285 maxwell3d->CallMethodFromMME(method_address.address, value);
286}
287
288void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
289 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
290 if (opcode.immediate == 0) {
291 xor_(RESULT, RESULT);
292 } else {
293 mov(RESULT, opcode.immediate);
294 }
295 } else {
296 auto result = Compile_GetRegister(opcode.src_a, RESULT);
297 if (opcode.immediate > 1) {
298 add(result, opcode.immediate);
299 } else if (opcode.immediate == 1) {
300 inc(result);
301 } else if (opcode.immediate < 0) {
302 sub(result, opcode.immediate * -1);
303 }
304 }
305 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
306 mov(Common::X64::ABI_PARAM1, qword[STATE]);
307 mov(Common::X64::ABI_PARAM2, RESULT);
308 Common::X64::CallFarFunction(*this, &Read);
309 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
310 mov(RESULT, Common::X64::ABI_RETURN.cvt32());
311 Compile_ProcessResult(opcode.result_operation, opcode.dst);
312}
313
314void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
315 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
316 mov(Common::X64::ABI_PARAM1, qword[STATE]);
317 mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS);
318 mov(Common::X64::ABI_PARAM3, value);
319 Common::X64::CallFarFunction(*this, &Send);
320 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
321
322 Xbyak::Label dont_process{};
323 // Get increment
324 test(METHOD_ADDRESS, 0x3f000);
325 // If zero, method address doesn't update
326 je(dont_process);
327
328 mov(ecx, METHOD_ADDRESS);
329 and_(METHOD_ADDRESS, 0xfff);
330 shr(ecx, 12);
331 and_(ecx, 0x3f);
332 lea(eax, ptr[rcx + METHOD_ADDRESS_64]);
333 sal(ecx, 12);
334 or_(eax, ecx);
335
336 mov(METHOD_ADDRESS, eax);
337
338 L(dont_process);
339}
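
The bit twiddling after the call re-implements the interpreter's method_address.address += increment without disturbing the increment field. Tracing it with an invented value:

    // METHOD_ADDRESS = 0x0046C0               ; address 0x6C0, increment 4
    // ecx = (0x0046C0 >> 12) & 0x3f = 4       ; extract the increment
    // METHOD_ADDRESS &= 0xfff        -> 0x6C0 ; isolate the address
    // eax = 0x6C0 + 4 = 0x6C4; eax |= 4 << 12 -> 0x0046C4, the repacked result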
340
341void Tegra::MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
342 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
343 const s32 jump_address =
344 static_cast<s32>(pc) + static_cast<s32>(opcode.GetBranchTarget() / sizeof(s32));
345
346 Xbyak::Label end;
347 auto value = Compile_GetRegister(opcode.src_a, eax);
348 test(value, value);
349 if (optimizer.has_delayed_pc) {
350 switch (opcode.branch_condition) {
351 case Macro::BranchCondition::Zero:
352 jne(end, T_NEAR);
353 break;
354 case Macro::BranchCondition::NotZero:
355 je(end, T_NEAR);
356 break;
357 }
358
359 if (opcode.branch_annul) {
360 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
361 jmp(labels[jump_address], T_NEAR);
362 } else {
363 Xbyak::Label handle_post_exit{};
364 Xbyak::Label skip{};
365 jmp(skip, T_NEAR);
366 if (opcode.is_exit) {
367 L(handle_post_exit);
368 // Execute 1 instruction
369 mov(BRANCH_HOLDER, end_of_code);
370 // Jump to next instruction to skip delay slot check
371 jmp(labels[jump_address], T_NEAR);
372 } else {
373 L(handle_post_exit);
374 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
375 jmp(labels[jump_address], T_NEAR);
376 }
377 L(skip);
378 mov(BRANCH_HOLDER, handle_post_exit);
379 jmp(delay_skip[pc], T_NEAR);
380 }
381 } else {
382 switch (opcode.branch_condition) {
383 case Macro::BranchCondition::Zero:
384 je(labels[jump_address], T_NEAR);
385 break;
386 case Macro::BranchCondition::NotZero:
387 jne(labels[jump_address], T_NEAR);
388 break;
389 }
390 }
391
392 L(end);
393}
394
395void Tegra::MacroJITx64Impl::Optimizer_ScanFlags() {
396 optimizer.can_skip_carry = true;
397 optimizer.has_delayed_pc = false;
398 for (auto raw_op : code) {
399 Macro::Opcode op{};
400 op.raw = raw_op;
401
402 if (op.operation == Macro::Operation::ALU) {
403 // Scan for any ALU operations which actually use the carry flag. If none exist in
404 // the current code, we can skip emitting the carry flag handling operations.
405 if (op.alu_operation == Macro::ALUOperation::AddWithCarry ||
406 op.alu_operation == Macro::ALUOperation::SubtractWithBorrow) {
407 optimizer.can_skip_carry = false;
408 }
409 }
410
411 if (op.operation == Macro::Operation::Branch) {
412 if (!op.branch_annul) {
413 optimizer.has_delayed_pc = true;
414 }
415 }
416 }
417}
418
419void MacroJITx64Impl::Compile() {
420 MICROPROFILE_SCOPE(MacroJitCompile);
421 bool keep_executing = true;
422 labels.fill(Xbyak::Label());
423
424 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
425 // JIT state
426 mov(STATE, Common::X64::ABI_PARAM1);
427 mov(PARAMETERS, qword[Common::X64::ABI_PARAM1 +
428 static_cast<Xbyak::uint32>(offsetof(JITState, parameters))]);
429 mov(REGISTERS, Common::X64::ABI_PARAM1);
430 add(REGISTERS, static_cast<Xbyak::uint32>(offsetof(JITState, registers)));
431 xor_(RESULT, RESULT);
432 xor_(METHOD_ADDRESS, METHOD_ADDRESS);
433 xor_(NEXT_PARAMETER, NEXT_PARAMETER);
434 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
435
436 mov(dword[REGISTERS + 4], Compile_FetchParameter());
437
438 // Track operands that read the hardwired zero register and skip them as no-ops
439 optimizer.zero_reg_skip = true;
440
441 // AddImmediate tends to be used as a NOP instruction. If we detect this, we can
442 // skip the entire code path and emit nothing at all.
443 optimizer.skip_dummy_addimmediate = true;
444
445 // SMO tends to emit a lot of unnecessary method moves; we can mitigate this by only
446 // emitting one if our register isn't "dirty"
447 optimizer.optimize_for_method_move = true;
448
449 // Check to see if we can skip emitting certain instructions
450 Optimizer_ScanFlags();
451
452 const u32 op_count = static_cast<u32>(code.size());
453 for (u32 i = 0; i < op_count; i++) {
454 if (i < op_count - 1) {
455 pc = i + 1;
456 next_opcode = GetOpCode();
457 } else {
458 next_opcode = {};
459 }
460 pc = i;
461 Compile_NextInstruction();
462 }
463
464 L(end_of_code);
465
466 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
467 ret();
468 ready();
469 program = getCode<ProgramType>();
470}
471
472bool MacroJITx64Impl::Compile_NextInstruction() {
473 const auto opcode = GetOpCode();
474 if (labels[pc].getAddress()) {
475 return false;
476 }
477
478 L(labels[pc]);
479
480 switch (opcode.operation) {
481 case Macro::Operation::ALU:
482 Compile_ALU(opcode);
483 break;
484 case Macro::Operation::AddImmediate:
485 Compile_AddImmediate(opcode);
486 break;
487 case Macro::Operation::ExtractInsert:
488 Compile_ExtractInsert(opcode);
489 break;
490 case Macro::Operation::ExtractShiftLeftImmediate:
491 Compile_ExtractShiftLeftImmediate(opcode);
492 break;
493 case Macro::Operation::ExtractShiftLeftRegister:
494 Compile_ExtractShiftLeftRegister(opcode);
495 break;
496 case Macro::Operation::Read:
497 Compile_Read(opcode);
498 break;
499 case Macro::Operation::Branch:
500 Compile_Branch(opcode);
501 break;
502 default:
503 UNIMPLEMENTED_MSG("Unimplemented opcode {}", opcode.operation.Value());
504 break;
505 }
506
507 if (optimizer.has_delayed_pc) {
508 if (opcode.is_exit) {
509 mov(rax, end_of_code);
510 test(BRANCH_HOLDER, BRANCH_HOLDER);
511 cmove(BRANCH_HOLDER, rax);
512 // Jump to next instruction to skip delay slot check
513 je(labels[pc + 1], T_NEAR);
514 } else {
515 // TODO(ogniK): Optimize delay slot branching
516 Xbyak::Label no_delay_slot{};
517 test(BRANCH_HOLDER, BRANCH_HOLDER);
518 je(no_delay_slot, T_NEAR);
519 mov(rax, BRANCH_HOLDER);
520 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
521 jmp(rax);
522 L(no_delay_slot);
523 }
524 L(delay_skip[pc]);
525 if (opcode.is_exit) {
526 return false;
527 }
528 } else {
529 test(BRANCH_HOLDER, BRANCH_HOLDER);
530 jne(end_of_code, T_NEAR);
531 if (opcode.is_exit) {
532 inc(BRANCH_HOLDER);
533 return false;
534 }
535 }
536 return true;
537}
538
539Xbyak::Reg32 Tegra::MacroJITx64Impl::Compile_FetchParameter() {
540 mov(eax, dword[PARAMETERS + NEXT_PARAMETER * sizeof(u32)]);
541 inc(NEXT_PARAMETER);
542 return eax;
543}
544
545Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
546 if (index == 0) {
547 // Register 0 is always zero
548 xor_(dst, dst);
549 } else {
550 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
551 }
552
553 return dst;
554}
555
556Xbyak::Reg64 Tegra::MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg64 dst) {
557 if (index == 0) {
558 // Register 0 is always zero
559 xor_(dst, dst);
560 } else {
561 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
562 }
563
564 return dst;
565}
566
567void Tegra::MacroJITx64Impl::Compile_WriteCarry(Xbyak::Reg64 dst) {
568 // Carry is set when the upper 32 bits of the result are non-zero.
569 xor_(ecx, ecx);
570 shr(dst, 32);
571 setne(cl);
572 mov(dword[STATE + offsetof(JITState, carry_flag)], ecx);
573}
574
575void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
576 auto SetRegister = [=](u32 reg, Xbyak::Reg32 result) {
577 // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
578 // register.
579 if (reg == 0) {
580 return;
581 }
582 mov(dword[REGISTERS + reg * sizeof(u32)], result);
583 };
584 auto SetMethodAddress = [=](Xbyak::Reg32 reg) { mov(METHOD_ADDRESS, reg); };
585
586 switch (operation) {
587 case Macro::ResultOperation::IgnoreAndFetch:
588 SetRegister(reg, Compile_FetchParameter());
589 break;
590 case Macro::ResultOperation::Move:
591 SetRegister(reg, RESULT);
592 break;
593 case Macro::ResultOperation::MoveAndSetMethod:
594 SetRegister(reg, RESULT);
595 SetMethodAddress(RESULT);
596 break;
597 case Macro::ResultOperation::FetchAndSend:
598 // Fetch parameter and send result.
599 SetRegister(reg, Compile_FetchParameter());
600 Compile_Send(RESULT);
601 break;
602 case Macro::ResultOperation::MoveAndSend:
603 // Move and send result.
604 SetRegister(reg, RESULT);
605 Compile_Send(RESULT);
606 break;
607 case Macro::ResultOperation::FetchAndSetMethod:
608 // Fetch parameter and use result as Method Address.
609 SetRegister(reg, Compile_FetchParameter());
610 SetMethodAddress(RESULT);
611 break;
612 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
613 // Move result and use as Method Address, then fetch and send parameter.
614 SetRegister(reg, RESULT);
615 SetMethodAddress(RESULT);
616 Compile_Send(Compile_FetchParameter());
617 break;
618 case Macro::ResultOperation::MoveAndSetMethodSend:
619 // Move result and use as Method Address, then send bits 12:17 of result.
620 SetRegister(reg, RESULT);
621 SetMethodAddress(RESULT);
622 shr(RESULT, 12);
623 and_(RESULT, 0b111111);
624 Compile_Send(RESULT);
625 break;
626 default:
627 UNIMPLEMENTED_MSG("Unimplemented macro operation {}", static_cast<std::size_t>(operation));
628 }
629}
630
631Macro::Opcode MacroJITx64Impl::GetOpCode() const {
632 ASSERT(pc < code.size());
633 return {code[pc]};
634}
635
636std::bitset<32> MacroJITx64Impl::PersistentCallerSavedRegs() const {
637 return PERSISTENT_REGISTERS & Common::X64::ABI_ALL_CALLER_SAVED;
638}
639
640} // namespace Tegra
diff --git a/src/video_core/macro/macro_jit_x64.h b/src/video_core/macro/macro_jit_x64.h
new file mode 100644
index 000000000..71f738b9a
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.h
@@ -0,0 +1,100 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <bitset>
9#include <xbyak.h>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12#include "common/x64/xbyak_abi.h"
13#include "video_core/macro/macro.h"
14
15namespace Tegra {
16
17namespace Engines {
18class Maxwell3D;
19}
20
21/// MAX_CODE_SIZE is arbitrarily chosen, based on the games currently known to boot
22constexpr size_t MAX_CODE_SIZE = 0x10000;
23
24class MacroJITx64 final : public MacroEngine {
25public:
26 explicit MacroJITx64(Engines::Maxwell3D& maxwell3d);
27
28protected:
29 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
30
31private:
32 Engines::Maxwell3D& maxwell3d;
33};
34
35class MacroJITx64Impl : public Xbyak::CodeGenerator, public CachedMacro {
36public:
37 MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
38 ~MacroJITx64Impl();
39
40 void Execute(const std::vector<u32>& parameters, u32 method) override;
41
42 void Compile_ALU(Macro::Opcode opcode);
43 void Compile_AddImmediate(Macro::Opcode opcode);
44 void Compile_ExtractInsert(Macro::Opcode opcode);
45 void Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode);
46 void Compile_ExtractShiftLeftRegister(Macro::Opcode opcode);
47 void Compile_Read(Macro::Opcode opcode);
48 void Compile_Branch(Macro::Opcode opcode);
49
50private:
51 void Optimizer_ScanFlags();
52
53 void Compile();
54 bool Compile_NextInstruction();
55
56 Xbyak::Reg32 Compile_FetchParameter();
57 Xbyak::Reg32 Compile_GetRegister(u32 index, Xbyak::Reg32 dst);
58 Xbyak::Reg64 Compile_GetRegister(u32 index, Xbyak::Reg64 dst);
59 void Compile_WriteCarry(Xbyak::Reg64 dst);
60
61 void Compile_ProcessResult(Macro::ResultOperation operation, u32 reg);
62 void Compile_Send(Xbyak::Reg32 value);
63
64 Macro::Opcode GetOpCode() const;
65 std::bitset<32> PersistentCallerSavedRegs() const;
66
67 struct JITState {
68 Engines::Maxwell3D* maxwell3d{};
69 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers{};
70 const u32* parameters{};
71 u32 carry_flag{};
72 };
73 static_assert(offsetof(JITState, maxwell3d) == 0, "Maxwell3D is not at 0x0");
74 using ProgramType = void (*)(JITState*);
75
76 struct OptimizerState {
77 bool can_skip_carry{};
78 bool has_delayed_pc{};
79 bool zero_reg_skip{};
80 bool skip_dummy_addimmediate{};
81 bool optimize_for_method_move{};
82 };
83 OptimizerState optimizer{};
84
85 std::optional<Macro::Opcode> next_opcode{};
86 ProgramType program{nullptr};
87
88 std::array<Xbyak::Label, MAX_CODE_SIZE> labels;
89 std::array<Xbyak::Label, MAX_CODE_SIZE> delay_skip;
90 Xbyak::Label end_of_code{};
91
92 bool is_delay_slot{};
93 u32 pc{};
94 std::optional<u32> delayed_pc;
95
96 const std::vector<u32>& code;
97 Engines::Maxwell3D& maxwell3d;
98};
99
100} // namespace Tegra
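
For reference, GetMacroEngine (declared in macro.h above, defined in macro.cpp which this view does not expand) plausibly selects between the two engines along these lines; the disable_macro_jit toggle is an assumption based on the one-line settings.h addition in this change:

    std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
        if (Settings::values.disable_macro_jit) {
            return std::make_unique<MacroInterpreter>(maxwell3d);
        }
    #ifdef ARCHITECTURE_x86_64
        return std::make_unique<MacroJITx64>(maxwell3d);
    #else
        // No JIT backend for this host; fall back to the interpreter.
        return std::make_unique<MacroInterpreter>(maxwell3d);
    #endif
    }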
diff --git a/src/video_core/rasterizer_cache.cpp b/src/video_core/rasterizer_cache.cpp
deleted file mode 100644
index 093b2cdf4..000000000
--- a/src/video_core/rasterizer_cache.cpp
+++ /dev/null
@@ -1,7 +0,0 @@
1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "video_core/rasterizer_cache.h"
6
7RasterizerCacheObject::~RasterizerCacheObject() = default;
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h
deleted file mode 100644
index 096ee337c..000000000
--- a/src/video_core/rasterizer_cache.h
+++ /dev/null
@@ -1,253 +0,0 @@
1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <mutex>
8#include <set>
9#include <unordered_map>
10
11#include <boost/icl/interval_map.hpp>
12#include <boost/range/iterator_range_core.hpp>
13
14#include "common/common_types.h"
15#include "core/settings.h"
16#include "video_core/gpu.h"
17#include "video_core/rasterizer_interface.h"
18
19class RasterizerCacheObject {
20public:
21 explicit RasterizerCacheObject(const VAddr cpu_addr) : cpu_addr{cpu_addr} {}
22
23 virtual ~RasterizerCacheObject();
24
25 VAddr GetCpuAddr() const {
26 return cpu_addr;
27 }
28
29 /// Gets the size of the shader in guest memory, required for cache management
30 virtual std::size_t GetSizeInBytes() const = 0;
31
32 /// Sets whether the cached object should be considered registered
33 void SetIsRegistered(bool registered) {
34 is_registered = registered;
35 }
36
37 /// Returns true if the cached object is registered
38 bool IsRegistered() const {
39 return is_registered;
40 }
41
42 /// Returns true if the cached object is dirty
43 bool IsDirty() const {
44 return is_dirty;
45 }
46
47 /// Returns ticks from when this cached object was last modified
48 u64 GetLastModifiedTicks() const {
49 return last_modified_ticks;
50 }
51
52 /// Marks an object as recently modified, used to specify whether it is clean or dirty
53 template <class T>
54 void MarkAsModified(bool dirty, T& cache) {
55 is_dirty = dirty;
56 last_modified_ticks = cache.GetModifiedTicks();
57 }
58
59 void SetMemoryMarked(bool is_memory_marked_) {
60 is_memory_marked = is_memory_marked_;
61 }
62
63 bool IsMemoryMarked() const {
64 return is_memory_marked;
65 }
66
67 void SetSyncPending(bool is_sync_pending_) {
68 is_sync_pending = is_sync_pending_;
69 }
70
71 bool IsSyncPending() const {
72 return is_sync_pending;
73 }
74
75private:
76 bool is_registered{}; ///< Whether the object is currently registered with the cache
77 bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory)
78 bool is_memory_marked{}; ///< Whether the object is marking rasterizer memory.
79 bool is_sync_pending{}; ///< Whether the object is pending deletion.
80 u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing
81 VAddr cpu_addr{}; ///< Cpu address memory, unique from emulated virtual address space
82};
83
84template <class T>
85class RasterizerCache : NonCopyable {
86 friend class RasterizerCacheObject;
87
88public:
89 explicit RasterizerCache(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {}
90
91 /// Write any cached resources overlapping the specified region back to memory
92 void FlushRegion(VAddr addr, std::size_t size) {
93 std::lock_guard lock{mutex};
94
95 const auto& objects{GetSortedObjectsFromRegion(addr, size)};
96 for (auto& object : objects) {
97 FlushObject(object);
98 }
99 }
100
101 /// Mark the specified region as being invalidated
102 void InvalidateRegion(VAddr addr, u64 size) {
103 std::lock_guard lock{mutex};
104
105 const auto& objects{GetSortedObjectsFromRegion(addr, size)};
106 for (auto& object : objects) {
107 if (!object->IsRegistered()) {
108 // Skip duplicates
109 continue;
110 }
111 Unregister(object);
112 }
113 }
114
115 void OnCPUWrite(VAddr addr, std::size_t size) {
116 std::lock_guard lock{mutex};
117
118 for (const auto& object : GetSortedObjectsFromRegion(addr, size)) {
119 if (object->IsRegistered()) {
120 UnmarkMemory(object);
121 object->SetSyncPending(true);
122 marked_for_unregister.emplace_back(object);
123 }
124 }
125 }
126
127 void SyncGuestHost() {
128 std::lock_guard lock{mutex};
129
130 for (const auto& object : marked_for_unregister) {
131 if (object->IsRegistered()) {
132 object->SetSyncPending(false);
133 Unregister(object);
134 }
135 }
136 marked_for_unregister.clear();
137 }
138
139 /// Invalidates everything in the cache
140 void InvalidateAll() {
141 std::lock_guard lock{mutex};
142
143 while (interval_cache.begin() != interval_cache.end()) {
144 Unregister(*interval_cache.begin()->second.begin());
145 }
146 }
147
148protected:
149 /// Tries to get an object from the cache with the specified cache address
150 T TryGet(VAddr addr) const {
151 const auto iter = map_cache.find(addr);
152 if (iter != map_cache.end())
153 return iter->second;
154 return nullptr;
155 }
156
157 /// Register an object into the cache
158 virtual void Register(const T& object) {
159 std::lock_guard lock{mutex};
160
161 object->SetIsRegistered(true);
162 interval_cache.add({GetInterval(object), ObjectSet{object}});
163 map_cache.insert({object->GetCpuAddr(), object});
164 rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), 1);
165 object->SetMemoryMarked(true);
166 }
167
168 /// Unregisters an object from the cache
169 virtual void Unregister(const T& object) {
170 std::lock_guard lock{mutex};
171
172 UnmarkMemory(object);
173 object->SetIsRegistered(false);
174 if (object->IsSyncPending()) {
175 marked_for_unregister.remove(object);
176 object->SetSyncPending(false);
177 }
178 const VAddr addr = object->GetCpuAddr();
179 interval_cache.subtract({GetInterval(object), ObjectSet{object}});
180 map_cache.erase(addr);
181 }
182
183 void UnmarkMemory(const T& object) {
184 if (!object->IsMemoryMarked()) {
185 return;
186 }
187 rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1);
188 object->SetMemoryMarked(false);
189 }
190
191 /// Returns a ticks counter used for tracking when cached objects were last modified
192 u64 GetModifiedTicks() {
193 std::lock_guard lock{mutex};
194
195 return ++modified_ticks;
196 }
197
198 virtual void FlushObjectInner(const T& object) = 0;
199
200 /// Flushes the specified object, updating appropriate cache state as needed
201 void FlushObject(const T& object) {
202 std::lock_guard lock{mutex};
203
204 if (!object->IsDirty()) {
205 return;
206 }
207 FlushObjectInner(object);
208 object->MarkAsModified(false, *this);
209 }
210
211 std::recursive_mutex mutex;
212
213private:
214 /// Returns a list of cached objects from the specified memory region, ordered by access time
215 std::vector<T> GetSortedObjectsFromRegion(VAddr addr, u64 size) {
216 if (size == 0) {
217 return {};
218 }
219
220 std::vector<T> objects;
221 const ObjectInterval interval{addr, addr + size};
222 for (auto& pair : boost::make_iterator_range(interval_cache.equal_range(interval))) {
223 for (auto& cached_object : pair.second) {
224 if (!cached_object) {
225 continue;
226 }
227 objects.push_back(cached_object);
228 }
229 }
230
231 std::sort(objects.begin(), objects.end(), [](const T& a, const T& b) -> bool {
232 return a->GetLastModifiedTicks() < b->GetLastModifiedTicks();
233 });
234
235 return objects;
236 }
237
238 using ObjectSet = std::set<T>;
239 using ObjectCache = std::unordered_map<VAddr, T>;
240 using IntervalCache = boost::icl::interval_map<VAddr, ObjectSet>;
241 using ObjectInterval = typename IntervalCache::interval_type;
242
243 static auto GetInterval(const T& object) {
244 return ObjectInterval::right_open(object->GetCpuAddr(),
245 object->GetCpuAddr() + object->GetSizeInBytes());
246 }
247
248 ObjectCache map_cache;
249 IntervalCache interval_cache; ///< Cache of objects
250 u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing
251 VideoCore::RasterizerInterface& rasterizer;
252 std::list<T> marked_for_unregister;
253};
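
The removed cache's region bookkeeping leaned on boost::icl's interval_map. A self-contained sketch of that pattern, with simplified types (illustrative code, not yuzu's):

    #include <cassert>
    #include <set>
    #include <boost/icl/interval_map.hpp>
    #include <boost/range/iterator_range_core.hpp>

    void IntervalLookupExample() {
        using IntervalCache = boost::icl::interval_map<unsigned long, std::set<int>>;
        using ObjectInterval = IntervalCache::interval_type;

        IntervalCache cache;
        // Register: add the object's [addr, addr + size) interval, as Register() did.
        cache.add({ObjectInterval::right_open(0x1000, 0x1800), std::set<int>{1}});

        // Query: equal_range yields every stored interval overlapping the region,
        // mirroring GetSortedObjectsFromRegion() above.
        const auto query = ObjectInterval::right_open(0x1400, 0x2000);
        for (const auto& pair : boost::make_iterator_range(cache.equal_range(query))) {
            assert(pair.second.count(1) == 1); // [0x1000, 0x1800) overlaps the query
        }
    }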
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 9964ea894..ad0577a4f 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -22,13 +22,12 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
22 22
23MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128)); 23MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128));
24 24
25CachedBufferBlock::CachedBufferBlock(VAddr cpu_addr, const std::size_t size) 25Buffer::Buffer(VAddr cpu_addr, const std::size_t size) : VideoCommon::BufferBlock{cpu_addr, size} {
26 : VideoCommon::BufferBlock{cpu_addr, size} {
27 gl_buffer.Create(); 26 gl_buffer.Create();
28 glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW); 27 glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW);
29} 28}
30 29
31CachedBufferBlock::~CachedBufferBlock() = default; 30Buffer::~Buffer() = default;
32 31
33OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system, 32OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system,
34 const Device& device, std::size_t stream_size) 33 const Device& device, std::size_t stream_size)
@@ -48,12 +47,8 @@ OGLBufferCache::~OGLBufferCache() {
48 glDeleteBuffers(static_cast<GLsizei>(std::size(cbufs)), std::data(cbufs)); 47 glDeleteBuffers(static_cast<GLsizei>(std::size(cbufs)), std::data(cbufs));
49} 48}
50 49
51Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) { 50std::shared_ptr<Buffer> OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
52 return std::make_shared<CachedBufferBlock>(cpu_addr, size); 51 return std::make_shared<Buffer>(cpu_addr, size);
53}
54
55GLuint OGLBufferCache::ToHandle(const Buffer& buffer) {
56 return buffer->GetHandle();
57} 52}
58 53
59GLuint OGLBufferCache::GetEmptyBuffer(std::size_t) { 54GLuint OGLBufferCache::GetEmptyBuffer(std::size_t) {
@@ -62,7 +57,7 @@ GLuint OGLBufferCache::GetEmptyBuffer(std::size_t) {
62 57
63void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, 58void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
64 const u8* data) { 59 const u8* data) {
65 glNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset), 60 glNamedBufferSubData(buffer.Handle(), static_cast<GLintptr>(offset),
66 static_cast<GLsizeiptr>(size), data); 61 static_cast<GLsizeiptr>(size), data);
67} 62}
68 63
@@ -70,20 +65,20 @@ void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
70 u8* data) { 65 u8* data) {
71 MICROPROFILE_SCOPE(OpenGL_Buffer_Download); 66 MICROPROFILE_SCOPE(OpenGL_Buffer_Download);
72 glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT); 67 glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
73 glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset), 68 glGetNamedBufferSubData(buffer.Handle(), static_cast<GLintptr>(offset),
74 static_cast<GLsizeiptr>(size), data); 69 static_cast<GLsizeiptr>(size), data);
75} 70}
76 71
77void OGLBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset, 72void OGLBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
78 std::size_t dst_offset, std::size_t size) { 73 std::size_t dst_offset, std::size_t size) {
79 glCopyNamedBufferSubData(src->GetHandle(), dst->GetHandle(), static_cast<GLintptr>(src_offset), 74 glCopyNamedBufferSubData(src.Handle(), dst.Handle(), static_cast<GLintptr>(src_offset),
80 static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(size)); 75 static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(size));
81} 76}
82 77
83OGLBufferCache::BufferInfo OGLBufferCache::ConstBufferUpload(const void* raw_pointer, 78OGLBufferCache::BufferInfo OGLBufferCache::ConstBufferUpload(const void* raw_pointer,
84 std::size_t size) { 79 std::size_t size) {
85 DEBUG_ASSERT(cbuf_cursor < std::size(cbufs)); 80 DEBUG_ASSERT(cbuf_cursor < std::size(cbufs));
86 const GLuint& cbuf = cbufs[cbuf_cursor++]; 81 const GLuint cbuf = cbufs[cbuf_cursor++];
87 glNamedBufferSubData(cbuf, 0, static_cast<GLsizeiptr>(size), raw_pointer); 82 glNamedBufferSubData(cbuf, 0, static_cast<GLsizeiptr>(size), raw_pointer);
88 return {cbuf, 0}; 83 return {cbuf, 0};
89} 84}
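
ConstBufferUpload above serves uploads out of a fixed pool of pre-created GL buffers, advancing a cursor per call (the cursor is rewound elsewhere, outside this hunk). A condensed sketch of the pattern, with the pool size and setup as illustrative assumptions:

    // A small pool of persistent constant buffers handed out round-robin.
    // The buffers are created once up front (glCreateBuffers + glNamedBufferData);
    // cbuf_cursor is rewound before each batch of uploads.
    std::array<GLuint, 16> cbufs{};
    std::size_t cbuf_cursor = 0;

    GLuint NextConstBuffer(const void* data, GLsizeiptr size) {
        const GLuint cbuf = cbufs[cbuf_cursor++];
        glNamedBufferSubData(cbuf, 0, size, data);
        return cbuf;
    }

Taking the handle by value (rather than the previous const reference) also avoids holding a reference into the array while the cursor advances.
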
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index a9e86cfc7..a49aaf9c4 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -10,7 +10,6 @@
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/buffer_cache/buffer_cache.h" 11#include "video_core/buffer_cache/buffer_cache.h"
12#include "video_core/engines/maxwell_3d.h" 12#include "video_core/engines/maxwell_3d.h"
13#include "video_core/rasterizer_cache.h"
14#include "video_core/renderer_opengl/gl_resource_manager.h" 13#include "video_core/renderer_opengl/gl_resource_manager.h"
15#include "video_core/renderer_opengl/gl_stream_buffer.h" 14#include "video_core/renderer_opengl/gl_stream_buffer.h"
16 15
@@ -24,17 +23,12 @@ class Device;
24class OGLStreamBuffer; 23class OGLStreamBuffer;
25class RasterizerOpenGL; 24class RasterizerOpenGL;
26 25
27class CachedBufferBlock; 26class Buffer : public VideoCommon::BufferBlock {
28
29using Buffer = std::shared_ptr<CachedBufferBlock>;
30using GenericBufferCache = VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer>;
31
32class CachedBufferBlock : public VideoCommon::BufferBlock {
33public: 27public:
34 explicit CachedBufferBlock(VAddr cpu_addr, const std::size_t size); 28 explicit Buffer(VAddr cpu_addr, const std::size_t size);
35 ~CachedBufferBlock(); 29 ~Buffer();
36 30
37 GLuint GetHandle() const { 31 GLuint Handle() const {
38 return gl_buffer.handle; 32 return gl_buffer.handle;
39 } 33 }
40 34
@@ -42,6 +36,7 @@ private:
42 OGLBuffer gl_buffer; 36 OGLBuffer gl_buffer;
43}; 37};
44 38
39using GenericBufferCache = VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer>;
45class OGLBufferCache final : public GenericBufferCache { 40class OGLBufferCache final : public GenericBufferCache {
46public: 41public:
47 explicit OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system, 42 explicit OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system,
@@ -55,9 +50,7 @@ public:
55 } 50 }
56 51
57protected: 52protected:
58 Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override; 53 std::shared_ptr<Buffer> CreateBlock(VAddr cpu_addr, std::size_t size) override;
59
60 GLuint ToHandle(const Buffer& buffer) override;
61 54
62 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, 55 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
63 const u8* data) override; 56 const u8* data) override;
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index b772c37d9..890fc6c63 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -185,12 +185,20 @@ bool IsASTCSupported() {
185Device::Device() 185Device::Device()
186 : max_uniform_buffers{BuildMaxUniformBuffers()}, base_bindings{BuildBaseBindings()} { 186 : max_uniform_buffers{BuildMaxUniformBuffers()}, base_bindings{BuildBaseBindings()} {
187 const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR)); 187 const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
188 const auto renderer = reinterpret_cast<const char*>(glGetString(GL_RENDERER)); 188 const std::string_view version = reinterpret_cast<const char*>(glGetString(GL_VERSION));
189 const std::vector extensions = GetExtensions(); 189 const std::vector extensions = GetExtensions();
190 190
191 const bool is_nvidia = vendor == "NVIDIA Corporation"; 191 const bool is_nvidia = vendor == "NVIDIA Corporation";
192 const bool is_amd = vendor == "ATI Technologies Inc."; 192 const bool is_amd = vendor == "ATI Technologies Inc.";
193 193
194 bool disable_fast_buffer_sub_data = false;
195 if (is_nvidia && version == "4.6.0 NVIDIA 443.24") {
196 LOG_WARNING(
197 Render_OpenGL,
198 "Beta driver 443.24 is known to have issues. There might be performance issues.");
199 disable_fast_buffer_sub_data = true;
200 }
201
194 uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT); 202 uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT);
195 shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT); 203 shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT);
196 max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS); 204 max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS);
@@ -204,9 +212,10 @@ Device::Device()
204 has_variable_aoffi = TestVariableAoffi(); 212 has_variable_aoffi = TestVariableAoffi();
205 has_component_indexing_bug = is_amd; 213 has_component_indexing_bug = is_amd;
206 has_precise_bug = TestPreciseBug(); 214 has_precise_bug = TestPreciseBug();
207 has_fast_buffer_sub_data = is_nvidia; 215 has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
208 use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 && 216 use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 &&
209 GLAD_GL_NV_compute_program5; 217 GLAD_GL_NV_compute_program5 && GLAD_GL_NV_transform_feedback &&
218 GLAD_GL_NV_transform_feedback2;
210 219
211 LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi); 220 LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi);
212 LOG_INFO(Render_OpenGL, "Renderer_ComponentIndexingBug: {}", has_component_indexing_bug); 221 LOG_INFO(Render_OpenGL, "Renderer_ComponentIndexingBug: {}", has_component_indexing_bug);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 55e79aaf6..2d6c11320 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -30,6 +30,7 @@
30#include "video_core/renderer_opengl/gl_shader_cache.h" 30#include "video_core/renderer_opengl/gl_shader_cache.h"
31#include "video_core/renderer_opengl/maxwell_to_gl.h" 31#include "video_core/renderer_opengl/maxwell_to_gl.h"
32#include "video_core/renderer_opengl/renderer_opengl.h" 32#include "video_core/renderer_opengl/renderer_opengl.h"
33#include "video_core/shader_cache.h"
33 34
34namespace OpenGL { 35namespace OpenGL {
35 36
@@ -65,10 +66,22 @@ constexpr std::size_t NumSupportedVertexAttributes = 16;
65template <typename Engine, typename Entry> 66template <typename Engine, typename Entry>
66Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry, 67Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
67 ShaderType shader_type, std::size_t index = 0) { 68 ShaderType shader_type, std::size_t index = 0) {
69 if constexpr (std::is_same_v<Entry, SamplerEntry>) {
70 if (entry.is_separated) {
71 const u32 buffer_1 = entry.buffer;
72 const u32 buffer_2 = entry.secondary_buffer;
73 const u32 offset_1 = entry.offset;
74 const u32 offset_2 = entry.secondary_offset;
75 const u32 handle_1 = engine.AccessConstBuffer32(shader_type, buffer_1, offset_1);
76 const u32 handle_2 = engine.AccessConstBuffer32(shader_type, buffer_2, offset_2);
77 return engine.GetTextureInfo(handle_1 | handle_2);
78 }
79 }
68 if (entry.is_bindless) { 80 if (entry.is_bindless) {
69 const auto tex_handle = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset); 81 const u32 handle = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
70 return engine.GetTextureInfo(tex_handle); 82 return engine.GetTextureInfo(handle);
71 } 83 }
84
72 const auto& gpu_profile = engine.AccessGuestDriverProfile(); 85 const auto& gpu_profile = engine.AccessGuestDriverProfile();
73 const u32 offset = entry.offset + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize()); 86 const u32 offset = entry.offset + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
74 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) { 87 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
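
The new is_separated branch handles samplers whose texture handle is split across two const-buffer words; the two reads are OR'd back together before the usual lookup. A reduced sketch of the recombination (types abbreviated; the disjoint-bit-field layout is an assumption implied by the OR):

    #include <cstdint>
    using u32 = std::uint32_t;

    // Each half comes from its own const buffer and is expected to occupy
    // disjoint bit fields of the final handle, so a bitwise OR reassembles it.
    u32 CombineSeparatedSamplerHandle(u32 half_from_first_cbuf, u32 half_from_second_cbuf) {
        return half_from_first_cbuf | half_from_second_cbuf;
    }
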
@@ -93,6 +106,34 @@ std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer,
93 return buffer.size; 106 return buffer.size;
94} 107}
95 108
109/// Translates a hardware transform feedback location into ARB_transform_feedback3 tokens
110/// @param location Hardware location
111/// @return Pair holding the first and third arguments of a token stream entry
112/// @note See https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_transform_feedback3.txt
113std::pair<GLint, GLint> TransformFeedbackEnum(u8 location) {
114 const u8 index = location / 4;
115 if (index >= 8 && index <= 39) {
116 return {GL_GENERIC_ATTRIB_NV, index - 8};
117 }
118 if (index >= 48 && index <= 55) {
119 return {GL_TEXTURE_COORD_NV, index - 48};
120 }
121 switch (index) {
122 case 7:
123 return {GL_POSITION, 0};
124 case 40:
125 return {GL_PRIMARY_COLOR_NV, 0};
126 case 41:
127 return {GL_SECONDARY_COLOR_NV, 0};
128 case 42:
129 return {GL_BACK_PRIMARY_COLOR_NV, 0};
130 case 43:
131 return {GL_BACK_SECONDARY_COLOR_NV, 0};
132 }
133 UNIMPLEMENTED_MSG("index={}", static_cast<int>(index));
134 return {GL_POSITION, 0};
135}
136
96void oglEnable(GLenum cap, bool state) { 137void oglEnable(GLenum cap, bool state) {
97 (state ? glEnable : glDisable)(cap); 138 (state ? glEnable : glDisable)(cap);
98} 139}
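
To make TransformFeedbackEnum concrete: each hardware location addresses a single 32-bit component, so location / 4 yields the attribute index the NV token stream expects. A few worked values, assuming the function as written above:

    // location  28 -> index  7 -> {GL_POSITION, 0}
    // location  32 -> index  8 -> {GL_GENERIC_ATTRIB_NV, 0}  (generic range: 8..39)
    // location 192 -> index 48 -> {GL_TEXTURE_COORD_NV, 0}   (texcoord range: 48..55)
    const auto [token, index] = TransformFeedbackEnum(32);
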
@@ -282,7 +323,7 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
282 continue; 323 continue;
283 } 324 }
284 325
285 Shader shader{shader_cache.GetStageProgram(program)}; 326 Shader* const shader = shader_cache.GetStageProgram(program);
286 327
287 if (device.UseAssemblyShaders()) { 328 if (device.UseAssemblyShaders()) {
 288 // Check for ARB limitation. We only have 16 SSBOs per context state. To work around this 329 // Check for ARB limitation. We only have 16 SSBOs per context state. To work around this
@@ -576,7 +617,16 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
576 (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment()); 617 (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment());
577 618
578 // Prepare the vertex array. 619 // Prepare the vertex array.
579 buffer_cache.Map(buffer_size); 620 const bool invalidated = buffer_cache.Map(buffer_size);
621
622 if (invalidated) {
623 // When the stream buffer has been invalidated, we have to consider vertex buffers as dirty
624 auto& dirty = gpu.dirty.flags;
625 dirty[Dirty::VertexBuffers] = true;
626 for (int index = Dirty::VertexBuffer0; index <= Dirty::VertexBuffer31; ++index) {
627 dirty[index] = true;
628 }
629 }
580 630
581 // Prepare vertex array format. 631 // Prepare vertex array format.
582 SetupVertexFormat(); 632 SetupVertexFormat();
@@ -842,7 +892,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
842 return true; 892 return true;
843} 893}
844 894
845void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader) { 895void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, Shader* shader) {
846 static constexpr std::array PARAMETER_LUT = { 896 static constexpr std::array PARAMETER_LUT = {
847 GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV, GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV, 897 GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV, GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV,
848 GL_TESS_EVALUATION_PROGRAM_PARAMETER_BUFFER_NV, GL_GEOMETRY_PROGRAM_PARAMETER_BUFFER_NV, 898 GL_TESS_EVALUATION_PROGRAM_PARAMETER_BUFFER_NV, GL_GEOMETRY_PROGRAM_PARAMETER_BUFFER_NV,
@@ -872,7 +922,7 @@ void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, const Shad
872 } 922 }
873} 923}
874 924
875void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) { 925void RasterizerOpenGL::SetupComputeConstBuffers(Shader* kernel) {
876 MICROPROFILE_SCOPE(OpenGL_UBO); 926 MICROPROFILE_SCOPE(OpenGL_UBO);
877 const auto& launch_desc = system.GPU().KeplerCompute().launch_description; 927 const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
878 const auto& entries = kernel->GetEntries(); 928 const auto& entries = kernel->GetEntries();
@@ -941,7 +991,7 @@ void RasterizerOpenGL::SetupConstBuffer(GLenum stage, u32 binding,
941 } 991 }
942} 992}
943 993
944void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader) { 994void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, Shader* shader) {
945 auto& gpu{system.GPU()}; 995 auto& gpu{system.GPU()};
946 auto& memory_manager{gpu.MemoryManager()}; 996 auto& memory_manager{gpu.MemoryManager()};
947 const auto cbufs{gpu.Maxwell3D().state.shader_stages[stage_index]}; 997 const auto cbufs{gpu.Maxwell3D().state.shader_stages[stage_index]};
@@ -956,7 +1006,7 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shad
956 } 1006 }
957} 1007}
958 1008
959void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) { 1009void RasterizerOpenGL::SetupComputeGlobalMemory(Shader* kernel) {
960 auto& gpu{system.GPU()}; 1010 auto& gpu{system.GPU()};
961 auto& memory_manager{gpu.MemoryManager()}; 1011 auto& memory_manager{gpu.MemoryManager()};
962 const auto cbufs{gpu.KeplerCompute().launch_description.const_buffer_config}; 1012 const auto cbufs{gpu.KeplerCompute().launch_description.const_buffer_config};
@@ -979,7 +1029,7 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e
979 static_cast<GLsizeiptr>(size)); 1029 static_cast<GLsizeiptr>(size));
980} 1030}
981 1031
982void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader& shader) { 1032void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, Shader* shader) {
983 MICROPROFILE_SCOPE(OpenGL_Texture); 1033 MICROPROFILE_SCOPE(OpenGL_Texture);
984 const auto& maxwell3d = system.GPU().Maxwell3D(); 1034 const auto& maxwell3d = system.GPU().Maxwell3D();
985 u32 binding = device.GetBaseBindings(stage_index).sampler; 1035 u32 binding = device.GetBaseBindings(stage_index).sampler;
@@ -992,7 +1042,7 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader&
992 } 1042 }
993} 1043}
994 1044
995void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) { 1045void RasterizerOpenGL::SetupComputeTextures(Shader* kernel) {
996 MICROPROFILE_SCOPE(OpenGL_Texture); 1046 MICROPROFILE_SCOPE(OpenGL_Texture);
997 const auto& compute = system.GPU().KeplerCompute(); 1047 const auto& compute = system.GPU().KeplerCompute();
998 u32 binding = 0; 1048 u32 binding = 0;
@@ -1021,7 +1071,7 @@ void RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextu
1021 } 1071 }
1022} 1072}
1023 1073
1024void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, const Shader& shader) { 1074void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, Shader* shader) {
1025 const auto& maxwell3d = system.GPU().Maxwell3D(); 1075 const auto& maxwell3d = system.GPU().Maxwell3D();
1026 u32 binding = device.GetBaseBindings(stage_index).image; 1076 u32 binding = device.GetBaseBindings(stage_index).image;
1027 for (const auto& entry : shader->GetEntries().images) { 1077 for (const auto& entry : shader->GetEntries().images) {
@@ -1031,7 +1081,7 @@ void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, const Shader& sh
1031 } 1081 }
1032} 1082}
1033 1083
1034void RasterizerOpenGL::SetupComputeImages(const Shader& shader) { 1084void RasterizerOpenGL::SetupComputeImages(Shader* shader) {
1035 const auto& compute = system.GPU().KeplerCompute(); 1085 const auto& compute = system.GPU().KeplerCompute();
1036 u32 binding = 0; 1086 u32 binding = 0;
1037 for (const auto& entry : shader->GetEntries().images) { 1087 for (const auto& entry : shader->GetEntries().images) {
@@ -1547,12 +1597,70 @@ void RasterizerOpenGL::SyncFramebufferSRGB() {
1547 oglEnable(GL_FRAMEBUFFER_SRGB, gpu.regs.framebuffer_srgb); 1597 oglEnable(GL_FRAMEBUFFER_SRGB, gpu.regs.framebuffer_srgb);
1548} 1598}
1549 1599
1600void RasterizerOpenGL::SyncTransformFeedback() {
 1601 // TODO(Rodrigo): Inject SKIP_COMPONENTS*_NV when required. An UNIMPLEMENTED message will
 1602 // signal when that support becomes necessary.
1603 const auto& regs = system.GPU().Maxwell3D().regs;
1604
1605 static constexpr std::size_t STRIDE = 3;
1606 std::array<GLint, 128 * STRIDE * Maxwell::NumTransformFeedbackBuffers> attribs;
1607 std::array<GLint, Maxwell::NumTransformFeedbackBuffers> streams;
1608
1609 GLint* cursor = attribs.data();
1610 GLint* current_stream = streams.data();
1611
1612 for (std::size_t feedback = 0; feedback < Maxwell::NumTransformFeedbackBuffers; ++feedback) {
1613 const auto& layout = regs.tfb_layouts[feedback];
1614 UNIMPLEMENTED_IF_MSG(layout.stride != layout.varying_count * 4, "Stride padding");
1615 if (layout.varying_count == 0) {
1616 continue;
1617 }
1618
1619 *current_stream = static_cast<GLint>(feedback);
1620 if (current_stream != streams.data()) {
 1621 // When advancing to the next stream, push the expected GL_NEXT_BUFFER_NV token
1622 cursor[0] = GL_NEXT_BUFFER_NV;
1623 cursor[1] = 0;
1624 cursor[2] = 0;
1625 cursor += STRIDE;
1626 }
1627 ++current_stream;
1628
1629 const auto& locations = regs.tfb_varying_locs[feedback];
1630 std::optional<u8> current_index;
1631 for (u32 offset = 0; offset < layout.varying_count; ++offset) {
1632 const u8 location = locations[offset];
1633 const u8 index = location / 4;
1634
1635 if (current_index == index) {
 1636 // Increase the component count of the previous attribute
1637 ++cursor[-2];
1638 continue;
1639 }
1640 current_index = index;
1641
1642 std::tie(cursor[0], cursor[2]) = TransformFeedbackEnum(location);
1643 cursor[1] = 1;
1644 cursor += STRIDE;
1645 }
1646 }
1647
1648 const GLsizei num_attribs = static_cast<GLsizei>((cursor - attribs.data()) / STRIDE);
1649 const GLsizei num_strides = static_cast<GLsizei>(current_stream - streams.data());
1650 glTransformFeedbackStreamAttribsNV(num_attribs, attribs.data(), num_strides, streams.data(),
1651 GL_INTERLEAVED_ATTRIBS);
1652}
1653
1550void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) { 1654void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) {
1551 const auto& regs = system.GPU().Maxwell3D().regs; 1655 const auto& regs = system.GPU().Maxwell3D().regs;
1552 if (regs.tfb_enabled == 0) { 1656 if (regs.tfb_enabled == 0) {
1553 return; 1657 return;
1554 } 1658 }
1555 1659
1660 if (device.UseAssemblyShaders()) {
1661 SyncTransformFeedback();
1662 }
1663
1556 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) || 1664 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
1557 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) || 1665 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
1558 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry)); 1666 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry));
@@ -1579,6 +1687,10 @@ void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) {
1579 static_cast<GLsizeiptr>(size)); 1687 static_cast<GLsizeiptr>(size));
1580 } 1688 }
1581 1689
 1690 // We may have to call BeginTransformFeedbackNV here, since the two entry points appear to
 1691 // resolve to different implementations in Nvidia's driver (the function pointers differ). We
 1692 // use ARB_transform_feedback3 features with NV_transform_feedback interactions, and the ARB
 1693 // extension does not define interactions for plain BeginTransformFeedback. In practice, it works.
1582 glBeginTransformFeedback(GL_POINTS); 1694 glBeginTransformFeedback(GL_POINTS);
1583} 1695}
1584 1696
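
SyncTransformFeedback assembles a flat stream of (token, component count, index) triplets, inserting a GL_NEXT_BUFFER_NV triplet whenever it advances to the next transform feedback buffer; the triplet count passed to glTransformFeedbackStreamAttribsNV includes those separators. A hand-assembled example of the stream for two buffers (values illustrative):

    // Buffer 0 captures gl_Position; buffer 1 captures generic attribute 0.
    const GLint attribs[] = {
        GL_POSITION,          4, 0, // buffer 0: position, 4 components
        GL_NEXT_BUFFER_NV,    0, 0, // separator: advance to the next buffer
        GL_GENERIC_ATTRIB_NV, 4, 0, // buffer 1: generic attrib 0, 4 components
    };
    const GLint streams[] = {0, 1};
    glTransformFeedbackStreamAttribsNV(3, attribs, 2, streams, GL_INTERLEAVED_ATTRIBS);
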
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index f5dc56a0e..4f082592f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -19,7 +19,6 @@
19#include "video_core/engines/const_buffer_info.h" 19#include "video_core/engines/const_buffer_info.h"
20#include "video_core/engines/maxwell_3d.h" 20#include "video_core/engines/maxwell_3d.h"
21#include "video_core/rasterizer_accelerated.h" 21#include "video_core/rasterizer_accelerated.h"
22#include "video_core/rasterizer_cache.h"
23#include "video_core/rasterizer_interface.h" 22#include "video_core/rasterizer_interface.h"
24#include "video_core/renderer_opengl/gl_buffer_cache.h" 23#include "video_core/renderer_opengl/gl_buffer_cache.h"
25#include "video_core/renderer_opengl/gl_device.h" 24#include "video_core/renderer_opengl/gl_device.h"
@@ -100,10 +99,10 @@ private:
100 void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil); 99 void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil);
101 100
102 /// Configures the current constbuffers to use for the draw command. 101 /// Configures the current constbuffers to use for the draw command.
103 void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader); 102 void SetupDrawConstBuffers(std::size_t stage_index, Shader* shader);
104 103
105 /// Configures the current constbuffers to use for the kernel invocation. 104 /// Configures the current constbuffers to use for the kernel invocation.
106 void SetupComputeConstBuffers(const Shader& kernel); 105 void SetupComputeConstBuffers(Shader* kernel);
107 106
108 /// Configures a constant buffer. 107 /// Configures a constant buffer.
109 void SetupConstBuffer(GLenum stage, u32 binding, const Tegra::Engines::ConstBufferInfo& buffer, 108 void SetupConstBuffer(GLenum stage, u32 binding, const Tegra::Engines::ConstBufferInfo& buffer,
@@ -111,30 +110,30 @@ private:
111 std::size_t unified_offset); 110 std::size_t unified_offset);
112 111
113 /// Configures the current global memory entries to use for the draw command. 112 /// Configures the current global memory entries to use for the draw command.
114 void SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader); 113 void SetupDrawGlobalMemory(std::size_t stage_index, Shader* shader);
115 114
116 /// Configures the current global memory entries to use for the kernel invocation. 115 /// Configures the current global memory entries to use for the kernel invocation.
117 void SetupComputeGlobalMemory(const Shader& kernel); 116 void SetupComputeGlobalMemory(Shader* kernel);
118 117
119 /// Configures a constant buffer. 118 /// Configures a constant buffer.
120 void SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& entry, GPUVAddr gpu_addr, 119 void SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& entry, GPUVAddr gpu_addr,
121 std::size_t size); 120 std::size_t size);
122 121
123 /// Configures the current textures to use for the draw command. 122 /// Configures the current textures to use for the draw command.
124 void SetupDrawTextures(std::size_t stage_index, const Shader& shader); 123 void SetupDrawTextures(std::size_t stage_index, Shader* shader);
125 124
126 /// Configures the textures used in a compute shader. 125 /// Configures the textures used in a compute shader.
127 void SetupComputeTextures(const Shader& kernel); 126 void SetupComputeTextures(Shader* kernel);
128 127
129 /// Configures a texture. 128 /// Configures a texture.
130 void SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture, 129 void SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture,
131 const SamplerEntry& entry); 130 const SamplerEntry& entry);
132 131
133 /// Configures images in a graphics shader. 132 /// Configures images in a graphics shader.
134 void SetupDrawImages(std::size_t stage_index, const Shader& shader); 133 void SetupDrawImages(std::size_t stage_index, Shader* shader);
135 134
136 /// Configures images in a compute shader. 135 /// Configures images in a compute shader.
137 void SetupComputeImages(const Shader& shader); 136 void SetupComputeImages(Shader* shader);
138 137
139 /// Configures an image. 138 /// Configures an image.
140 void SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic, const ImageEntry& entry); 139 void SetupImage(u32 binding, const Tegra::Texture::TICEntry& tic, const ImageEntry& entry);
@@ -202,6 +201,10 @@ private:
202 /// Syncs the framebuffer sRGB state to match the guest state 201 /// Syncs the framebuffer sRGB state to match the guest state
203 void SyncFramebufferSRGB(); 202 void SyncFramebufferSRGB();
204 203
204 /// Syncs transform feedback state to match guest state
 205 /// @note Only valid when assembly shaders are in use
206 void SyncTransformFeedback();
207
205 /// Begin a transform feedback 208 /// Begin a transform feedback
206 void BeginTransformFeedback(GLenum primitive_mode); 209 void BeginTransformFeedback(GLenum primitive_mode);
207 210
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index a991ca64a..c28486b1d 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -29,6 +29,7 @@
29#include "video_core/shader/memory_util.h" 29#include "video_core/shader/memory_util.h"
30#include "video_core/shader/registry.h" 30#include "video_core/shader/registry.h"
31#include "video_core/shader/shader_ir.h" 31#include "video_core/shader/shader_ir.h"
32#include "video_core/shader_cache.h"
32 33
33namespace OpenGL { 34namespace OpenGL {
34 35
@@ -194,12 +195,9 @@ std::unordered_set<GLenum> GetSupportedFormats() {
194 195
195} // Anonymous namespace 196} // Anonymous namespace
196 197
197CachedShader::CachedShader(VAddr cpu_addr, std::size_t size_in_bytes, 198Shader::Shader(std::shared_ptr<VideoCommon::Shader::Registry> registry_, ShaderEntries entries_,
198 std::shared_ptr<VideoCommon::Shader::Registry> registry, 199 ProgramSharedPtr program_)
199 ShaderEntries entries, ProgramSharedPtr program_) 200 : registry{std::move(registry_)}, entries{std::move(entries_)}, program{std::move(program_)} {
200 : RasterizerCacheObject{cpu_addr}, registry{std::move(registry)}, entries{std::move(entries)},
201 size_in_bytes{size_in_bytes}, program{std::move(program_)} {
202 // Assign either the assembly program or source program. We can't have both.
203 handle = program->assembly_program.handle; 201 handle = program->assembly_program.handle;
204 if (handle == 0) { 202 if (handle == 0) {
205 handle = program->source_program.handle; 203 handle = program->source_program.handle;
@@ -207,16 +205,16 @@ CachedShader::CachedShader(VAddr cpu_addr, std::size_t size_in_bytes,
207 ASSERT(handle != 0); 205 ASSERT(handle != 0);
208} 206}
209 207
210CachedShader::~CachedShader() = default; 208Shader::~Shader() = default;
211 209
212GLuint CachedShader::GetHandle() const { 210GLuint Shader::GetHandle() const {
213 DEBUG_ASSERT(registry->IsConsistent()); 211 DEBUG_ASSERT(registry->IsConsistent());
214 return handle; 212 return handle;
215} 213}
216 214
217Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params, 215std::unique_ptr<Shader> Shader::CreateStageFromMemory(const ShaderParameters& params,
218 Maxwell::ShaderProgram program_type, ProgramCode code, 216 Maxwell::ShaderProgram program_type,
219 ProgramCode code_b) { 217 ProgramCode code, ProgramCode code_b) {
220 const auto shader_type = GetShaderType(program_type); 218 const auto shader_type = GetShaderType(program_type);
221 const std::size_t size_in_bytes = code.size() * sizeof(u64); 219 const std::size_t size_in_bytes = code.size() * sizeof(u64);
222 220
@@ -241,12 +239,12 @@ Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
241 entry.bindless_samplers = registry->GetBindlessSamplers(); 239 entry.bindless_samplers = registry->GetBindlessSamplers();
242 params.disk_cache.SaveEntry(std::move(entry)); 240 params.disk_cache.SaveEntry(std::move(entry));
243 241
244 return std::shared_ptr<CachedShader>( 242 return std::unique_ptr<Shader>(new Shader(
245 new CachedShader(params.cpu_addr, size_in_bytes, std::move(registry), 243 std::move(registry), MakeEntries(params.device, ir, shader_type), std::move(program)));
246 MakeEntries(params.device, ir, shader_type), std::move(program)));
247} 244}
248 245
249Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) { 246std::unique_ptr<Shader> Shader::CreateKernelFromMemory(const ShaderParameters& params,
247 ProgramCode code) {
250 const std::size_t size_in_bytes = code.size() * sizeof(u64); 248 const std::size_t size_in_bytes = code.size() * sizeof(u64);
251 249
252 auto& engine = params.system.GPU().KeplerCompute(); 250 auto& engine = params.system.GPU().KeplerCompute();
@@ -266,23 +264,23 @@ Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, Prog
266 entry.bindless_samplers = registry->GetBindlessSamplers(); 264 entry.bindless_samplers = registry->GetBindlessSamplers();
267 params.disk_cache.SaveEntry(std::move(entry)); 265 params.disk_cache.SaveEntry(std::move(entry));
268 266
269 return std::shared_ptr<CachedShader>( 267 return std::unique_ptr<Shader>(new Shader(std::move(registry),
270 new CachedShader(params.cpu_addr, size_in_bytes, std::move(registry), 268 MakeEntries(params.device, ir, ShaderType::Compute),
271 MakeEntries(params.device, ir, ShaderType::Compute), std::move(program))); 269 std::move(program)));
272} 270}
273 271
274Shader CachedShader::CreateFromCache(const ShaderParameters& params, 272std::unique_ptr<Shader> Shader::CreateFromCache(const ShaderParameters& params,
275 const PrecompiledShader& precompiled_shader, 273 const PrecompiledShader& precompiled_shader) {
276 std::size_t size_in_bytes) { 274 return std::unique_ptr<Shader>(new Shader(
277 return std::shared_ptr<CachedShader>( 275 precompiled_shader.registry, precompiled_shader.entries, precompiled_shader.program));
278 new CachedShader(params.cpu_addr, size_in_bytes, precompiled_shader.registry,
279 precompiled_shader.entries, precompiled_shader.program));
280} 276}
281 277
282ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system, 278ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
283 Core::Frontend::EmuWindow& emu_window, const Device& device) 279 Core::Frontend::EmuWindow& emu_window, const Device& device)
284 : RasterizerCache{rasterizer}, system{system}, emu_window{emu_window}, device{device}, 280 : VideoCommon::ShaderCache<Shader>{rasterizer}, system{system},
285 disk_cache{system} {} 281 emu_window{emu_window}, device{device}, disk_cache{system} {}
282
283ShaderCacheOpenGL::~ShaderCacheOpenGL() = default;
286 284
287void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, 285void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
288 const VideoCore::DiskResourceLoadCallback& callback) { 286 const VideoCore::DiskResourceLoadCallback& callback) {
@@ -436,7 +434,7 @@ ProgramSharedPtr ShaderCacheOpenGL::GeneratePrecompiledProgram(
436 return program; 434 return program;
437} 435}
438 436
439Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { 437Shader* ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
440 if (!system.GPU().Maxwell3D().dirty.flags[Dirty::Shaders]) { 438 if (!system.GPU().Maxwell3D().dirty.flags[Dirty::Shaders]) {
441 return last_shaders[static_cast<std::size_t>(program)]; 439 return last_shaders[static_cast<std::size_t>(program)];
442 } 440 }
@@ -446,8 +444,7 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
446 444
447 // Look up shader in the cache based on address 445 // Look up shader in the cache based on address
448 const auto cpu_addr{memory_manager.GpuToCpuAddress(address)}; 446 const auto cpu_addr{memory_manager.GpuToCpuAddress(address)};
449 Shader shader{cpu_addr ? TryGet(*cpu_addr) : null_shader}; 447 if (Shader* const shader{cpu_addr ? TryGet(*cpu_addr) : null_shader.get()}) {
450 if (shader) {
451 return last_shaders[static_cast<std::size_t>(program)] = shader; 448 return last_shaders[static_cast<std::size_t>(program)] = shader;
452 } 449 }
453 450
@@ -468,30 +465,29 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
468 const ShaderParameters params{system, disk_cache, device, 465 const ShaderParameters params{system, disk_cache, device,
469 *cpu_addr, host_ptr, unique_identifier}; 466 *cpu_addr, host_ptr, unique_identifier};
470 467
468 std::unique_ptr<Shader> shader;
471 const auto found = runtime_cache.find(unique_identifier); 469 const auto found = runtime_cache.find(unique_identifier);
472 if (found == runtime_cache.end()) { 470 if (found == runtime_cache.end()) {
473 shader = CachedShader::CreateStageFromMemory(params, program, std::move(code), 471 shader = Shader::CreateStageFromMemory(params, program, std::move(code), std::move(code_b));
474 std::move(code_b));
475 } else { 472 } else {
476 const std::size_t size_in_bytes = code.size() * sizeof(u64); 473 shader = Shader::CreateFromCache(params, found->second);
477 shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
478 } 474 }
479 475
476 Shader* const result = shader.get();
480 if (cpu_addr) { 477 if (cpu_addr) {
481 Register(shader); 478 Register(std::move(shader), *cpu_addr, code.size() * sizeof(u64));
482 } else { 479 } else {
483 null_shader = shader; 480 null_shader = std::move(shader);
484 } 481 }
485 482
486 return last_shaders[static_cast<std::size_t>(program)] = shader; 483 return last_shaders[static_cast<std::size_t>(program)] = result;
487} 484}
488 485
489Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { 486Shader* ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
490 auto& memory_manager{system.GPU().MemoryManager()}; 487 auto& memory_manager{system.GPU().MemoryManager()};
491 const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)}; 488 const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)};
492 489
493 auto kernel = cpu_addr ? TryGet(*cpu_addr) : null_kernel; 490 if (Shader* const kernel = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get()) {
494 if (kernel) {
495 return kernel; 491 return kernel;
496 } 492 }
497 493
@@ -503,20 +499,21 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
503 const ShaderParameters params{system, disk_cache, device, 499 const ShaderParameters params{system, disk_cache, device,
504 *cpu_addr, host_ptr, unique_identifier}; 500 *cpu_addr, host_ptr, unique_identifier};
505 501
502 std::unique_ptr<Shader> kernel;
506 const auto found = runtime_cache.find(unique_identifier); 503 const auto found = runtime_cache.find(unique_identifier);
507 if (found == runtime_cache.end()) { 504 if (found == runtime_cache.end()) {
508 kernel = CachedShader::CreateKernelFromMemory(params, std::move(code)); 505 kernel = Shader::CreateKernelFromMemory(params, std::move(code));
509 } else { 506 } else {
510 const std::size_t size_in_bytes = code.size() * sizeof(u64); 507 kernel = Shader::CreateFromCache(params, found->second);
511 kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
512 } 508 }
513 509
510 Shader* const result = kernel.get();
514 if (cpu_addr) { 511 if (cpu_addr) {
515 Register(kernel); 512 Register(std::move(kernel), *cpu_addr, code.size() * sizeof(u64));
516 } else { 513 } else {
517 null_kernel = kernel; 514 null_kernel = std::move(kernel);
518 } 515 }
519 return kernel; 516 return result;
520} 517}
521 518
522} // namespace OpenGL 519} // namespace OpenGL
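
The shader cache rework replaces shared_ptr-based RasterizerCache entries with unique_ptr ownership inside the generic VideoCommon::ShaderCache: Register() moves ownership into the cache, callers only ever hold non-owning Shader* views, and null shaders (no CPU address) stay owned by the OpenGL cache itself. A simplified sketch of that ownership pattern, not the real ShaderCache API:

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    using VAddr = std::uint64_t; // yuzu's CPU virtual address alias

    template <typename T>
    class OwningCache {
    public:
        // The cache takes ownership of the entry; callers keep the raw pointer.
        T* Register(VAddr addr, std::unique_ptr<T> entry) {
            T* const view = entry.get();
            storage[addr] = std::move(entry);
            return view;
        }

        // Returns a non-owning pointer, or nullptr on a miss.
        T* TryGet(VAddr addr) const {
            const auto it = storage.find(addr);
            return it != storage.end() ? it->second.get() : nullptr;
        }

    private:
        std::unordered_map<VAddr, std::unique_ptr<T>> storage;
    };

Note the real Register() also takes the size in bytes, which previously lived on the removed GetSizeInBytes() override.
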
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index b2ae8d7f9..6848f1388 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -18,12 +18,12 @@
18 18
19#include "common/common_types.h" 19#include "common/common_types.h"
20#include "video_core/engines/shader_type.h" 20#include "video_core/engines/shader_type.h"
21#include "video_core/rasterizer_cache.h"
22#include "video_core/renderer_opengl/gl_resource_manager.h" 21#include "video_core/renderer_opengl/gl_resource_manager.h"
23#include "video_core/renderer_opengl/gl_shader_decompiler.h" 22#include "video_core/renderer_opengl/gl_shader_decompiler.h"
24#include "video_core/renderer_opengl/gl_shader_disk_cache.h" 23#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
25#include "video_core/shader/registry.h" 24#include "video_core/shader/registry.h"
26#include "video_core/shader/shader_ir.h" 25#include "video_core/shader/shader_ir.h"
26#include "video_core/shader_cache.h"
27 27
28namespace Core { 28namespace Core {
29class System; 29class System;
@@ -35,12 +35,10 @@ class EmuWindow;
35 35
36namespace OpenGL { 36namespace OpenGL {
37 37
38class CachedShader;
39class Device; 38class Device;
40class RasterizerOpenGL; 39class RasterizerOpenGL;
41struct UnspecializedShader; 40struct UnspecializedShader;
42 41
43using Shader = std::shared_ptr<CachedShader>;
44using Maxwell = Tegra::Engines::Maxwell3D::Regs; 42using Maxwell = Tegra::Engines::Maxwell3D::Regs;
45 43
46struct ProgramHandle { 44struct ProgramHandle {
@@ -64,62 +62,53 @@ struct ShaderParameters {
64 u64 unique_identifier; 62 u64 unique_identifier;
65}; 63};
66 64
67class CachedShader final : public RasterizerCacheObject { 65class Shader final {
68public: 66public:
69 ~CachedShader(); 67 ~Shader();
70 68
71 /// Gets the GL program handle for the shader 69 /// Gets the GL program handle for the shader
72 GLuint GetHandle() const; 70 GLuint GetHandle() const;
73 71
74 /// Returns the size in bytes of the shader
75 std::size_t GetSizeInBytes() const override {
76 return size_in_bytes;
77 }
78
79 /// Gets the shader entries for the shader 72 /// Gets the shader entries for the shader
80 const ShaderEntries& GetEntries() const { 73 const ShaderEntries& GetEntries() const {
81 return entries; 74 return entries;
82 } 75 }
83 76
84 static Shader CreateStageFromMemory(const ShaderParameters& params, 77 static std::unique_ptr<Shader> CreateStageFromMemory(const ShaderParameters& params,
85 Maxwell::ShaderProgram program_type, 78 Maxwell::ShaderProgram program_type,
86 ProgramCode program_code, ProgramCode program_code_b); 79 ProgramCode program_code,
87 static Shader CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code); 80 ProgramCode program_code_b);
81 static std::unique_ptr<Shader> CreateKernelFromMemory(const ShaderParameters& params,
82 ProgramCode code);
88 83
89 static Shader CreateFromCache(const ShaderParameters& params, 84 static std::unique_ptr<Shader> CreateFromCache(const ShaderParameters& params,
90 const PrecompiledShader& precompiled_shader, 85 const PrecompiledShader& precompiled_shader);
91 std::size_t size_in_bytes);
92 86
93private: 87private:
94 explicit CachedShader(VAddr cpu_addr, std::size_t size_in_bytes, 88 explicit Shader(std::shared_ptr<VideoCommon::Shader::Registry> registry, ShaderEntries entries,
95 std::shared_ptr<VideoCommon::Shader::Registry> registry, 89 ProgramSharedPtr program);
96 ShaderEntries entries, ProgramSharedPtr program);
97 90
98 std::shared_ptr<VideoCommon::Shader::Registry> registry; 91 std::shared_ptr<VideoCommon::Shader::Registry> registry;
99 ShaderEntries entries; 92 ShaderEntries entries;
100 std::size_t size_in_bytes = 0;
101 ProgramSharedPtr program; 93 ProgramSharedPtr program;
102 GLuint handle = 0; 94 GLuint handle = 0;
103}; 95};
104 96
105class ShaderCacheOpenGL final : public RasterizerCache<Shader> { 97class ShaderCacheOpenGL final : public VideoCommon::ShaderCache<Shader> {
106public: 98public:
107 explicit ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system, 99 explicit ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
108 Core::Frontend::EmuWindow& emu_window, const Device& device); 100 Core::Frontend::EmuWindow& emu_window, const Device& device);
101 ~ShaderCacheOpenGL() override;
109 102
110 /// Loads disk cache for the current game 103 /// Loads disk cache for the current game
111 void LoadDiskCache(const std::atomic_bool& stop_loading, 104 void LoadDiskCache(const std::atomic_bool& stop_loading,
112 const VideoCore::DiskResourceLoadCallback& callback); 105 const VideoCore::DiskResourceLoadCallback& callback);
113 106
114 /// Gets the current specified shader stage program 107 /// Gets the current specified shader stage program
115 Shader GetStageProgram(Maxwell::ShaderProgram program); 108 Shader* GetStageProgram(Maxwell::ShaderProgram program);
116 109
117 /// Gets a compute kernel in the passed address 110 /// Gets a compute kernel in the passed address
118 Shader GetComputeKernel(GPUVAddr code_addr); 111 Shader* GetComputeKernel(GPUVAddr code_addr);
119
120protected:
121 // We do not have to flush this cache as things in it are never modified by us.
122 void FlushObjectInner(const Shader& object) override {}
123 112
124private: 113private:
125 ProgramSharedPtr GeneratePrecompiledProgram( 114 ProgramSharedPtr GeneratePrecompiledProgram(
@@ -132,10 +121,10 @@ private:
132 ShaderDiskCacheOpenGL disk_cache; 121 ShaderDiskCacheOpenGL disk_cache;
133 std::unordered_map<u64, PrecompiledShader> runtime_cache; 122 std::unordered_map<u64, PrecompiledShader> runtime_cache;
134 123
135 Shader null_shader{}; 124 std::unique_ptr<Shader> null_shader;
136 Shader null_kernel{}; 125 std::unique_ptr<Shader> null_kernel;
137 126
138 std::array<Shader, Maxwell::MaxShaderProgram> last_shaders; 127 std::array<Shader*, Maxwell::MaxShaderProgram> last_shaders{};
139}; 128};
140 129
141} // namespace OpenGL 130} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 502b95973..d6e30b321 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -626,7 +626,9 @@ private:
626 break; 626 break;
627 } 627 }
628 } 628 }
629 if (stage != ShaderType::Vertex || device.HasVertexViewportLayer()) { 629
630 if (stage != ShaderType::Geometry &&
631 (stage != ShaderType::Vertex || device.HasVertexViewportLayer())) {
630 if (ir.UsesLayer()) { 632 if (ir.UsesLayer()) {
631 code.AddLine("int gl_Layer;"); 633 code.AddLine("int gl_Layer;");
632 } 634 }
@@ -655,6 +657,16 @@ private:
655 --code.scope; 657 --code.scope;
656 code.AddLine("}};"); 658 code.AddLine("}};");
657 code.AddNewLine(); 659 code.AddNewLine();
660
661 if (stage == ShaderType::Geometry) {
662 if (ir.UsesLayer()) {
663 code.AddLine("out int gl_Layer;");
664 }
665 if (ir.UsesViewportIndex()) {
666 code.AddLine("out int gl_ViewportIndex;");
667 }
668 }
669 code.AddNewLine();
658 } 670 }
659 671
660 void DeclareRegisters() { 672 void DeclareRegisters() {
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
index 9e95a122b..653c3f2f9 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
@@ -29,6 +29,8 @@ using VideoCommon::Shader::KeyMap;
29 29
30namespace { 30namespace {
31 31
32using VideoCommon::Shader::SeparateSamplerKey;
33
32using ShaderCacheVersionHash = std::array<u8, 64>; 34using ShaderCacheVersionHash = std::array<u8, 64>;
33 35
34struct ConstBufferKey { 36struct ConstBufferKey {
@@ -37,18 +39,26 @@ struct ConstBufferKey {
37 u32 value = 0; 39 u32 value = 0;
38}; 40};
39 41
40struct BoundSamplerKey { 42struct BoundSamplerEntry {
41 u32 offset = 0; 43 u32 offset = 0;
42 Tegra::Engines::SamplerDescriptor sampler; 44 Tegra::Engines::SamplerDescriptor sampler;
43}; 45};
44 46
45struct BindlessSamplerKey { 47struct SeparateSamplerEntry {
48 u32 cbuf1 = 0;
49 u32 cbuf2 = 0;
50 u32 offset1 = 0;
51 u32 offset2 = 0;
52 Tegra::Engines::SamplerDescriptor sampler;
53};
54
55struct BindlessSamplerEntry {
46 u32 cbuf = 0; 56 u32 cbuf = 0;
47 u32 offset = 0; 57 u32 offset = 0;
48 Tegra::Engines::SamplerDescriptor sampler; 58 Tegra::Engines::SamplerDescriptor sampler;
49}; 59};
50 60
51constexpr u32 NativeVersion = 20; 61constexpr u32 NativeVersion = 21;
52 62
53ShaderCacheVersionHash GetShaderCacheVersionHash() { 63ShaderCacheVersionHash GetShaderCacheVersionHash() {
54 ShaderCacheVersionHash hash{}; 64 ShaderCacheVersionHash hash{};
@@ -87,12 +97,14 @@ bool ShaderDiskCacheEntry::Load(FileUtil::IOFile& file) {
87 u32 texture_handler_size_value; 97 u32 texture_handler_size_value;
88 u32 num_keys; 98 u32 num_keys;
89 u32 num_bound_samplers; 99 u32 num_bound_samplers;
100 u32 num_separate_samplers;
90 u32 num_bindless_samplers; 101 u32 num_bindless_samplers;
91 if (file.ReadArray(&unique_identifier, 1) != 1 || file.ReadArray(&bound_buffer, 1) != 1 || 102 if (file.ReadArray(&unique_identifier, 1) != 1 || file.ReadArray(&bound_buffer, 1) != 1 ||
92 file.ReadArray(&is_texture_handler_size_known, 1) != 1 || 103 file.ReadArray(&is_texture_handler_size_known, 1) != 1 ||
93 file.ReadArray(&texture_handler_size_value, 1) != 1 || 104 file.ReadArray(&texture_handler_size_value, 1) != 1 ||
94 file.ReadArray(&graphics_info, 1) != 1 || file.ReadArray(&compute_info, 1) != 1 || 105 file.ReadArray(&graphics_info, 1) != 1 || file.ReadArray(&compute_info, 1) != 1 ||
95 file.ReadArray(&num_keys, 1) != 1 || file.ReadArray(&num_bound_samplers, 1) != 1 || 106 file.ReadArray(&num_keys, 1) != 1 || file.ReadArray(&num_bound_samplers, 1) != 1 ||
107 file.ReadArray(&num_separate_samplers, 1) != 1 ||
96 file.ReadArray(&num_bindless_samplers, 1) != 1) { 108 file.ReadArray(&num_bindless_samplers, 1) != 1) {
97 return false; 109 return false;
98 } 110 }
@@ -101,23 +113,32 @@ bool ShaderDiskCacheEntry::Load(FileUtil::IOFile& file) {
101 } 113 }
102 114
103 std::vector<ConstBufferKey> flat_keys(num_keys); 115 std::vector<ConstBufferKey> flat_keys(num_keys);
104 std::vector<BoundSamplerKey> flat_bound_samplers(num_bound_samplers); 116 std::vector<BoundSamplerEntry> flat_bound_samplers(num_bound_samplers);
105 std::vector<BindlessSamplerKey> flat_bindless_samplers(num_bindless_samplers); 117 std::vector<SeparateSamplerEntry> flat_separate_samplers(num_separate_samplers);
118 std::vector<BindlessSamplerEntry> flat_bindless_samplers(num_bindless_samplers);
106 if (file.ReadArray(flat_keys.data(), flat_keys.size()) != flat_keys.size() || 119 if (file.ReadArray(flat_keys.data(), flat_keys.size()) != flat_keys.size() ||
107 file.ReadArray(flat_bound_samplers.data(), flat_bound_samplers.size()) != 120 file.ReadArray(flat_bound_samplers.data(), flat_bound_samplers.size()) !=
108 flat_bound_samplers.size() || 121 flat_bound_samplers.size() ||
122 file.ReadArray(flat_separate_samplers.data(), flat_separate_samplers.size()) !=
123 flat_separate_samplers.size() ||
109 file.ReadArray(flat_bindless_samplers.data(), flat_bindless_samplers.size()) != 124 file.ReadArray(flat_bindless_samplers.data(), flat_bindless_samplers.size()) !=
110 flat_bindless_samplers.size()) { 125 flat_bindless_samplers.size()) {
111 return false; 126 return false;
112 } 127 }
113 for (const auto& key : flat_keys) { 128 for (const auto& entry : flat_keys) {
114 keys.insert({{key.cbuf, key.offset}, key.value}); 129 keys.insert({{entry.cbuf, entry.offset}, entry.value});
115 } 130 }
116 for (const auto& key : flat_bound_samplers) { 131 for (const auto& entry : flat_bound_samplers) {
117 bound_samplers.emplace(key.offset, key.sampler); 132 bound_samplers.emplace(entry.offset, entry.sampler);
118 } 133 }
119 for (const auto& key : flat_bindless_samplers) { 134 for (const auto& entry : flat_separate_samplers) {
120 bindless_samplers.insert({{key.cbuf, key.offset}, key.sampler}); 135 SeparateSamplerKey key;
136 key.buffers = {entry.cbuf1, entry.cbuf2};
137 key.offsets = {entry.offset1, entry.offset2};
138 separate_samplers.emplace(key, entry.sampler);
139 }
140 for (const auto& entry : flat_bindless_samplers) {
141 bindless_samplers.insert({{entry.cbuf, entry.offset}, entry.sampler});
121 } 142 }
122 143
123 return true; 144 return true;
@@ -142,6 +163,7 @@ bool ShaderDiskCacheEntry::Save(FileUtil::IOFile& file) const {
142 file.WriteObject(graphics_info) != 1 || file.WriteObject(compute_info) != 1 || 163 file.WriteObject(graphics_info) != 1 || file.WriteObject(compute_info) != 1 ||
143 file.WriteObject(static_cast<u32>(keys.size())) != 1 || 164 file.WriteObject(static_cast<u32>(keys.size())) != 1 ||
144 file.WriteObject(static_cast<u32>(bound_samplers.size())) != 1 || 165 file.WriteObject(static_cast<u32>(bound_samplers.size())) != 1 ||
166 file.WriteObject(static_cast<u32>(separate_samplers.size())) != 1 ||
145 file.WriteObject(static_cast<u32>(bindless_samplers.size())) != 1) { 167 file.WriteObject(static_cast<u32>(bindless_samplers.size())) != 1) {
146 return false; 168 return false;
147 } 169 }
@@ -152,22 +174,34 @@ bool ShaderDiskCacheEntry::Save(FileUtil::IOFile& file) const {
152 flat_keys.push_back(ConstBufferKey{address.first, address.second, value}); 174 flat_keys.push_back(ConstBufferKey{address.first, address.second, value});
153 } 175 }
154 176
155 std::vector<BoundSamplerKey> flat_bound_samplers; 177 std::vector<BoundSamplerEntry> flat_bound_samplers;
156 flat_bound_samplers.reserve(bound_samplers.size()); 178 flat_bound_samplers.reserve(bound_samplers.size());
157 for (const auto& [address, sampler] : bound_samplers) { 179 for (const auto& [address, sampler] : bound_samplers) {
158 flat_bound_samplers.push_back(BoundSamplerKey{address, sampler}); 180 flat_bound_samplers.push_back(BoundSamplerEntry{address, sampler});
181 }
182
183 std::vector<SeparateSamplerEntry> flat_separate_samplers;
184 flat_separate_samplers.reserve(separate_samplers.size());
185 for (const auto& [key, sampler] : separate_samplers) {
186 SeparateSamplerEntry entry;
187 std::tie(entry.cbuf1, entry.cbuf2) = key.buffers;
188 std::tie(entry.offset1, entry.offset2) = key.offsets;
189 entry.sampler = sampler;
190 flat_separate_samplers.push_back(entry);
159 } 191 }
160 192
161 std::vector<BindlessSamplerKey> flat_bindless_samplers; 193 std::vector<BindlessSamplerEntry> flat_bindless_samplers;
162 flat_bindless_samplers.reserve(bindless_samplers.size()); 194 flat_bindless_samplers.reserve(bindless_samplers.size());
163 for (const auto& [address, sampler] : bindless_samplers) { 195 for (const auto& [address, sampler] : bindless_samplers) {
164 flat_bindless_samplers.push_back( 196 flat_bindless_samplers.push_back(
165 BindlessSamplerKey{address.first, address.second, sampler}); 197 BindlessSamplerEntry{address.first, address.second, sampler});
166 } 198 }
167 199
168 return file.WriteArray(flat_keys.data(), flat_keys.size()) == flat_keys.size() && 200 return file.WriteArray(flat_keys.data(), flat_keys.size()) == flat_keys.size() &&
169 file.WriteArray(flat_bound_samplers.data(), flat_bound_samplers.size()) == 201 file.WriteArray(flat_bound_samplers.data(), flat_bound_samplers.size()) ==
170 flat_bound_samplers.size() && 202 flat_bound_samplers.size() &&
203 file.WriteArray(flat_separate_samplers.data(), flat_separate_samplers.size()) ==
204 flat_separate_samplers.size() &&
171 file.WriteArray(flat_bindless_samplers.data(), flat_bindless_samplers.size()) == 205 file.WriteArray(flat_bindless_samplers.data(), flat_bindless_samplers.size()) ==
172 flat_bindless_samplers.size(); 206 flat_bindless_samplers.size();
173} 207}
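
With the separate-sampler table added, the specialization data of a version-21 disk cache entry is a fixed header, four u32 counts, then the flat arrays in the same order the counts appear. Schematically, using the field names from this diff:

    // unique_identifier, bound_buffer, is_texture_handler_size_known,
    // texture_handler_size_value, graphics_info, compute_info
    // u32 num_keys, num_bound_samplers, num_separate_samplers, num_bindless_samplers
    // ConstBufferKey        flat_keys[num_keys]
    // BoundSamplerEntry     flat_bound_samplers[num_bound_samplers]
    // SeparateSamplerEntry  flat_separate_samplers[num_separate_samplers]
    // BindlessSamplerEntry  flat_bindless_samplers[num_bindless_samplers]

The NativeVersion bump from 20 to 21 is required: the extra count field shifts the binary layout, so older caches cannot be parsed and are discarded.
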
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h
index d5be52e40..a79cef0e9 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h
@@ -57,6 +57,7 @@ struct ShaderDiskCacheEntry {
57 VideoCommon::Shader::ComputeInfo compute_info; 57 VideoCommon::Shader::ComputeInfo compute_info;
58 VideoCommon::Shader::KeyMap keys; 58 VideoCommon::Shader::KeyMap keys;
59 VideoCommon::Shader::BoundSamplerMap bound_samplers; 59 VideoCommon::Shader::BoundSamplerMap bound_samplers;
60 VideoCommon::Shader::SeparateSamplerMap separate_samplers;
60 VideoCommon::Shader::BindlessSamplerMap bindless_samplers; 61 VideoCommon::Shader::BindlessSamplerMap bindless_samplers;
61}; 62};
62 63
diff --git a/src/video_core/renderer_opengl/gl_stream_buffer.cpp b/src/video_core/renderer_opengl/gl_stream_buffer.cpp
index 6ec328c53..932a2f69e 100644
--- a/src/video_core/renderer_opengl/gl_stream_buffer.cpp
+++ b/src/video_core/renderer_opengl/gl_stream_buffer.cpp
@@ -49,14 +49,6 @@ OGLStreamBuffer::~OGLStreamBuffer() {
49 gl_buffer.Release(); 49 gl_buffer.Release();
50} 50}
51 51
52GLuint OGLStreamBuffer::GetHandle() const {
53 return gl_buffer.handle;
54}
55
56GLsizeiptr OGLStreamBuffer::GetSize() const {
57 return buffer_size;
58}
59
60std::tuple<u8*, GLintptr, bool> OGLStreamBuffer::Map(GLsizeiptr size, GLintptr alignment) { 52std::tuple<u8*, GLintptr, bool> OGLStreamBuffer::Map(GLsizeiptr size, GLintptr alignment) {
61 ASSERT(size <= buffer_size); 53 ASSERT(size <= buffer_size);
62 ASSERT(alignment <= buffer_size); 54 ASSERT(alignment <= buffer_size);
diff --git a/src/video_core/renderer_opengl/gl_stream_buffer.h b/src/video_core/renderer_opengl/gl_stream_buffer.h
index f8383cbd4..866da3594 100644
--- a/src/video_core/renderer_opengl/gl_stream_buffer.h
+++ b/src/video_core/renderer_opengl/gl_stream_buffer.h
@@ -17,9 +17,6 @@ public:
17 bool use_persistent = true); 17 bool use_persistent = true);
18 ~OGLStreamBuffer(); 18 ~OGLStreamBuffer();
19 19
20 GLuint GetHandle() const;
21 GLsizeiptr GetSize() const;
22
23 /* 20 /*
24 * Allocates a linear chunk of memory in the GPU buffer with at least "size" bytes 21 * Allocates a linear chunk of memory in the GPU buffer with at least "size" bytes
25 * and the optional alignment requirement. 22 * and the optional alignment requirement.
@@ -32,6 +29,14 @@ public:
32 29
33 void Unmap(GLsizeiptr size); 30 void Unmap(GLsizeiptr size);
34 31
32 GLuint Handle() const {
33 return gl_buffer.handle;
34 }
35
36 GLsizeiptr Size() const {
37 return buffer_size;
38 }
39
35private: 40private:
36 OGLBuffer gl_buffer; 41 OGLBuffer gl_buffer;
37 42
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 57db5a08b..61505879b 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -263,9 +263,14 @@ CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& param
     target = GetTextureTarget(params.target);
     texture = CreateTexture(params, target, internal_format, texture_buffer);
     DecorateSurfaceName();
-    main_view = CreateViewInner(
-        ViewParams(params.target, 0, params.is_layered ? params.depth : 1, 0, params.num_levels),
-        true);
+
+    u32 num_layers = 1;
+    if (params.is_layered || params.target == SurfaceTarget::Texture3D) {
+        num_layers = params.depth;
+    }
+
+    main_view =
+        CreateViewInner(ViewParams(params.target, 0, num_layers, 0, params.num_levels), true);
 }
 
 CachedSurface::~CachedSurface() = default;
@@ -413,20 +418,23 @@ CachedSurfaceView::CachedSurfaceView(CachedSurface& surface, const ViewParams& p
 
 CachedSurfaceView::~CachedSurfaceView() = default;
 
-void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
+void CachedSurfaceView::Attach(GLenum attachment, GLenum fb_target) const {
     ASSERT(params.num_levels == 1);
 
+    if (params.target == SurfaceTarget::Texture3D) {
+        if (params.num_layers > 1) {
+            ASSERT(params.base_layer == 0);
+            glFramebufferTexture(fb_target, attachment, surface.texture.handle, params.base_level);
+        } else {
+            glFramebufferTexture3D(fb_target, attachment, target, surface.texture.handle,
+                                   params.base_level, params.base_layer);
+        }
+        return;
+    }
+
     if (params.num_layers > 1) {
-        // Layered framebuffer attachments
         UNIMPLEMENTED_IF(params.base_layer != 0);
-
-        switch (params.target) {
-        case SurfaceTarget::Texture2DArray:
-            glFramebufferTexture(target, attachment, GetTexture(), 0);
-            break;
-        default:
-            UNIMPLEMENTED();
-        }
+        glFramebufferTexture(fb_target, attachment, GetTexture(), 0);
         return;
     }
 
@@ -434,16 +442,16 @@ void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
     const GLuint texture = surface.GetTexture();
     switch (surface.GetSurfaceParams().target) {
     case SurfaceTarget::Texture1D:
-        glFramebufferTexture1D(target, attachment, view_target, texture, params.base_level);
+        glFramebufferTexture1D(fb_target, attachment, view_target, texture, params.base_level);
         break;
     case SurfaceTarget::Texture2D:
-        glFramebufferTexture2D(target, attachment, view_target, texture, params.base_level);
+        glFramebufferTexture2D(fb_target, attachment, view_target, texture, params.base_level);
         break;
     case SurfaceTarget::Texture1DArray:
    case SurfaceTarget::Texture2DArray:
     case SurfaceTarget::TextureCubemap:
     case SurfaceTarget::TextureCubeArray:
-        glFramebufferTextureLayer(target, attachment, texture, params.base_level,
+        glFramebufferTextureLayer(fb_target, attachment, texture, params.base_level,
                                   params.base_layer);
         break;
     default:
@@ -500,8 +508,13 @@ OGLTextureView CachedSurfaceView::CreateTextureView() const {
     OGLTextureView texture_view;
     texture_view.Create();
 
-    glTextureView(texture_view.handle, target, surface.texture.handle, format, params.base_level,
-                  params.num_levels, params.base_layer, params.num_layers);
+    if (target == GL_TEXTURE_3D) {
+        glTextureView(texture_view.handle, target, surface.texture.handle, format,
+                      params.base_level, params.num_levels, 0, 1);
+    } else {
+        glTextureView(texture_view.handle, target, surface.texture.handle, format,
+                      params.base_level, params.num_levels, params.base_layer, params.num_layers);
+    }
     ApplyTextureDefaults(surface.GetSurfaceParams(), texture_view.handle);
 
     return texture_view;
@@ -544,8 +557,8 @@ void TextureCacheOpenGL::ImageBlit(View& src_view, View& dst_view,
                                    const Tegra::Engines::Fermi2D::Config& copy_config) {
     const auto& src_params{src_view->GetSurfaceParams()};
     const auto& dst_params{dst_view->GetSurfaceParams()};
-    UNIMPLEMENTED_IF(src_params.target == SurfaceTarget::Texture3D);
-    UNIMPLEMENTED_IF(dst_params.target == SurfaceTarget::Texture3D);
+    UNIMPLEMENTED_IF(src_params.depth != 1);
+    UNIMPLEMENTED_IF(dst_params.depth != 1);
 
     state_tracker.NotifyScissor0();
     state_tracker.NotifyFramebuffer();
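The Attach() path above encodes one rule for 3D surfaces: a view that spans every Z slice is attached as a layered image with glFramebufferTexture, while a single-slice view goes through glFramebufferTexture3D. The same rule restated as a standalone sketch, with illustrative names:

    // fb_target is e.g. GL_DRAW_FRAMEBUFFER; texture is a GL_TEXTURE_3D name.
    void AttachTexture3D(GLenum fb_target, GLenum attachment, GLuint texture, GLint level,
                         GLint slice, GLsizei num_slices) {
        if (num_slices > 1) {
            // Layered attachment: each Z slice becomes a framebuffer layer.
            glFramebufferTexture(fb_target, attachment, texture, level);
        } else {
            // Attach exactly one Z slice of the 3D texture.
            glFramebufferTexture3D(fb_target, attachment, GL_TEXTURE_3D, texture, level, slice);
        }
    }
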
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index 8a2ac8603..bfc4ddf5d 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -80,8 +80,10 @@ public:
     explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params, bool is_proxy);
     ~CachedSurfaceView();
 
-    /// Attaches this texture view to the current bound GL_DRAW_FRAMEBUFFER
-    void Attach(GLenum attachment, GLenum target) const;
+    /// @brief Attaches this texture view to the currently bound fb_target framebuffer
+    /// @param attachment Attachment to bind textures to
+    /// @param fb_target Framebuffer target to attach to (e.g. DRAW_FRAMEBUFFER)
+    void Attach(GLenum attachment, GLenum fb_target) const;
 
     GLuint GetTexture(Tegra::Texture::SwizzleSource x_source,
                       Tegra::Texture::SwizzleSource y_source,
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index e7952924a..6214fcbc3 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -751,11 +751,9 @@ void RendererOpenGL::RenderScreenshot() {
 }
 
 bool RendererOpenGL::Init() {
-    if (GLAD_GL_KHR_debug) {
+    if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
         glEnable(GL_DEBUG_OUTPUT);
-        if (Settings::values.renderer_debug) {
-            glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
-        }
+        glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
         glDebugMessageCallback(DebugHandler, nullptr);
     }
 
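GL_DEBUG_OUTPUT_SYNCHRONOUS makes the driver invoke the callback on the offending thread before the faulting GL call returns, which is useful for breakpoints but costly, so the whole KHR_debug setup is now gated behind the debug setting rather than only the synchronous flag. A minimal sketch of the same gating outside this renderer (debug_requested and DebugHandler are stand-ins):

    if (debug_requested && GLAD_GL_KHR_debug) {
        glEnable(GL_DEBUG_OUTPUT);
        glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); // callback fires in-thread, at the faulting call
        glDebugMessageCallback(DebugHandler, nullptr);
    }
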
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 5f33d9e40..1fde38328 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -37,8 +37,8 @@ std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKSch
 
 } // Anonymous namespace
 
-CachedBufferBlock::CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
-                                     VAddr cpu_addr, std::size_t size)
+Buffer::Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VAddr cpu_addr,
+               std::size_t size)
     : VideoCommon::BufferBlock{cpu_addr, size} {
     VkBufferCreateInfo ci;
     ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
@@ -54,7 +54,7 @@ CachedBufferBlock::CachedBufferBlock(const VKDevice& device, VKMemoryManager& me
     buffer.commit = memory_manager.Commit(buffer.handle, false);
 }
 
-CachedBufferBlock::~CachedBufferBlock() = default;
+Buffer::~Buffer() = default;
 
 VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
                              const VKDevice& device, VKMemoryManager& memory_manager,
@@ -67,12 +67,8 @@ VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::S
 
 VKBufferCache::~VKBufferCache() = default;
 
-Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
-    return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
-}
-
-VkBuffer VKBufferCache::ToHandle(const Buffer& buffer) {
-    return buffer->GetHandle();
+std::shared_ptr<Buffer> VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
+    return std::make_shared<Buffer>(device, memory_manager, cpu_addr, size);
 }
 
 VkBuffer VKBufferCache::GetEmptyBuffer(std::size_t size) {
@@ -91,7 +87,7 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st
     std::memcpy(staging.commit->Map(size), data, size);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer.Handle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size});
 
@@ -114,7 +110,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
                                       u8* data) {
     const auto& staging = staging_pool.GetUnusedBuffer(size, true);
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset,
+    scheduler.Record([staging = *staging.handle, buffer = buffer.Handle(), offset,
                       size](vk::CommandBuffer cmdbuf) {
         VkBufferMemoryBarrier barrier;
         barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
@@ -141,8 +137,8 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
 void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
                               std::size_t dst_offset, std::size_t size) {
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([src_buffer = src->GetHandle(), dst_buffer = dst->GetHandle(), src_offset,
-                      dst_offset, size](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([src_buffer = src.Handle(), dst_buffer = dst.Handle(), src_offset, dst_offset,
+                      size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size});
 
         std::array<VkBufferMemoryBarrier, 2> barriers;
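With the shared_ptr typedef gone, callers now copy the plain VkBuffer into the Record() closure up front, so the deferred command-buffer recording owns everything it needs and never dereferences a cache object. Sketched with the same Record/CopyBuffer calls used above:

    // Capture raw handles by value; the closure is self-contained when the
    // scheduler later records it on its worker thread.
    const VkBuffer src_handle = src.Handle();
    const VkBuffer dst_handle = dst.Handle();
    scheduler.Record([src_handle, dst_handle, src_offset, dst_offset,
                      size](vk::CommandBuffer cmdbuf) {
        cmdbuf.CopyBuffer(src_handle, dst_handle, VkBufferCopy{src_offset, dst_offset, size});
    });
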
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index a54583e7d..9ebbef835 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -8,7 +8,6 @@
 
 #include "common/common_types.h"
 #include "video_core/buffer_cache/buffer_cache.h"
-#include "video_core/rasterizer_cache.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
 #include "video_core/renderer_vulkan/vk_stream_buffer.h"
@@ -24,13 +23,13 @@ class VKDevice;
 class VKMemoryManager;
 class VKScheduler;
 
-class CachedBufferBlock final : public VideoCommon::BufferBlock {
+class Buffer final : public VideoCommon::BufferBlock {
 public:
-    explicit CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
-                               VAddr cpu_addr, std::size_t size);
-    ~CachedBufferBlock();
+    explicit Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VAddr cpu_addr,
+                    std::size_t size);
+    ~Buffer();
 
-    VkBuffer GetHandle() const {
+    VkBuffer Handle() const {
         return *buffer.handle;
     }
 
@@ -38,8 +37,6 @@ private:
     VKBuffer buffer;
 };
 
-using Buffer = std::shared_ptr<CachedBufferBlock>;
-
 class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
 public:
     explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
@@ -50,9 +47,7 @@ public:
     VkBuffer GetEmptyBuffer(std::size_t size) override;
 
 protected:
-    VkBuffer ToHandle(const Buffer& buffer) override;
-
-    Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
+    std::shared_ptr<Buffer> CreateBlock(VAddr cpu_addr, std::size_t size) override;
 
     void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
                          const u8* data) override;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 8e1b46277..281bf9ac3 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -53,8 +53,9 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     };
     add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
-    add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.texel_buffers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
     add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
 
     VkDescriptorSetLayoutCreateInfo ci;
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index 890fd52cf..9259b618d 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -42,6 +42,7 @@ vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
         {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
         {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
         {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
+        {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
        {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};
 
     VkDescriptorPoolCreateInfo ci;
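Each VkDescriptorPoolSize pairs one descriptor type with how many descriptors of that type the pool can serve, so supporting storage texel buffers is mostly a matter of reserving capacity here to match the new set layouts. A minimal sketch of how such sizes feed pool creation (num_sets is assumed in scope; counts are illustrative):

    const std::array<VkDescriptorPoolSize, 2> sizes{{
        {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
        {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
    }};
    VkDescriptorPoolCreateInfo ci{};
    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ci.maxSets = num_sets;
    ci.poolSizeCount = static_cast<u32>(sizes.size());
    ci.pPoolSizes = sizes.data();
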
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 65a1c6245..ea66e621e 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -27,6 +27,7 @@
 #include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/shader/compiler_settings.h"
 #include "video_core/shader/memory_util.h"
+#include "video_core/shader_cache.h"
 
 namespace Vulkan {
 
@@ -45,6 +46,7 @@ constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
 constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
 constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
 constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+constexpr VkDescriptorType STORAGE_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
 constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
 
 constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
@@ -104,8 +106,9 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
     u32 binding = base_binding;
     AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
     AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
-    AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.texel_buffers);
+    AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.uniform_texels);
     AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
+    AddBindings<STORAGE_TEXEL_BUFFER>(bindings, binding, flags, entries.storage_texels);
     AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
     return binding;
 }
@@ -130,19 +133,18 @@ bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) con
     return std::memcmp(&rhs, this, sizeof *this) == 0;
 }
 
-CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage,
-                           GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code,
-                           u32 main_offset)
-    : RasterizerCacheObject{cpu_addr}, gpu_addr{gpu_addr}, program_code{std::move(program_code)},
+Shader::Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
+               VideoCommon::Shader::ProgramCode program_code, u32 main_offset)
+    : gpu_addr{gpu_addr}, program_code{std::move(program_code)},
       registry{stage, GetEngine(system, stage)}, shader_ir{this->program_code, main_offset,
                                                            compiler_settings, registry},
       entries{GenerateShaderEntries(shader_ir)} {}
 
-CachedShader::~CachedShader() = default;
+Shader::~Shader() = default;
 
-Tegra::Engines::ConstBufferEngineInterface& CachedShader::GetEngine(
-    Core::System& system, Tegra::Engines::ShaderType stage) {
-    if (stage == Tegra::Engines::ShaderType::Compute) {
+Tegra::Engines::ConstBufferEngineInterface& Shader::GetEngine(Core::System& system,
+                                                              Tegra::Engines::ShaderType stage) {
+    if (stage == ShaderType::Compute) {
         return system.GPU().KeplerCompute();
     } else {
         return system.GPU().Maxwell3D();
@@ -154,16 +156,16 @@ VKPipelineCache::VKPipelineCache(Core::System& system, RasterizerVulkan& rasteri
                                  VKDescriptorPool& descriptor_pool,
                                  VKUpdateDescriptorQueue& update_descriptor_queue,
                                  VKRenderPassCache& renderpass_cache)
-    : RasterizerCache{rasterizer}, system{system}, device{device}, scheduler{scheduler},
-      descriptor_pool{descriptor_pool}, update_descriptor_queue{update_descriptor_queue},
-      renderpass_cache{renderpass_cache} {}
+    : VideoCommon::ShaderCache<Shader>{rasterizer}, system{system}, device{device},
+      scheduler{scheduler}, descriptor_pool{descriptor_pool},
+      update_descriptor_queue{update_descriptor_queue}, renderpass_cache{renderpass_cache} {}
 
 VKPipelineCache::~VKPipelineCache() = default;
 
-std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
+std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
     const auto& gpu = system.GPU().Maxwell3D();
 
-    std::array<Shader, Maxwell::MaxShaderProgram> shaders;
+    std::array<Shader*, Maxwell::MaxShaderProgram> shaders{};
     for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
         const auto program{static_cast<Maxwell::ShaderProgram>(index)};
 
@@ -176,24 +178,28 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
         const GPUVAddr program_addr{GetShaderAddress(system, program)};
         const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
         ASSERT(cpu_addr);
-        auto shader = cpu_addr ? TryGet(*cpu_addr) : null_shader;
-        if (!shader) {
+
+        Shader* result = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
+        if (!result) {
             const auto host_ptr{memory_manager.GetPointer(program_addr)};
 
             // No shader found - create a new one
             constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
-            const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1);
+            const auto stage = static_cast<ShaderType>(index == 0 ? 0 : index - 1);
             ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
+            const std::size_t size_in_bytes = code.size() * sizeof(u64);
+
+            auto shader = std::make_unique<Shader>(system, stage, program_addr, std::move(code),
+                                                   stage_offset);
+            result = shader.get();
 
-            shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr,
-                                                    std::move(code), stage_offset);
             if (cpu_addr) {
-                Register(shader);
+                Register(std::move(shader), *cpu_addr, size_in_bytes);
             } else {
-                null_shader = shader;
+                null_shader = std::move(shader);
             }
         }
-        shaders[index] = std::move(shader);
+        shaders[index] = result;
     }
     return last_shaders = shaders;
 }
@@ -234,19 +240,22 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
     const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
     ASSERT(cpu_addr);
 
-    auto shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel;
+    Shader* shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get();
     if (!shader) {
         // No shader found - create a new one
         const auto host_ptr = memory_manager.GetPointer(program_addr);
 
         ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
-        shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute,
-                                                program_addr, *cpu_addr, std::move(code),
-                                                KERNEL_MAIN_OFFSET);
+        const std::size_t size_in_bytes = code.size() * sizeof(u64);
+
+        auto shader_info = std::make_unique<Shader>(system, ShaderType::Compute, program_addr,
+                                                    std::move(code), KERNEL_MAIN_OFFSET);
+        shader = shader_info.get();
+
         if (cpu_addr) {
-            Register(shader);
+            Register(std::move(shader_info), *cpu_addr, size_in_bytes);
         } else {
-            null_kernel = shader;
+            null_kernel = std::move(shader_info);
         }
     }
 
@@ -262,7 +271,7 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
     return *entry;
 }
 
-void VKPipelineCache::Unregister(const Shader& shader) {
+void VKPipelineCache::OnShaderRemoval(Shader* shader) {
     bool finished = false;
     const auto Finish = [&] {
         // TODO(Rodrigo): Instead of finishing here, wait for the fences that use this pipeline and
@@ -294,8 +303,6 @@ void VKPipelineCache::Unregister(const Shader& shader) {
         Finish();
         it = compute_cache.erase(it);
     }
-
-    RasterizerCache::Unregister(shader);
 }
 
 std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
@@ -330,12 +337,11 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
         }
 
         const GPUVAddr gpu_addr = GetShaderAddress(system, program_enum);
-        const auto cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
-        const auto shader = cpu_addr ? TryGet(*cpu_addr) : null_shader;
-        ASSERT(shader);
+        const std::optional<VAddr> cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
+        Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
 
         const std::size_t stage = index == 0 ? 0 : index - 1; // Stage indices are 0 - 5
-        const auto program_type = GetShaderType(program_enum);
+        const ShaderType program_type = GetShaderType(program_enum);
         const auto& entries = shader->GetEntries();
         program[stage] = {
             Decompile(device, shader->GetIR(), program_type, shader->GetRegistry(), specialization),
@@ -377,16 +383,17 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
         return;
     }
 
-    if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER) {
-        // Nvidia has a bug where updating multiple uniform texels at once causes the driver to
-        // crash.
+    if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER ||
+                  descriptor_type == STORAGE_TEXEL_BUFFER) {
+        // Nvidia has a bug where updating multiple texels at once causes the driver to crash.
+        // Note: Fixed in driver Windows 443.24, Linux 440.66.15
         for (u32 i = 0; i < count; ++i) {
             VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
             entry.dstBinding = binding + i;
             entry.dstArrayElement = 0;
             entry.descriptorCount = 1;
             entry.descriptorType = descriptor_type;
-            entry.offset = offset + i * entry_size;
+            entry.offset = static_cast<std::size_t>(offset + i * entry_size);
             entry.stride = entry_size;
         }
     } else if (count > 0) {
@@ -407,8 +414,9 @@ void FillDescriptorUpdateTemplateEntries(
     std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
     AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
     AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
-    AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.texel_buffers);
+    AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.uniform_texels);
     AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
+    AddEntry<STORAGE_TEXEL_BUFFER>(template_entries, offset, binding, entries.storage_texels);
     AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
 }
 
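FillDescriptorLayout, FillDescriptorUpdateTemplateEntries and the SPIR-V decompiler must hand out bindings in the same order, or the update template writes descriptors into the wrong slots; the new storage texel type slots in between samplers and images at every site. The shared order, written out as a sketch:

    // Bindings are assigned contiguously in this order at all three sites.
    constexpr VkDescriptorType kBindingOrder[] = {
        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,         // entries.const_buffers
        VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,         // entries.global_buffers
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,   // entries.uniform_texels
        VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, // entries.samplers
        VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,   // entries.storage_texels
        VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,          // entries.images
    };
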
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 0b5796fef..0a36e5112 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -17,7 +17,6 @@
 #include "common/common_types.h"
 #include "video_core/engines/const_buffer_engine_interface.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/rasterizer_cache.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
 #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
@@ -26,6 +25,7 @@
 #include "video_core/shader/memory_util.h"
 #include "video_core/shader/registry.h"
 #include "video_core/shader/shader_ir.h"
+#include "video_core/shader_cache.h"
 
 namespace Core {
 class System;
@@ -41,8 +41,6 @@ class VKFence;
 class VKScheduler;
 class VKUpdateDescriptorQueue;
 
-class CachedShader;
-using Shader = std::shared_ptr<CachedShader>;
 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 
 struct GraphicsPipelineCacheKey {
@@ -102,21 +100,16 @@ struct hash<Vulkan::ComputePipelineCacheKey> {
 
 namespace Vulkan {
 
-class CachedShader final : public RasterizerCacheObject {
+class Shader {
 public:
-    explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
-                          VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code,
-                          u32 main_offset);
-    ~CachedShader();
+    explicit Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
+                    VideoCommon::Shader::ProgramCode program_code, u32 main_offset);
+    ~Shader();
 
     GPUVAddr GetGpuAddr() const {
         return gpu_addr;
     }
 
-    std::size_t GetSizeInBytes() const override {
-        return program_code.size() * sizeof(u64);
-    }
-
     VideoCommon::Shader::ShaderIR& GetIR() {
         return shader_ir;
     }
@@ -144,25 +137,23 @@ private:
     ShaderEntries entries;
 };
 
-class VKPipelineCache final : public RasterizerCache<Shader> {
+class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> {
 public:
     explicit VKPipelineCache(Core::System& system, RasterizerVulkan& rasterizer,
                              const VKDevice& device, VKScheduler& scheduler,
                              VKDescriptorPool& descriptor_pool,
                              VKUpdateDescriptorQueue& update_descriptor_queue,
                              VKRenderPassCache& renderpass_cache);
-    ~VKPipelineCache();
+    ~VKPipelineCache() override;
 
-    std::array<Shader, Maxwell::MaxShaderProgram> GetShaders();
+    std::array<Shader*, Maxwell::MaxShaderProgram> GetShaders();
 
     VKGraphicsPipeline& GetGraphicsPipeline(const GraphicsPipelineCacheKey& key);
 
     VKComputePipeline& GetComputePipeline(const ComputePipelineCacheKey& key);
 
 protected:
-    void Unregister(const Shader& shader) override;
-
-    void FlushObjectInner(const Shader& object) override {}
+    void OnShaderRemoval(Shader* shader) final;
 
 private:
     std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
@@ -175,10 +166,10 @@ private:
     VKUpdateDescriptorQueue& update_descriptor_queue;
     VKRenderPassCache& renderpass_cache;
 
-    Shader null_shader{};
-    Shader null_kernel{};
+    std::unique_ptr<Shader> null_shader;
+    std::unique_ptr<Shader> null_kernel;
 
-    std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
+    std::array<Shader*, Maxwell::MaxShaderProgram> last_shaders{};
 
     GraphicsPipelineCacheKey last_graphics_key;
     VKGraphicsPipeline* last_graphics_pipeline = nullptr;
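The cache now owns shaders through std::unique_ptr and hands out raw pointers that stay valid until the generic ShaderCache invalidates the corresponding range; null_shader and null_kernel keep unregistered shaders alive outside the address-based map. A sketch of the contract as used in the .cpp above, inside a cache member function:

    auto shader = std::make_unique<Shader>(system, stage, gpu_addr, std::move(code), main_offset);
    Shader* const raw = shader.get();
    Register(std::move(shader), *cpu_addr, size_in_bytes); // ownership moves into the cache
    // Subsequent lookups hand back the same raw pointer:
    // TryGet(*cpu_addr) == raw, until the range is invalidated.
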
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index a3d992ed3..184b2238a 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -38,6 +38,7 @@
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
 #include "video_core/renderer_vulkan/wrapper.h"
+#include "video_core/shader_cache.h"
 
 namespace Vulkan {
 
@@ -98,7 +99,7 @@ VkRect2D GetScissorState(const Maxwell& regs, std::size_t index) {
 }
 
 std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
-    const std::array<Shader, Maxwell::MaxShaderProgram>& shaders) {
+    const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders) {
     std::array<GPUVAddr, Maxwell::MaxShaderProgram> addresses;
     for (std::size_t i = 0; i < std::size(addresses); ++i) {
         addresses[i] = shaders[i] ? shaders[i]->GetGpuAddr() : 0;
@@ -117,6 +118,17 @@ template <typename Engine, typename Entry>
 Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
                                                std::size_t stage, std::size_t index = 0) {
     const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage);
+    if constexpr (std::is_same_v<Entry, SamplerEntry>) {
+        if (entry.is_separated) {
+            const u32 buffer_1 = entry.buffer;
+            const u32 buffer_2 = entry.secondary_buffer;
+            const u32 offset_1 = entry.offset;
+            const u32 offset_2 = entry.secondary_offset;
+            const u32 handle_1 = engine.AccessConstBuffer32(stage_type, buffer_1, offset_1);
+            const u32 handle_2 = engine.AccessConstBuffer32(stage_type, buffer_2, offset_2);
+            return engine.GetTextureInfo(handle_1 | handle_2);
+        }
+    }
     if (entry.is_bindless) {
         const auto tex_handle = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset);
         return engine.GetTextureInfo(tex_handle);
@@ -468,8 +480,9 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
     const auto& entries = pipeline.GetEntries();
     SetupComputeConstBuffers(entries);
     SetupComputeGlobalBuffers(entries);
-    SetupComputeTexelBuffers(entries);
+    SetupComputeUniformTexels(entries);
     SetupComputeTextures(entries);
+    SetupComputeStorageTexels(entries);
     SetupComputeImages(entries);
 
     buffer_cache.Unmap();
@@ -715,7 +728,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
         if (!view) {
            return false;
         }
-        key.views.push_back(view->GetHandle());
+        key.views.push_back(view->GetAttachment());
         key.width = std::min(key.width, view->GetWidth());
         key.height = std::min(key.height, view->GetHeight());
         key.layers = std::min(key.layers, view->GetNumLayers());
@@ -775,20 +788,21 @@ RasterizerVulkan::DrawParameters RasterizerVulkan::SetupGeometry(FixedPipelineSt
 }
 
 void RasterizerVulkan::SetupShaderDescriptors(
-    const std::array<Shader, Maxwell::MaxShaderProgram>& shaders) {
+    const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders) {
     texture_cache.GuardSamplers(true);
 
     for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
         // Skip VertexA stage
-        const auto& shader = shaders[stage + 1];
+        Shader* const shader = shaders[stage + 1];
         if (!shader) {
             continue;
         }
         const auto& entries = shader->GetEntries();
         SetupGraphicsConstBuffers(entries, stage);
         SetupGraphicsGlobalBuffers(entries, stage);
-        SetupGraphicsTexelBuffers(entries, stage);
+        SetupGraphicsUniformTexels(entries, stage);
         SetupGraphicsTextures(entries, stage);
+        SetupGraphicsStorageTexels(entries, stage);
         SetupGraphicsImages(entries, stage);
     }
     texture_cache.GuardSamplers(false);
@@ -838,6 +852,10 @@ void RasterizerVulkan::BeginTransformFeedback() {
     if (regs.tfb_enabled == 0) {
         return;
     }
+    if (!device.IsExtTransformFeedbackSupported()) {
+        LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported");
+        return;
+    }
 
     UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
                      regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
@@ -866,6 +884,9 @@ void RasterizerVulkan::EndTransformFeedback() {
     if (regs.tfb_enabled == 0) {
         return;
     }
+    if (!device.IsExtTransformFeedbackSupported()) {
+        return;
+    }
 
     scheduler.Record(
         [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
@@ -976,12 +997,12 @@ void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries,
     }
 }
 
-void RasterizerVulkan::SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage) {
+void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
     const auto& gpu = system.GPU().Maxwell3D();
-    for (const auto& entry : entries.texel_buffers) {
+    for (const auto& entry : entries.uniform_texels) {
         const auto image = GetTextureInfo(gpu, entry, stage).tic;
-        SetupTexelBuffer(image, entry);
+        SetupUniformTexels(image, entry);
     }
 }
 
@@ -996,6 +1017,15 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
     }
 }
 
+void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage) {
+    MICROPROFILE_SCOPE(Vulkan_Textures);
+    const auto& gpu = system.GPU().Maxwell3D();
+    for (const auto& entry : entries.storage_texels) {
+        const auto image = GetTextureInfo(gpu, entry, stage).tic;
+        SetupStorageTexel(image, entry);
+    }
+}
+
 void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Images);
     const auto& gpu = system.GPU().Maxwell3D();
@@ -1028,12 +1058,12 @@ void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
     }
 }
 
-void RasterizerVulkan::SetupComputeTexelBuffers(const ShaderEntries& entries) {
+void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
     const auto& gpu = system.GPU().KeplerCompute();
-    for (const auto& entry : entries.texel_buffers) {
+    for (const auto& entry : entries.uniform_texels) {
         const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
-        SetupTexelBuffer(image, entry);
+        SetupUniformTexels(image, entry);
     }
 }
 
@@ -1048,6 +1078,15 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
     }
 }
 
+void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
+    MICROPROFILE_SCOPE(Vulkan_Textures);
+    const auto& gpu = system.GPU().KeplerCompute();
+    for (const auto& entry : entries.storage_texels) {
+        const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+        SetupStorageTexel(image, entry);
+    }
+}
+
 void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Images);
     const auto& gpu = system.GPU().KeplerCompute();
@@ -1097,8 +1136,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
     update_descriptor_queue.AddBuffer(buffer, offset, size);
 }
 
-void RasterizerVulkan::SetupTexelBuffer(const Tegra::Texture::TICEntry& tic,
-                                        const TexelBufferEntry& entry) {
+void RasterizerVulkan::SetupUniformTexels(const Tegra::Texture::TICEntry& tic,
+                                          const UniformTexelEntry& entry) {
     const auto view = texture_cache.GetTextureSurface(tic, entry);
     ASSERT(view->IsBufferView());
 
@@ -1110,8 +1149,8 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
     auto view = texture_cache.GetTextureSurface(texture.tic, entry);
     ASSERT(!view->IsBufferView());
 
-    const auto image_view = view->GetHandle(texture.tic.x_source, texture.tic.y_source,
-                                            texture.tic.z_source, texture.tic.w_source);
+    const VkImageView image_view = view->GetImageView(texture.tic.x_source, texture.tic.y_source,
+                                                      texture.tic.z_source, texture.tic.w_source);
     const auto sampler = sampler_cache.GetSampler(texture.tsc);
     update_descriptor_queue.AddSampledImage(sampler, image_view);
 
@@ -1120,6 +1159,14 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
     sampled_views.push_back(ImageView{std::move(view), image_layout});
 }
 
+void RasterizerVulkan::SetupStorageTexel(const Tegra::Texture::TICEntry& tic,
+                                         const StorageTexelEntry& entry) {
+    const auto view = texture_cache.GetImageSurface(tic, entry);
+    ASSERT(view->IsBufferView());
+
+    update_descriptor_queue.AddTexelBuffer(view->GetBufferView());
+}
+
 void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
     auto view = texture_cache.GetImageSurface(tic, entry);
 
@@ -1129,7 +1176,8 @@ void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const Ima
 
     UNIMPLEMENTED_IF(tic.IsBuffer());
 
-    const auto image_view = view->GetHandle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
+    const VkImageView image_view =
+        view->GetImageView(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
     update_descriptor_queue.AddImage(image_view);
 
     const auto image_layout = update_descriptor_queue.GetLastImageLayout();
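GetTextureInfo above gains a path for separated samplers, where the texture handle and the sampler handle live in two different const-buffer words and only form a valid combined handle once merged. The combine step, restated in isolation:

    // Two const-buffer reads, one carrying the texture id and one the sampler
    // id; OR-ing them yields the packed handle GetTextureInfo() expects.
    const u32 handle_1 = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset);
    const u32 handle_2 =
        engine.AccessConstBuffer32(stage_type, entry.secondary_buffer, entry.secondary_offset);
    const auto info = engine.GetTextureInfo(handle_1 | handle_2);
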
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 0ed0e48c6..c8c187606 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -168,7 +168,7 @@ private:
                               bool is_indexed, bool is_instanced);
 
     /// Setup descriptors in the graphics pipeline.
-    void SetupShaderDescriptors(const std::array<Shader, Maxwell::MaxShaderProgram>& shaders);
+    void SetupShaderDescriptors(const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders);
 
     void SetupImageTransitions(Texceptions texceptions,
                                const std::array<View, Maxwell::NumRenderTargets>& color_attachments,
@@ -193,12 +193,15 @@ private:
     /// Setup global buffers in the graphics pipeline.
     void SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage);
 
-    /// Setup texel buffers in the graphics pipeline.
-    void SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage);
+    /// Setup uniform texels in the graphics pipeline.
+    void SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage);
 
     /// Setup textures in the graphics pipeline.
     void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage);
 
+    /// Setup storage texels in the graphics pipeline.
+    void SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage);
+
     /// Setup images in the graphics pipeline.
     void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage);
 
@@ -209,11 +212,14 @@ private:
     void SetupComputeGlobalBuffers(const ShaderEntries& entries);
 
     /// Setup texel buffers in the compute pipeline.
-    void SetupComputeTexelBuffers(const ShaderEntries& entries);
+    void SetupComputeUniformTexels(const ShaderEntries& entries);
 
     /// Setup textures in the compute pipeline.
     void SetupComputeTextures(const ShaderEntries& entries);
 
+    /// Setup storage texels in the compute pipeline.
+    void SetupComputeStorageTexels(const ShaderEntries& entries);
+
     /// Setup images in the compute pipeline.
     void SetupComputeImages(const ShaderEntries& entries);
 
@@ -222,10 +228,12 @@ private:
 
     void SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address);
 
-    void SetupTexelBuffer(const Tegra::Texture::TICEntry& image, const TexelBufferEntry& entry);
+    void SetupUniformTexels(const Tegra::Texture::TICEntry& image, const UniformTexelEntry& entry);
 
     void SetupTexture(const Tegra::Texture::FullTextureInfo& texture, const SamplerEntry& entry);
 
+    void SetupStorageTexel(const Tegra::Texture::TICEntry& tic, const StorageTexelEntry& entry);
+
     void SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry);
 
     void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index a13e8baa7..97429cc59 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -400,8 +400,9 @@ private:
         u32 binding = specialization.base_binding;
         binding = DeclareConstantBuffers(binding);
         binding = DeclareGlobalBuffers(binding);
-        binding = DeclareTexelBuffers(binding);
+        binding = DeclareUniformTexels(binding);
         binding = DeclareSamplers(binding);
+        binding = DeclareStorageTexels(binding);
         binding = DeclareImages(binding);
 
         const Id main = OpFunction(t_void, {}, TypeFunction(t_void));
@@ -889,7 +890,7 @@ private:
         return binding;
     }
 
-    u32 DeclareTexelBuffers(u32 binding) {
+    u32 DeclareUniformTexels(u32 binding) {
         for (const auto& sampler : ir.GetSamplers()) {
             if (!sampler.is_buffer) {
                 continue;
@@ -910,7 +911,7 @@ private:
             Decorate(id, spv::Decoration::Binding, binding++);
             Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
 
-            texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id});
+            uniform_texels.emplace(sampler.index, TexelBuffer{image_type, id});
         }
         return binding;
     }
@@ -945,31 +946,48 @@ private:
         return binding;
     }
 
-    u32 DeclareImages(u32 binding) {
+    u32 DeclareStorageTexels(u32 binding) {
         for (const auto& image : ir.GetImages()) {
-            const auto [dim, arrayed] = GetImageDim(image);
-            constexpr int depth = 0;
-            constexpr bool ms = false;
-            constexpr int sampled = 2; // This won't be accessed with a sampler
-            constexpr auto format = spv::ImageFormat::Unknown;
-            const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
-            const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
-            const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
-            AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
-
-            Decorate(id, spv::Decoration::Binding, binding++);
-            Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
-            if (image.is_read && !image.is_written) {
-                Decorate(id, spv::Decoration::NonWritable);
-            } else if (image.is_written && !image.is_read) {
-                Decorate(id, spv::Decoration::NonReadable);
+            if (image.type != Tegra::Shader::ImageType::TextureBuffer) {
+                continue;
             }
+            DeclareImage(image, binding);
+        }
+        return binding;
+    }
 
-            images.emplace(image.index, StorageImage{image_type, id});
+    u32 DeclareImages(u32 binding) {
+        for (const auto& image : ir.GetImages()) {
+            if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
+                continue;
+            }
+            DeclareImage(image, binding);
         }
         return binding;
     }
 
+    void DeclareImage(const Image& image, u32& binding) {
+        const auto [dim, arrayed] = GetImageDim(image);
+        constexpr int depth = 0;
+        constexpr bool ms = false;
+        constexpr int sampled = 2; // This won't be accessed with a sampler
+        const auto format = image.is_atomic ? spv::ImageFormat::R32ui : spv::ImageFormat::Unknown;
+        const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
+        const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
+        const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
+        AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
+
+        Decorate(id, spv::Decoration::Binding, binding++);
+        Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
+        if (image.is_read && !image.is_written) {
+            Decorate(id, spv::Decoration::NonWritable);
+        } else if (image.is_written && !image.is_read) {
+            Decorate(id, spv::Decoration::NonReadable);
+        }
+
+        images.emplace(image.index, StorageImage{image_type, id});
+    }
+
     bool IsRenderTargetEnabled(u32 rt) const {
         for (u32 component = 0; component < 4; ++component) {
             if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
@@ -1256,7 +1274,7 @@ private:
             } else {
                 UNREACHABLE_MSG("Unmanaged offset node type");
             }
-            pointer = OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), buffer_index,
+            pointer = OpAccessChain(t_cbuf_float, buffer_id, v_uint_zero, buffer_index,
                                     buffer_element);
         }
         return {OpLoad(t_float, pointer), Type::Float};
@@ -1611,7 +1629,7 @@ private:
 
         const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
         const Id carry = OpCompositeExtract(t_uint, result, 1);
-        return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool};
+        return {OpINotEqual(t_bool, carry, v_uint_zero), Type::Bool};
     }
 
     Expression LogicalAssign(Operation operation) {
@@ -1674,7 +1692,7 @@ private:
         const auto& meta = std::get<MetaTexture>(operation.GetMeta());
         const u32 index = meta.sampler.index;
         if (meta.sampler.is_buffer) {
-            const auto& entry = texel_buffers.at(index);
+            const auto& entry = uniform_texels.at(index);
             return OpLoad(entry.image_type, entry.image);
         } else {
             const auto& entry = sampled_images.at(index);
@@ -1951,39 +1969,20 @@ private:
         return {};
     }
 
-    Expression AtomicImageAdd(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageMin(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageMax(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageAnd(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageOr(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
+    template <Id (Module::*func)(Id, Id, Id, Id, Id)>
+    Expression AtomicImage(Operation operation) {
+        const auto& meta{std::get<MetaImage>(operation.GetMeta())};
+        ASSERT(meta.values.size() == 1);
 
-    Expression AtomicImageXor(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
+        const Id coordinate = GetCoordinates(operation, Type::Int);
+        const Id image = images.at(meta.image.index).image;
+        const Id sample = v_uint_zero;
+        const Id pointer = OpImageTexelPointer(t_image_uint, image, coordinate, sample);
 
-    Expression AtomicImageExchange(Operation operation) {
+        const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
1985 UNIMPLEMENTED(); 1983 const Id semantics = v_uint_zero;
1986 return {}; 1984 const Id value = AsUint(Visit(meta.values[0]));
1985 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
1987 } 1986 }
1988 1987
1989 template <Id (Module::*func)(Id, Id, Id, Id, Id)> 1988 template <Id (Module::*func)(Id, Id, Id, Id, Id)>
@@ -1998,7 +1997,7 @@ private:
1998 return {v_float_zero, Type::Float}; 1997 return {v_float_zero, Type::Float};
1999 } 1998 }
2000 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device)); 1999 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
2001 const Id semantics = Constant(t_uint, 0); 2000 const Id semantics = v_uint_zero;
2002 const Id value = AsUint(Visit(operation[1])); 2001 const Id value = AsUint(Visit(operation[1]));
2003 2002
2004 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint}; 2003 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
@@ -2622,11 +2621,11 @@ private:
2622 2621
2623 &SPIRVDecompiler::ImageLoad, 2622 &SPIRVDecompiler::ImageLoad,
2624 &SPIRVDecompiler::ImageStore, 2623 &SPIRVDecompiler::ImageStore,
2625 &SPIRVDecompiler::AtomicImageAdd, 2624 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicIAdd>,
2626 &SPIRVDecompiler::AtomicImageAnd, 2625 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicAnd>,
2627 &SPIRVDecompiler::AtomicImageOr, 2626 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicOr>,
2628 &SPIRVDecompiler::AtomicImageXor, 2627 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicXor>,
2629 &SPIRVDecompiler::AtomicImageExchange, 2628 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicExchange>,
2630 2629
2631 &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>, 2630 &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
2632 &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>, 2631 &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
@@ -2768,8 +2767,11 @@ private:
2768 Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); 2767 Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0);
2769 const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct); 2768 const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct);
2770 2769
2770 const Id t_image_uint = TypePointer(spv::StorageClass::Image, t_uint);
2771
2771 const Id v_float_zero = Constant(t_float, 0.0f); 2772 const Id v_float_zero = Constant(t_float, 0.0f);
2772 const Id v_float_one = Constant(t_float, 1.0f); 2773 const Id v_float_one = Constant(t_float, 1.0f);
2774 const Id v_uint_zero = Constant(t_uint, 0);
2773 2775
2774 // Nvidia uses these defaults for varyings (e.g. position and generic attributes) 2776 // Nvidia uses these defaults for varyings (e.g. position and generic attributes)
2775 const Id v_varying_default = 2777 const Id v_varying_default =
@@ -2794,15 +2796,16 @@ private:
2794 std::unordered_map<u8, GenericVaryingDescription> output_attributes; 2796 std::unordered_map<u8, GenericVaryingDescription> output_attributes;
2795 std::map<u32, Id> constant_buffers; 2797 std::map<u32, Id> constant_buffers;
2796 std::map<GlobalMemoryBase, Id> global_buffers; 2798 std::map<GlobalMemoryBase, Id> global_buffers;
2797 std::map<u32, TexelBuffer> texel_buffers; 2799 std::map<u32, TexelBuffer> uniform_texels;
2798 std::map<u32, SampledImage> sampled_images; 2800 std::map<u32, SampledImage> sampled_images;
2801 std::map<u32, TexelBuffer> storage_texels;
2799 std::map<u32, StorageImage> images; 2802 std::map<u32, StorageImage> images;
2800 2803
2804 std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
2801 Id instance_index{}; 2805 Id instance_index{};
2802 Id vertex_index{}; 2806 Id vertex_index{};
2803 Id base_instance{}; 2807 Id base_instance{};
2804 Id base_vertex{}; 2808 Id base_vertex{};
2805 std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
2806 Id frag_depth{}; 2809 Id frag_depth{};
2807 Id frag_coord{}; 2810 Id frag_coord{};
2808 Id front_facing{}; 2811 Id front_facing{};
@@ -3058,13 +3061,17 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
3058 } 3061 }
3059 for (const auto& sampler : ir.GetSamplers()) { 3062 for (const auto& sampler : ir.GetSamplers()) {
3060 if (sampler.is_buffer) { 3063 if (sampler.is_buffer) {
3061 entries.texel_buffers.emplace_back(sampler); 3064 entries.uniform_texels.emplace_back(sampler);
3062 } else { 3065 } else {
3063 entries.samplers.emplace_back(sampler); 3066 entries.samplers.emplace_back(sampler);
3064 } 3067 }
3065 } 3068 }
3066 for (const auto& image : ir.GetImages()) { 3069 for (const auto& image : ir.GetImages()) {
3067 entries.images.emplace_back(image); 3070 if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
3071 entries.storage_texels.emplace_back(image);
3072 } else {
3073 entries.images.emplace_back(image);
3074 }
3068 } 3075 }
3069 for (const auto& attribute : ir.GetInputAttributes()) { 3076 for (const auto& attribute : ir.GetInputAttributes()) {
3070 if (IsGenericAttribute(attribute)) { 3077 if (IsGenericAttribute(attribute)) {
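Note: the five UNIMPLEMENTED atomic image stubs above collapse into one AtomicImage function template parameterized on a Module member-function pointer, so every entry in the operation table instantiates the same body with a different SPIR-V emitter. A minimal, self-contained sketch of that dispatch pattern (Emitter, Decompiler and the opcode names are illustrative stand-ins, not the decompiler's types):

#include <cstdio>

class Emitter {
public:
    int OpAdd(int a, int b) { return a + b; }
    int OpXor(int a, int b) { return a ^ b; }
};

class Decompiler : public Emitter {
public:
    // One template body; the member-function pointer selects the opcode emitter.
    template <int (Emitter::*func)(int, int)>
    int Atomic(int a, int b) {
        return (this->*func)(a, b);
    }
};

// Table of handlers, mirroring the decompiler's operation table.
using Handler = int (Decompiler::*)(int, int);
constexpr Handler table[] = {
    &Decompiler::Atomic<&Emitter::OpAdd>,
    &Decompiler::Atomic<&Emitter::OpXor>,
};

int main() {
    Decompiler d;
    std::printf("%d %d\n", (d.*table[0])(6, 3), (d.*table[1])(6, 3)); // prints: 9 5
}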
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index b7af26388..2b0e90396 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -21,8 +21,9 @@ class VKDevice;
21namespace Vulkan { 21namespace Vulkan {
22 22
23using Maxwell = Tegra::Engines::Maxwell3D::Regs; 23using Maxwell = Tegra::Engines::Maxwell3D::Regs;
24using TexelBufferEntry = VideoCommon::Shader::Sampler; 24using UniformTexelEntry = VideoCommon::Shader::Sampler;
25using SamplerEntry = VideoCommon::Shader::Sampler; 25using SamplerEntry = VideoCommon::Shader::Sampler;
26using StorageTexelEntry = VideoCommon::Shader::Image;
26using ImageEntry = VideoCommon::Shader::Image; 27using ImageEntry = VideoCommon::Shader::Image;
27 28
28constexpr u32 DESCRIPTOR_SET = 0; 29constexpr u32 DESCRIPTOR_SET = 0;
@@ -66,13 +67,15 @@ private:
66struct ShaderEntries { 67struct ShaderEntries {
67 u32 NumBindings() const { 68 u32 NumBindings() const {
68 return static_cast<u32>(const_buffers.size() + global_buffers.size() + 69 return static_cast<u32>(const_buffers.size() + global_buffers.size() +
69 texel_buffers.size() + samplers.size() + images.size()); 70 uniform_texels.size() + samplers.size() + storage_texels.size() +
71 images.size());
70 } 72 }
71 73
72 std::vector<ConstBufferEntry> const_buffers; 74 std::vector<ConstBufferEntry> const_buffers;
73 std::vector<GlobalBufferEntry> global_buffers; 75 std::vector<GlobalBufferEntry> global_buffers;
74 std::vector<TexelBufferEntry> texel_buffers; 76 std::vector<UniformTexelEntry> uniform_texels;
75 std::vector<SamplerEntry> samplers; 77 std::vector<SamplerEntry> samplers;
78 std::vector<StorageTexelEntry> storage_texels;
76 std::vector<ImageEntry> images; 79 std::vector<ImageEntry> images;
77 std::set<u32> attributes; 80 std::set<u32> attributes;
78 std::array<bool, Maxwell::NumClipDistances> clip_distances{}; 81 std::array<bool, Maxwell::NumClipDistances> clip_distances{};
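NumBindings() now counts six resource categories because the decompiler hands out consecutive binding indices in the order its declare calls run (const buffers, global buffers, uniform texels, samplers, storage texels, images, as the hunks above suggest; that ordering is inferred, not stated). A small sketch of how consecutive assignment fixes each category's first binding, with made-up counts:

#include <array>
#include <cstddef>
#include <cstdio>

int main() {
    // Hypothetical per-stage resource counts, in the assumed declaration order:
    // const buffers, global buffers, uniform texels, samplers, storage texels, images.
    const std::array<unsigned, 6> sizes{4, 1, 2, 3, 1, 2};
    unsigned binding = 0;
    for (std::size_t i = 0; i < sizes.size(); ++i) {
        std::printf("category %zu starts at binding %u\n", i, binding);
        binding += sizes[i]; // each resource consumes one consecutive binding index
    }
    std::printf("NumBindings() == %u\n", binding); // 13
}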
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index dfddf7ad6..c765c60a0 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -35,7 +35,7 @@ public:
35 /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy. 35 /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
36 void Unmap(u64 size); 36 void Unmap(u64 size);
37 37
38 VkBuffer GetHandle() const { 38 VkBuffer Handle() const {
39 return *buffer; 39 return *buffer;
40 } 40 }
41 41
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 2f1d5021d..430031665 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -100,8 +100,8 @@ vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
100 ci.pNext = nullptr; 100 ci.pNext = nullptr;
101 ci.flags = 0; 101 ci.flags = 0;
102 ci.size = static_cast<VkDeviceSize>(host_memory_size); 102 ci.size = static_cast<VkDeviceSize>(host_memory_size);
103 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | 103 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
104 VK_BUFFER_USAGE_TRANSFER_DST_BIT; 104 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
105 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 105 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
106 ci.queueFamilyIndexCount = 0; 106 ci.queueFamilyIndexCount = 0;
107 ci.pQueueFamilyIndices = nullptr; 107 ci.pQueueFamilyIndices = nullptr;
@@ -167,6 +167,7 @@ VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceP
167 ci.extent = {params.width, params.height, 1}; 167 ci.extent = {params.width, params.height, 1};
168 break; 168 break;
169 case SurfaceTarget::Texture3D: 169 case SurfaceTarget::Texture3D:
170 ci.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
170 ci.extent = {params.width, params.height, params.depth}; 171 ci.extent = {params.width, params.height, params.depth};
171 break; 172 break;
172 case SurfaceTarget::TextureBuffer: 173 case SurfaceTarget::TextureBuffer:
@@ -176,6 +177,12 @@ VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceP
176 return ci; 177 return ci;
177} 178}
178 179
180u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, Tegra::Texture::SwizzleSource y_source,
181 Tegra::Texture::SwizzleSource z_source, Tegra::Texture::SwizzleSource w_source) {
182 return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
183 (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
184}
185
179} // Anonymous namespace 186} // Anonymous namespace
180 187
181CachedSurface::CachedSurface(Core::System& system, const VKDevice& device, 188CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
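EncodeSwizzle, now shared from the anonymous namespace, packs the four swizzle sources into one u32 with one byte per component; the packed value also serves as the view-cache key. A compilable sketch of the same packing (the enum values below are illustrative, not Tegra::Texture::SwizzleSource's actual encodings):

#include <cstdint>
#include <cstdio>

enum class SwizzleSource : std::uint32_t { Zero = 0, R = 2, G = 3, B = 4, A = 5 };

// Same packing as the helper above: one byte per component, x in the high byte.
std::uint32_t EncodeSwizzle(SwizzleSource x, SwizzleSource y, SwizzleSource z, SwizzleSource w) {
    return (static_cast<std::uint32_t>(x) << 24) | (static_cast<std::uint32_t>(y) << 16) |
           (static_cast<std::uint32_t>(z) << 8) | static_cast<std::uint32_t>(w);
}

int main() {
    // R,G,B,A packs to 0x02030405 under these illustrative values.
    std::printf("0x%08x\n", EncodeSwizzle(SwizzleSource::R, SwizzleSource::G, SwizzleSource::B,
                                          SwizzleSource::A));
}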
@@ -203,9 +210,11 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
203 } 210 }
204 211
205 // TODO(Rodrigo): Move this to a virtual function. 212 // TODO(Rodrigo): Move this to a virtual function.
206 main_view = CreateViewInner( 213 u32 num_layers = 1;
207 ViewParams(params.target, 0, static_cast<u32>(params.GetNumLayers()), 0, params.num_levels), 214 if (params.is_layered || params.target == SurfaceTarget::Texture3D) {
208 true); 215 num_layers = params.depth;
216 }
217 main_view = CreateView(ViewParams(params.target, 0, num_layers, 0, params.num_levels));
209} 218}
210 219
211CachedSurface::~CachedSurface() = default; 220CachedSurface::~CachedSurface() = default;
@@ -253,12 +262,8 @@ void CachedSurface::DecorateSurfaceName() {
253} 262}
254 263
255View CachedSurface::CreateView(const ViewParams& params) { 264View CachedSurface::CreateView(const ViewParams& params) {
256 return CreateViewInner(params, false);
257}
258
259View CachedSurface::CreateViewInner(const ViewParams& params, bool is_proxy) {
260 // TODO(Rodrigo): Add name decorations 265 // TODO(Rodrigo): Add name decorations
261 return views[params] = std::make_shared<CachedSurfaceView>(device, *this, params, is_proxy); 266 return views[params] = std::make_shared<CachedSurfaceView>(device, *this, params);
262} 267}
263 268
264void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) { 269void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
@@ -342,18 +347,27 @@ VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
342} 347}
343 348
344CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surface, 349CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surface,
345 const ViewParams& params, bool is_proxy) 350 const ViewParams& params)
346 : VideoCommon::ViewBase{params}, params{surface.GetSurfaceParams()}, 351 : VideoCommon::ViewBase{params}, params{surface.GetSurfaceParams()},
347 image{surface.GetImageHandle()}, buffer_view{surface.GetBufferViewHandle()}, 352 image{surface.GetImageHandle()}, buffer_view{surface.GetBufferViewHandle()},
348 aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface}, 353 aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface},
349 base_layer{params.base_layer}, num_layers{params.num_layers}, base_level{params.base_level}, 354 base_level{params.base_level}, num_levels{params.num_levels},
350 num_levels{params.num_levels}, image_view_type{image ? GetImageViewType(params.target) 355 image_view_type{image ? GetImageViewType(params.target) : VK_IMAGE_VIEW_TYPE_1D} {
351 : VK_IMAGE_VIEW_TYPE_1D} {} 356 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
357 base_layer = 0;
358 num_layers = 1;
359 base_slice = params.base_layer;
360 num_slices = params.num_layers;
361 } else {
362 base_layer = params.base_layer;
363 num_layers = params.num_layers;
364 }
365}
352 366
353CachedSurfaceView::~CachedSurfaceView() = default; 367CachedSurfaceView::~CachedSurfaceView() = default;
354 368
355VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source, 369VkImageView CachedSurfaceView::GetImageView(SwizzleSource x_source, SwizzleSource y_source,
356 SwizzleSource z_source, SwizzleSource w_source) { 370 SwizzleSource z_source, SwizzleSource w_source) {
357 const u32 new_swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source); 371 const u32 new_swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
358 if (last_image_view && last_swizzle == new_swizzle) { 372 if (last_image_view && last_swizzle == new_swizzle) {
359 return last_image_view; 373 return last_image_view;
@@ -399,6 +413,11 @@ VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y
399 }); 413 });
400 } 414 }
401 415
416 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
417 ASSERT(base_slice == 0);
418 ASSERT(num_slices == params.depth);
419 }
420
402 VkImageViewCreateInfo ci; 421 VkImageViewCreateInfo ci;
403 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; 422 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
404 ci.pNext = nullptr; 423 ci.pNext = nullptr;
@@ -417,6 +436,35 @@ VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y
417 return last_image_view = *image_view; 436 return last_image_view = *image_view;
418} 437}
419 438
439VkImageView CachedSurfaceView::GetAttachment() {
440 if (render_target) {
441 return *render_target;
442 }
443
444 VkImageViewCreateInfo ci;
445 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
446 ci.pNext = nullptr;
447 ci.flags = 0;
448 ci.image = surface.GetImageHandle();
449 ci.format = surface.GetImage().GetFormat();
450 ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
451 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
452 ci.subresourceRange.aspectMask = aspect_mask;
453 ci.subresourceRange.baseMipLevel = base_level;
454 ci.subresourceRange.levelCount = num_levels;
455 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
456 ci.viewType = num_slices > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D;
457 ci.subresourceRange.baseArrayLayer = base_slice;
458 ci.subresourceRange.layerCount = num_slices;
459 } else {
460 ci.viewType = image_view_type;
461 ci.subresourceRange.baseArrayLayer = base_layer;
462 ci.subresourceRange.layerCount = num_layers;
463 }
464 render_target = device.GetLogical().CreateImageView(ci);
465 return *render_target;
466}
467
420VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, 468VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
421 const VKDevice& device, VKResourceManager& resource_manager, 469 const VKDevice& device, VKResourceManager& resource_manager,
422 VKMemoryManager& memory_manager, VKScheduler& scheduler, 470 VKMemoryManager& memory_manager, VKScheduler& scheduler,
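GetAttachment above builds the framebuffer view lazily and keeps it in render_target, remapping 3D slices to 2D or 2D-array views. A reduced sketch of the lazy create-and-cache pattern, with a stand-in view type instead of vk::ImageView:

#include <cstdio>
#include <memory>
#include <string>

struct View {
    std::string desc;
};

class Surface {
public:
    // Returns the cached attachment view, creating it on first use.
    const View& GetAttachment() {
        if (render_target) {
            return *render_target; // fast path: reuse the previously built view
        }
        render_target = std::make_unique<View>();
        render_target->desc = is_3d ? "2D array over 3D slices" : "native target view";
        return *render_target;
    }

    bool is_3d = true;

private:
    std::unique_ptr<View> render_target; // lazily created, owned by the surface
};

int main() {
    Surface s;
    std::printf("%s\n", s.GetAttachment().desc.c_str());
    std::printf("%s\n", s.GetAttachment().desc.c_str()); // second call hits the cache
}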
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index f211ccb1e..807e26c8a 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -91,7 +91,6 @@ protected:
91 void DecorateSurfaceName(); 91 void DecorateSurfaceName();
92 92
93 View CreateView(const ViewParams& params) override; 93 View CreateView(const ViewParams& params) override;
94 View CreateViewInner(const ViewParams& params, bool is_proxy);
95 94
96private: 95private:
97 void UploadBuffer(const std::vector<u8>& staging_buffer); 96 void UploadBuffer(const std::vector<u8>& staging_buffer);
@@ -120,23 +119,20 @@ private:
120class CachedSurfaceView final : public VideoCommon::ViewBase { 119class CachedSurfaceView final : public VideoCommon::ViewBase {
121public: 120public:
122 explicit CachedSurfaceView(const VKDevice& device, CachedSurface& surface, 121 explicit CachedSurfaceView(const VKDevice& device, CachedSurface& surface,
123 const ViewParams& params, bool is_proxy); 122 const ViewParams& params);
124 ~CachedSurfaceView(); 123 ~CachedSurfaceView();
125 124
126 VkImageView GetHandle(Tegra::Texture::SwizzleSource x_source, 125 VkImageView GetImageView(Tegra::Texture::SwizzleSource x_source,
127 Tegra::Texture::SwizzleSource y_source, 126 Tegra::Texture::SwizzleSource y_source,
128 Tegra::Texture::SwizzleSource z_source, 127 Tegra::Texture::SwizzleSource z_source,
129 Tegra::Texture::SwizzleSource w_source); 128 Tegra::Texture::SwizzleSource w_source);
129
130 VkImageView GetAttachment();
130 131
131 bool IsSameSurface(const CachedSurfaceView& rhs) const { 132 bool IsSameSurface(const CachedSurfaceView& rhs) const {
132 return &surface == &rhs.surface; 133 return &surface == &rhs.surface;
133 } 134 }
134 135
135 VkImageView GetHandle() {
136 return GetHandle(Tegra::Texture::SwizzleSource::R, Tegra::Texture::SwizzleSource::G,
137 Tegra::Texture::SwizzleSource::B, Tegra::Texture::SwizzleSource::A);
138 }
139
140 u32 GetWidth() const { 136 u32 GetWidth() const {
141 return params.GetMipWidth(base_level); 137 return params.GetMipWidth(base_level);
142 } 138 }
@@ -180,14 +176,6 @@ public:
180 } 176 }
181 177
182private: 178private:
183 static u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source,
184 Tegra::Texture::SwizzleSource y_source,
185 Tegra::Texture::SwizzleSource z_source,
186 Tegra::Texture::SwizzleSource w_source) {
187 return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
188 (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
189 }
190
191 // Store a copy of these values to avoid double dereference when reading them 179 // Store a copy of these values to avoid double dereference when reading them
192 const SurfaceParams params; 180 const SurfaceParams params;
193 const VkImage image; 181 const VkImage image;
@@ -196,15 +184,18 @@ private:
196 184
197 const VKDevice& device; 185 const VKDevice& device;
198 CachedSurface& surface; 186 CachedSurface& surface;
199 const u32 base_layer;
200 const u32 num_layers;
201 const u32 base_level; 187 const u32 base_level;
202 const u32 num_levels; 188 const u32 num_levels;
203 const VkImageViewType image_view_type; 189 const VkImageViewType image_view_type;
190 u32 base_layer = 0;
191 u32 num_layers = 0;
192 u32 base_slice = 0;
193 u32 num_slices = 0;
204 194
205 VkImageView last_image_view = nullptr; 195 VkImageView last_image_view = nullptr;
206 u32 last_swizzle = 0; 196 u32 last_swizzle = 0;
207 197
198 vk::ImageView render_target;
208 std::unordered_map<u32, vk::ImageView> view_cache; 199 std::unordered_map<u32, vk::ImageView> view_cache;
209}; 200};
210 201
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 8f0bb996e..29ebf65ba 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -357,13 +357,11 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
357 return pc; 357 return pc;
358} 358}
359 359
360ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(SamplerInfo info, u32 offset, 360ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(
361 std::optional<u32> buffer) { 361 SamplerInfo info, std::optional<Tegra::Engines::SamplerDescriptor> sampler) {
362 if (info.IsComplete()) { 362 if (info.IsComplete()) {
363 return info; 363 return info;
364 } 364 }
365 const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset)
366 : registry.ObtainBoundSampler(offset);
367 if (!sampler) { 365 if (!sampler) {
368 LOG_WARNING(HW_GPU, "Unknown sampler info"); 366 LOG_WARNING(HW_GPU, "Unknown sampler info");
369 info.type = info.type.value_or(Tegra::Shader::TextureType::Texture2D); 367 info.type = info.type.value_or(Tegra::Shader::TextureType::Texture2D);
@@ -381,8 +379,8 @@ ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(SamplerInfo info, u32 offset,
381 379
382std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler, 380std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
383 SamplerInfo sampler_info) { 381 SamplerInfo sampler_info) {
384 const auto offset = static_cast<u32>(sampler.index.Value()); 382 const u32 offset = static_cast<u32>(sampler.index.Value());
385 const auto info = GetSamplerInfo(sampler_info, offset); 383 const auto info = GetSamplerInfo(sampler_info, registry.ObtainBoundSampler(offset));
386 384
387 // If this sampler has already been used, return the existing mapping. 385 // If this sampler has already been used, return the existing mapping.
388 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(), 386 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
@@ -404,20 +402,19 @@ std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
404 const Node sampler_register = GetRegister(reg); 402 const Node sampler_register = GetRegister(reg);
405 const auto [base_node, tracked_sampler_info] = 403 const auto [base_node, tracked_sampler_info] =
406 TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size())); 404 TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
407 ASSERT(base_node != nullptr); 405 if (!base_node) {
408 if (base_node == nullptr) { 406 UNREACHABLE();
409 return std::nullopt; 407 return std::nullopt;
410 } 408 }
411 409
412 if (const auto bindless_sampler_info = 410 if (const auto sampler_info = std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) {
413 std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) { 411 const u32 buffer = sampler_info->index;
414 const u32 buffer = bindless_sampler_info->GetIndex(); 412 const u32 offset = sampler_info->offset;
415 const u32 offset = bindless_sampler_info->GetOffset(); 413 info = GetSamplerInfo(info, registry.ObtainBindlessSampler(buffer, offset));
416 info = GetSamplerInfo(info, offset, buffer);
417 414
418 // If this sampler has already been used, return the existing mapping. 415 // If this sampler has already been used, return the existing mapping.
419 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(), 416 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
420 [buffer = buffer, offset = offset](const Sampler& entry) { 417 [buffer, offset](const Sampler& entry) {
421 return entry.buffer == buffer && entry.offset == offset; 418 return entry.buffer == buffer && entry.offset == offset;
422 }); 419 });
423 if (it != used_samplers.end()) { 420 if (it != used_samplers.end()) {
@@ -431,10 +428,32 @@ std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
431 return used_samplers.emplace_back(next_index, offset, buffer, *info.type, *info.is_array, 428 return used_samplers.emplace_back(next_index, offset, buffer, *info.type, *info.is_array,
432 *info.is_shadow, *info.is_buffer, false); 429 *info.is_shadow, *info.is_buffer, false);
433 } 430 }
434 if (const auto array_sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) { 431 if (const auto sampler_info = std::get_if<SeparateSamplerNode>(&*tracked_sampler_info)) {
435 const u32 base_offset = array_sampler_info->GetBaseOffset() / 4; 432 const std::pair indices = sampler_info->indices;
436 index_var = GetCustomVariable(array_sampler_info->GetIndexVar()); 433 const std::pair offsets = sampler_info->offsets;
437 info = GetSamplerInfo(info, base_offset); 434 info = GetSamplerInfo(info, registry.ObtainSeparateSampler(indices, offsets));
435
436 // Try to use an already created sampler if it exists
437 const auto it = std::find_if(
438 used_samplers.begin(), used_samplers.end(), [indices, offsets](const Sampler& entry) {
439 return offsets == std::pair{entry.offset, entry.secondary_offset} &&
440 indices == std::pair{entry.buffer, entry.secondary_buffer};
441 });
442 if (it != used_samplers.end()) {
443 ASSERT(it->is_separated && it->type == info.type && it->is_array == info.is_array &&
444 it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
445 return *it;
446 }
447
448 // Otherwise create a new mapping for this sampler
449 const u32 next_index = static_cast<u32>(used_samplers.size());
450 return used_samplers.emplace_back(next_index, offsets, indices, *info.type, *info.is_array,
451 *info.is_shadow, *info.is_buffer);
452 }
453 if (const auto sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
454 const u32 base_offset = sampler_info->base_offset / 4;
455 index_var = GetCustomVariable(sampler_info->bindless_var);
456 info = GetSamplerInfo(info, registry.ObtainBoundSampler(base_offset));
438 457
439 // If this sampler has already been used, return the existing mapping. 458 // If this sampler has already been used, return the existing mapping.
440 const auto it = std::find_if( 459 const auto it = std::find_if(
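For separate sampler pairs, GetBindlessSampler deduplicates against used_samplers by comparing (offset, secondary_offset) and (buffer, secondary_buffer) as std::pair values. A reduced, runnable sketch of that lookup over a simplified Sampler record:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

struct Sampler {
    unsigned index;
    unsigned buffer, secondary_buffer;
    unsigned offset, secondary_offset;
};

int main() {
    std::vector<Sampler> used{{0, 1, 2, 16, 32}};
    const std::pair<unsigned, unsigned> indices{1, 2}; // (buffer, secondary_buffer)
    const std::pair<unsigned, unsigned> offsets{16, 32};

    // Pair comparison keeps both components in one predicate, as in the diff above.
    const auto it = std::find_if(used.begin(), used.end(), [&](const Sampler& entry) {
        return offsets == std::pair{entry.offset, entry.secondary_offset} &&
               indices == std::pair{entry.buffer, entry.secondary_buffer};
    });
    if (it != used.end()) {
        std::printf("reused sampler %u\n", it->index);
    } else {
        std::printf("new sampler\n");
    }
}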
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index c5e5165ff..8f230d57a 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -275,10 +275,11 @@ using Node = std::shared_ptr<NodeData>;
275using Node4 = std::array<Node, 4>; 275using Node4 = std::array<Node, 4>;
276using NodeBlock = std::vector<Node>; 276using NodeBlock = std::vector<Node>;
277 277
278class BindlessSamplerNode; 278struct ArraySamplerNode;
279class ArraySamplerNode; 279struct BindlessSamplerNode;
280struct SeparateSamplerNode;
280 281
281using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>; 282using TrackSamplerData = std::variant<BindlessSamplerNode, SeparateSamplerNode, ArraySamplerNode>;
282using TrackSampler = std::shared_ptr<TrackSamplerData>; 283using TrackSampler = std::shared_ptr<TrackSamplerData>;
283 284
284struct Sampler { 285struct Sampler {
@@ -288,63 +289,51 @@ struct Sampler {
288 : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow}, 289 : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
289 is_buffer{is_buffer}, is_indexed{is_indexed} {} 290 is_buffer{is_buffer}, is_indexed{is_indexed} {}
290 291
292 /// Separate sampler constructor
293 constexpr explicit Sampler(u32 index, std::pair<u32, u32> offsets, std::pair<u32, u32> buffers,
294 Tegra::Shader::TextureType type, bool is_array, bool is_shadow,
295 bool is_buffer)
296 : index{index}, offset{offsets.first}, secondary_offset{offsets.second},
297 buffer{buffers.first}, secondary_buffer{buffers.second}, type{type}, is_array{is_array},
298 is_shadow{is_shadow}, is_buffer{is_buffer}, is_separated{true} {}
299
291 /// Bindless samplers constructor 300 /// Bindless samplers constructor
292 constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type, 301 constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
293 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed) 302 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
294 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array}, 303 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
295 is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {} 304 is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}
296 305
297 u32 index = 0; ///< Emulated index given for this sampler. 306 u32 index = 0; ///< Emulated index given for this sampler.

298 u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read. 307 u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
299 u32 buffer = 0; ///< Buffer where the bindless sampler is being read (unused on bound samplers). 308 u32 secondary_offset = 0; ///< Secondary offset in the const buffer.
300 u32 size = 1; ///< Size of the sampler. 309 u32 buffer = 0; ///< Buffer where the bindless sampler is read.
310 u32 secondary_buffer = 0; ///< Secondary buffer where the bindless sampler is read.
311 u32 size = 1; ///< Size of the sampler.
301 312
302 Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc) 313 Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc)
303 bool is_array = false; ///< Whether the texture is being sampled as an array texture or not. 314 bool is_array = false; ///< Whether the texture is being sampled as an array texture or not.
304 bool is_shadow = false; ///< Whether the texture is being sampled as a depth texture or not. 315 bool is_shadow = false; ///< Whether the texture is being sampled as a depth texture or not.
305 bool is_buffer = false; ///< Whether the texture is a texture buffer without sampler. 316 bool is_buffer = false; ///< Whether the texture is a texture buffer without sampler.
306 bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not. 317 bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not.
307 bool is_indexed = false; ///< Whether this sampler is an indexed array of textures. 318 bool is_indexed = false; ///< Whether this sampler is an indexed array of textures.
319 bool is_separated = false; ///< Whether the image and sampler are separated or not.
308}; 320};
309 321
310/// Represents a tracked bindless sampler into a direct const buffer 322/// Represents a tracked bindless sampler into a direct const buffer
311class ArraySamplerNode final { 323struct ArraySamplerNode {
312public:
313 explicit ArraySamplerNode(u32 index, u32 base_offset, u32 bindless_var)
314 : index{index}, base_offset{base_offset}, bindless_var{bindless_var} {}
315
316 constexpr u32 GetIndex() const {
317 return index;
318 }
319
320 constexpr u32 GetBaseOffset() const {
321 return base_offset;
322 }
323
324 constexpr u32 GetIndexVar() const {
325 return bindless_var;
326 }
327
328private:
329 u32 index; 324 u32 index;
330 u32 base_offset; 325 u32 base_offset;
331 u32 bindless_var; 326 u32 bindless_var;
332}; 327};
333 328
334/// Represents a tracked bindless sampler into a direct const buffer 329/// Represents a tracked separate sampler image pair that was folded statically
335class BindlessSamplerNode final { 330struct SeparateSamplerNode {
336public: 331 std::pair<u32, u32> indices;
337 explicit BindlessSamplerNode(u32 index, u32 offset) : index{index}, offset{offset} {} 332 std::pair<u32, u32> offsets;
338 333};
339 constexpr u32 GetIndex() const {
340 return index;
341 }
342
343 constexpr u32 GetOffset() const {
344 return offset;
345 }
346 334
347private: 335/// Represents a tracked bindless sampler into a direct const buffer
336struct BindlessSamplerNode {
348 u32 index; 337 u32 index;
349 u32 offset; 338 u32 offset;
350}; 339};
diff --git a/src/video_core/shader/node_helper.h b/src/video_core/shader/node_helper.h
index 11231bbea..1e0886185 100644
--- a/src/video_core/shader/node_helper.h
+++ b/src/video_core/shader/node_helper.h
@@ -48,7 +48,7 @@ Node MakeNode(Args&&... args) {
48template <typename T, typename... Args> 48template <typename T, typename... Args>
49TrackSampler MakeTrackSampler(Args&&... args) { 49TrackSampler MakeTrackSampler(Args&&... args) {
50 static_assert(std::is_convertible_v<T, TrackSamplerData>); 50 static_assert(std::is_convertible_v<T, TrackSamplerData>);
51 return std::make_shared<TrackSamplerData>(T(std::forward<Args>(args)...)); 51 return std::make_shared<TrackSamplerData>(T{std::forward<Args>(args)...});
52} 52}
53 53
54template <typename... Args> 54template <typename... Args>
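Since ArraySamplerNode, SeparateSamplerNode and BindlessSamplerNode are now plain aggregates, MakeTrackSampler must build them with braces: T{args...} performs aggregate initialization, while T(args...) would fail to compile for constructor-less structs under C++17. A self-contained illustration (types trimmed down from node.h):

#include <memory>
#include <utility>
#include <variant>

struct BindlessSamplerNode {
    unsigned index;
    unsigned offset;
};
struct SeparateSamplerNode {
    std::pair<unsigned, unsigned> indices;
    std::pair<unsigned, unsigned> offsets;
};
using TrackSamplerData = std::variant<BindlessSamplerNode, SeparateSamplerNode>;

template <typename T, typename... Args>
std::shared_ptr<TrackSamplerData> MakeTrackSampler(Args&&... args) {
    // T{...} is aggregate initialization; T(...) would not compile for these
    // constructor-less structs before C++20.
    return std::make_shared<TrackSamplerData>(T{std::forward<Args>(args)...});
}

int main() {
    auto track = MakeTrackSampler<BindlessSamplerNode>(3u, 64u);
    return std::get<BindlessSamplerNode>(*track).offset == 64 ? 0 : 1;
}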
diff --git a/src/video_core/shader/registry.cpp b/src/video_core/shader/registry.cpp
index af70b3f35..cdf274e54 100644
--- a/src/video_core/shader/registry.cpp
+++ b/src/video_core/shader/registry.cpp
@@ -93,6 +93,26 @@ std::optional<SamplerDescriptor> Registry::ObtainBoundSampler(u32 offset) {
93 return value; 93 return value;
94} 94}
95 95
96std::optional<Tegra::Engines::SamplerDescriptor> Registry::ObtainSeparateSampler(
97 std::pair<u32, u32> buffers, std::pair<u32, u32> offsets) {
98 SeparateSamplerKey key;
99 key.buffers = buffers;
100 key.offsets = offsets;
101 const auto iter = separate_samplers.find(key);
102 if (iter != separate_samplers.end()) {
103 return iter->second;
104 }
105 if (!engine) {
106 return std::nullopt;
107 }
108
109 const u32 handle_1 = engine->AccessConstBuffer32(stage, key.buffers.first, key.offsets.first);
110 const u32 handle_2 = engine->AccessConstBuffer32(stage, key.buffers.second, key.offsets.second);
111 const SamplerDescriptor value = engine->AccessSampler(handle_1 | handle_2);
112 separate_samplers.emplace(key, value);
113 return value;
114}
115
96std::optional<Tegra::Engines::SamplerDescriptor> Registry::ObtainBindlessSampler(u32 buffer, 116std::optional<Tegra::Engines::SamplerDescriptor> Registry::ObtainBindlessSampler(u32 buffer,
97 u32 offset) { 117 u32 offset) {
98 const std::pair key = {buffer, offset}; 118 const std::pair key = {buffer, offset};
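ObtainSeparateSampler reads one word from each const buffer and ORs them before calling AccessSampler. This works if, as with Maxwell combined handles, the texture and sampler ids occupy disjoint bit fields, so OR-ing the two words concatenates both descriptors. A sketch under that assumption (the field widths below are invented for illustration):

#include <cstdint>
#include <cstdio>

// Illustrative field split: texture id in the low 20 bits, sampler id above it
// (the exact widths are an assumption, not taken from the emulator).
std::uint32_t MakeTextureWord(std::uint32_t tic_id) { return tic_id & 0xFFFFF; }
std::uint32_t MakeSamplerWord(std::uint32_t tsc_id) { return (tsc_id & 0xFFF) << 20; }

int main() {
    const std::uint32_t handle_1 = MakeTextureWord(0x123); // read from cbuf A
    const std::uint32_t handle_2 = MakeSamplerWord(0x45);  // read from cbuf B
    // Disjoint fields make OR equivalent to concatenating both descriptors.
    std::printf("combined handle = 0x%08x\n", handle_1 | handle_2);
}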
diff --git a/src/video_core/shader/registry.h b/src/video_core/shader/registry.h
index 0c80d35fd..231206765 100644
--- a/src/video_core/shader/registry.h
+++ b/src/video_core/shader/registry.h
@@ -19,8 +19,39 @@
19 19
20namespace VideoCommon::Shader { 20namespace VideoCommon::Shader {
21 21
22struct SeparateSamplerKey {
23 std::pair<u32, u32> buffers;
24 std::pair<u32, u32> offsets;
25};
26
27} // namespace VideoCommon::Shader
28
29namespace std {
30
31template <>
32struct hash<VideoCommon::Shader::SeparateSamplerKey> {
33 std::size_t operator()(const VideoCommon::Shader::SeparateSamplerKey& key) const noexcept {
34 return std::hash<u32>{}(key.buffers.first ^ key.buffers.second ^ key.offsets.first ^
35 key.offsets.second);
36 }
37};
38
39template <>
40struct equal_to<VideoCommon::Shader::SeparateSamplerKey> {
41 bool operator()(const VideoCommon::Shader::SeparateSamplerKey& lhs,
42 const VideoCommon::Shader::SeparateSamplerKey& rhs) const noexcept {
43 return lhs.buffers == rhs.buffers && lhs.offsets == rhs.offsets;
44 }
45};
46
47} // namespace std
48
49namespace VideoCommon::Shader {
50
22using KeyMap = std::unordered_map<std::pair<u32, u32>, u32, Common::PairHash>; 51using KeyMap = std::unordered_map<std::pair<u32, u32>, u32, Common::PairHash>;
23using BoundSamplerMap = std::unordered_map<u32, Tegra::Engines::SamplerDescriptor>; 52using BoundSamplerMap = std::unordered_map<u32, Tegra::Engines::SamplerDescriptor>;
53using SeparateSamplerMap =
54 std::unordered_map<SeparateSamplerKey, Tegra::Engines::SamplerDescriptor>;
24using BindlessSamplerMap = 55using BindlessSamplerMap =
25 std::unordered_map<std::pair<u32, u32>, Tegra::Engines::SamplerDescriptor, Common::PairHash>; 56 std::unordered_map<std::pair<u32, u32>, Tegra::Engines::SamplerDescriptor, Common::PairHash>;
26 57
@@ -73,6 +104,9 @@ public:
73 104
74 std::optional<Tegra::Engines::SamplerDescriptor> ObtainBoundSampler(u32 offset); 105 std::optional<Tegra::Engines::SamplerDescriptor> ObtainBoundSampler(u32 offset);
75 106
107 std::optional<Tegra::Engines::SamplerDescriptor> ObtainSeparateSampler(
108 std::pair<u32, u32> buffers, std::pair<u32, u32> offsets);
109
76 std::optional<Tegra::Engines::SamplerDescriptor> ObtainBindlessSampler(u32 buffer, u32 offset); 110 std::optional<Tegra::Engines::SamplerDescriptor> ObtainBindlessSampler(u32 buffer, u32 offset);
77 111
78 /// Inserts a key. 112 /// Inserts a key.
@@ -128,6 +162,7 @@ private:
128 Tegra::Engines::ConstBufferEngineInterface* engine = nullptr; 162 Tegra::Engines::ConstBufferEngineInterface* engine = nullptr;
129 KeyMap keys; 163 KeyMap keys;
130 BoundSamplerMap bound_samplers; 164 BoundSamplerMap bound_samplers;
165 SeparateSamplerMap separate_samplers;
131 BindlessSamplerMap bindless_samplers; 166 BindlessSamplerMap bindless_samplers;
132 u32 bound_buffer; 167 u32 bound_buffer;
133 GraphicsInfo graphics_info; 168 GraphicsInfo graphics_info;
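Specializing std::hash and std::equal_to lets SeparateSamplerKey serve directly as an unordered_map key without extra template arguments at each use site; the XOR fold is collision-prone in general but adequate for the handful of keys a shader produces. A compilable reduction of the same setup:

#include <cstddef>
#include <cstdio>
#include <functional>
#include <unordered_map>
#include <utility>

struct SeparateSamplerKey {
    std::pair<unsigned, unsigned> buffers;
    std::pair<unsigned, unsigned> offsets;
};

namespace std {

template <>
struct hash<SeparateSamplerKey> {
    std::size_t operator()(const SeparateSamplerKey& key) const noexcept {
        // XOR-folding all four words: cheap, collision-prone, fine at this scale.
        return std::hash<unsigned>{}(key.buffers.first ^ key.buffers.second ^
                                     key.offsets.first ^ key.offsets.second);
    }
};

template <>
struct equal_to<SeparateSamplerKey> {
    bool operator()(const SeparateSamplerKey& lhs,
                    const SeparateSamplerKey& rhs) const noexcept {
        return lhs.buffers == rhs.buffers && lhs.offsets == rhs.offsets;
    }
};

} // namespace std

int main() {
    std::unordered_map<SeparateSamplerKey, int> cache;
    cache.emplace(SeparateSamplerKey{{1, 2}, {16, 32}}, 7);
    std::printf("%d\n", cache.at(SeparateSamplerKey{{1, 2}, {16, 32}})); // 7
}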
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 15ae152f2..3a98b2104 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -330,8 +330,8 @@ private:
330 OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation); 330 OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation);
331 331
332 /// Queries the missing sampler info from the execution context. 332 /// Queries the missing sampler info from the execution context.
333 SamplerInfo GetSamplerInfo(SamplerInfo info, u32 offset, 333 SamplerInfo GetSamplerInfo(SamplerInfo info,
334 std::optional<u32> buffer = std::nullopt); 334 std::optional<Tegra::Engines::SamplerDescriptor> sampler);
335 335
336 /// Accesses a texture sampler. 336 /// Accesses a texture sampler.
337 std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info); 337 std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
@@ -409,8 +409,14 @@ private:
409 409
410 std::tuple<Node, u32, u32> TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const; 410 std::tuple<Node, u32, u32> TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const;
411 411
412 std::tuple<Node, TrackSampler> TrackBindlessSampler(Node tracked, const NodeBlock& code, 412 std::pair<Node, TrackSampler> TrackBindlessSampler(Node tracked, const NodeBlock& code,
413 s64 cursor); 413 s64 cursor);
414
415 std::pair<Node, TrackSampler> HandleBindlessIndirectRead(const CbufNode& cbuf,
416 const OperationNode& operation,
417 Node gpr, Node base_offset,
418 Node tracked, const NodeBlock& code,
419 s64 cursor);
414 420
415 std::optional<u32> TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const; 421 std::optional<u32> TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const;
416 422
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index eb97bfd41..d5ed81442 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -14,6 +14,7 @@
14namespace VideoCommon::Shader { 14namespace VideoCommon::Shader {
15 15
16namespace { 16namespace {
17
17std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor, 18std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor,
18 OperationCode operation_code) { 19 OperationCode operation_code) {
19 for (; cursor >= 0; --cursor) { 20 for (; cursor >= 0; --cursor) {
@@ -63,7 +64,8 @@ bool AmendNodeCv(std::size_t amend_index, Node node) {
63 if (const auto operation = std::get_if<OperationNode>(&*node)) { 64 if (const auto operation = std::get_if<OperationNode>(&*node)) {
64 operation->SetAmendIndex(amend_index); 65 operation->SetAmendIndex(amend_index);
65 return true; 66 return true;
66 } else if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { 67 }
68 if (const auto conditional = std::get_if<ConditionalNode>(&*node)) {
67 conditional->SetAmendIndex(amend_index); 69 conditional->SetAmendIndex(amend_index);
68 return true; 70 return true;
69 } 71 }
@@ -72,40 +74,27 @@ bool AmendNodeCv(std::size_t amend_index, Node node) {
72 74
73} // Anonymous namespace 75} // Anonymous namespace
74 76
75std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, const NodeBlock& code, 77std::pair<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, const NodeBlock& code,
76 s64 cursor) { 78 s64 cursor) {
77 if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) { 79 if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) {
80 const u32 cbuf_index = cbuf->GetIndex();
81
78 // Constant buffer found, test if it's an immediate 82 // Constant buffer found, test if it's an immediate
79 const auto& offset = cbuf->GetOffset(); 83 const auto& offset = cbuf->GetOffset();
80 if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { 84 if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) {
81 auto track = 85 auto track = MakeTrackSampler<BindlessSamplerNode>(cbuf_index, immediate->GetValue());
82 MakeTrackSampler<BindlessSamplerNode>(cbuf->GetIndex(), immediate->GetValue());
83 return {tracked, track}; 86 return {tracked, track};
84 } 87 }
85 if (const auto operation = std::get_if<OperationNode>(&*offset)) { 88 if (const auto operation = std::get_if<OperationNode>(&*offset)) {
86 const u32 bound_buffer = registry.GetBoundBuffer(); 89 const u32 bound_buffer = registry.GetBoundBuffer();
87 if (bound_buffer != cbuf->GetIndex()) { 90 if (bound_buffer != cbuf_index) {
88 return {}; 91 return {};
89 } 92 }
90 const auto pair = DecoupleIndirectRead(*operation); 93 if (const std::optional pair = DecoupleIndirectRead(*operation)) {
91 if (!pair) { 94 auto [gpr, base_offset] = *pair;
92 return {}; 95 return HandleBindlessIndirectRead(*cbuf, *operation, gpr, base_offset, tracked,
96 code, cursor);
93 } 97 }
94 auto [gpr, base_offset] = *pair;
95 const auto offset_inm = std::get_if<ImmediateNode>(&*base_offset);
96 const auto& gpu_driver = registry.AccessGuestDriverProfile();
97 const u32 bindless_cv = NewCustomVariable();
98 Node op =
99 Operation(OperationCode::UDiv, gpr, Immediate(gpu_driver.GetTextureHandlerSize()));
100
101 const Node cv_node = GetCustomVariable(bindless_cv);
102 Node amend_op = Operation(OperationCode::Assign, cv_node, std::move(op));
103 const std::size_t amend_index = DeclareAmend(std::move(amend_op));
104 AmendNodeCv(amend_index, code[cursor]);
105 // TODO Implement Bindless Index custom variable
106 auto track = MakeTrackSampler<ArraySamplerNode>(cbuf->GetIndex(),
107 offset_inm->GetValue(), bindless_cv);
108 return {tracked, track};
109 } 98 }
110 return {}; 99 return {};
111 } 100 }
@@ -122,10 +111,23 @@ std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, cons
122 return TrackBindlessSampler(source, code, new_cursor); 111 return TrackBindlessSampler(source, code, new_cursor);
123 } 112 }
124 if (const auto operation = std::get_if<OperationNode>(&*tracked)) { 113 if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
125 for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) { 114 const OperationNode& op = *operation;
126 if (auto found = TrackBindlessSampler((*operation)[i - 1], code, cursor); 115
127 std::get<0>(found)) { 116 const OperationCode opcode = operation->GetCode();
128 // Cbuf found in operand. 117 if (opcode == OperationCode::IBitwiseOr || opcode == OperationCode::UBitwiseOr) {
118 ASSERT(op.GetOperandsCount() == 2);
119 auto [node_a, index_a, offset_a] = TrackCbuf(op[0], code, cursor);
120 auto [node_b, index_b, offset_b] = TrackCbuf(op[1], code, cursor);
121 if (node_a && node_b) {
122 auto track = MakeTrackSampler<SeparateSamplerNode>(std::pair{index_a, index_b},
123 std::pair{offset_a, offset_b});
124 return {tracked, std::move(track)};
125 }
126 }
127 std::size_t i = op.GetOperandsCount();
128 while (i--) {
129 if (auto found = TrackBindlessSampler(op[i], code, cursor); std::get<0>(found)) {
130 // Constant buffer found in operand.
129 return found; 131 return found;
130 } 132 }
131 } 133 }
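The operand scan now uses the unsigned while (i--) idiom: the condition tests the old value while the body sees i already stepped down to a valid index, which is why the body must index with op[i] rather than op[i - 1]. A tiny demonstration:

#include <cstddef>
#include <cstdio>

int main() {
    const int ops[] = {10, 20, 30};
    std::size_t i = sizeof(ops) / sizeof(ops[0]);
    while (i--) { // tests the pre-decrement value; body uses the decremented i
        std::printf("operand %zu = %d\n", i, ops[i]);
    }
    // After the loop i has wrapped to SIZE_MAX; the body never dereferences it.
}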
@@ -139,6 +141,26 @@ std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, cons
139 return {}; 141 return {};
140} 142}
141 143
144std::pair<Node, TrackSampler> ShaderIR::HandleBindlessIndirectRead(
145 const CbufNode& cbuf, const OperationNode& operation, Node gpr, Node base_offset, Node tracked,
146 const NodeBlock& code, s64 cursor) {
147 const auto offset_imm = std::get<ImmediateNode>(*base_offset);
148 const auto& gpu_driver = registry.AccessGuestDriverProfile();
149 const u32 bindless_cv = NewCustomVariable();
150 const u32 texture_handler_size = gpu_driver.GetTextureHandlerSize();
151 Node op = Operation(OperationCode::UDiv, gpr, Immediate(texture_handler_size));
152
153 Node cv_node = GetCustomVariable(bindless_cv);
154 Node amend_op = Operation(OperationCode::Assign, std::move(cv_node), std::move(op));
155 const std::size_t amend_index = DeclareAmend(std::move(amend_op));
156 AmendNodeCv(amend_index, code[cursor]);
157
158 // TODO: Implement bindless index custom variable
159 auto track =
160 MakeTrackSampler<ArraySamplerNode>(cbuf.GetIndex(), offset_imm.GetValue(), bindless_cv);
161 return {tracked, track};
162}
163
142std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, 164std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code,
143 s64 cursor) const { 165 s64 cursor) const {
144 if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) { 166 if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) {
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
new file mode 100644
index 000000000..a23c23886
--- /dev/null
+++ b/src/video_core/shader_cache.h
@@ -0,0 +1,228 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <algorithm>
8#include <memory>
9#include <mutex>
10#include <unordered_map>
11#include <utility>
12#include <vector>
13
14#include "common/assert.h"
15#include "common/common_types.h"
16#include "video_core/rasterizer_interface.h"
17
18namespace VideoCommon {
19
20template <class T>
21class ShaderCache {
22 static constexpr u64 PAGE_SHIFT = 14;
23
24 struct Entry {
25 VAddr addr_start;
26 VAddr addr_end;
27 T* data;
28
29 bool is_memory_marked = true;
30
31 constexpr bool Overlaps(VAddr start, VAddr end) const noexcept {
32 return start < addr_end && addr_start < end;
33 }
34 };
35
36public:
37 virtual ~ShaderCache() = default;
38
39 /// @brief Removes shaders inside a given region
40 /// @note Checks for ranges
41 /// @param addr Start address of the invalidation
42 /// @param size Number of bytes of the invalidation
43 void InvalidateRegion(VAddr addr, std::size_t size) {
44 std::scoped_lock lock{invalidation_mutex};
45 InvalidatePagesInRegion(addr, size);
46 RemovePendingShaders();
47 }
48
49 /// @brief Unmarks a memory region as cached and marks it for removal
50 /// @param addr Start address of the CPU write operation
51 /// @param size Number of bytes of the CPU write operation
52 void OnCPUWrite(VAddr addr, std::size_t size) {
53 std::lock_guard lock{invalidation_mutex};
54 InvalidatePagesInRegion(addr, size);
55 }
56
57 /// @brief Flushes delayed removal operations
58 void SyncGuestHost() {
59 std::scoped_lock lock{invalidation_mutex};
60 RemovePendingShaders();
61 }
62
63 /// @brief Tries to obtain a cached shader starting in a given address
64 /// @note Doesn't check for ranges; the given address has to be the start of the shader
65 /// @param addr Start address of the shader; lookup is exact, not by range
66 /// @return Pointer to a valid shader, nullptr when nothing is found
67 T* TryGet(VAddr addr) const {
68 std::scoped_lock lock{lookup_mutex};
69
70 const auto it = lookup_cache.find(addr);
71 if (it == lookup_cache.end()) {
72 return nullptr;
73 }
74 return it->second->data;
75 }
76
77protected:
78 explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {}
79
80 /// @brief Register in the cache a given entry
81 /// @param data Shader to store in the cache
82 /// @param addr Start address of the shader that will be registered
83 /// @param size Size in bytes of the shader
84 void Register(std::unique_ptr<T> data, VAddr addr, std::size_t size) {
85 std::scoped_lock lock{invalidation_mutex, lookup_mutex};
86
87 const VAddr addr_end = addr + size;
88 Entry* const entry = NewEntry(addr, addr_end, data.get());
89
90 const u64 page_end = addr_end >> PAGE_SHIFT;
91 for (u64 page = addr >> PAGE_SHIFT; page <= page_end; ++page) {
92 invalidation_cache[page].push_back(entry);
93 }
94
95 storage.push_back(std::move(data));
96
97 rasterizer.UpdatePagesCachedCount(addr, size, 1);
98 }
99
100 /// @brief Called when a shader is going to be removed
101 /// @param shader Shader that will be removed
102 /// @pre invalidation_cache is locked
103 /// @pre lookup_mutex is locked
104 virtual void OnShaderRemoval([[maybe_unused]] T* shader) {}
105
106private:
107 /// @brief Invalidate pages in a given region
108 /// @pre invalidation_mutex is locked
109 void InvalidatePagesInRegion(VAddr addr, std::size_t size) {
110 const VAddr addr_end = addr + size;
111 const u64 page_end = addr_end >> PAGE_SHIFT;
112 for (u64 page = addr >> PAGE_SHIFT; page <= page_end; ++page) {
113 const auto it = invalidation_cache.find(page);
114 if (it == invalidation_cache.end()) {
115 continue;
116 }
117
118 std::vector<Entry*>& entries = it->second;
119 InvalidatePageEntries(entries, addr, addr_end);
120
121 // If there's nothing else in this page, remove it to avoid overpopulating the hash map.
122 if (entries.empty()) {
123 invalidation_cache.erase(it);
124 }
125 }
126 }
127
128 /// @brief Remove shaders marked for deletion
129 /// @pre invalidation_mutex is locked
130 void RemovePendingShaders() {
131 if (marked_for_removal.empty()) {
132 return;
133 }
134 std::scoped_lock lock{lookup_mutex};
135
136 std::vector<T*> removed_shaders;
137 removed_shaders.reserve(marked_for_removal.size());
138
139 for (Entry* const entry : marked_for_removal) {
140 if (lookup_cache.erase(entry->addr_start) > 0) {
141 removed_shaders.push_back(entry->data);
142 }
143 }
144 marked_for_removal.clear();
145
146 if (!removed_shaders.empty()) {
147 RemoveShadersFromStorage(std::move(removed_shaders));
148 }
149 }
150
151 /// @brief Invalidates entries in a given range for the passed page
152 /// @param entries Vector of entries in the page, it will be modified on overlaps
153 /// @param addr Start address of the invalidation
154 /// @param addr_end Non-inclusive end address of the invalidation
155 /// @pre invalidation_mutex is locked
156 void InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr, VAddr addr_end) {
157 auto it = entries.begin();
158 while (it != entries.end()) {
159 Entry* const entry = *it;
160 if (!entry->Overlaps(addr, addr_end)) {
161 ++it;
162 continue;
163 }
164 UnmarkMemory(entry);
165 marked_for_removal.push_back(entry);
166
167 it = entries.erase(it);
168 }
169 }
170
171 /// @brief Unmarks an entry from the rasterizer cache
172 /// @param entry Entry to unmark from memory
173 void UnmarkMemory(Entry* entry) {
174 if (!entry->is_memory_marked) {
175 return;
176 }
177 entry->is_memory_marked = false;
178
179 const VAddr addr = entry->addr_start;
180 const std::size_t size = entry->addr_end - addr;
181 rasterizer.UpdatePagesCachedCount(addr, size, -1);
182 }
183
184 /// @brief Removes a vector of shaders from a list
185 /// @param removed_shaders Shaders to be removed from the storage; may contain duplicates
186 /// @pre invalidation_mutex is locked
187 /// @pre lookup_mutex is locked
188 void RemoveShadersFromStorage(std::vector<T*> removed_shaders) {
189 // Remove duplicates
190 std::sort(removed_shaders.begin(), removed_shaders.end());
191 removed_shaders.erase(std::unique(removed_shaders.begin(), removed_shaders.end()),
192 removed_shaders.end());
193
194 // Now that there are no duplicates, we can notify removals
195 for (T* const shader : removed_shaders) {
196 OnShaderRemoval(shader);
197 }
198
199 // Remove them from the cache
200 const auto is_removed = [&removed_shaders](std::unique_ptr<T>& shader) {
201 return std::find(removed_shaders.begin(), removed_shaders.end(), shader.get()) !=
202 removed_shaders.end();
203 };
204 storage.erase(std::remove_if(storage.begin(), storage.end(), is_removed), storage.end());
205 }
206
207 /// @brief Creates a new entry in the lookup cache and returns its pointer
208 /// @pre lookup_mutex is locked
209 Entry* NewEntry(VAddr addr, VAddr addr_end, T* data) {
210 auto entry = std::make_unique<Entry>(Entry{addr, addr_end, data});
211 Entry* const entry_pointer = entry.get();
212
213 lookup_cache.emplace(addr, std::move(entry));
214 return entry_pointer;
215 }
216
217 VideoCore::RasterizerInterface& rasterizer;
218
219 mutable std::mutex lookup_mutex;
220 std::mutex invalidation_mutex;
221
222 std::unordered_map<u64, std::unique_ptr<Entry>> lookup_cache;
223 std::unordered_map<u64, std::vector<Entry*>> invalidation_cache;
224 std::vector<std::unique_ptr<T>> storage;
225 std::vector<Entry*> marked_for_removal;
226};
227
228} // namespace VideoCommon
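
For context, a minimal reconstruction of the page walk that feeds the loop body visible at lines 118-125 of the file above. This is a hedged sketch, not verbatim file content: it assumes a PAGE_BITS granularity constant declared earlier in shader_cache.h, and the member names mirror those listed at the bottom of the class.

// Hedged reconstruction: walk every page touched by [addr, addr + size) and let
// InvalidatePageEntries() prune the entries that overlap the invalidated range.
void InvalidatePagesInRegionSketch(VAddr addr, std::size_t size) {
    const VAddr addr_end = addr + size;
    const u64 page_end = (addr_end + (1ULL << PAGE_BITS) - 1) >> PAGE_BITS;
    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
        const auto it = invalidation_cache.find(page);
        if (it == invalidation_cache.end()) {
            continue; // No cached shaders on this page
        }
        // From here on the body matches lines 118-125 shown above
        std::vector<Entry*>& entries = it->second;
        InvalidatePageEntries(entries, addr, addr_end);
        if (entries.empty()) {
            invalidation_cache.erase(it);
        }
    }
}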
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 715f39d0d..94d3a6ae5 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -248,12 +248,11 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
248 248
249 // Use an extra temporary buffer 249 // Use an extra temporary buffer
250 auto& tmp_buffer = staging_cache.GetBuffer(1); 250 auto& tmp_buffer = staging_cache.GetBuffer(1);
251 // Special case for 3D Texture Segments
252 const bool must_read_current_data =
253 params.block_depth > 0 && params.target == VideoCore::Surface::SurfaceTarget::Texture2D;
254 tmp_buffer.resize(guest_memory_size); 251 tmp_buffer.resize(guest_memory_size);
255 host_ptr = tmp_buffer.data(); 252 host_ptr = tmp_buffer.data();
256 if (must_read_current_data) { 253
254 if (params.target == SurfaceTarget::Texture3D) {
255 // Special case for 3D texture segments
257 memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size); 256 memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
258 } 257 }
259 258
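
The rewritten condition narrows the pre-read to true 3D targets. The rationale is inferred rather than stated in the patch: a flush of a 3D texture segment rewrites only its own slices of tmp_buffer, so the rest of the guest range must already hold current data. A hedged annotation of the new branch:

// Hedged annotation (rationale inferred, not stated in the patch):
if (params.target == SurfaceTarget::Texture3D) {
    // A 3D segment flush overwrites only its own slices of the guest range, so the
    // buffer is primed with the current guest bytes to avoid clobbering the rest.
    memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
}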
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index 79e10ffbb..173f2edba 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -217,8 +217,8 @@ public:
217 } 217 }
218 218
219 bool IsProtected() const { 219 bool IsProtected() const {
220 // Only 3D Slices are to be protected 220 // Only 3D slices are to be protected
221 return is_target && params.block_depth > 0; 221 return is_target && params.target == SurfaceTarget::Texture3D;
222 } 222 }
223 223
224 bool IsRenderTarget() const { 224 bool IsRenderTarget() const {
@@ -250,6 +250,11 @@ public:
250 return GetView(ViewParams(overview_params.target, 0, num_layers, 0, params.num_levels)); 250 return GetView(ViewParams(overview_params.target, 0, num_layers, 0, params.num_levels));
251 } 251 }
252 252
253 TView Emplace3DView(u32 slice, u32 depth, u32 base_level, u32 num_levels) {
254 return GetView(ViewParams(VideoCore::Surface::SurfaceTarget::Texture3D, slice, depth,
255 base_level, num_levels));
256 }
257
253 std::optional<TView> EmplaceIrregularView(const SurfaceParams& view_params, 258 std::optional<TView> EmplaceIrregularView(const SurfaceParams& view_params,
254 const GPUVAddr view_addr, 259 const GPUVAddr view_addr,
255 const std::size_t candidate_size, const u32 mipmap, 260 const std::size_t candidate_size, const u32 mipmap,
@@ -272,8 +277,8 @@ public:
272 std::optional<TView> EmplaceView(const SurfaceParams& view_params, const GPUVAddr view_addr, 277 std::optional<TView> EmplaceView(const SurfaceParams& view_params, const GPUVAddr view_addr,
273 const std::size_t candidate_size) { 278 const std::size_t candidate_size) {
274 if (params.target == SurfaceTarget::Texture3D || 279 if (params.target == SurfaceTarget::Texture3D ||
275 (params.num_levels == 1 && !params.is_layered) || 280 view_params.target == SurfaceTarget::Texture3D ||
276 view_params.target == SurfaceTarget::Texture3D) { 281 (params.num_levels == 1 && !params.is_layered)) {
277 return {}; 282 return {};
278 } 283 }
279 const auto layer_mipmap{GetLayerMipmap(view_addr)}; 284 const auto layer_mipmap{GetLayerMipmap(view_addr)};
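
Emplace3DView is consumed by the texture cache's 3D path later in this change set. A hedged call-site sketch, assuming `surface` is a TSurface whose target is Texture3D and that `slice` and `params` are in scope as in Manage3DSurfaces:

// Hedged usage sketch: a one-level view starting at a given slice; mirrors the
// call made in Manage3DSurfaces further down in this change set.
TView slice_view = surface->Emplace3DView(/*slice=*/slice, /*depth=*/params.depth,
                                          /*base_level=*/0, /*num_levels=*/1);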
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 884fabffe..0b2b2b8c4 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -215,10 +215,19 @@ SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::siz
215 params.num_levels = 1; 215 params.num_levels = 1;
216 params.emulated_levels = 1; 216 params.emulated_levels = 1;
217 217
218 const bool is_layered = config.layers > 1 && params.block_depth == 0; 218 if (config.memory_layout.is_3d != 0) {
219 params.is_layered = is_layered; 219 params.depth = config.layers.Value();
220 params.depth = is_layered ? config.layers.Value() : 1; 220 params.is_layered = false;
221 params.target = is_layered ? SurfaceTarget::Texture2DArray : SurfaceTarget::Texture2D; 221 params.target = SurfaceTarget::Texture3D;
222 } else if (config.layers > 1) {
223 params.depth = config.layers.Value();
224 params.is_layered = true;
225 params.target = SurfaceTarget::Texture2DArray;
226 } else {
227 params.depth = 1;
228 params.is_layered = false;
229 params.target = SurfaceTarget::Texture2D;
230 }
222 return params; 231 return params;
223} 232}
224 233
@@ -237,7 +246,7 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
237 params.width = config.width; 246 params.width = config.width;
238 params.height = config.height; 247 params.height = config.height;
239 params.pitch = config.pitch; 248 params.pitch = config.pitch;
240 // TODO(Rodrigo): Try to guess the surface target from depth and layer parameters 249 // TODO(Rodrigo): Try to guess texture arrays from parameters
241 params.target = SurfaceTarget::Texture2D; 250 params.target = SurfaceTarget::Texture2D;
242 params.depth = 1; 251 params.depth = 1;
243 params.num_levels = 1; 252 params.num_levels = 1;
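
At a glance, the three-way target selection the framebuffer path gains above (this restates the hunk, checked in order):

// config.memory_layout.is_3d != 0 -> Texture3D,      depth = layers, is_layered = false
// config.layers > 1               -> Texture2DArray, depth = layers, is_layered = true
// otherwise                       -> Texture2D,      depth = 1,      is_layered = false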
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 6f63217a2..b543fc8c0 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -298,15 +298,13 @@ public:
298 const GPUVAddr src_gpu_addr = src_config.Address(); 298 const GPUVAddr src_gpu_addr = src_config.Address();
299 const GPUVAddr dst_gpu_addr = dst_config.Address(); 299 const GPUVAddr dst_gpu_addr = dst_config.Address();
300 DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr); 300 DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr);
301 const std::optional<VAddr> dst_cpu_addr = 301
302 system.GPU().MemoryManager().GpuToCpuAddress(dst_gpu_addr); 302 const auto& memory_manager = system.GPU().MemoryManager();
303 const std::optional<VAddr> src_cpu_addr = 303 const std::optional<VAddr> dst_cpu_addr = memory_manager.GpuToCpuAddress(dst_gpu_addr);
304 system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr); 304 const std::optional<VAddr> src_cpu_addr = memory_manager.GpuToCpuAddress(src_gpu_addr);
305 std::pair<TSurface, TView> dst_surface = 305 std::pair dst_surface = GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
306 GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false); 306 TView src_surface = GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false).second;
307 std::pair<TSurface, TView> src_surface = 307 ImageBlit(src_surface, dst_surface.second, copy_config);
308 GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
309 ImageBlit(src_surface.second, dst_surface.second, copy_config);
310 dst_surface.first->MarkAsModified(true, Tick()); 308 dst_surface.first->MarkAsModified(true, Tick());
311 } 309 }
312 310
@@ -508,12 +506,12 @@ private:
508 return RecycleStrategy::Flush; 506 return RecycleStrategy::Flush;
509 } 507 }
510 // 3D Textures decision 508 // 3D Textures decision
511 if (params.block_depth > 1 || params.target == SurfaceTarget::Texture3D) { 509 if (params.target == SurfaceTarget::Texture3D) {
512 return RecycleStrategy::Flush; 510 return RecycleStrategy::Flush;
513 } 511 }
514 for (const auto& s : overlaps) { 512 for (const auto& s : overlaps) {
515 const auto& s_params = s->GetSurfaceParams(); 513 const auto& s_params = s->GetSurfaceParams();
516 if (s_params.block_depth > 1 || s_params.target == SurfaceTarget::Texture3D) { 514 if (s_params.target == SurfaceTarget::Texture3D) {
517 return RecycleStrategy::Flush; 515 return RecycleStrategy::Flush;
518 } 516 }
519 } 517 }
@@ -731,51 +729,9 @@ private:
731 */ 729 */
732 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(VectorSurface& overlaps, 730 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(VectorSurface& overlaps,
733 const SurfaceParams& params, 731 const SurfaceParams& params,
734 const GPUVAddr gpu_addr, 732 GPUVAddr gpu_addr, VAddr cpu_addr,
735 const VAddr cpu_addr,
736 bool preserve_contents) { 733 bool preserve_contents) {
737 if (params.target == SurfaceTarget::Texture3D) { 734 if (params.target != SurfaceTarget::Texture3D) {
738 bool failed = false;
739 if (params.num_levels > 1) {
740 // We can't handle mipmaps in 3D textures yet, better fallback to LLE approach
741 return std::nullopt;
742 }
743 TSurface new_surface = GetUncachedSurface(gpu_addr, params);
744 bool modified = false;
745 for (auto& surface : overlaps) {
746 const SurfaceParams& src_params = surface->GetSurfaceParams();
747 if (src_params.target != SurfaceTarget::Texture2D) {
748 failed = true;
749 break;
750 }
751 if (src_params.height != params.height) {
752 failed = true;
753 break;
754 }
755 if (src_params.block_depth != params.block_depth ||
756 src_params.block_height != params.block_height) {
757 failed = true;
758 break;
759 }
760 const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
761 const auto offsets = params.GetBlockOffsetXYZ(offset);
762 const auto z = std::get<2>(offsets);
763 modified |= surface->IsModified();
764 const CopyParams copy_params(0, 0, 0, 0, 0, z, 0, 0, params.width, params.height,
765 1);
766 ImageCopy(surface, new_surface, copy_params);
767 }
768 if (failed) {
769 return std::nullopt;
770 }
771 for (const auto& surface : overlaps) {
772 Unregister(surface);
773 }
774 new_surface->MarkAsModified(modified, Tick());
775 Register(new_surface);
776 auto view = new_surface->GetMainView();
777 return {{std::move(new_surface), view}};
778 } else {
779 for (const auto& surface : overlaps) { 735 for (const auto& surface : overlaps) {
780 if (!surface->MatchTarget(params.target)) { 736 if (!surface->MatchTarget(params.target)) {
781 if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) { 737 if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) {
@@ -791,11 +747,60 @@ private:
791 continue; 747 continue;
792 } 748 }
793 if (surface->MatchesStructure(params) == MatchStructureResult::FullMatch) { 749 if (surface->MatchesStructure(params) == MatchStructureResult::FullMatch) {
794 return {{surface, surface->GetMainView()}}; 750 return std::make_pair(surface, surface->GetMainView());
795 } 751 }
796 } 752 }
797 return InitializeSurface(gpu_addr, params, preserve_contents); 753 return InitializeSurface(gpu_addr, params, preserve_contents);
798 } 754 }
755
756 if (params.num_levels > 1) {
 757 // We can't handle mipmaps in 3D textures yet; better to fall back to the LLE approach
758 return std::nullopt;
759 }
760
761 if (overlaps.size() == 1) {
762 const auto& surface = overlaps[0];
763 const SurfaceParams& overlap_params = surface->GetSurfaceParams();
764 // Don't attempt to render to textures with more than one level for now
 765 // The texture has to start at or before the sampled address if we want to render to it
766 if (overlap_params.num_levels == 1 && cpu_addr >= surface->GetCpuAddr()) {
767 const u32 offset = static_cast<u32>(cpu_addr - surface->GetCpuAddr());
768 const u32 slice = std::get<2>(params.GetBlockOffsetXYZ(offset));
769 if (slice < overlap_params.depth) {
770 auto view = surface->Emplace3DView(slice, params.depth, 0, 1);
771 return std::make_pair(std::move(surface), std::move(view));
772 }
773 }
774 }
775
776 TSurface new_surface = GetUncachedSurface(gpu_addr, params);
777 bool modified = false;
778
779 for (auto& surface : overlaps) {
780 const SurfaceParams& src_params = surface->GetSurfaceParams();
781 if (src_params.target != SurfaceTarget::Texture2D ||
782 src_params.height != params.height ||
783 src_params.block_depth != params.block_depth ||
784 src_params.block_height != params.block_height) {
785 return std::nullopt;
786 }
787 modified |= surface->IsModified();
788
789 const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
790 const u32 slice = std::get<2>(params.GetBlockOffsetXYZ(offset));
791 const u32 width = params.width;
792 const u32 height = params.height;
793 const CopyParams copy_params(0, 0, 0, 0, 0, slice, 0, 0, width, height, 1);
794 ImageCopy(surface, new_surface, copy_params);
795 }
796 for (const auto& surface : overlaps) {
797 Unregister(surface);
798 }
799 new_surface->MarkAsModified(modified, Tick());
800 Register(new_surface);
801
802 TView view = new_surface->GetMainView();
803 return std::make_pair(std::move(new_surface), std::move(view));
799 } 804 }
800 805
801 /** 806 /**
@@ -873,7 +878,7 @@ private:
873 } 878 }
874 } 879 }
875 880
876 // Check if it's a 3D texture 881 // Manage 3D textures
877 if (params.block_depth > 0) { 882 if (params.block_depth > 0) {
878 auto surface = 883 auto surface =
879 Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents); 884 Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
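
In the rebuilt path above, each overlapping 2D surface is copied into the slice addressed by its CPU offset within the new 3D surface. The following is a hedged simplification of that mapping: `bytes_per_pixel` and the contiguous, equally-sized slice layout are illustrative assumptions only, since the real code resolves (x, y, z) through params.GetBlockOffsetXYZ on the block-linear layout.

// Hedged worked example, not the real mapping:
const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
const u32 slice_bytes = params.width * params.height * bytes_per_pixel; // illustrative
const u32 slice = offset / slice_bytes;
// The patch instead takes std::get<2>(params.GetBlockOffsetXYZ(offset)) to respect
// block-linear swizzling, then copies one layer into that slice:
const CopyParams copy_params(0, 0, 0, 0, 0, slice, 0, 0, params.width, params.height, 1);
ImageCopy(surface, new_surface, copy_params);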
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index d6c9e5013..32c81dc70 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -533,6 +533,8 @@ void Config::ReadDebuggingValues() {
533 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool(); 533 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool();
534 Settings::values.disable_cpu_opt = 534 Settings::values.disable_cpu_opt =
535 ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool(); 535 ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool();
536 Settings::values.disable_macro_jit =
537 ReadSetting(QStringLiteral("disable_macro_jit"), false).toBool();
536 538
537 qt_config->endGroup(); 539 qt_config->endGroup();
538} 540}
@@ -1007,6 +1009,7 @@ void Config::SaveDebuggingValues() {
1007 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false); 1009 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false);
1008 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false); 1010 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false);
1009 WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false); 1011 WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false);
1012 WriteSetting(QStringLiteral("disable_macro_jit"), Settings::values.disable_macro_jit, false);
1010 1013
1011 qt_config->endGroup(); 1014 qt_config->endGroup();
1012} 1015}
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index c2026763e..2c77441fd 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -39,6 +39,8 @@ void ConfigureDebug::SetConfiguration() {
39 ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt); 39 ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt);
40 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn()); 40 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn());
41 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug); 41 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug);
42 ui->disable_macro_jit->setEnabled(!Core::System::GetInstance().IsPoweredOn());
43 ui->disable_macro_jit->setChecked(Settings::values.disable_macro_jit);
42} 44}
43 45
44void ConfigureDebug::ApplyConfiguration() { 46void ConfigureDebug::ApplyConfiguration() {
@@ -51,6 +53,7 @@ void ConfigureDebug::ApplyConfiguration() {
51 Settings::values.quest_flag = ui->quest_flag->isChecked(); 53 Settings::values.quest_flag = ui->quest_flag->isChecked();
52 Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked(); 54 Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked();
53 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked(); 55 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked();
56 Settings::values.disable_macro_jit = ui->disable_macro_jit->isChecked();
54 Debugger::ToggleConsole(); 57 Debugger::ToggleConsole();
55 Log::Filter filter; 58 Log::Filter filter;
56 filter.ParseFilterString(Settings::values.log_filter); 59 filter.ParseFilterString(Settings::values.log_filter);
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index e0d4c4a44..46f0208c6 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -148,6 +148,19 @@
148 </property> 148 </property>
149 </widget> 149 </widget>
150 </item> 150 </item>
151 <item>
152 <widget class="QCheckBox" name="disable_macro_jit">
153 <property name="enabled">
154 <bool>true</bool>
155 </property>
156 <property name="whatsThis">
157 <string>When checked, it disables the macro Just In Time compiler. Enabling this makes games run slower.</string>
158 </property>
159 <property name="text">
160 <string>Disable Macro JIT</string>
161 </property>
162 </widget>
163 </item>
151 </layout> 164 </layout>
152 </widget> 165 </widget>
153 </item> 166 </item>
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index 5f9cc158e..659b9f701 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -430,6 +430,8 @@ void Config::ReadValues() {
430 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false); 430 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false);
431 Settings::values.disable_cpu_opt = 431 Settings::values.disable_cpu_opt =
432 sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false); 432 sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false);
433 Settings::values.disable_macro_jit =
434 sdl2_config->GetBoolean("Debugging", "disable_macro_jit", false);
433 435
434 const auto title_list = sdl2_config->Get("AddOns", "title_ids", ""); 436 const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
435 std::stringstream ss(title_list); 437 std::stringstream ss(title_list);
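
All three frontends now persist disable_macro_jit; its consumer is presumably the macro engine factory added in src/video_core/macro/macro.cpp. A hedged sketch of such a gate follows; GetMacroEngine, MacroInterpreter, and MacroJITx64 are assumed names modeled on the files this change set adds.

// Hedged sketch of the setting's consumer (names are assumptions):
std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
    if (Settings::values.disable_macro_jit) {
        // Debug path: interpret macros instead of JIT-compiling them (slower)
        return std::make_unique<MacroInterpreter>(maxwell3d);
    }
    return std::make_unique<MacroJITx64>(maxwell3d);
}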
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index 102502084..45c07ed5d 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -286,6 +286,8 @@ quest_flag =
286# Determines whether or not JIT CPU optimizations are enabled 286# Determines whether or not JIT CPU optimizations are enabled
287# false: Optimizations Enabled, true: Optimizations Disabled 287# false: Optimizations Enabled, true: Optimizations Disabled
288disable_cpu_opt = 288disable_cpu_opt =
289# Determines whether or not the macro JIT compiler is enabled (false: JIT Enabled, true: JIT Disabled)
290disable_macro_jit =
289 291
290[WebService] 292[WebService]
291# Whether or not to enable telemetry 293# Whether or not to enable telemetry