Diffstat (limited to 'src')
-rw-r--r--  src/audio_core/algorithm/interpolate.cpp | 198
-rw-r--r--  src/audio_core/algorithm/interpolate.h | 9
-rw-r--r--  src/core/CMakeLists.txt | 2
-rw-r--r--  src/core/core.cpp | 8
-rw-r--r--  src/core/core.h | 6
-rw-r--r--  src/core/frontend/framebuffer_layout.cpp | 21
-rw-r--r--  src/core/frontend/framebuffer_layout.h | 15
-rw-r--r--  src/core/hardware_properties.h | 2
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 121
-rw-r--r--  src/core/hle/kernel/kernel.h | 37
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 56
-rw-r--r--  src/core/hle/kernel/scheduler.h | 46
-rw-r--r--  src/core/hle/kernel/thread.cpp | 12
-rw-r--r--  src/core/hle/kernel/thread.h | 6
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 44
-rw-r--r--  src/core/hle/kernel/time_manager.h | 43
-rw-r--r--  src/core/hle/service/bcat/backend/boxcat.cpp | 7
-rw-r--r--  src/core/hle/service/ldn/ldn.cpp | 10
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.h | 8
-rw-r--r--  src/core/settings.h | 1
-rw-r--r--  src/video_core/CMakeLists.txt | 5
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 81
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 51
-rw-r--r--  src/video_core/gpu.cpp | 2
-rw-r--r--  src/video_core/memory_manager.cpp | 17
-rw-r--r--  src/video_core/memory_manager.h | 7
-rw-r--r--  src/video_core/query_cache.h | 359
-rw-r--r--  src/video_core/rasterizer_interface.h | 19
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.cpp | 120
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.h | 78
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 78
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 47
-rw-r--r--  src/video_core/renderer_opengl/gl_resource_manager.cpp | 17
-rw-r--r--  src/video_core/renderer_opengl/gl_resource_manager.h | 25
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.cpp | 35
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.h | 7
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 122
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h | 104
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 48
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 21
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 11
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 8
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 69
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 4
-rw-r--r--  src/video_core/shader/decode/conversion.cpp | 14
-rw-r--r--  src/video_core/shader/decode/texture.cpp | 71
-rw-r--r--  src/video_core/texture_cache/surface_base.cpp | 4
-rw-r--r--  src/video_core/texture_cache/surface_params.cpp | 47
-rw-r--r--  src/video_core/texture_cache/surface_params.h | 5
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 24
-rw-r--r--  src/web_service/web_backend.cpp | 7
-rw-r--r--  src/yuzu/configuration/config.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_graphics.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_graphics.ui | 35
-rw-r--r--  src/yuzu_cmd/config.cpp | 2
-rw-r--r--  src/yuzu_cmd/default_ini.h | 4
-rw-r--r--  src/yuzu_tester/config.cpp | 2
-rw-r--r--  src/yuzu_tester/default_ini.h | 4
62 files changed, 1937 insertions, 319 deletions
diff --git a/src/audio_core/algorithm/interpolate.cpp b/src/audio_core/algorithm/interpolate.cpp
index 5005ba519..a58f24169 100644
--- a/src/audio_core/algorithm/interpolate.cpp
+++ b/src/audio_core/algorithm/interpolate.cpp
@@ -5,6 +5,7 @@
5#define _USE_MATH_DEFINES 5#define _USE_MATH_DEFINES
6 6
7#include <algorithm> 7#include <algorithm>
8#include <climits>
8#include <cmath> 9#include <cmath>
9#include <vector> 10#include <vector>
10#include "audio_core/algorithm/interpolate.h" 11#include "audio_core/algorithm/interpolate.h"
@@ -13,13 +14,131 @@
13 14
14namespace AudioCore { 15namespace AudioCore {
15 16
16/// The Lanczos kernel 17constexpr std::array<s16, 512> curve_lut0 = {
17static double Lanczos(std::size_t a, double x) { 18 6600, 19426, 6722, 3, 6479, 19424, 6845, 9, 6359, 19419, 6968, 15, 6239,
18 if (x == 0.0) 19 19412, 7093, 22, 6121, 19403, 7219, 28, 6004, 19391, 7345, 34, 5888, 19377,
19 return 1.0; 20 7472, 41, 5773, 19361, 7600, 48, 5659, 19342, 7728, 55, 5546, 19321, 7857,
20 const double px = M_PI * x; 21 62, 5434, 19298, 7987, 69, 5323, 19273, 8118, 77, 5213, 19245, 8249, 84,
21 return a * std::sin(px) * std::sin(px / a) / (px * px); 22 5104, 19215, 8381, 92, 4997, 19183, 8513, 101, 4890, 19148, 8646, 109, 4785,
22} 23 19112, 8780, 118, 4681, 19073, 8914, 127, 4579, 19031, 9048, 137, 4477, 18988,
24 9183, 147, 4377, 18942, 9318, 157, 4277, 18895, 9454, 168, 4179, 18845, 9590,
25 179, 4083, 18793, 9726, 190, 3987, 18738, 9863, 202, 3893, 18682, 10000, 215,
26 3800, 18624, 10137, 228, 3709, 18563, 10274, 241, 3618, 18500, 10411, 255, 3529,
27 18436, 10549, 270, 3441, 18369, 10687, 285, 3355, 18300, 10824, 300, 3269, 18230,
28 10962, 317, 3186, 18157, 11100, 334, 3103, 18082, 11238, 351, 3022, 18006, 11375,
29 369, 2942, 17927, 11513, 388, 2863, 17847, 11650, 408, 2785, 17765, 11788, 428,
30 2709, 17681, 11925, 449, 2635, 17595, 12062, 471, 2561, 17507, 12198, 494, 2489,
31 17418, 12334, 517, 2418, 17327, 12470, 541, 2348, 17234, 12606, 566, 2280, 17140,
32 12741, 592, 2213, 17044, 12876, 619, 2147, 16946, 13010, 647, 2083, 16846, 13144,
33 675, 2020, 16745, 13277, 704, 1958, 16643, 13409, 735, 1897, 16539, 13541, 766,
34 1838, 16434, 13673, 798, 1780, 16327, 13803, 832, 1723, 16218, 13933, 866, 1667,
35 16109, 14062, 901, 1613, 15998, 14191, 937, 1560, 15885, 14318, 975, 1508, 15772,
36 14445, 1013, 1457, 15657, 14571, 1052, 1407, 15540, 14695, 1093, 1359, 15423, 14819,
37 1134, 1312, 15304, 14942, 1177, 1266, 15185, 15064, 1221, 1221, 15064, 15185, 1266,
38 1177, 14942, 15304, 1312, 1134, 14819, 15423, 1359, 1093, 14695, 15540, 1407, 1052,
39 14571, 15657, 1457, 1013, 14445, 15772, 1508, 975, 14318, 15885, 1560, 937, 14191,
40 15998, 1613, 901, 14062, 16109, 1667, 866, 13933, 16218, 1723, 832, 13803, 16327,
41 1780, 798, 13673, 16434, 1838, 766, 13541, 16539, 1897, 735, 13409, 16643, 1958,
42 704, 13277, 16745, 2020, 675, 13144, 16846, 2083, 647, 13010, 16946, 2147, 619,
43 12876, 17044, 2213, 592, 12741, 17140, 2280, 566, 12606, 17234, 2348, 541, 12470,
44 17327, 2418, 517, 12334, 17418, 2489, 494, 12198, 17507, 2561, 471, 12062, 17595,
45 2635, 449, 11925, 17681, 2709, 428, 11788, 17765, 2785, 408, 11650, 17847, 2863,
46 388, 11513, 17927, 2942, 369, 11375, 18006, 3022, 351, 11238, 18082, 3103, 334,
47 11100, 18157, 3186, 317, 10962, 18230, 3269, 300, 10824, 18300, 3355, 285, 10687,
48 18369, 3441, 270, 10549, 18436, 3529, 255, 10411, 18500, 3618, 241, 10274, 18563,
49 3709, 228, 10137, 18624, 3800, 215, 10000, 18682, 3893, 202, 9863, 18738, 3987,
50 190, 9726, 18793, 4083, 179, 9590, 18845, 4179, 168, 9454, 18895, 4277, 157,
51 9318, 18942, 4377, 147, 9183, 18988, 4477, 137, 9048, 19031, 4579, 127, 8914,
52 19073, 4681, 118, 8780, 19112, 4785, 109, 8646, 19148, 4890, 101, 8513, 19183,
53 4997, 92, 8381, 19215, 5104, 84, 8249, 19245, 5213, 77, 8118, 19273, 5323,
54 69, 7987, 19298, 5434, 62, 7857, 19321, 5546, 55, 7728, 19342, 5659, 48,
55 7600, 19361, 5773, 41, 7472, 19377, 5888, 34, 7345, 19391, 6004, 28, 7219,
56 19403, 6121, 22, 7093, 19412, 6239, 15, 6968, 19419, 6359, 9, 6845, 19424,
57 6479, 3, 6722, 19426, 6600};
58
59constexpr std::array<s16, 512> curve_lut1 = {
60 -68, 32639, 69, -5, -200, 32630, 212, -15, -328, 32613, 359, -26, -450,
61 32586, 512, -36, -568, 32551, 669, -47, -680, 32507, 832, -58, -788, 32454,
62 1000, -69, -891, 32393, 1174, -80, -990, 32323, 1352, -92, -1084, 32244, 1536,
63 -103, -1173, 32157, 1724, -115, -1258, 32061, 1919, -128, -1338, 31956, 2118, -140,
64 -1414, 31844, 2322, -153, -1486, 31723, 2532, -167, -1554, 31593, 2747, -180, -1617,
65 31456, 2967, -194, -1676, 31310, 3192, -209, -1732, 31157, 3422, -224, -1783, 30995,
66 3657, -240, -1830, 30826, 3897, -256, -1874, 30649, 4143, -272, -1914, 30464, 4393,
67 -289, -1951, 30272, 4648, -307, -1984, 30072, 4908, -325, -2014, 29866, 5172, -343,
68 -2040, 29652, 5442, -362, -2063, 29431, 5716, -382, -2083, 29203, 5994, -403, -2100,
69 28968, 6277, -424, -2114, 28727, 6565, -445, -2125, 28480, 6857, -468, -2133, 28226,
70 7153, -490, -2139, 27966, 7453, -514, -2142, 27700, 7758, -538, -2142, 27428, 8066,
71 -563, -2141, 27151, 8378, -588, -2136, 26867, 8694, -614, -2130, 26579, 9013, -641,
72 -2121, 26285, 9336, -668, -2111, 25987, 9663, -696, -2098, 25683, 9993, -724, -2084,
73 25375, 10326, -753, -2067, 25063, 10662, -783, -2049, 24746, 11000, -813, -2030, 24425,
74 11342, -844, -2009, 24100, 11686, -875, -1986, 23771, 12033, -907, -1962, 23438, 12382,
75 -939, -1937, 23103, 12733, -972, -1911, 22764, 13086, -1005, -1883, 22422, 13441, -1039,
76 -1855, 22077, 13798, -1072, -1825, 21729, 14156, -1107, -1795, 21380, 14516, -1141, -1764,
77 21027, 14877, -1176, -1732, 20673, 15239, -1211, -1700, 20317, 15602, -1246, -1667, 19959,
78 15965, -1282, -1633, 19600, 16329, -1317, -1599, 19239, 16694, -1353, -1564, 18878, 17058,
79 -1388, -1530, 18515, 17423, -1424, -1495, 18151, 17787, -1459, -1459, 17787, 18151, -1495,
80 -1424, 17423, 18515, -1530, -1388, 17058, 18878, -1564, -1353, 16694, 19239, -1599, -1317,
81 16329, 19600, -1633, -1282, 15965, 19959, -1667, -1246, 15602, 20317, -1700, -1211, 15239,
82 20673, -1732, -1176, 14877, 21027, -1764, -1141, 14516, 21380, -1795, -1107, 14156, 21729,
83 -1825, -1072, 13798, 22077, -1855, -1039, 13441, 22422, -1883, -1005, 13086, 22764, -1911,
84 -972, 12733, 23103, -1937, -939, 12382, 23438, -1962, -907, 12033, 23771, -1986, -875,
85 11686, 24100, -2009, -844, 11342, 24425, -2030, -813, 11000, 24746, -2049, -783, 10662,
86 25063, -2067, -753, 10326, 25375, -2084, -724, 9993, 25683, -2098, -696, 9663, 25987,
87 -2111, -668, 9336, 26285, -2121, -641, 9013, 26579, -2130, -614, 8694, 26867, -2136,
88 -588, 8378, 27151, -2141, -563, 8066, 27428, -2142, -538, 7758, 27700, -2142, -514,
89 7453, 27966, -2139, -490, 7153, 28226, -2133, -468, 6857, 28480, -2125, -445, 6565,
90 28727, -2114, -424, 6277, 28968, -2100, -403, 5994, 29203, -2083, -382, 5716, 29431,
91 -2063, -362, 5442, 29652, -2040, -343, 5172, 29866, -2014, -325, 4908, 30072, -1984,
92 -307, 4648, 30272, -1951, -289, 4393, 30464, -1914, -272, 4143, 30649, -1874, -256,
93 3897, 30826, -1830, -240, 3657, 30995, -1783, -224, 3422, 31157, -1732, -209, 3192,
94 31310, -1676, -194, 2967, 31456, -1617, -180, 2747, 31593, -1554, -167, 2532, 31723,
95 -1486, -153, 2322, 31844, -1414, -140, 2118, 31956, -1338, -128, 1919, 32061, -1258,
96 -115, 1724, 32157, -1173, -103, 1536, 32244, -1084, -92, 1352, 32323, -990, -80,
97 1174, 32393, -891, -69, 1000, 32454, -788, -58, 832, 32507, -680, -47, 669,
98 32551, -568, -36, 512, 32586, -450, -26, 359, 32613, -328, -15, 212, 32630,
99 -200, -5, 69, 32639, -68};
100
101constexpr std::array<s16, 512> curve_lut2 = {
102 3195, 26287, 3329, -32, 3064, 26281, 3467, -34, 2936, 26270, 3608, -38, 2811,
103 26253, 3751, -42, 2688, 26230, 3897, -46, 2568, 26202, 4046, -50, 2451, 26169,
104 4199, -54, 2338, 26130, 4354, -58, 2227, 26085, 4512, -63, 2120, 26035, 4673,
105 -67, 2015, 25980, 4837, -72, 1912, 25919, 5004, -76, 1813, 25852, 5174, -81,
106 1716, 25780, 5347, -87, 1622, 25704, 5522, -92, 1531, 25621, 5701, -98, 1442,
107 25533, 5882, -103, 1357, 25440, 6066, -109, 1274, 25342, 6253, -115, 1193, 25239,
108 6442, -121, 1115, 25131, 6635, -127, 1040, 25018, 6830, -133, 967, 24899, 7027,
109 -140, 897, 24776, 7227, -146, 829, 24648, 7430, -153, 764, 24516, 7635, -159,
110 701, 24379, 7842, -166, 641, 24237, 8052, -174, 583, 24091, 8264, -181, 526,
111 23940, 8478, -187, 472, 23785, 8695, -194, 420, 23626, 8914, -202, 371, 23462,
112 9135, -209, 324, 23295, 9358, -215, 279, 23123, 9583, -222, 236, 22948, 9809,
113 -230, 194, 22769, 10038, -237, 154, 22586, 10269, -243, 117, 22399, 10501, -250,
114 81, 22208, 10735, -258, 47, 22015, 10970, -265, 15, 21818, 11206, -271, -16,
115 21618, 11444, -277, -44, 21415, 11684, -283, -71, 21208, 11924, -290, -97, 20999,
116 12166, -296, -121, 20786, 12409, -302, -143, 20571, 12653, -306, -163, 20354, 12898,
117 -311, -183, 20134, 13143, -316, -201, 19911, 13389, -321, -218, 19686, 13635, -325,
118 -234, 19459, 13882, -328, -248, 19230, 14130, -332, -261, 18998, 14377, -335, -273,
119 18765, 14625, -337, -284, 18531, 14873, -339, -294, 18295, 15121, -341, -302, 18057,
120 15369, -341, -310, 17817, 15617, -341, -317, 17577, 15864, -340, -323, 17335, 16111,
121 -340, -328, 17092, 16357, -338, -332, 16848, 16603, -336, -336, 16603, 16848, -332,
122 -338, 16357, 17092, -328, -340, 16111, 17335, -323, -340, 15864, 17577, -317, -341,
123 15617, 17817, -310, -341, 15369, 18057, -302, -341, 15121, 18295, -294, -339, 14873,
124 18531, -284, -337, 14625, 18765, -273, -335, 14377, 18998, -261, -332, 14130, 19230,
125 -248, -328, 13882, 19459, -234, -325, 13635, 19686, -218, -321, 13389, 19911, -201,
126 -316, 13143, 20134, -183, -311, 12898, 20354, -163, -306, 12653, 20571, -143, -302,
127 12409, 20786, -121, -296, 12166, 20999, -97, -290, 11924, 21208, -71, -283, 11684,
128 21415, -44, -277, 11444, 21618, -16, -271, 11206, 21818, 15, -265, 10970, 22015,
129 47, -258, 10735, 22208, 81, -250, 10501, 22399, 117, -243, 10269, 22586, 154,
130 -237, 10038, 22769, 194, -230, 9809, 22948, 236, -222, 9583, 23123, 279, -215,
131 9358, 23295, 324, -209, 9135, 23462, 371, -202, 8914, 23626, 420, -194, 8695,
132 23785, 472, -187, 8478, 23940, 526, -181, 8264, 24091, 583, -174, 8052, 24237,
133 641, -166, 7842, 24379, 701, -159, 7635, 24516, 764, -153, 7430, 24648, 829,
134 -146, 7227, 24776, 897, -140, 7027, 24899, 967, -133, 6830, 25018, 1040, -127,
135 6635, 25131, 1115, -121, 6442, 25239, 1193, -115, 6253, 25342, 1274, -109, 6066,
136 25440, 1357, -103, 5882, 25533, 1442, -98, 5701, 25621, 1531, -92, 5522, 25704,
137 1622, -87, 5347, 25780, 1716, -81, 5174, 25852, 1813, -76, 5004, 25919, 1912,
138 -72, 4837, 25980, 2015, -67, 4673, 26035, 2120, -63, 4512, 26085, 2227, -58,
139 4354, 26130, 2338, -54, 4199, 26169, 2451, -50, 4046, 26202, 2568, -46, 3897,
140 26230, 2688, -42, 3751, 26253, 2811, -38, 3608, 26270, 2936, -34, 3467, 26281,
141 3064, -32, 3329, 26287, 3195};
23 142
24std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input, double ratio) { 143std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input, double ratio) {
25 if (input.size() < 2) 144 if (input.size() < 2)
@@ -30,40 +149,39 @@ std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input,
30 ratio = 1.0; 149 ratio = 1.0;
31 } 150 }
32 151
33 if (ratio != state.current_ratio) { 152 const int step = static_cast<int>(ratio * 0x8000);
34 const double cutoff_frequency = std::min(0.5 / ratio, 0.5 * ratio); 153 const std::array<s16, 512>& lut = [step] {
35 state.nyquist = CascadingFilter::LowPass(std::clamp(cutoff_frequency, 0.0, 0.4), 3); 154 if (step > 0xaaaa) {
36 state.current_ratio = ratio; 155 return curve_lut0;
37 } 156 }
38 state.nyquist.Process(input); 157 if (step <= 0x8000) {
39 158 return curve_lut1;
40 constexpr std::size_t taps = InterpolationState::lanczos_taps;
41 const std::size_t num_frames = input.size() / 2;
42
43 std::vector<s16> output;
44 output.reserve(static_cast<std::size_t>(input.size() / ratio + 4));
45
46 double& pos = state.position;
47 auto& h = state.history;
48 for (std::size_t i = 0; i < num_frames; ++i) {
49 std::rotate(h.begin(), h.end() - 1, h.end());
50 h[0][0] = input[i * 2 + 0];
51 h[0][1] = input[i * 2 + 1];
52
53 while (pos <= 1.0) {
54 double l = 0.0;
55 double r = 0.0;
56 for (std::size_t j = 0; j < h.size(); j++) {
57 const double lanczos_calc = Lanczos(taps, pos + j - taps + 1);
58 l += lanczos_calc * h[j][0];
59 r += lanczos_calc * h[j][1];
60 }
61 output.emplace_back(static_cast<s16>(std::clamp(l, -32768.0, 32767.0)));
62 output.emplace_back(static_cast<s16>(std::clamp(r, -32768.0, 32767.0)));
63
64 pos += ratio;
65 } 159 }
66 pos -= 1.0; 160 return curve_lut2;
161 }();
162
163 std::vector<s16> output(static_cast<std::size_t>(input.size() / ratio));
164 int in_offset = 0;
165 for (std::size_t out_offset = 0; out_offset < output.size(); out_offset += 2) {
166 const int lut_index = (state.fraction >> 8) * 4;
167
168 const int l = input[(in_offset + 0) * 2 + 0] * lut[lut_index + 0] +
169 input[(in_offset + 1) * 2 + 0] * lut[lut_index + 1] +
170 input[(in_offset + 2) * 2 + 0] * lut[lut_index + 2] +
171 input[(in_offset + 3) * 2 + 0] * lut[lut_index + 3];
172
173 const int r = input[(in_offset + 0) * 2 + 1] * lut[lut_index + 0] +
174 input[(in_offset + 1) * 2 + 1] * lut[lut_index + 1] +
175 input[(in_offset + 2) * 2 + 1] * lut[lut_index + 2] +
176 input[(in_offset + 3) * 2 + 1] * lut[lut_index + 3];
177
178 const int new_offset = state.fraction + step;
179
180 in_offset += new_offset >> 15;
181 state.fraction = new_offset & 0x7fff;
182
183 output[out_offset + 0] = static_cast<s16>(std::clamp(l >> 15, SHRT_MIN, SHRT_MAX));
184 output[out_offset + 1] = static_cast<s16>(std::clamp(r >> 15, SHRT_MIN, SHRT_MAX));
67 } 185 }
68 186
69 return output; 187 return output;
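
The rewritten Interpolate() drops the Lanczos kernel and the cascading low-pass filter in favour of fixed-point polyphase filtering: the ratio becomes a 15-bit step (ratio * 0x8000), one of three 512-entry coefficient curves is chosen by step size, and each output frame is a four-tap dot product whose coefficient row is picked from the top bits of the running fraction. The sketch below reproduces the same stepping arithmetic for a mono signal with an illustrative linear-interpolation table; the table contents, names and bounds handling are assumptions for the example, not the yuzu LUT data.

#include <algorithm>
#include <array>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <vector>

using s16 = std::int16_t;

// Illustrative coefficient table: 128 phases x 4 taps in Q15. The real code uses
// three precomputed curves (curve_lut0..2); this one just blends the two centre
// taps linearly so the arithmetic is easy to follow.
std::array<s16, 512> MakeToyLut() {
    std::array<s16, 512> lut{};
    for (int phase = 0; phase < 128; ++phase) {
        const int frac = phase << 8; // top bits of the 15-bit fraction
        lut[phase * 4 + 0] = 0;
        lut[phase * 4 + 1] = static_cast<s16>(0x7fff - frac);
        lut[phase * 4 + 2] = static_cast<s16>(frac);
        lut[phase * 4 + 3] = 0;
    }
    return lut;
}

// Mono resampler using the same 15-bit fixed-point position as the new Interpolate().
std::vector<s16> ResampleMono(const std::vector<s16>& input, double ratio) {
    const std::array<s16, 512> lut = MakeToyLut();
    const int step = static_cast<int>(ratio * 0x8000); // input frames per output frame, Q15
    std::vector<s16> output(static_cast<std::size_t>(input.size() / ratio));
    int fraction = 0;          // fractional input position, 0..0x7fff
    std::size_t in_offset = 0; // whole input frames consumed so far
    for (std::size_t out = 0; out < output.size(); ++out) {
        const int lut_index = (fraction >> 8) * 4; // coefficient row for this phase
        int acc = 0;
        for (int tap = 0; tap < 4; ++tap) {
            // Clamp the read index so the last taps do not run past the buffer.
            const std::size_t index = std::min(in_offset + tap, input.size() - 1);
            acc += input[index] * lut[lut_index + tap];
        }
        const int advanced = fraction + step;
        in_offset += advanced >> 15;  // carry whole frames into the integer position
        fraction = advanced & 0x7fff; // keep the fractional remainder
        output[out] = static_cast<s16>(std::clamp(acc >> 15, SHRT_MIN, SHRT_MAX));
    }
    return output;
}

Stereo handling in the actual change simply repeats the dot product for the left and right samples of each interleaved frame.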
diff --git a/src/audio_core/algorithm/interpolate.h b/src/audio_core/algorithm/interpolate.h
index edbd6460f..1b9831a75 100644
--- a/src/audio_core/algorithm/interpolate.h
+++ b/src/audio_core/algorithm/interpolate.h
@@ -6,19 +6,12 @@
6 6
7#include <array> 7#include <array>
8#include <vector> 8#include <vector>
9#include "audio_core/algorithm/filter.h"
10#include "common/common_types.h" 9#include "common/common_types.h"
11 10
12namespace AudioCore { 11namespace AudioCore {
13 12
14struct InterpolationState { 13struct InterpolationState {
15 static constexpr std::size_t lanczos_taps = 4; 14 int fraction = 0;
16 static constexpr std::size_t history_size = lanczos_taps * 2 - 1;
17
18 double current_ratio = 0.0;
19 CascadingFilter nyquist;
20 std::array<std::array<s16, 2>, history_size> history = {};
21 double position = 0;
22}; 15};
23 16
24/// Interpolates input signal to produce output signal. 17/// Interpolates input signal to produce output signal.
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 26612e692..88c06b2ce 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -187,6 +187,8 @@ add_library(core STATIC
187 hle/kernel/synchronization.h 187 hle/kernel/synchronization.h
188 hle/kernel/thread.cpp 188 hle/kernel/thread.cpp
189 hle/kernel/thread.h 189 hle/kernel/thread.h
190 hle/kernel/time_manager.cpp
191 hle/kernel/time_manager.h
190 hle/kernel/transfer_memory.cpp 192 hle/kernel/transfer_memory.cpp
191 hle/kernel/transfer_memory.h 193 hle/kernel/transfer_memory.h
192 hle/kernel/vm_manager.cpp 194 hle/kernel/vm_manager.cpp
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 0eb0c0dca..86e314c94 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -707,4 +707,12 @@ const Service::SM::ServiceManager& System::ServiceManager() const {
707 return *impl->service_manager; 707 return *impl->service_manager;
708} 708}
709 709
710void System::RegisterCoreThread(std::size_t id) {
711 impl->kernel.RegisterCoreThread(id);
712}
713
714void System::RegisterHostThread() {
715 impl->kernel.RegisterHostThread();
716}
717
710} // namespace Core 718} // namespace Core
diff --git a/src/core/core.h b/src/core/core.h
index e69d68fcf..8d862a8e6 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -360,6 +360,12 @@ public:
360 360
361 const CurrentBuildProcessID& GetCurrentProcessBuildID() const; 361 const CurrentBuildProcessID& GetCurrentProcessBuildID() const;
362 362
363 /// Register a host thread as an emulated CPU Core.
364 void RegisterCoreThread(std::size_t id);
365
366 /// Register a host thread as an auxiliary thread.
367 void RegisterHostThread();
368
363private: 369private:
364 System(); 370 System();
365 371
diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp
index d6d2cf3f0..2dc795d56 100644
--- a/src/core/frontend/framebuffer_layout.cpp
+++ b/src/core/frontend/framebuffer_layout.cpp
@@ -27,9 +27,9 @@ FramebufferLayout DefaultFrameLayout(u32 width, u32 height) {
27 // so just calculate them both even if the other isn't showing. 27 // so just calculate them both even if the other isn't showing.
28 FramebufferLayout res{width, height}; 28 FramebufferLayout res{width, height};
29 29
30 const float emulation_aspect_ratio{static_cast<float>(ScreenUndocked::Height) / 30 const float window_aspect_ratio = static_cast<float>(height) / width;
31 ScreenUndocked::Width}; 31 const float emulation_aspect_ratio = EmulationAspectRatio(
32 const auto window_aspect_ratio = static_cast<float>(height) / width; 32 static_cast<AspectRatio>(Settings::values.aspect_ratio), window_aspect_ratio);
33 33
34 const Common::Rectangle<u32> screen_window_area{0, 0, width, height}; 34 const Common::Rectangle<u32> screen_window_area{0, 0, width, height};
35 Common::Rectangle<u32> screen = MaxRectangle(screen_window_area, emulation_aspect_ratio); 35 Common::Rectangle<u32> screen = MaxRectangle(screen_window_area, emulation_aspect_ratio);
@@ -58,4 +58,19 @@ FramebufferLayout FrameLayoutFromResolutionScale(u32 res_scale) {
58 return DefaultFrameLayout(width, height); 58 return DefaultFrameLayout(width, height);
59} 59}
60 60
61float EmulationAspectRatio(AspectRatio aspect, float window_aspect_ratio) {
62 switch (aspect) {
63 case AspectRatio::Default:
64 return static_cast<float>(ScreenUndocked::Height) / ScreenUndocked::Width;
65 case AspectRatio::R4_3:
66 return 3.0f / 4.0f;
67 case AspectRatio::R21_9:
68 return 9.0f / 21.0f;
69 case AspectRatio::StretchToWindow:
70 return window_aspect_ratio;
71 default:
72 return static_cast<float>(ScreenUndocked::Height) / ScreenUndocked::Width;
73 }
74}
75
61} // namespace Layout 76} // namespace Layout
diff --git a/src/core/frontend/framebuffer_layout.h b/src/core/frontend/framebuffer_layout.h
index d2370adde..1d39c1faf 100644
--- a/src/core/frontend/framebuffer_layout.h
+++ b/src/core/frontend/framebuffer_layout.h
@@ -18,6 +18,13 @@ enum ScreenDocked : u32 {
18 HeightDocked = 1080, 18 HeightDocked = 1080,
19}; 19};
20 20
21enum class AspectRatio {
22 Default,
23 R4_3,
24 R21_9,
25 StretchToWindow,
26};
27
21/// Describes the layout of the window framebuffer 28/// Describes the layout of the window framebuffer
22struct FramebufferLayout { 29struct FramebufferLayout {
23 u32 width{ScreenUndocked::Width}; 30 u32 width{ScreenUndocked::Width};
@@ -48,4 +55,12 @@ FramebufferLayout DefaultFrameLayout(u32 width, u32 height);
48 */ 55 */
49FramebufferLayout FrameLayoutFromResolutionScale(u32 res_scale); 56FramebufferLayout FrameLayoutFromResolutionScale(u32 res_scale);
50 57
58/**
59 * Convenience method to determine emulation aspect ratio
60 * @param aspect Represents the index of aspect ratio stored in Settings::values.aspect_ratio
61 * @param window_aspect_ratio Current window aspect ratio
62 * @return Emulation render window aspect ratio
63 */
64float EmulationAspectRatio(AspectRatio aspect, float window_aspect_ratio);
65
51} // namespace Layout 66} // namespace Layout
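
EmulationAspectRatio() returns a height-over-width ratio chosen by the new aspect_ratio setting, and DefaultFrameLayout() fits the largest rectangle with that ratio into the window. MaxRectangle() itself is not shown in this hunk, so the following is only an approximation of that fitting step under the same height/width convention; Rect and FitScreen are illustrative names.

#include <cstdint>

struct Rect {
    std::uint32_t left;
    std::uint32_t top;
    std::uint32_t right;
    std::uint32_t bottom;
};

// Fit the largest rectangle with the requested height/width ratio into the window
// and centre it; an approximation of what MaxRectangle() provides to the layout code.
Rect FitScreen(std::uint32_t window_width, std::uint32_t window_height,
               float emulation_aspect_ratio) {
    const float window_aspect_ratio = static_cast<float>(window_height) / window_width;
    std::uint32_t width = window_width;
    std::uint32_t height = window_height;
    if (window_aspect_ratio > emulation_aspect_ratio) {
        // Window is proportionally taller than the emulated screen:
        // keep the full width and letterbox vertically.
        height = static_cast<std::uint32_t>(width * emulation_aspect_ratio);
    } else {
        // Window is proportionally wider: keep the full height and pillarbox.
        width = static_cast<std::uint32_t>(height / emulation_aspect_ratio);
    }
    const std::uint32_t x = (window_width - width) / 2;
    const std::uint32_t y = (window_height - height) / 2;
    return Rect{x, y, x + width, y + height};
}

With AspectRatio::StretchToWindow the two ratios are equal, so the rectangle degenerates to the whole window and the setting needs no special casing in the layout code.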
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index 213461b6a..b04e046ed 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -20,6 +20,8 @@ constexpr u32 NUM_CPU_CORES = 4; // Number of CPU Cores
20 20
21} // namespace Hardware 21} // namespace Hardware
22 22
23constexpr u32 INVALID_HOST_THREAD_ID = 0xFFFFFFFF;
24
23struct EmuThreadHandle { 25struct EmuThreadHandle {
24 u32 host_handle; 26 u32 host_handle;
25 u32 guest_handle; 27 u32 guest_handle;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 4eb1d8703..9232f4d7e 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -3,9 +3,12 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <atomic> 5#include <atomic>
6#include <bitset>
6#include <functional> 7#include <functional>
7#include <memory> 8#include <memory>
8#include <mutex> 9#include <mutex>
10#include <thread>
11#include <unordered_map>
9#include <utility> 12#include <utility>
10 13
11#include "common/assert.h" 14#include "common/assert.h"
@@ -15,6 +18,7 @@
15#include "core/core.h" 18#include "core/core.h"
16#include "core/core_timing.h" 19#include "core/core_timing.h"
17#include "core/core_timing_util.h" 20#include "core/core_timing_util.h"
21#include "core/hardware_properties.h"
18#include "core/hle/kernel/client_port.h" 22#include "core/hle/kernel/client_port.h"
19#include "core/hle/kernel/errors.h" 23#include "core/hle/kernel/errors.h"
20#include "core/hle/kernel/handle_table.h" 24#include "core/hle/kernel/handle_table.h"
@@ -25,6 +29,7 @@
25#include "core/hle/kernel/scheduler.h" 29#include "core/hle/kernel/scheduler.h"
26#include "core/hle/kernel/synchronization.h" 30#include "core/hle/kernel/synchronization.h"
27#include "core/hle/kernel/thread.h" 31#include "core/hle/kernel/thread.h"
32#include "core/hle/kernel/time_manager.h"
28#include "core/hle/lock.h" 33#include "core/hle/lock.h"
29#include "core/hle/result.h" 34#include "core/hle/result.h"
30#include "core/memory.h" 35#include "core/memory.h"
@@ -44,7 +49,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
44 std::lock_guard lock{HLE::g_hle_lock}; 49 std::lock_guard lock{HLE::g_hle_lock};
45 50
46 std::shared_ptr<Thread> thread = 51 std::shared_ptr<Thread> thread =
47 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); 52 system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
48 if (thread == nullptr) { 53 if (thread == nullptr) {
49 LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle); 54 LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
50 return; 55 return;
@@ -97,8 +102,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
97} 102}
98 103
99struct KernelCore::Impl { 104struct KernelCore::Impl {
100 explicit Impl(Core::System& system) 105 explicit Impl(Core::System& system, KernelCore& kernel)
101 : system{system}, global_scheduler{system}, synchronization{system} {} 106 : system{system}, global_scheduler{kernel}, synchronization{system}, time_manager{system} {}
102 107
103 void Initialize(KernelCore& kernel) { 108 void Initialize(KernelCore& kernel) {
104 Shutdown(); 109 Shutdown();
@@ -120,7 +125,7 @@ struct KernelCore::Impl {
120 125
121 system_resource_limit = nullptr; 126 system_resource_limit = nullptr;
122 127
123 thread_wakeup_callback_handle_table.Clear(); 128 global_handle_table.Clear();
124 thread_wakeup_event_type = nullptr; 129 thread_wakeup_event_type = nullptr;
125 preemption_event = nullptr; 130 preemption_event = nullptr;
126 131
@@ -138,8 +143,8 @@ struct KernelCore::Impl {
138 143
139 void InitializePhysicalCores() { 144 void InitializePhysicalCores() {
140 exclusive_monitor = 145 exclusive_monitor =
141 Core::MakeExclusiveMonitor(system.Memory(), global_scheduler.CpuCoresCount()); 146 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
142 for (std::size_t i = 0; i < global_scheduler.CpuCoresCount(); i++) { 147 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
143 cores.emplace_back(system, i, *exclusive_monitor); 148 cores.emplace_back(system, i, *exclusive_monitor);
144 } 149 }
145 } 150 }
@@ -184,6 +189,50 @@ struct KernelCore::Impl {
184 system.Memory().SetCurrentPageTable(*process); 189 system.Memory().SetCurrentPageTable(*process);
185 } 190 }
186 191
192 void RegisterCoreThread(std::size_t core_id) {
193 std::unique_lock lock{register_thread_mutex};
194 const std::thread::id this_id = std::this_thread::get_id();
195 const auto it = host_thread_ids.find(this_id);
196 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
197 ASSERT(it == host_thread_ids.end());
198 ASSERT(!registered_core_threads[core_id]);
199 host_thread_ids[this_id] = static_cast<u32>(core_id);
200 registered_core_threads.set(core_id);
201 }
202
203 void RegisterHostThread() {
204 std::unique_lock lock{register_thread_mutex};
205 const std::thread::id this_id = std::this_thread::get_id();
206 const auto it = host_thread_ids.find(this_id);
207 ASSERT(it == host_thread_ids.end());
208 host_thread_ids[this_id] = registered_thread_ids++;
209 }
210
211 u32 GetCurrentHostThreadID() const {
212 const std::thread::id this_id = std::this_thread::get_id();
213 const auto it = host_thread_ids.find(this_id);
214 if (it == host_thread_ids.end()) {
215 return Core::INVALID_HOST_THREAD_ID;
216 }
217 return it->second;
218 }
219
220 Core::EmuThreadHandle GetCurrentEmuThreadID() const {
221 Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
222 result.host_handle = GetCurrentHostThreadID();
223 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
224 return result;
225 }
226 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
227 const Kernel::Thread* current = sched.GetCurrentThread();
228 if (current != nullptr) {
229 result.guest_handle = current->GetGlobalHandle();
230 } else {
231 result.guest_handle = InvalidHandle;
232 }
233 return result;
234 }
235
187 std::atomic<u32> next_object_id{0}; 236 std::atomic<u32> next_object_id{0};
188 std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; 237 std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
189 std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; 238 std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
@@ -194,15 +243,16 @@ struct KernelCore::Impl {
194 Process* current_process = nullptr; 243 Process* current_process = nullptr;
195 Kernel::GlobalScheduler global_scheduler; 244 Kernel::GlobalScheduler global_scheduler;
196 Kernel::Synchronization synchronization; 245 Kernel::Synchronization synchronization;
246 Kernel::TimeManager time_manager;
197 247
198 std::shared_ptr<ResourceLimit> system_resource_limit; 248 std::shared_ptr<ResourceLimit> system_resource_limit;
199 249
200 std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type; 250 std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
201 std::shared_ptr<Core::Timing::EventType> preemption_event; 251 std::shared_ptr<Core::Timing::EventType> preemption_event;
202 252
203 // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, 253 // This is the kernel's handle table or supervisor handle table which
204 // allowing us to simply use a pool index or similar. 254 // stores all the objects in place.
205 Kernel::HandleTable thread_wakeup_callback_handle_table; 255 Kernel::HandleTable global_handle_table;
206 256
207 /// Map of named ports managed by the kernel, which can be retrieved using 257 /// Map of named ports managed by the kernel, which can be retrieved using
208 /// the ConnectToPort SVC. 258 /// the ConnectToPort SVC.
@@ -211,11 +261,17 @@ struct KernelCore::Impl {
211 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor; 261 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
212 std::vector<Kernel::PhysicalCore> cores; 262 std::vector<Kernel::PhysicalCore> cores;
213 263
264 // 0-3 IDs represent core threads, >3 represent others
265 std::unordered_map<std::thread::id, u32> host_thread_ids;
266 u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES};
267 std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads;
268 std::mutex register_thread_mutex;
269
214 // System context 270 // System context
215 Core::System& system; 271 Core::System& system;
216}; 272};
217 273
218KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system)} {} 274KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {}
219KernelCore::~KernelCore() { 275KernelCore::~KernelCore() {
220 Shutdown(); 276 Shutdown();
221} 277}
@@ -232,9 +288,8 @@ std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
232 return impl->system_resource_limit; 288 return impl->system_resource_limit;
233} 289}
234 290
235std::shared_ptr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable( 291std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
236 Handle handle) const { 292 return impl->global_handle_table.Get<Thread>(handle);
237 return impl->thread_wakeup_callback_handle_table.Get<Thread>(handle);
238} 293}
239 294
240void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) { 295void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
@@ -265,6 +320,14 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
265 return impl->global_scheduler; 320 return impl->global_scheduler;
266} 321}
267 322
323Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
324 return impl->cores[id].Scheduler();
325}
326
327const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
328 return impl->cores[id].Scheduler();
329}
330
268Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) { 331Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
269 return impl->cores[id]; 332 return impl->cores[id];
270} 333}
@@ -281,6 +344,14 @@ const Kernel::Synchronization& KernelCore::Synchronization() const {
281 return impl->synchronization; 344 return impl->synchronization;
282} 345}
283 346
347Kernel::TimeManager& KernelCore::TimeManager() {
348 return impl->time_manager;
349}
350
351const Kernel::TimeManager& KernelCore::TimeManager() const {
352 return impl->time_manager;
353}
354
284Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() { 355Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() {
285 return *impl->exclusive_monitor; 356 return *impl->exclusive_monitor;
286} 357}
@@ -338,12 +409,28 @@ const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallback
338 return impl->thread_wakeup_event_type; 409 return impl->thread_wakeup_event_type;
339} 410}
340 411
341Kernel::HandleTable& KernelCore::ThreadWakeupCallbackHandleTable() { 412Kernel::HandleTable& KernelCore::GlobalHandleTable() {
342 return impl->thread_wakeup_callback_handle_table; 413 return impl->global_handle_table;
414}
415
416const Kernel::HandleTable& KernelCore::GlobalHandleTable() const {
417 return impl->global_handle_table;
418}
419
420void KernelCore::RegisterCoreThread(std::size_t core_id) {
421 impl->RegisterCoreThread(core_id);
422}
423
424void KernelCore::RegisterHostThread() {
425 impl->RegisterHostThread();
426}
427
428u32 KernelCore::GetCurrentHostThreadID() const {
429 return impl->GetCurrentHostThreadID();
343} 430}
344 431
345const Kernel::HandleTable& KernelCore::ThreadWakeupCallbackHandleTable() const { 432Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const {
346 return impl->thread_wakeup_callback_handle_table; 433 return impl->GetCurrentEmuThreadID();
347} 434}
348 435
349} // namespace Kernel 436} // namespace Kernel
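
The new Impl members boil down to a mutex-guarded map from std::thread::id to a small integer: RegisterCoreThread() pins the calling host thread to one of the four emulated cores, RegisterHostThread() hands every other thread an ID above the core range, and GetCurrentHostThreadID() resolves std::this_thread::get_id() back to that number or INVALID_HOST_THREAD_ID. A standalone sketch of the same pattern, with illustrative names and a lock added around the lookup:

#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_map>

constexpr std::uint32_t NUM_CPU_CORES = 4;
constexpr std::uint32_t INVALID_HOST_THREAD_ID = 0xFFFFFFFF;

class HostThreadRegistry {
public:
    // Bind the calling host thread to emulated core 'core_id' (0..3), exactly once.
    void RegisterCoreThread(std::size_t core_id) {
        std::scoped_lock lock{mutex};
        assert(core_id < NUM_CPU_CORES);
        assert(!registered_cores[core_id]);
        ids[std::this_thread::get_id()] = static_cast<std::uint32_t>(core_id);
        registered_cores.set(core_id);
    }

    // Give any other host thread (audio, GPU, loader...) an ID above the core range.
    void RegisterHostThread() {
        std::scoped_lock lock{mutex};
        ids[std::this_thread::get_id()] = next_id++;
    }

    // Resolve the calling thread back to its ID; unknown threads get the invalid marker.
    std::uint32_t GetCurrentHostThreadID() const {
        std::scoped_lock lock{mutex};
        const auto it = ids.find(std::this_thread::get_id());
        return it == ids.end() ? INVALID_HOST_THREAD_ID : it->second;
    }

private:
    mutable std::mutex mutex;
    std::unordered_map<std::thread::id, std::uint32_t> ids;
    std::uint32_t next_id = NUM_CPU_CORES; // IDs 0-3 are core threads, >3 are auxiliary
    std::bitset<NUM_CPU_CORES> registered_cores;
};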
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 1eede3063..c4f78ab71 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -11,6 +11,7 @@
11#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
12 12
13namespace Core { 13namespace Core {
14struct EmuThreadHandle;
14class ExclusiveMonitor; 15class ExclusiveMonitor;
15class System; 16class System;
16} // namespace Core 17} // namespace Core
@@ -29,8 +30,10 @@ class HandleTable;
29class PhysicalCore; 30class PhysicalCore;
30class Process; 31class Process;
31class ResourceLimit; 32class ResourceLimit;
33class Scheduler;
32class Synchronization; 34class Synchronization;
33class Thread; 35class Thread;
36class TimeManager;
34 37
35/// Represents a single instance of the kernel. 38/// Represents a single instance of the kernel.
36class KernelCore { 39class KernelCore {
@@ -64,7 +67,7 @@ public:
64 std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const; 67 std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const;
65 68
66 /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table. 69 /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
67 std::shared_ptr<Thread> RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const; 70 std::shared_ptr<Thread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
68 71
69 /// Adds the given shared pointer to an internal list of active processes. 72 /// Adds the given shared pointer to an internal list of active processes.
70 void AppendNewProcess(std::shared_ptr<Process> process); 73 void AppendNewProcess(std::shared_ptr<Process> process);
@@ -87,6 +90,12 @@ public:
87 /// Gets the sole instance of the global scheduler 90 /// Gets the sole instance of the global scheduler
88 const Kernel::GlobalScheduler& GlobalScheduler() const; 91 const Kernel::GlobalScheduler& GlobalScheduler() const;
89 92
93 /// Gets the sole instance of the Scheduler associated with cpu core 'id'
94 Kernel::Scheduler& Scheduler(std::size_t id);
95
96 /// Gets the sole instance of the Scheduler assoviated with cpu core 'id'
97 const Kernel::Scheduler& Scheduler(std::size_t id) const;
98
90 /// Gets an instance of the respective physical CPU core. 99 /// Gets an instance of the respective physical CPU core.
91 Kernel::PhysicalCore& PhysicalCore(std::size_t id); 100 Kernel::PhysicalCore& PhysicalCore(std::size_t id);
92 101
@@ -99,6 +108,12 @@ public:
99 /// Gets an instance of the Synchronization Interface. 108 /// Gets an instance of the Synchronization Interface.
100 const Kernel::Synchronization& Synchronization() const; 109 const Kernel::Synchronization& Synchronization() const;
101 110
111 /// Gets an instance of the TimeManager Interface.
112 Kernel::TimeManager& TimeManager();
113
114 /// Gets the an instance of the TimeManager Interface.
115 const Kernel::TimeManager& TimeManager() const;
116
102 /// Stops execution of 'id' core, in order to reschedule a new thread. 117 /// Stops execution of 'id' core, in order to reschedule a new thread.
103 void PrepareReschedule(std::size_t id); 118 void PrepareReschedule(std::size_t id);
104 119
@@ -120,6 +135,18 @@ public:
120 /// Determines whether or not the given port is a valid named port. 135 /// Determines whether or not the given port is a valid named port.
121 bool IsValidNamedPort(NamedPortTable::const_iterator port) const; 136 bool IsValidNamedPort(NamedPortTable::const_iterator port) const;
122 137
138 /// Gets the current host_thread/guest_thread handle.
139 Core::EmuThreadHandle GetCurrentEmuThreadID() const;
140
141 /// Gets the current host_thread handle.
142 u32 GetCurrentHostThreadID() const;
143
144 /// Register the current thread as a CPU Core Thread.
145 void RegisterCoreThread(std::size_t core_id);
146
147 /// Register the current thread as a non CPU core thread.
148 void RegisterHostThread();
149
123private: 150private:
124 friend class Object; 151 friend class Object;
125 friend class Process; 152 friend class Process;
@@ -140,11 +167,11 @@ private:
140 /// Retrieves the event type used for thread wakeup callbacks. 167 /// Retrieves the event type used for thread wakeup callbacks.
141 const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const; 168 const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const;
142 169
143 /// Provides a reference to the thread wakeup callback handle table. 170 /// Provides a reference to the global handle table.
144 Kernel::HandleTable& ThreadWakeupCallbackHandleTable(); 171 Kernel::HandleTable& GlobalHandleTable();
145 172
146 /// Provides a const reference to the thread wakeup callback handle table. 173 /// Provides a const reference to the global handle table.
147 const Kernel::HandleTable& ThreadWakeupCallbackHandleTable() const; 174 const Kernel::HandleTable& GlobalHandleTable() const;
148 175
149 struct Impl; 176 struct Impl;
150 std::unique_ptr<Impl> impl; 177 std::unique_ptr<Impl> impl;
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 86f1421bf..c65f82fb7 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -18,10 +18,11 @@
18#include "core/hle/kernel/kernel.h" 18#include "core/hle/kernel/kernel.h"
19#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/scheduler.h" 20#include "core/hle/kernel/scheduler.h"
21#include "core/hle/kernel/time_manager.h"
21 22
22namespace Kernel { 23namespace Kernel {
23 24
24GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {} 25GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
25 26
26GlobalScheduler::~GlobalScheduler() = default; 27GlobalScheduler::~GlobalScheduler() = default;
27 28
@@ -35,7 +36,7 @@ void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
35} 36}
36 37
37void GlobalScheduler::UnloadThread(std::size_t core) { 38void GlobalScheduler::UnloadThread(std::size_t core) {
38 Scheduler& sched = system.Scheduler(core); 39 Scheduler& sched = kernel.Scheduler(core);
39 sched.UnloadThread(); 40 sched.UnloadThread();
40} 41}
41 42
@@ -50,7 +51,7 @@ void GlobalScheduler::SelectThread(std::size_t core) {
50 sched.is_context_switch_pending = sched.selected_thread != sched.current_thread; 51 sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
51 std::atomic_thread_fence(std::memory_order_seq_cst); 52 std::atomic_thread_fence(std::memory_order_seq_cst);
52 }; 53 };
53 Scheduler& sched = system.Scheduler(core); 54 Scheduler& sched = kernel.Scheduler(core);
54 Thread* current_thread = nullptr; 55 Thread* current_thread = nullptr;
55 // Step 1: Get top thread in schedule queue. 56 // Step 1: Get top thread in schedule queue.
56 current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); 57 current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
@@ -356,6 +357,32 @@ void GlobalScheduler::Shutdown() {
356 thread_list.clear(); 357 thread_list.clear();
357} 358}
358 359
360void GlobalScheduler::Lock() {
361 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
362 if (current_thread == current_owner) {
363 ++scope_lock;
364 } else {
365 inner_lock.lock();
366 current_owner = current_thread;
367 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
368 scope_lock = 1;
369 }
370}
371
372void GlobalScheduler::Unlock() {
373 if (--scope_lock != 0) {
374 ASSERT(scope_lock > 0);
375 return;
376 }
377 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
378 SelectThread(i);
379 }
380 current_owner = Core::EmuThreadHandle::InvalidHandle();
381 scope_lock = 1;
382 inner_lock.unlock();
383 // TODO(Blinkhawk): Setup the interrupts and change context on current core.
384}
385
359Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id) 386Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id)
360 : system(system), cpu_core(cpu_core), core_id(core_id) {} 387 : system(system), cpu_core(cpu_core), core_id(core_id) {}
361 388
@@ -485,4 +512,27 @@ void Scheduler::Shutdown() {
485 selected_thread = nullptr; 512 selected_thread = nullptr;
486} 513}
487 514
515SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
516 kernel.GlobalScheduler().Lock();
517}
518
519SchedulerLock::~SchedulerLock() {
520 kernel.GlobalScheduler().Unlock();
521}
522
523SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
524 Thread* time_task, s64 nanoseconds)
525 : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
526 nanoseconds} {
527 event_handle = InvalidHandle;
528}
529
530SchedulerLockAndSleep::~SchedulerLockAndSleep() {
531 if (sleep_cancelled) {
532 return;
533 }
534 auto& time_manager = kernel.TimeManager();
535 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
536}
537
488} // namespace Kernel 538} // namespace Kernel
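
GlobalScheduler::Lock() and Unlock() form a re-entrant lock keyed on the calling emulated/host thread: re-entry by the current owner only bumps scope_lock, and the outermost Unlock() reselects threads on every core before releasing the mutex, with SchedulerLock as the RAII wrapper. A condensed sketch of that ownership pattern, assuming a plain integer owner ID and reducing the per-core reselection to a comment:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>

class ReentrantScheduleLock {
public:
    void Lock(std::uint32_t owner_id) {
        if (owner_id == current_owner.load(std::memory_order_relaxed)) {
            ++depth; // re-entry by the thread that already owns the lock
            return;
        }
        inner.lock();
        current_owner.store(owner_id, std::memory_order_relaxed);
        depth = 1;
    }

    void Unlock() {
        assert(depth > 0);
        if (--depth != 0) {
            return; // still inside a nested critical section
        }
        // The real Unlock() calls SelectThread() for every core here, so a new
        // thread set is picked exactly once per outermost critical section.
        current_owner.store(kInvalidOwner, std::memory_order_relaxed);
        inner.unlock();
    }

private:
    static constexpr std::uint32_t kInvalidOwner = 0xFFFFFFFF;
    std::mutex inner;
    std::atomic<std::uint32_t> current_owner{kInvalidOwner};
    std::int64_t depth = 0;
};

// RAII wrapper in the spirit of SchedulerLock: lock on construction, unlock on scope exit.
class ScopedScheduleLock {
public:
    ScopedScheduleLock(ReentrantScheduleLock& lk, std::uint32_t owner_id) : lock{lk} {
        lock.Lock(owner_id);
    }
    ~ScopedScheduleLock() {
        lock.Unlock();
    }

private:
    ReentrantScheduleLock& lock;
};

A scope such as ScopedScheduleLock guard{lock, owner}; then makes every scheduling mutation inside it appear as one atomic step to the other cores, which is the role SchedulerLock plays for kernel code.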
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 96db049cb..1c93a838c 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -6,6 +6,7 @@
6 6
7#include <atomic> 7#include <atomic>
8#include <memory> 8#include <memory>
9#include <mutex>
9#include <vector> 10#include <vector>
10 11
11#include "common/common_types.h" 12#include "common/common_types.h"
@@ -20,11 +21,13 @@ class System;
20 21
21namespace Kernel { 22namespace Kernel {
22 23
24class KernelCore;
23class Process; 25class Process;
26class SchedulerLock;
24 27
25class GlobalScheduler final { 28class GlobalScheduler final {
26public: 29public:
27 explicit GlobalScheduler(Core::System& system); 30 explicit GlobalScheduler(KernelCore& kernel);
28 ~GlobalScheduler(); 31 ~GlobalScheduler();
29 32
30 /// Adds a new thread to the scheduler 33 /// Adds a new thread to the scheduler
@@ -138,6 +141,14 @@ public:
138 void Shutdown(); 141 void Shutdown();
139 142
140private: 143private:
144 friend class SchedulerLock;
145
146 /// Lock the scheduler to the current thread.
147 void Lock();
148
149 /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
150 /// and reschedules current core if needed.
151 void Unlock();
141 /** 152 /**
142 * Transfers a thread into an specific core. If the destination_core is -1 153 * Transfers a thread into an specific core. If the destination_core is -1
143 * it will be unscheduled from its source code and added into its suggested 154 * it will be unscheduled from its source code and added into its suggested
@@ -158,9 +169,14 @@ private:
158 // ordered from Core 0 to Core 3. 169 // ordered from Core 0 to Core 3.
159 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; 170 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
160 171
172 /// Scheduler lock mechanisms.
173 std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock
174 std::atomic<s64> scope_lock{};
175 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
176
161 /// Lists all thread ids that aren't deleted/etc. 177 /// Lists all thread ids that aren't deleted/etc.
162 std::vector<std::shared_ptr<Thread>> thread_list; 178 std::vector<std::shared_ptr<Thread>> thread_list;
163 Core::System& system; 179 KernelCore& kernel;
164}; 180};
165 181
166class Scheduler final { 182class Scheduler final {
@@ -227,4 +243,30 @@ private:
227 bool is_context_switch_pending = false; 243 bool is_context_switch_pending = false;
228}; 244};
229 245
246class SchedulerLock {
247public:
248 explicit SchedulerLock(KernelCore& kernel);
249 ~SchedulerLock();
250
251protected:
252 KernelCore& kernel;
253};
254
255class SchedulerLockAndSleep : public SchedulerLock {
256public:
257 explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
258 s64 nanoseconds);
259 ~SchedulerLockAndSleep();
260
261 void CancelSleep() {
262 sleep_cancelled = true;
263 }
264
265private:
266 Handle& event_handle;
267 Thread* time_task;
268 s64 nanoseconds;
269 bool sleep_cancelled{};
270};
271
230} // namespace Kernel 272} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ae5f2c8bd..bf850e0b2 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -46,9 +46,9 @@ Thread::~Thread() = default;
46void Thread::Stop() { 46void Thread::Stop() {
47 // Cancel any outstanding wakeup events for this thread 47 // Cancel any outstanding wakeup events for this thread
48 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), 48 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
49 callback_handle); 49 global_handle);
50 kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle); 50 kernel.GlobalHandleTable().Close(global_handle);
51 callback_handle = 0; 51 global_handle = 0;
52 SetStatus(ThreadStatus::Dead); 52 SetStatus(ThreadStatus::Dead);
53 Signal(); 53 Signal();
54 54
@@ -73,12 +73,12 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
73 // thread-safe version of ScheduleEvent. 73 // thread-safe version of ScheduleEvent.
74 const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds}); 74 const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
75 Core::System::GetInstance().CoreTiming().ScheduleEvent( 75 Core::System::GetInstance().CoreTiming().ScheduleEvent(
76 cycles, kernel.ThreadWakeupCallbackEventType(), callback_handle); 76 cycles, kernel.ThreadWakeupCallbackEventType(), global_handle);
77} 77}
78 78
79void Thread::CancelWakeupTimer() { 79void Thread::CancelWakeupTimer() {
80 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), 80 Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
81 callback_handle); 81 global_handle);
82} 82}
83 83
84void Thread::ResumeFromWait() { 84void Thread::ResumeFromWait() {
@@ -190,7 +190,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin
190 thread->condvar_wait_address = 0; 190 thread->condvar_wait_address = 0;
191 thread->wait_handle = 0; 191 thread->wait_handle = 0;
192 thread->name = std::move(name); 192 thread->name = std::move(name);
193 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); 193 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
194 thread->owner_process = &owner_process; 194 thread->owner_process = &owner_process;
195 auto& scheduler = kernel.GlobalScheduler(); 195 auto& scheduler = kernel.GlobalScheduler();
196 scheduler.AddThread(thread); 196 scheduler.AddThread(thread);
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 7a4916318..129e7858a 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -453,6 +453,10 @@ public:
453 is_sync_cancelled = value; 453 is_sync_cancelled = value;
454 } 454 }
455 455
456 Handle GetGlobalHandle() const {
457 return global_handle;
458 }
459
456private: 460private:
457 void SetSchedulingStatus(ThreadSchedStatus new_status); 461 void SetSchedulingStatus(ThreadSchedStatus new_status);
458 void SetCurrentPriority(u32 new_priority); 462 void SetCurrentPriority(u32 new_priority);
@@ -514,7 +518,7 @@ private:
514 VAddr arb_wait_address{0}; 518 VAddr arb_wait_address{0};
515 519
516 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. 520 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
517 Handle callback_handle = 0; 521 Handle global_handle = 0;
518 522
519 /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread 523 /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
520 /// was waiting via WaitSynchronization then the object will be the last object that became 524 /// was waiting via WaitSynchronization then the object will be the last object that became
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
new file mode 100644
index 000000000..21b290468
--- /dev/null
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -0,0 +1,44 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "core/core.h"
7#include "core/core_timing.h"
8#include "core/core_timing_util.h"
9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/thread.h"
12#include "core/hle/kernel/time_manager.h"
13
14namespace Kernel {
15
16TimeManager::TimeManager(Core::System& system) : system{system} {
17 time_manager_event_type = Core::Timing::CreateEvent(
18 "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
19 Handle proper_handle = static_cast<Handle>(thread_handle);
20 std::shared_ptr<Thread> thread =
21 this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
22 thread->ResumeFromWait();
23 });
24}
25
26void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
27 if (nanoseconds > 0) {
28 ASSERT(timetask);
29 event_handle = timetask->GetGlobalHandle();
30 const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
31 system.CoreTiming().ScheduleEvent(cycles, time_manager_event_type, event_handle);
32 } else {
33 event_handle = InvalidHandle;
34 }
35}
36
37void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
38 if (event_handle == InvalidHandle) {
39 return;
40 }
41 system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
42}
43
44} // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
new file mode 100644
index 000000000..eaec486d1
--- /dev/null
+++ b/src/core/hle/kernel/time_manager.h
@@ -0,0 +1,43 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8
9#include "core/hle/kernel/object.h"
10
11namespace Core {
12class System;
13} // namespace Core
14
15namespace Core::Timing {
16struct EventType;
17} // namespace Core::Timing
18
19namespace Kernel {
20
21class Thread;
22
23/**
24 * The `TimeManager` takes care of scheduling time events on threads and executes their TimeUp
25 * method when the event is triggered.
26 */
27class TimeManager {
28public:
29 explicit TimeManager(Core::System& system);
30
31 /// Schedule a time event on `timetask` thread that will expire in 'nanoseconds'
32 /// returns a non-invalid handle in `event_handle` if correctly scheduled
33 void ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds);
34
35 /// Unschedule an existing time event
36 void UnscheduleTimeEvent(Handle event_handle);
37
38private:
39 Core::System& system;
40 std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
41};
42
43} // namespace Kernel
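
TimeManager pairs with SchedulerLockAndSleep from scheduler.cpp: the guard records the thread's global handle and the timeout, and unless CancelSleep() was called its destructor forwards both to ScheduleTimeEvent(), whose core-timing callback later resumes the thread through RetrieveThreadFromGlobalHandleTable(). The snippet below is a hypothetical caller written against the interfaces added in this change; WaitWithTimeout and its arguments are invented for the example.

#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

// Hypothetical wait helper: put 'current' to sleep for 'timeout_ns' unless the
// condition is already satisfied, in which case no time event is ever scheduled.
void WaitWithTimeout(KernelCore& kernel, Thread* current, s64 timeout_ns, bool already_signaled) {
    Handle timer_handle{};
    {
        SchedulerLockAndSleep lock{kernel, timer_handle, current, timeout_ns};
        if (already_signaled) {
            lock.CancelSleep(); // the destructor will not schedule a time event
            return;
        }
        // ...queue 'current' on the waited object here. When 'lock' goes out of
        // scope it calls TimeManager::ScheduleTimeEvent(timer_handle, current,
        // timeout_ns); on expiry the callback resumes the thread through the
        // global handle table.
    }

    // ...later, once the thread has been woken by a signal or by the timeout, the
    // pending event is dropped; UnscheduleTimeEvent() ignores InvalidHandle.
    kernel.TimeManager().UnscheduleTimeEvent(timer_handle);
}

} // namespace Kernel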
diff --git a/src/core/hle/service/bcat/backend/boxcat.cpp b/src/core/hle/service/bcat/backend/boxcat.cpp
index 67e39a5c4..f589864ee 100644
--- a/src/core/hle/service/bcat/backend/boxcat.cpp
+++ b/src/core/hle/service/bcat/backend/boxcat.cpp
@@ -200,7 +200,8 @@ private:
200 DownloadResult DownloadInternal(const std::string& resolved_path, u32 timeout_seconds, 200 DownloadResult DownloadInternal(const std::string& resolved_path, u32 timeout_seconds,
201 const std::string& content_type_name) { 201 const std::string& content_type_name) {
202 if (client == nullptr) { 202 if (client == nullptr) {
203 client = std::make_unique<httplib::SSLClient>(BOXCAT_HOSTNAME, PORT, timeout_seconds); 203 client = std::make_unique<httplib::SSLClient>(BOXCAT_HOSTNAME, PORT);
204 client->set_timeout_sec(timeout_seconds);
204 } 205 }
205 206
206 httplib::Headers headers{ 207 httplib::Headers headers{
@@ -448,8 +449,8 @@ std::optional<std::vector<u8>> Boxcat::GetLaunchParameter(TitleIDVersion title)
448 449
449Boxcat::StatusResult Boxcat::GetStatus(std::optional<std::string>& global, 450Boxcat::StatusResult Boxcat::GetStatus(std::optional<std::string>& global,
450 std::map<std::string, EventStatus>& games) { 451 std::map<std::string, EventStatus>& games) {
451 httplib::SSLClient client{BOXCAT_HOSTNAME, static_cast<int>(PORT), 452 httplib::SSLClient client{BOXCAT_HOSTNAME, static_cast<int>(PORT)};
452 static_cast<int>(TIMEOUT_SECONDS)}; 453 client.set_timeout_sec(static_cast<int>(TIMEOUT_SECONDS));
453 454
454 httplib::Headers headers{ 455 httplib::Headers headers{
455 {std::string("Game-Assets-API-Version"), std::string(BOXCAT_API_VERSION)}, 456 {std::string("Game-Assets-API-Version"), std::string(BOXCAT_API_VERSION)},
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index ed5059047..92adde6d4 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -129,12 +129,20 @@ public:
129 {304, nullptr, "Disconnect"}, 129 {304, nullptr, "Disconnect"},
130 {400, nullptr, "Initialize"}, 130 {400, nullptr, "Initialize"},
131 {401, nullptr, "Finalize"}, 131 {401, nullptr, "Finalize"},
132 {402, nullptr, "SetOperationMode"}, 132 {402, &IUserLocalCommunicationService::Initialize2, "Initialize2"}, // 7.0.0+
133 }; 133 };
134 // clang-format on 134 // clang-format on
135 135
136 RegisterHandlers(functions); 136 RegisterHandlers(functions);
137 } 137 }
138
139 void Initialize2(Kernel::HLERequestContext& ctx) {
140 LOG_WARNING(Service_LDN, "(STUBBED) called");
141 // Returning a success result seems to make this service start the network and continue.
142 // If we simply return an error result, it stops and may retry over and over.
143 IPC::ResponseBuilder rb{ctx, 2};
144 rb.Push(RESULT_UNKNOWN);
145 }
138}; 146};
139 147
140class LDNS final : public ServiceFramework<LDNS> { 148class LDNS final : public ServiceFramework<LDNS> {
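The handler follows the usual yuzu HLE stub shape used elsewhere in this service. Purely for comparison, a hypothetical variant that reports success instead of RESULT_UNKNOWN would look like the sketch below (illustrative only, not part of this change):

// Hypothetical success-reporting variant of the stub above, shown for comparison only.
void Initialize2(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_LDN, "(STUBBED) called");
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(RESULT_SUCCESS);
}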
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index 6d8bca8bb..f1966ac0e 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -44,6 +44,8 @@ u32 nvhost_gpu::ioctl(Ioctl command, const std::vector<u8>& input, const std::ve
44 return GetWaitbase(input, output); 44 return GetWaitbase(input, output);
45 case IoctlCommand::IocChannelSetTimeoutCommand: 45 case IoctlCommand::IocChannelSetTimeoutCommand:
46 return ChannelSetTimeout(input, output); 46 return ChannelSetTimeout(input, output);
47 case IoctlCommand::IocChannelSetTimeslice:
48 return ChannelSetTimeslice(input, output);
47 default: 49 default:
48 break; 50 break;
49 } 51 }
@@ -228,4 +230,14 @@ u32 nvhost_gpu::ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>&
228 return 0; 230 return 0;
229} 231}
230 232
233u32 nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output) {
234 IoctlSetTimeslice params{};
235 std::memcpy(&params, input.data(), sizeof(IoctlSetTimeslice));
236 LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
237
238 channel_timeslice = params.timeslice;
239
240 return 0;
241}
242
231} // namespace Service::Nvidia::Devices 243} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index d056dd046..2ac74743f 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -48,6 +48,7 @@ private:
48 IocAllocObjCtxCommand = 0xC0104809, 48 IocAllocObjCtxCommand = 0xC0104809,
49 IocChannelGetWaitbaseCommand = 0xC0080003, 49 IocChannelGetWaitbaseCommand = 0xC0080003,
50 IocChannelSetTimeoutCommand = 0x40044803, 50 IocChannelSetTimeoutCommand = 0x40044803,
51 IocChannelSetTimeslice = 0xC004481D,
51 }; 52 };
52 53
53 enum class CtxObjects : u32_le { 54 enum class CtxObjects : u32_le {
@@ -101,6 +102,11 @@ private:
101 static_assert(sizeof(IoctlChannelSetPriority) == 4, 102 static_assert(sizeof(IoctlChannelSetPriority) == 4,
102 "IoctlChannelSetPriority is incorrect size"); 103 "IoctlChannelSetPriority is incorrect size");
103 104
105 struct IoctlSetTimeslice {
106 u32_le timeslice;
107 };
108 static_assert(sizeof(IoctlSetTimeslice) == 4, "IoctlSetTimeslice is incorrect size");
109
104 struct IoctlEventIdControl { 110 struct IoctlEventIdControl {
105 u32_le cmd; // 0=disable, 1=enable, 2=clear 111 u32_le cmd; // 0=disable, 1=enable, 2=clear
106 u32_le id; 112 u32_le id;
@@ -174,6 +180,7 @@ private:
174 u64_le user_data{}; 180 u64_le user_data{};
175 IoctlZCullBind zcull_params{}; 181 IoctlZCullBind zcull_params{};
176 u32_le channel_priority{}; 182 u32_le channel_priority{};
183 u32_le channel_timeslice{};
177 184
178 u32 SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output); 185 u32 SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
179 u32 SetClientData(const std::vector<u8>& input, std::vector<u8>& output); 186 u32 SetClientData(const std::vector<u8>& input, std::vector<u8>& output);
@@ -188,6 +195,7 @@ private:
188 const std::vector<u8>& input2, IoctlVersion version); 195 const std::vector<u8>& input2, IoctlVersion version);
189 u32 GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output); 196 u32 GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
190 u32 ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output); 197 u32 ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
198 u32 ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
191 199
192 std::shared_ptr<nvmap> nvmap_dev; 200 std::shared_ptr<nvmap> nvmap_dev;
193 u32 assigned_syncpoints{}; 201 u32 assigned_syncpoints{};
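The new IocChannelSetTimeslice value appears to follow the same Linux-style ioctl encoding as the neighbouring commands; assuming the standard dir/size/type/nr bit layout, the sketch below decodes 0xC004481D and confirms the 4-byte payload matches sizeof(IoctlSetTimeslice).

#include <cstdint>
#include <cstdio>

// Decode a Linux-style ioctl number (assumed layout: dir:2 | size:14 | type:8 | nr:8).
int main() {
    constexpr std::uint32_t cmd = 0xC004481D;         // IocChannelSetTimeslice
    const std::uint32_t dir = (cmd >> 30) & 0x3;      // 0x3 -> read/write
    const std::uint32_t size = (cmd >> 16) & 0x3FFF;  // 0x004 -> matches sizeof(IoctlSetTimeslice)
    const std::uint32_t type = (cmd >> 8) & 0xFF;     // 0x48 (ASCII 'H')
    const std::uint32_t nr = cmd & 0xFF;              // 0x1D
    std::printf("dir=%u size=%u type=0x%X nr=0x%X\n", dir, size, type, nr);
    return 0;
}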
diff --git a/src/core/settings.h b/src/core/settings.h
index e1a9a0ffa..f837d3fbc 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -429,6 +429,7 @@ struct Values {
429 int vulkan_device; 429 int vulkan_device;
430 430
431 float resolution_factor; 431 float resolution_factor;
432 int aspect_ratio;
432 bool use_frame_limit; 433 bool use_frame_limit;
433 u16 frame_limit; 434 u16 frame_limit;
434 bool use_disk_shader_cache; 435 bool use_disk_shader_cache;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index db9332d00..4b0c6346f 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -37,6 +37,7 @@ add_library(video_core STATIC
37 memory_manager.h 37 memory_manager.h
38 morton.cpp 38 morton.cpp
39 morton.h 39 morton.h
40 query_cache.h
40 rasterizer_accelerated.cpp 41 rasterizer_accelerated.cpp
41 rasterizer_accelerated.h 42 rasterizer_accelerated.h
42 rasterizer_cache.cpp 43 rasterizer_cache.cpp
@@ -74,6 +75,8 @@ add_library(video_core STATIC
74 renderer_opengl/gl_stream_buffer.h 75 renderer_opengl/gl_stream_buffer.h
75 renderer_opengl/gl_texture_cache.cpp 76 renderer_opengl/gl_texture_cache.cpp
76 renderer_opengl/gl_texture_cache.h 77 renderer_opengl/gl_texture_cache.h
78 renderer_opengl/gl_query_cache.cpp
79 renderer_opengl/gl_query_cache.h
77 renderer_opengl/maxwell_to_gl.h 80 renderer_opengl/maxwell_to_gl.h
78 renderer_opengl/renderer_opengl.cpp 81 renderer_opengl/renderer_opengl.cpp
79 renderer_opengl/renderer_opengl.h 82 renderer_opengl/renderer_opengl.h
@@ -177,6 +180,8 @@ if (ENABLE_VULKAN)
177 renderer_vulkan/vk_memory_manager.h 180 renderer_vulkan/vk_memory_manager.h
178 renderer_vulkan/vk_pipeline_cache.cpp 181 renderer_vulkan/vk_pipeline_cache.cpp
179 renderer_vulkan/vk_pipeline_cache.h 182 renderer_vulkan/vk_pipeline_cache.h
183 renderer_vulkan/vk_query_cache.cpp
184 renderer_vulkan/vk_query_cache.h
180 renderer_vulkan/vk_rasterizer.cpp 185 renderer_vulkan/vk_rasterizer.cpp
181 renderer_vulkan/vk_rasterizer.h 186 renderer_vulkan/vk_rasterizer.h
182 renderer_vulkan/vk_renderpass_cache.cpp 187 renderer_vulkan/vk_renderpass_cache.cpp
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 0b3e8749b..b28de1092 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -4,6 +4,7 @@
4 4
5#include <cinttypes> 5#include <cinttypes>
6#include <cstring> 6#include <cstring>
7#include <optional>
7#include "common/assert.h" 8#include "common/assert.h"
8#include "core/core.h" 9#include "core/core.h"
9#include "core/core_timing.h" 10#include "core/core_timing.h"
@@ -16,6 +17,8 @@
16 17
17namespace Tegra::Engines { 18namespace Tegra::Engines {
18 19
20using VideoCore::QueryType;
21
19/// First register id that is actually a Macro call. 22/// First register id that is actually a Macro call.
20constexpr u32 MacroRegistersStart = 0xE00; 23constexpr u32 MacroRegistersStart = 0xE00;
21 24
@@ -400,6 +403,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
400 ProcessQueryCondition(); 403 ProcessQueryCondition();
401 break; 404 break;
402 } 405 }
406 case MAXWELL3D_REG_INDEX(counter_reset): {
407 ProcessCounterReset();
408 break;
409 }
403 case MAXWELL3D_REG_INDEX(sync_info): { 410 case MAXWELL3D_REG_INDEX(sync_info): {
404 ProcessSyncPoint(); 411 ProcessSyncPoint();
405 break; 412 break;
@@ -482,7 +489,7 @@ void Maxwell3D::FlushMMEInlineDraw() {
482 489
483 const bool is_indexed = mme_draw.current_mode == MMEDrawMode::Indexed; 490 const bool is_indexed = mme_draw.current_mode == MMEDrawMode::Indexed;
484 if (ShouldExecute()) { 491 if (ShouldExecute()) {
485 rasterizer.DrawMultiBatch(is_indexed); 492 rasterizer.Draw(is_indexed, true);
486 } 493 }
487 494
488 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if 495 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
@@ -544,40 +551,28 @@ void Maxwell3D::ProcessQueryGet() {
544 "Units other than CROP are unimplemented"); 551 "Units other than CROP are unimplemented");
545 552
546 switch (regs.query.query_get.operation) { 553 switch (regs.query.query_get.operation) {
547 case Regs::QueryOperation::Release: { 554 case Regs::QueryOperation::Release:
548 const u64 result = regs.query.query_sequence; 555 StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
549 StampQueryResult(result, regs.query.query_get.short_query == 0);
550 break; 556 break;
551 } 557 case Regs::QueryOperation::Acquire:
552 case Regs::QueryOperation::Acquire: { 558 // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that
553 // Todo(Blinkhawk): Under this operation, the GPU waits for the CPU 559 // matches the current payload.
554 // to write a value that matches the current payload.
555 UNIMPLEMENTED_MSG("Unimplemented query operation ACQUIRE"); 560 UNIMPLEMENTED_MSG("Unimplemented query operation ACQUIRE");
556 break; 561 break;
557 } 562 case Regs::QueryOperation::Counter:
558 case Regs::QueryOperation::Counter: { 563 if (const std::optional<u64> result = GetQueryResult()) {
559 u64 result{}; 564 // If the query returns an empty optional it means it's cached and deferred.
560 switch (regs.query.query_get.select) { 565 // In this case we have a non-empty result, so we stamp it immediately.
561 case Regs::QuerySelect::Zero: 566 StampQueryResult(*result, regs.query.query_get.short_query == 0);
562 result = 0;
563 break;
564 default:
565 result = 1;
566 UNIMPLEMENTED_MSG("Unimplemented query select type {}",
567 static_cast<u32>(regs.query.query_get.select.Value()));
568 } 567 }
569 StampQueryResult(result, regs.query.query_get.short_query == 0);
570 break; 568 break;
571 } 569 case Regs::QueryOperation::Trap:
572 case Regs::QueryOperation::Trap: {
573 UNIMPLEMENTED_MSG("Unimplemented query operation TRAP"); 570 UNIMPLEMENTED_MSG("Unimplemented query operation TRAP");
574 break; 571 break;
575 } 572 default:
576 default: {
577 UNIMPLEMENTED_MSG("Unknown query operation"); 573 UNIMPLEMENTED_MSG("Unknown query operation");
578 break; 574 break;
579 } 575 }
580 }
581} 576}
582 577
583void Maxwell3D::ProcessQueryCondition() { 578void Maxwell3D::ProcessQueryCondition() {
@@ -593,20 +588,20 @@ void Maxwell3D::ProcessQueryCondition() {
593 } 588 }
594 case Regs::ConditionMode::ResNonZero: { 589 case Regs::ConditionMode::ResNonZero: {
595 Regs::QueryCompare cmp; 590 Regs::QueryCompare cmp;
596 memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp)); 591 memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp));
597 execute_on = cmp.initial_sequence != 0U && cmp.initial_mode != 0U; 592 execute_on = cmp.initial_sequence != 0U && cmp.initial_mode != 0U;
598 break; 593 break;
599 } 594 }
600 case Regs::ConditionMode::Equal: { 595 case Regs::ConditionMode::Equal: {
601 Regs::QueryCompare cmp; 596 Regs::QueryCompare cmp;
602 memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp)); 597 memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp));
603 execute_on = 598 execute_on =
604 cmp.initial_sequence == cmp.current_sequence && cmp.initial_mode == cmp.current_mode; 599 cmp.initial_sequence == cmp.current_sequence && cmp.initial_mode == cmp.current_mode;
605 break; 600 break;
606 } 601 }
607 case Regs::ConditionMode::NotEqual: { 602 case Regs::ConditionMode::NotEqual: {
608 Regs::QueryCompare cmp; 603 Regs::QueryCompare cmp;
609 memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp)); 604 memory_manager.ReadBlock(condition_address, &cmp, sizeof(cmp));
610 execute_on = 605 execute_on =
611 cmp.initial_sequence != cmp.current_sequence || cmp.initial_mode != cmp.current_mode; 606 cmp.initial_sequence != cmp.current_sequence || cmp.initial_mode != cmp.current_mode;
612 break; 607 break;
@@ -619,6 +614,18 @@ void Maxwell3D::ProcessQueryCondition() {
619 } 614 }
620} 615}
621 616
617void Maxwell3D::ProcessCounterReset() {
618 switch (regs.counter_reset) {
619 case Regs::CounterReset::SampleCnt:
620 rasterizer.ResetCounter(QueryType::SamplesPassed);
621 break;
622 default:
623 LOG_WARNING(Render_OpenGL, "Unimplemented counter reset={}",
624 static_cast<int>(regs.counter_reset));
625 break;
626 }
627}
628
622void Maxwell3D::ProcessSyncPoint() { 629void Maxwell3D::ProcessSyncPoint() {
623 const u32 sync_point = regs.sync_info.sync_point.Value(); 630 const u32 sync_point = regs.sync_info.sync_point.Value();
624 const u32 increment = regs.sync_info.increment.Value(); 631 const u32 increment = regs.sync_info.increment.Value();
@@ -647,7 +654,7 @@ void Maxwell3D::DrawArrays() {
647 654
648 const bool is_indexed{regs.index_array.count && !regs.vertex_buffer.count}; 655 const bool is_indexed{regs.index_array.count && !regs.vertex_buffer.count};
649 if (ShouldExecute()) { 656 if (ShouldExecute()) {
650 rasterizer.DrawBatch(is_indexed); 657 rasterizer.Draw(is_indexed, false);
651 } 658 }
652 659
653 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if 660 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
@@ -661,6 +668,22 @@ void Maxwell3D::DrawArrays() {
661 } 668 }
662} 669}
663 670
671std::optional<u64> Maxwell3D::GetQueryResult() {
672 switch (regs.query.query_get.select) {
673 case Regs::QuerySelect::Zero:
674 return 0;
675 case Regs::QuerySelect::SamplesPassed:
676 // Deferred.
677 rasterizer.Query(regs.query.QueryAddress(), VideoCore::QueryType::SamplesPassed,
678 system.GPU().GetTicks());
679 return {};
680 default:
681 UNIMPLEMENTED_MSG("Unimplemented query select type {}",
682 static_cast<u32>(regs.query.query_get.select.Value()));
683 return 1;
684 }
685}
686
664void Maxwell3D::ProcessCBBind(std::size_t stage_index) { 687void Maxwell3D::ProcessCBBind(std::size_t stage_index) {
665 // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader stage. 688 // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader stage.
666 auto& shader = state.shader_stages[stage_index]; 689 auto& shader = state.shader_stages[stage_index];
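With this change, SamplesPassed queries are no longer stamped with a placeholder value; GetQueryResult() hands them to the rasterizer's query cache together with the current GPU tick, and the cache later writes the result back to the guest buffer. The guest-visible layout it fills is 8 bytes for a short query and 16 bytes for a long (timestamped) one; the struct names below are illustrative, the sizes come from the new query cache.

#include <cstdint>

// Illustrative guest-visible report layouts filled by the deferred query path.
struct ShortQueryReport {
    std::uint64_t payload; // counter value, e.g. samples passed
};
static_assert(sizeof(ShortQueryReport) == 8, "short query writes 8 bytes");

struct LongQueryReport {
    std::uint64_t payload;   // counter value
    std::uint64_t timestamp; // GPU ticks recorded when the query was issued
};
static_assert(sizeof(LongQueryReport) == 16, "long query writes 16 bytes");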
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 0a2af54e5..6ea7cc6a5 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -6,6 +6,7 @@
6 6
7#include <array> 7#include <array>
8#include <bitset> 8#include <bitset>
9#include <optional>
9#include <type_traits> 10#include <type_traits>
10#include <unordered_map> 11#include <unordered_map>
11#include <vector> 12#include <vector>
@@ -409,6 +410,27 @@ public:
409 Linear = 1, 410 Linear = 1,
410 }; 411 };
411 412
413 enum class CounterReset : u32 {
414 SampleCnt = 0x01,
415 Unk02 = 0x02,
416 Unk03 = 0x03,
417 Unk04 = 0x04,
418 EmittedPrimitives = 0x10, // Not tested
419 Unk11 = 0x11,
420 Unk12 = 0x12,
421 Unk13 = 0x13,
422 Unk15 = 0x15,
423 Unk16 = 0x16,
424 Unk17 = 0x17,
425 Unk18 = 0x18,
426 Unk1A = 0x1A,
427 Unk1B = 0x1B,
428 Unk1C = 0x1C,
429 Unk1D = 0x1D,
430 Unk1E = 0x1E,
431 GeneratedPrimitives = 0x1F,
432 };
433
412 struct Cull { 434 struct Cull {
413 enum class FrontFace : u32 { 435 enum class FrontFace : u32 {
414 ClockWise = 0x0900, 436 ClockWise = 0x0900,
@@ -520,7 +542,7 @@ public:
520 BitField<12, 1, InvMemoryLayout> type; 542 BitField<12, 1, InvMemoryLayout> type;
521 } memory_layout; 543 } memory_layout;
522 union { 544 union {
523 BitField<0, 16, u32> array_mode; 545 BitField<0, 16, u32> layers;
524 BitField<16, 1, u32> volume; 546 BitField<16, 1, u32> volume;
525 }; 547 };
526 u32 layer_stride; 548 u32 layer_stride;
@@ -778,8 +800,12 @@ public:
778 800
779 u32 zeta_width; 801 u32 zeta_width;
780 u32 zeta_height; 802 u32 zeta_height;
803 union {
804 BitField<0, 16, u32> zeta_layers;
805 BitField<16, 1, u32> zeta_volume;
806 };
781 807
782 INSERT_UNION_PADDING_WORDS(0x27); 808 INSERT_UNION_PADDING_WORDS(0x26);
783 809
784 u32 depth_test_enable; 810 u32 depth_test_enable;
785 811
@@ -857,7 +883,7 @@ public:
857 BitField<7, 1, u32> c7; 883 BitField<7, 1, u32> c7;
858 } clip_distance_enabled; 884 } clip_distance_enabled;
859 885
860 INSERT_UNION_PADDING_WORDS(0x1); 886 u32 samplecnt_enable;
861 887
862 float point_size; 888 float point_size;
863 889
@@ -865,7 +891,11 @@ public:
865 891
866 u32 point_sprite_enable; 892 u32 point_sprite_enable;
867 893
868 INSERT_UNION_PADDING_WORDS(0x5); 894 INSERT_UNION_PADDING_WORDS(0x3);
895
896 CounterReset counter_reset;
897
898 INSERT_UNION_PADDING_WORDS(0x1);
869 899
870 u32 zeta_enable; 900 u32 zeta_enable;
871 901
@@ -1412,12 +1442,15 @@ private:
1412 /// Handles a write to the QUERY_GET register. 1442 /// Handles a write to the QUERY_GET register.
1413 void ProcessQueryGet(); 1443 void ProcessQueryGet();
1414 1444
1415 // Writes the query result accordingly 1445 /// Writes the query result accordingly.
1416 void StampQueryResult(u64 payload, bool long_query); 1446 void StampQueryResult(u64 payload, bool long_query);
1417 1447
1418 // Handles Conditional Rendering 1448 /// Handles conditional rendering.
1419 void ProcessQueryCondition(); 1449 void ProcessQueryCondition();
1420 1450
1451 /// Handles counter resets.
1452 void ProcessCounterReset();
1453
1421 /// Handles writes to syncing register. 1454 /// Handles writes to syncing register.
1422 void ProcessSyncPoint(); 1455 void ProcessSyncPoint();
1423 1456
@@ -1434,6 +1467,9 @@ private:
1434 1467
1435 // Handles a instance drawcall from MME 1468 // Handles a instance drawcall from MME
1436 void StepInstance(MMEDrawMode expected_mode, u32 count); 1469 void StepInstance(MMEDrawMode expected_mode, u32 count);
1470
1471 /// Returns a query's value or an empty object if the value will be deferred through a cache.
1472 std::optional<u64> GetQueryResult();
1437}; 1473};
1438 1474
1439#define ASSERT_REG_POSITION(field_name, position) \ 1475#define ASSERT_REG_POSITION(field_name, position) \
@@ -1475,6 +1511,7 @@ ASSERT_REG_POSITION(vertex_attrib_format, 0x458);
1475ASSERT_REG_POSITION(rt_control, 0x487); 1511ASSERT_REG_POSITION(rt_control, 0x487);
1476ASSERT_REG_POSITION(zeta_width, 0x48a); 1512ASSERT_REG_POSITION(zeta_width, 0x48a);
1477ASSERT_REG_POSITION(zeta_height, 0x48b); 1513ASSERT_REG_POSITION(zeta_height, 0x48b);
1514ASSERT_REG_POSITION(zeta_layers, 0x48c);
1478ASSERT_REG_POSITION(depth_test_enable, 0x4B3); 1515ASSERT_REG_POSITION(depth_test_enable, 0x4B3);
1479ASSERT_REG_POSITION(independent_blend_enable, 0x4B9); 1516ASSERT_REG_POSITION(independent_blend_enable, 0x4B9);
1480ASSERT_REG_POSITION(depth_write_enabled, 0x4BA); 1517ASSERT_REG_POSITION(depth_write_enabled, 0x4BA);
@@ -1499,8 +1536,10 @@ ASSERT_REG_POSITION(screen_y_control, 0x4EB);
1499ASSERT_REG_POSITION(vb_element_base, 0x50D); 1536ASSERT_REG_POSITION(vb_element_base, 0x50D);
1500ASSERT_REG_POSITION(vb_base_instance, 0x50E); 1537ASSERT_REG_POSITION(vb_base_instance, 0x50E);
1501ASSERT_REG_POSITION(clip_distance_enabled, 0x544); 1538ASSERT_REG_POSITION(clip_distance_enabled, 0x544);
1539ASSERT_REG_POSITION(samplecnt_enable, 0x545);
1502ASSERT_REG_POSITION(point_size, 0x546); 1540ASSERT_REG_POSITION(point_size, 0x546);
1503ASSERT_REG_POSITION(point_sprite_enable, 0x548); 1541ASSERT_REG_POSITION(point_sprite_enable, 0x548);
1542ASSERT_REG_POSITION(counter_reset, 0x54C);
1504ASSERT_REG_POSITION(zeta_enable, 0x54E); 1543ASSERT_REG_POSITION(zeta_enable, 0x54E);
1505ASSERT_REG_POSITION(multisample_control, 0x54F); 1544ASSERT_REG_POSITION(multisample_control, 0x54F);
1506ASSERT_REG_POSITION(condition, 0x554); 1545ASSERT_REG_POSITION(condition, 0x554);
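The padding adjustments and the new register entries have to stay balanced: every word added to the structure removes exactly one word of padding, so the asserted offsets still line up. A quick consistency check of the edited block, written as standalone static_asserts (offsets are in 32-bit register words, as in the header):

// clip_distance_enabled (0x544) is followed directly by the new samplecnt_enable (0x545),
// so point_size stays at 0x546. After point_sprite_enable (0x548) the old 5-word padding
// becomes 3 padding words + counter_reset (0x54C) + 1 padding word, leaving zeta_enable
// untouched at 0x54E.
static_assert(0x544 + 1 == 0x545, "samplecnt_enable");
static_assert(0x548 + 0x3 + 1 == 0x54C, "counter_reset after three padding words");
static_assert(0x54C + 0x1 + 1 == 0x54E, "zeta_enable unchanged");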
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 4419ab735..7d7137109 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -24,7 +24,7 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
24GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async) 24GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async)
25 : system{system}, renderer{renderer}, is_async{is_async} { 25 : system{system}, renderer{renderer}, is_async{is_async} {
26 auto& rasterizer{renderer.Rasterizer()}; 26 auto& rasterizer{renderer.Rasterizer()};
27 memory_manager = std::make_unique<Tegra::MemoryManager>(system); 27 memory_manager = std::make_unique<Tegra::MemoryManager>(system, rasterizer);
28 dma_pusher = std::make_unique<Tegra::DmaPusher>(*this); 28 dma_pusher = std::make_unique<Tegra::DmaPusher>(*this);
29 maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager); 29 maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
30 fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer); 30 fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer);
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index f1d50be3e..f5d33f27a 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -11,10 +11,12 @@
11#include "core/memory.h" 11#include "core/memory.h"
12#include "video_core/gpu.h" 12#include "video_core/gpu.h"
13#include "video_core/memory_manager.h" 13#include "video_core/memory_manager.h"
14#include "video_core/rasterizer_interface.h"
14 15
15namespace Tegra { 16namespace Tegra {
16 17
17MemoryManager::MemoryManager(Core::System& system) : system{system} { 18MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
19 : rasterizer{rasterizer}, system{system} {
18 std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); 20 std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
19 std::fill(page_table.attributes.begin(), page_table.attributes.end(), 21 std::fill(page_table.attributes.begin(), page_table.attributes.end(),
20 Common::PageType::Unmapped); 22 Common::PageType::Unmapped);
@@ -83,6 +85,7 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
83 const auto cpu_addr = GpuToCpuAddress(gpu_addr); 85 const auto cpu_addr = GpuToCpuAddress(gpu_addr);
84 ASSERT(cpu_addr); 86 ASSERT(cpu_addr);
85 87
88 // Flush and invalidate through the GPU interface, to be asynchronous if possible.
86 system.GPU().FlushAndInvalidateRegion(cache_addr, aligned_size); 89 system.GPU().FlushAndInvalidateRegion(cache_addr, aligned_size);
87 90
88 UnmapRange(gpu_addr, aligned_size); 91 UnmapRange(gpu_addr, aligned_size);
@@ -242,7 +245,9 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s
242 switch (page_table.attributes[page_index]) { 245 switch (page_table.attributes[page_index]) {
243 case Common::PageType::Memory: { 246 case Common::PageType::Memory: {
244 const u8* src_ptr{page_table.pointers[page_index] + page_offset}; 247 const u8* src_ptr{page_table.pointers[page_index] + page_offset};
245 system.GPU().FlushRegion(ToCacheAddr(src_ptr), copy_amount); 248 // Flush must happen on the rasterizer interface, such that memory is always synchronous
249 // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
250 rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
246 std::memcpy(dest_buffer, src_ptr, copy_amount); 251 std::memcpy(dest_buffer, src_ptr, copy_amount);
247 break; 252 break;
248 } 253 }
@@ -292,7 +297,9 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const
292 switch (page_table.attributes[page_index]) { 297 switch (page_table.attributes[page_index]) {
293 case Common::PageType::Memory: { 298 case Common::PageType::Memory: {
294 u8* dest_ptr{page_table.pointers[page_index] + page_offset}; 299 u8* dest_ptr{page_table.pointers[page_index] + page_offset};
295 system.GPU().InvalidateRegion(ToCacheAddr(dest_ptr), copy_amount); 300 // Invalidate must happen on the rasterizer interface, such that memory is always
301 // synchronous when it is written (even when in asynchronous GPU mode).
302 rasterizer.InvalidateRegion(ToCacheAddr(dest_ptr), copy_amount);
296 std::memcpy(dest_ptr, src_buffer, copy_amount); 303 std::memcpy(dest_ptr, src_buffer, copy_amount);
297 break; 304 break;
298 } 305 }
@@ -339,8 +346,10 @@ void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::
339 346
340 switch (page_table.attributes[page_index]) { 347 switch (page_table.attributes[page_index]) {
341 case Common::PageType::Memory: { 348 case Common::PageType::Memory: {
349 // Flush must happen on the rasterizer interface, such that memory is always synchronous
350 // when it is copied (even when in asynchronous GPU mode).
342 const u8* src_ptr{page_table.pointers[page_index] + page_offset}; 351 const u8* src_ptr{page_table.pointers[page_index] + page_offset};
343 system.GPU().FlushRegion(ToCacheAddr(src_ptr), copy_amount); 352 rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
344 WriteBlock(dest_addr, src_ptr, copy_amount); 353 WriteBlock(dest_addr, src_ptr, copy_amount);
345 break; 354 break;
346 } 355 }
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 393447eb4..aea010087 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -10,6 +10,10 @@
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "common/page_table.h" 11#include "common/page_table.h"
12 12
13namespace VideoCore {
14class RasterizerInterface;
15}
16
13namespace Core { 17namespace Core {
14class System; 18class System;
15} 19}
@@ -47,7 +51,7 @@ struct VirtualMemoryArea {
47 51
48class MemoryManager final { 52class MemoryManager final {
49public: 53public:
50 explicit MemoryManager(Core::System& system); 54 explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer);
51 ~MemoryManager(); 55 ~MemoryManager();
52 56
53 GPUVAddr AllocateSpace(u64 size, u64 align); 57 GPUVAddr AllocateSpace(u64 size, u64 align);
@@ -172,6 +176,7 @@ private:
172 176
173 Common::PageTable page_table{page_bits}; 177 Common::PageTable page_table{page_bits};
174 VMAMap vma_map; 178 VMAMap vma_map;
179 VideoCore::RasterizerInterface& rasterizer;
175 180
176 Core::System& system; 181 Core::System& system;
177}; 182};
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
new file mode 100644
index 000000000..e66054ed0
--- /dev/null
+++ b/src/video_core/query_cache.h
@@ -0,0 +1,359 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <algorithm>
8#include <array>
9#include <cstring>
10#include <iterator>
11#include <memory>
12#include <mutex>
13#include <optional>
14#include <unordered_map>
15#include <vector>
16
17#include "common/assert.h"
18#include "core/core.h"
19#include "video_core/engines/maxwell_3d.h"
20#include "video_core/gpu.h"
21#include "video_core/memory_manager.h"
22#include "video_core/rasterizer_interface.h"
23
24namespace VideoCommon {
25
26template <class QueryCache, class HostCounter>
27class CounterStreamBase {
28public:
29 explicit CounterStreamBase(QueryCache& cache, VideoCore::QueryType type)
30 : cache{cache}, type{type} {}
31
32 /// Updates the state of the stream, enabling or disabling as needed.
33 void Update(bool enabled) {
34 if (enabled) {
35 Enable();
36 } else {
37 Disable();
38 }
39 }
40
41 /// Resets the stream to zero. It doesn't disable the query after resetting.
42 void Reset() {
43 if (current) {
44 current->EndQuery();
45
46 // Immediately start a new query to avoid disabling its state.
47 current = cache.Counter(nullptr, type);
48 }
49 last = nullptr;
50 }
51
52 /// Returns the current counter, slicing it as needed.
53 std::shared_ptr<HostCounter> Current() {
54 if (!current) {
55 return nullptr;
56 }
57 current->EndQuery();
58 last = std::move(current);
59 current = cache.Counter(last, type);
60 return last;
61 }
62
63 /// Returns true when the counter stream is enabled.
64 bool IsEnabled() const {
65 return current != nullptr;
66 }
67
68private:
69 /// Enables the stream.
70 void Enable() {
71 if (current) {
72 return;
73 }
74 current = cache.Counter(last, type);
75 }
76
77 /// Disables the stream.
78 void Disable() {
79 if (current) {
80 current->EndQuery();
81 }
82 last = std::exchange(current, nullptr);
83 }
84
85 QueryCache& cache;
86 const VideoCore::QueryType type;
87
88 std::shared_ptr<HostCounter> current;
89 std::shared_ptr<HostCounter> last;
90};
91
92template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter,
93 class QueryPool>
94class QueryCacheBase {
95public:
96 explicit QueryCacheBase(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
97 : system{system}, rasterizer{rasterizer}, streams{{CounterStream{
98 static_cast<QueryCache&>(*this),
99 VideoCore::QueryType::SamplesPassed}}} {}
100
101 void InvalidateRegion(CacheAddr addr, std::size_t size) {
102 std::unique_lock lock{mutex};
103 FlushAndRemoveRegion(addr, size);
104 }
105
106 void FlushRegion(CacheAddr addr, std::size_t size) {
107 std::unique_lock lock{mutex};
108 FlushAndRemoveRegion(addr, size);
109 }
110
111 /**
112 * Records a query in GPU mapped memory, potentially marked with a timestamp.
113 * @param gpu_addr GPU address to flush to when the mapped memory is read.
114 * @param type Query type, e.g. SamplesPassed.
115 * @param timestamp Timestamp, when empty the flushed query is assumed to be short.
116 */
117 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
118 std::unique_lock lock{mutex};
119 auto& memory_manager = system.GPU().MemoryManager();
120 const auto host_ptr = memory_manager.GetPointer(gpu_addr);
121
122 CachedQuery* query = TryGet(ToCacheAddr(host_ptr));
123 if (!query) {
124 const auto cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
125 ASSERT_OR_EXECUTE(cpu_addr, return;);
126
127 query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
128 }
129
130 query->BindCounter(Stream(type).Current(), timestamp);
131 }
132
133 /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
134 void UpdateCounters() {
135 std::unique_lock lock{mutex};
136 const auto& regs = system.GPU().Maxwell3D().regs;
137 Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable);
138 }
139
140 /// Resets a counter to zero. It doesn't disable the query after resetting.
141 void ResetCounter(VideoCore::QueryType type) {
142 std::unique_lock lock{mutex};
143 Stream(type).Reset();
144 }
145
146 /// Disables all active streams. Expected to be called at the end of a command buffer.
147 void DisableStreams() {
148 std::unique_lock lock{mutex};
149 for (auto& stream : streams) {
150 stream.Update(false);
151 }
152 }
153
154 /// Returns a new host counter.
155 std::shared_ptr<HostCounter> Counter(std::shared_ptr<HostCounter> dependency,
156 VideoCore::QueryType type) {
157 return std::make_shared<HostCounter>(static_cast<QueryCache&>(*this), std::move(dependency),
158 type);
159 }
160
161 /// Returns the counter stream of the specified type.
162 CounterStream& Stream(VideoCore::QueryType type) {
163 return streams[static_cast<std::size_t>(type)];
164 }
165
166 /// Returns the counter stream of the specified type.
167 const CounterStream& Stream(VideoCore::QueryType type) const {
168 return streams[static_cast<std::size_t>(type)];
169 }
170
171protected:
172 std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
173
174private:
175 /// Flushes a memory range to guest memory and removes it from the cache.
176 void FlushAndRemoveRegion(CacheAddr addr, std::size_t size) {
177 const u64 addr_begin = static_cast<u64>(addr);
178 const u64 addr_end = addr_begin + static_cast<u64>(size);
179 const auto in_range = [addr_begin, addr_end](CachedQuery& query) {
180 const u64 cache_begin = query.GetCacheAddr();
181 const u64 cache_end = cache_begin + query.SizeInBytes();
182 return cache_begin < addr_end && addr_begin < cache_end;
183 };
184
185 const u64 page_end = addr_end >> PAGE_SHIFT;
186 for (u64 page = addr_begin >> PAGE_SHIFT; page <= page_end; ++page) {
187 const auto& it = cached_queries.find(page);
188 if (it == std::end(cached_queries)) {
189 continue;
190 }
191 auto& contents = it->second;
192 for (auto& query : contents) {
193 if (!in_range(query)) {
194 continue;
195 }
196 rasterizer.UpdatePagesCachedCount(query.CpuAddr(), query.SizeInBytes(), -1);
197 query.Flush();
198 }
199 contents.erase(std::remove_if(std::begin(contents), std::end(contents), in_range),
200 std::end(contents));
201 }
202 }
203
204 /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
205 CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
206 rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
207 const u64 page = static_cast<u64>(ToCacheAddr(host_ptr)) >> PAGE_SHIFT;
208 return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
209 host_ptr);
210 }
211
212 /// Tries to get a cached query. Returns nullptr on failure.
213 CachedQuery* TryGet(CacheAddr addr) {
214 const u64 page = static_cast<u64>(addr) >> PAGE_SHIFT;
215 const auto it = cached_queries.find(page);
216 if (it == std::end(cached_queries)) {
217 return nullptr;
218 }
219 auto& contents = it->second;
220 const auto found =
221 std::find_if(std::begin(contents), std::end(contents),
222 [addr](auto& query) { return query.GetCacheAddr() == addr; });
223 return found != std::end(contents) ? &*found : nullptr;
224 }
225
226 static constexpr std::uintptr_t PAGE_SIZE = 4096;
227 static constexpr unsigned PAGE_SHIFT = 12;
228
229 Core::System& system;
230 VideoCore::RasterizerInterface& rasterizer;
231
232 std::recursive_mutex mutex;
233
234 std::unordered_map<u64, std::vector<CachedQuery>> cached_queries;
235
236 std::array<CounterStream, VideoCore::NumQueryTypes> streams;
237};
238
239template <class QueryCache, class HostCounter>
240class HostCounterBase {
241public:
242 explicit HostCounterBase(std::shared_ptr<HostCounter> dependency_)
243 : dependency{std::move(dependency_)}, depth{dependency ? (dependency->Depth() + 1) : 0} {
244 // Avoid nesting too many dependencies so that deleting the chain does not overflow the stack.
245 constexpr u64 depth_threshold = 96;
246 if (depth > depth_threshold) {
247 depth = 0;
248 base_result = dependency->Query();
249 dependency = nullptr;
250 }
251 }
252 virtual ~HostCounterBase() = default;
253
254 /// Returns the current value of the query.
255 u64 Query() {
256 if (result) {
257 return *result;
258 }
259
260 u64 value = BlockingQuery() + base_result;
261 if (dependency) {
262 value += dependency->Query();
263 dependency = nullptr;
264 }
265
266 result = value;
267 return *result;
268 }
269
270 /// Returns true when flushing this query will potentially wait.
271 bool WaitPending() const noexcept {
272 return result.has_value();
273 }
274
275 u64 Depth() const noexcept {
276 return depth;
277 }
278
279protected:
280 /// Returns the value of query from the backend API blocking as needed.
281 virtual u64 BlockingQuery() const = 0;
282
283private:
284 std::shared_ptr<HostCounter> dependency; ///< Counter to add to this value.
285 std::optional<u64> result; ///< Filled with the already returned value.
286 u64 depth; ///< Number of nested dependencies.
287 u64 base_result = 0; ///< Equivalent to nested dependencies value.
288};
289
290template <class HostCounter>
291class CachedQueryBase {
292public:
293 explicit CachedQueryBase(VAddr cpu_addr, u8* host_ptr)
294 : cpu_addr{cpu_addr}, host_ptr{host_ptr} {}
295 virtual ~CachedQueryBase() = default;
296
297 CachedQueryBase(CachedQueryBase&&) noexcept = default;
298 CachedQueryBase(const CachedQueryBase&) = delete;
299
300 CachedQueryBase& operator=(CachedQueryBase&&) noexcept = default;
301 CachedQueryBase& operator=(const CachedQueryBase&) = delete;
302
303 /// Flushes the query to guest memory.
304 virtual void Flush() {
305 // When counter is nullptr it means the query has just been reset. We are supposed to write
306 // a zero in these cases.
307 const u64 value = counter ? counter->Query() : 0;
308 std::memcpy(host_ptr, &value, sizeof(u64));
309
310 if (timestamp) {
311 std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(u64));
312 }
313 }
314
315 /// Binds a counter to this query.
316 void BindCounter(std::shared_ptr<HostCounter> counter_, std::optional<u64> timestamp_) {
317 if (counter) {
318 // If there's an old counter set it means the query is being rewritten by the game.
319 // To avoid losing the data forever, flush here.
320 Flush();
321 }
322 counter = std::move(counter_);
323 timestamp = timestamp_;
324 }
325
326 VAddr CpuAddr() const noexcept {
327 return cpu_addr;
328 }
329
330 CacheAddr GetCacheAddr() const noexcept {
331 return ToCacheAddr(host_ptr);
332 }
333
334 u64 SizeInBytes() const noexcept {
335 return SizeInBytes(timestamp.has_value());
336 }
337
338 static constexpr u64 SizeInBytes(bool with_timestamp) noexcept {
339 return with_timestamp ? LARGE_QUERY_SIZE : SMALL_QUERY_SIZE;
340 }
341
342protected:
343 /// Returns true when querying the counter may potentially block.
344 bool WaitPending() const noexcept {
345 return counter && counter->WaitPending();
346 }
347
348private:
349 static constexpr std::size_t SMALL_QUERY_SIZE = 8; // Query size without timestamp.
350 static constexpr std::size_t LARGE_QUERY_SIZE = 16; // Query size with timestamp.
351 static constexpr std::intptr_t TIMESTAMP_OFFSET = 8; // Timestamp offset in a large query.
352
353 VAddr cpu_addr; ///< Guest CPU address.
354 u8* host_ptr; ///< Writable host pointer.
355 std::shared_ptr<HostCounter> counter; ///< Host counter to query, owns the dependency tree.
356 std::optional<u64> timestamp; ///< Timestamp to flush to guest memory.
357};
358
359} // namespace VideoCommon
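The part of this header that is easiest to misread is the HostCounter dependency chain: each new counter slice keeps a pointer to the previous slice of the same stream, and Query() adds its own backend value to the accumulated value of its dependency (with the depth check above collapsing over-long chains). A toy, GPU-free model of that accumulation, with a plain integer standing in for BlockingQuery():

#include <cstdint>
#include <iostream>
#include <memory>
#include <optional>

// Toy model of the HostCounter dependency chain; `backend_value` stands in for the value a
// real backend query (OpenGL/Vulkan) would return.
class ToyCounter {
public:
    ToyCounter(std::shared_ptr<ToyCounter> dependency, std::uint64_t backend_value)
        : dependency{std::move(dependency)}, backend_value{backend_value} {}

    std::uint64_t Query() {
        if (result) {
            return *result;
        }
        std::uint64_t value = backend_value; // BlockingQuery() in the real code
        if (dependency) {
            value += dependency->Query();    // accumulate the rest of the chain
            dependency = nullptr;
        }
        result = value;
        return *result;
    }

private:
    std::shared_ptr<ToyCounter> dependency;
    std::uint64_t backend_value;
    std::optional<std::uint64_t> result;
};

int main() {
    // Three slices of the same counter stream: 10 + 5 + 7 samples.
    auto a = std::make_shared<ToyCounter>(nullptr, 10);
    auto b = std::make_shared<ToyCounter>(a, 5);
    auto c = std::make_shared<ToyCounter>(b, 7);
    std::cout << c->Query() << '\n'; // prints 22, the accumulated samples-passed value
    return 0;
}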
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index c586cd6fe..f18eaf4bc 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -6,6 +6,7 @@
6 6
7#include <atomic> 7#include <atomic>
8#include <functional> 8#include <functional>
9#include <optional>
9#include "common/common_types.h" 10#include "common/common_types.h"
10#include "video_core/engines/fermi_2d.h" 11#include "video_core/engines/fermi_2d.h"
11#include "video_core/gpu.h" 12#include "video_core/gpu.h"
@@ -17,6 +18,11 @@ class MemoryManager;
17 18
18namespace VideoCore { 19namespace VideoCore {
19 20
21enum class QueryType {
22 SamplesPassed,
23};
24constexpr std::size_t NumQueryTypes = 1;
25
20enum class LoadCallbackStage { 26enum class LoadCallbackStage {
21 Prepare, 27 Prepare,
22 Decompile, 28 Decompile,
@@ -29,11 +35,8 @@ class RasterizerInterface {
29public: 35public:
30 virtual ~RasterizerInterface() {} 36 virtual ~RasterizerInterface() {}
31 37
32 /// Draw the current batch of vertex arrays 38 /// Dispatches a draw invocation
33 virtual bool DrawBatch(bool is_indexed) = 0; 39 virtual void Draw(bool is_indexed, bool is_instanced) = 0;
34
35 /// Draw the current batch of multiple instances of vertex arrays
36 virtual bool DrawMultiBatch(bool is_indexed) = 0;
37 40
38 /// Clear the current framebuffer 41 /// Clear the current framebuffer
39 virtual void Clear() = 0; 42 virtual void Clear() = 0;
@@ -41,6 +44,12 @@ public:
41 /// Dispatches a compute shader invocation 44 /// Dispatches a compute shader invocation
42 virtual void DispatchCompute(GPUVAddr code_addr) = 0; 45 virtual void DispatchCompute(GPUVAddr code_addr) = 0;
43 46
47 /// Resets the counter of a query
48 virtual void ResetCounter(QueryType type) = 0;
49
50 /// Records a GPU query and caches it
51 virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0;
52
44 /// Notify rasterizer that all caches should be flushed to Switch memory 53 /// Notify rasterizer that all caches should be flushed to Switch memory
45 virtual void FlushAll() = 0; 54 virtual void FlushAll() = 0;
46 55
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
new file mode 100644
index 000000000..f12e9f55f
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -0,0 +1,120 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cstring>
7#include <memory>
8#include <unordered_map>
9#include <utility>
10#include <vector>
11
12#include <glad/glad.h>
13
14#include "common/assert.h"
15#include "core/core.h"
16#include "video_core/engines/maxwell_3d.h"
17#include "video_core/memory_manager.h"
18#include "video_core/renderer_opengl/gl_query_cache.h"
19#include "video_core/renderer_opengl/gl_rasterizer.h"
20
21namespace OpenGL {
22
23namespace {
24
25constexpr std::array<GLenum, VideoCore::NumQueryTypes> QueryTargets = {GL_SAMPLES_PASSED};
26
27constexpr GLenum GetTarget(VideoCore::QueryType type) {
28 return QueryTargets[static_cast<std::size_t>(type)];
29}
30
31} // Anonymous namespace
32
33QueryCache::QueryCache(Core::System& system, RasterizerOpenGL& gl_rasterizer)
34 : VideoCommon::QueryCacheBase<
35 QueryCache, CachedQuery, CounterStream, HostCounter,
36 std::vector<OGLQuery>>{system,
37 static_cast<VideoCore::RasterizerInterface&>(gl_rasterizer)},
38 gl_rasterizer{gl_rasterizer} {}
39
40QueryCache::~QueryCache() = default;
41
42OGLQuery QueryCache::AllocateQuery(VideoCore::QueryType type) {
43 auto& reserve = query_pools[static_cast<std::size_t>(type)];
44 OGLQuery query;
45 if (reserve.empty()) {
46 query.Create(GetTarget(type));
47 return query;
48 }
49
50 query = std::move(reserve.back());
51 reserve.pop_back();
52 return query;
53}
54
55void QueryCache::Reserve(VideoCore::QueryType type, OGLQuery&& query) {
56 query_pools[static_cast<std::size_t>(type)].push_back(std::move(query));
57}
58
59bool QueryCache::AnyCommandQueued() const noexcept {
60 return gl_rasterizer.AnyCommandQueued();
61}
62
63HostCounter::HostCounter(QueryCache& cache, std::shared_ptr<HostCounter> dependency,
64 VideoCore::QueryType type)
65 : VideoCommon::HostCounterBase<QueryCache, HostCounter>{std::move(dependency)}, cache{cache},
66 type{type}, query{cache.AllocateQuery(type)} {
67 glBeginQuery(GetTarget(type), query.handle);
68}
69
70HostCounter::~HostCounter() {
71 cache.Reserve(type, std::move(query));
72}
73
74void HostCounter::EndQuery() {
75 if (!cache.AnyCommandQueued()) {
76 // A query can end up being waited on without any commands having been queued (glDraw,
77 // glClear, glDispatch); waiting in that situation locks up. glFlush counts as a command,
78 // so insert a flush into the OpenGL command stream to make the wait safe.
79 glFlush();
80 }
81 glEndQuery(GetTarget(type));
82}
83
84u64 HostCounter::BlockingQuery() const {
85 GLint64 value;
86 glGetQueryObjecti64v(query.handle, GL_QUERY_RESULT, &value);
87 return static_cast<u64>(value);
88}
89
90CachedQuery::CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr)
91 : VideoCommon::CachedQueryBase<HostCounter>{cpu_addr, host_ptr}, cache{&cache}, type{type} {}
92
93CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept
94 : VideoCommon::CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {}
95
96CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept {
97 VideoCommon::CachedQueryBase<HostCounter>::operator=(std::move(rhs));
98 cache = rhs.cache;
99 type = rhs.type;
100 return *this;
101}
102
103void CachedQuery::Flush() {
104 // Waiting for a query while another query of the same target is enabled locks Nvidia's driver.
105 // To avoid this, disable and re-enable the stream while keeping its dependency chain intact.
106 // This is only necessary when there are pending waits.
107 auto& stream = cache->Stream(type);
108 const bool slice_counter = WaitPending() && stream.IsEnabled();
109 if (slice_counter) {
110 stream.Update(false);
111 }
112
113 VideoCommon::CachedQueryBase<HostCounter>::Flush();
114
115 if (slice_counter) {
116 stream.Update(true);
117 }
118}
119
120} // namespace OpenGL
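For reference, the raw OpenGL calls the cache wraps can be exercised on their own. A minimal GL_SAMPLES_PASSED round trip mirroring what HostCounter does, assuming a current OpenGL context and a caller-supplied draw callback (both placeholders):

#include <glad/glad.h>

// Minimal GL_SAMPLES_PASSED round trip; assumes a current OpenGL context and a caller-provided
// draw function (both are placeholders here).
GLuint64 CountSamplesPassed(void (*draw_scene)()) {
    GLuint query = 0;
    glGenQueries(1, &query);

    glBeginQuery(GL_SAMPLES_PASSED, query);
    draw_scene();                       // any glDraw*/glClear/glDispatch counts as a command
    glEndQuery(GL_SAMPLES_PASSED);

    GLint64 samples = 0;
    glGetQueryObjecti64v(query, GL_QUERY_RESULT, &samples); // blocks until the result is ready

    glDeleteQueries(1, &query);
    return static_cast<GLuint64>(samples);
}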
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
new file mode 100644
index 000000000..d8e7052a1
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -0,0 +1,78 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <memory>
9#include <vector>
10
11#include "common/common_types.h"
12#include "video_core/query_cache.h"
13#include "video_core/rasterizer_interface.h"
14#include "video_core/renderer_opengl/gl_resource_manager.h"
15
16namespace Core {
17class System;
18}
19
20namespace OpenGL {
21
22class CachedQuery;
23class HostCounter;
24class QueryCache;
25class RasterizerOpenGL;
26
27using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
28
29class QueryCache final : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream,
30 HostCounter, std::vector<OGLQuery>> {
31public:
32 explicit QueryCache(Core::System& system, RasterizerOpenGL& rasterizer);
33 ~QueryCache();
34
35 OGLQuery AllocateQuery(VideoCore::QueryType type);
36
37 void Reserve(VideoCore::QueryType type, OGLQuery&& query);
38
39 bool AnyCommandQueued() const noexcept;
40
41private:
42 RasterizerOpenGL& gl_rasterizer;
43};
44
45class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> {
46public:
47 explicit HostCounter(QueryCache& cache, std::shared_ptr<HostCounter> dependency,
48 VideoCore::QueryType type);
49 ~HostCounter();
50
51 void EndQuery();
52
53private:
54 u64 BlockingQuery() const override;
55
56 QueryCache& cache;
57 const VideoCore::QueryType type;
58 OGLQuery query;
59};
60
61class CachedQuery final : public VideoCommon::CachedQueryBase<HostCounter> {
62public:
63 explicit CachedQuery(QueryCache& cache, VideoCore::QueryType type, VAddr cpu_addr,
64 u8* host_ptr);
65 CachedQuery(CachedQuery&& rhs) noexcept;
66 CachedQuery(const CachedQuery&) = delete;
67
68 CachedQuery& operator=(CachedQuery&& rhs) noexcept;
69 CachedQuery& operator=(const CachedQuery&) = delete;
70
71 void Flush() override;
72
73private:
74 QueryCache* cache;
75 VideoCore::QueryType type;
76};
77
78} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index b0eb14c8b..e1965fb21 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -25,6 +25,7 @@
25#include "video_core/engines/maxwell_3d.h" 25#include "video_core/engines/maxwell_3d.h"
26#include "video_core/engines/shader_type.h" 26#include "video_core/engines/shader_type.h"
27#include "video_core/memory_manager.h" 27#include "video_core/memory_manager.h"
28#include "video_core/renderer_opengl/gl_query_cache.h"
28#include "video_core/renderer_opengl/gl_rasterizer.h" 29#include "video_core/renderer_opengl/gl_rasterizer.h"
29#include "video_core/renderer_opengl/gl_shader_cache.h" 30#include "video_core/renderer_opengl/gl_shader_cache.h"
30#include "video_core/renderer_opengl/gl_shader_gen.h" 31#include "video_core/renderer_opengl/gl_shader_gen.h"
@@ -92,8 +93,8 @@ std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer,
92RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window, 93RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window,
93 ScreenInfo& info) 94 ScreenInfo& info)
94 : RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device}, 95 : RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device},
95 shader_cache{*this, system, emu_window, device}, system{system}, screen_info{info}, 96 shader_cache{*this, system, emu_window, device}, query_cache{system, *this}, system{system},
96 buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} { 97 screen_info{info}, buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} {
97 shader_program_manager = std::make_unique<GLShader::ProgramManager>(); 98 shader_program_manager = std::make_unique<GLShader::ProgramManager>();
98 state.draw.shader_program = 0; 99 state.draw.shader_program = 0;
99 state.Apply(); 100 state.Apply();
@@ -541,11 +542,16 @@ void RasterizerOpenGL::Clear() {
541 } else if (use_stencil) { 542 } else if (use_stencil) {
542 glClearBufferiv(GL_STENCIL, 0, &regs.clear_stencil); 543 glClearBufferiv(GL_STENCIL, 0, &regs.clear_stencil);
543 } 544 }
545
546 ++num_queued_commands;
544} 547}
545 548
546void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { 549void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
547 MICROPROFILE_SCOPE(OpenGL_Drawing); 550 MICROPROFILE_SCOPE(OpenGL_Drawing);
548 auto& gpu = system.GPU().Maxwell3D(); 551 auto& gpu = system.GPU().Maxwell3D();
552 const auto& regs = gpu.regs;
553
554 query_cache.UpdateCounters();
549 555
550 SyncRasterizeEnable(state); 556 SyncRasterizeEnable(state);
551 SyncColorMask(); 557 SyncColorMask();
@@ -611,7 +617,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
611 617
612 // Setup shaders and their used resources. 618 // Setup shaders and their used resources.
613 texture_cache.GuardSamplers(true); 619 texture_cache.GuardSamplers(true);
614 const auto primitive_mode = MaxwellToGL::PrimitiveTopology(gpu.regs.draw.topology); 620 const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(gpu.regs.draw.topology);
615 SetupShaders(primitive_mode); 621 SetupShaders(primitive_mode);
616 texture_cache.GuardSamplers(false); 622 texture_cache.GuardSamplers(false);
617 623
@@ -638,35 +644,47 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
638 glTextureBarrier(); 644 glTextureBarrier();
639 } 645 }
640 646
647 ++num_queued_commands;
648
641 const GLuint base_instance = static_cast<GLuint>(gpu.regs.vb_base_instance); 649 const GLuint base_instance = static_cast<GLuint>(gpu.regs.vb_base_instance);
642 const GLsizei num_instances = 650 const GLsizei num_instances =
643 static_cast<GLsizei>(is_instanced ? gpu.mme_draw.instance_count : 1); 651 static_cast<GLsizei>(is_instanced ? gpu.mme_draw.instance_count : 1);
644 if (is_indexed) { 652 if (is_indexed) {
645 const GLenum index_format = MaxwellToGL::IndexFormat(gpu.regs.index_array.format);
646 const GLint base_vertex = static_cast<GLint>(gpu.regs.vb_element_base); 653 const GLint base_vertex = static_cast<GLint>(gpu.regs.vb_element_base);
647 const GLsizei num_vertices = static_cast<GLsizei>(gpu.regs.index_array.count); 654 const GLsizei num_vertices = static_cast<GLsizei>(gpu.regs.index_array.count);
648 glDrawElementsInstancedBaseVertexBaseInstance( 655 const GLvoid* offset = reinterpret_cast<const GLvoid*>(index_buffer_offset);
649 primitive_mode, num_vertices, index_format, 656 const GLenum format = MaxwellToGL::IndexFormat(gpu.regs.index_array.format);
650 reinterpret_cast<const void*>(index_buffer_offset), num_instances, base_vertex, 657 if (num_instances == 1 && base_instance == 0 && base_vertex == 0) {
651 base_instance); 658 glDrawElements(primitive_mode, num_vertices, format, offset);
659 } else if (num_instances == 1 && base_instance == 0) {
660 glDrawElementsBaseVertex(primitive_mode, num_vertices, format, offset, base_vertex);
661 } else if (base_vertex == 0 && base_instance == 0) {
662 glDrawElementsInstanced(primitive_mode, num_vertices, format, offset, num_instances);
663 } else if (base_vertex == 0) {
664 glDrawElementsInstancedBaseInstance(primitive_mode, num_vertices, format, offset,
665 num_instances, base_instance);
666 } else if (base_instance == 0) {
667 glDrawElementsInstancedBaseVertex(primitive_mode, num_vertices, format, offset,
668 num_instances, base_vertex);
669 } else {
670 glDrawElementsInstancedBaseVertexBaseInstance(primitive_mode, num_vertices, format,
671 offset, num_instances, base_vertex,
672 base_instance);
673 }
652 } else { 674 } else {
653 const GLint base_vertex = static_cast<GLint>(gpu.regs.vertex_buffer.first); 675 const GLint base_vertex = static_cast<GLint>(gpu.regs.vertex_buffer.first);
654 const GLsizei num_vertices = static_cast<GLsizei>(gpu.regs.vertex_buffer.count); 676 const GLsizei num_vertices = static_cast<GLsizei>(gpu.regs.vertex_buffer.count);
655 glDrawArraysInstancedBaseInstance(primitive_mode, base_vertex, num_vertices, num_instances, 677 if (num_instances == 1 && base_instance == 0) {
656 base_instance); 678 glDrawArrays(primitive_mode, base_vertex, num_vertices);
679 } else if (base_instance == 0) {
680 glDrawArraysInstanced(primitive_mode, base_vertex, num_vertices, num_instances);
681 } else {
682 glDrawArraysInstancedBaseInstance(primitive_mode, base_vertex, num_vertices,
683 num_instances, base_instance);
684 }
657 } 685 }
658} 686}
659 687
660bool RasterizerOpenGL::DrawBatch(bool is_indexed) {
661 Draw(is_indexed, false);
662 return true;
663}
664
665bool RasterizerOpenGL::DrawMultiBatch(bool is_indexed) {
666 Draw(is_indexed, true);
667 return true;
668}
669
670void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { 688void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
671 if (device.HasBrokenCompute()) { 689 if (device.HasBrokenCompute()) {
672 return; 690 return;
@@ -707,6 +725,16 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
707 state.ApplyProgramPipeline(); 725 state.ApplyProgramPipeline();
708 726
709 glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z); 727 glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z);
728 ++num_queued_commands;
729}
730
731void RasterizerOpenGL::ResetCounter(VideoCore::QueryType type) {
732 query_cache.ResetCounter(type);
733}
734
735void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
736 std::optional<u64> timestamp) {
737 query_cache.Query(gpu_addr, type, timestamp);
710} 738}
711 739
712void RasterizerOpenGL::FlushAll() {} 740void RasterizerOpenGL::FlushAll() {}
@@ -718,6 +746,7 @@ void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) {
718 } 746 }
719 texture_cache.FlushRegion(addr, size); 747 texture_cache.FlushRegion(addr, size);
720 buffer_cache.FlushRegion(addr, size); 748 buffer_cache.FlushRegion(addr, size);
749 query_cache.FlushRegion(addr, size);
721} 750}
722 751
723void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) { 752void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
@@ -728,6 +757,7 @@ void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
728 texture_cache.InvalidateRegion(addr, size); 757 texture_cache.InvalidateRegion(addr, size);
729 shader_cache.InvalidateRegion(addr, size); 758 shader_cache.InvalidateRegion(addr, size);
730 buffer_cache.InvalidateRegion(addr, size); 759 buffer_cache.InvalidateRegion(addr, size);
760 query_cache.InvalidateRegion(addr, size);
731} 761}
732 762
733void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { 763void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
@@ -738,10 +768,18 @@ void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
738} 768}
739 769
740void RasterizerOpenGL::FlushCommands() { 770void RasterizerOpenGL::FlushCommands() {
771 // Only flush when we have commands queued to OpenGL.
772 if (num_queued_commands == 0) {
773 return;
774 }
775 num_queued_commands = 0;
741 glFlush(); 776 glFlush();
742} 777}
743 778
744void RasterizerOpenGL::TickFrame() { 779void RasterizerOpenGL::TickFrame() {
780 // Ticking a frame means that buffers will be swapped, calling glFlush implicitly.
781 num_queued_commands = 0;
782
745 buffer_cache.TickFrame(); 783 buffer_cache.TickFrame();
746} 784}
747 785
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 0501f3828..68abe9a21 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -24,6 +24,7 @@
24#include "video_core/renderer_opengl/gl_buffer_cache.h" 24#include "video_core/renderer_opengl/gl_buffer_cache.h"
25#include "video_core/renderer_opengl/gl_device.h" 25#include "video_core/renderer_opengl/gl_device.h"
26#include "video_core/renderer_opengl/gl_framebuffer_cache.h" 26#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
27#include "video_core/renderer_opengl/gl_query_cache.h"
27#include "video_core/renderer_opengl/gl_resource_manager.h" 28#include "video_core/renderer_opengl/gl_resource_manager.h"
28#include "video_core/renderer_opengl/gl_sampler_cache.h" 29#include "video_core/renderer_opengl/gl_sampler_cache.h"
29#include "video_core/renderer_opengl/gl_shader_cache.h" 30#include "video_core/renderer_opengl/gl_shader_cache.h"
@@ -57,10 +58,11 @@ public:
57 ScreenInfo& info); 58 ScreenInfo& info);
58 ~RasterizerOpenGL() override; 59 ~RasterizerOpenGL() override;
59 60
60 bool DrawBatch(bool is_indexed) override; 61 void Draw(bool is_indexed, bool is_instanced) override;
61 bool DrawMultiBatch(bool is_indexed) override;
62 void Clear() override; 62 void Clear() override;
63 void DispatchCompute(GPUVAddr code_addr) override; 63 void DispatchCompute(GPUVAddr code_addr) override;
64 void ResetCounter(VideoCore::QueryType type) override;
65 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
64 void FlushAll() override; 66 void FlushAll() override;
65 void FlushRegion(CacheAddr addr, u64 size) override; 67 void FlushRegion(CacheAddr addr, u64 size) override;
66 void InvalidateRegion(CacheAddr addr, u64 size) override; 68 void InvalidateRegion(CacheAddr addr, u64 size) override;
@@ -75,6 +77,11 @@ public:
75 void LoadDiskResources(const std::atomic_bool& stop_loading, 77 void LoadDiskResources(const std::atomic_bool& stop_loading,
76 const VideoCore::DiskResourceLoadCallback& callback) override; 78 const VideoCore::DiskResourceLoadCallback& callback) override;
77 79
80 /// Returns true when there are commands queued to the OpenGL server.
81 bool AnyCommandQueued() const {
82 return num_queued_commands > 0;
83 }
84
78private: 85private:
79 /// Configures the color and depth framebuffer states. 86 /// Configures the color and depth framebuffer states.
80 void ConfigureFramebuffers(); 87 void ConfigureFramebuffers();
@@ -102,9 +109,6 @@ private:
102 void SetupGlobalMemory(u32 binding, const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr, 109 void SetupGlobalMemory(u32 binding, const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr,
103 std::size_t size); 110 std::size_t size);
104 111
105 /// Syncs all the state, shaders, render targets and textures setting before a draw call.
106 void Draw(bool is_indexed, bool is_instanced);
107
108 /// Configures the current textures to use for the draw command. 112 /// Configures the current textures to use for the draw command.
109 void SetupDrawTextures(std::size_t stage_index, const Shader& shader); 113 void SetupDrawTextures(std::size_t stage_index, const Shader& shader);
110 114
@@ -180,10 +184,23 @@ private:
180 /// Syncs the alpha test state to match the guest state 184 /// Syncs the alpha test state to match the guest state
181 void SyncAlphaTest(); 185 void SyncAlphaTest();
182 186
183 /// Check for extension that are not strictly required 187 /// Check for extensions that are not strictly required but are needed for correct emulation
184 /// but are needed for correct emulation
185 void CheckExtensions(); 188 void CheckExtensions();
186 189
190 std::size_t CalculateVertexArraysSize() const;
191
192 std::size_t CalculateIndexBufferSize() const;
193
194 /// Updates and returns a vertex array object representing the current vertex format
195 GLuint SetupVertexFormat();
196
197 void SetupVertexBuffer(GLuint vao);
198 void SetupVertexInstances(GLuint vao);
199
200 GLintptr SetupIndexBuffer();
201
202 void SetupShaders(GLenum primitive_mode);
203
187 const Device device; 204 const Device device;
188 OpenGLState state; 205 OpenGLState state;
189 206
@@ -191,6 +208,7 @@ private:
191 ShaderCacheOpenGL shader_cache; 208 ShaderCacheOpenGL shader_cache;
192 SamplerCacheOpenGL sampler_cache; 209 SamplerCacheOpenGL sampler_cache;
193 FramebufferCacheOpenGL framebuffer_cache; 210 FramebufferCacheOpenGL framebuffer_cache;
211 QueryCache query_cache;
194 212
195 Core::System& system; 213 Core::System& system;
196 ScreenInfo& screen_info; 214 ScreenInfo& screen_info;
@@ -208,19 +226,8 @@ private:
208 BindBuffersRangePushBuffer bind_ubo_pushbuffer{GL_UNIFORM_BUFFER}; 226 BindBuffersRangePushBuffer bind_ubo_pushbuffer{GL_UNIFORM_BUFFER};
209 BindBuffersRangePushBuffer bind_ssbo_pushbuffer{GL_SHADER_STORAGE_BUFFER}; 227 BindBuffersRangePushBuffer bind_ssbo_pushbuffer{GL_SHADER_STORAGE_BUFFER};
210 228
211 std::size_t CalculateVertexArraysSize() const; 229 /// Number of commands queued to the OpenGL driver. Reset on flush.
212 230 std::size_t num_queued_commands = 0;
213 std::size_t CalculateIndexBufferSize() const;
214
215 /// Updates and returns a vertex array object representing current vertex format
216 GLuint SetupVertexFormat();
217
218 void SetupVertexBuffer(GLuint vao);
219 void SetupVertexInstances(GLuint vao);
220
221 GLintptr SetupIndexBuffer();
222
223 void SetupShaders(GLenum primitive_mode);
224}; 231};
225 232
226} // namespace OpenGL 233} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.cpp b/src/video_core/renderer_opengl/gl_resource_manager.cpp
index 5c96c1d46..f0ddfb276 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_resource_manager.cpp
@@ -207,4 +207,21 @@ void OGLFramebuffer::Release() {
207 handle = 0; 207 handle = 0;
208} 208}
209 209
210void OGLQuery::Create(GLenum target) {
211 if (handle != 0)
212 return;
213
214 MICROPROFILE_SCOPE(OpenGL_ResourceCreation);
215 glCreateQueries(target, 1, &handle);
216}
217
218void OGLQuery::Release() {
219 if (handle == 0)
220 return;
221
222 MICROPROFILE_SCOPE(OpenGL_ResourceDeletion);
223 glDeleteQueries(1, &handle);
224 handle = 0;
225}
226
210} // namespace OpenGL 227} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.h b/src/video_core/renderer_opengl/gl_resource_manager.h
index 3a85a1d4c..514d1d165 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.h
+++ b/src/video_core/renderer_opengl/gl_resource_manager.h
@@ -266,4 +266,29 @@ public:
266 GLuint handle = 0; 266 GLuint handle = 0;
267}; 267};
268 268
269class OGLQuery : private NonCopyable {
270public:
271 OGLQuery() = default;
272
273 OGLQuery(OGLQuery&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
274
275 ~OGLQuery() {
276 Release();
277 }
278
279 OGLQuery& operator=(OGLQuery&& o) noexcept {
280 Release();
281 handle = std::exchange(o.handle, 0);
282 return *this;
283 }
284
285 /// Creates a new internal OpenGL resource and stores the handle
286 void Create(GLenum target);
287
288 /// Deletes the internal OpenGL resource
289 void Release();
290
291 GLuint handle = 0;
292};
293
269} // namespace OpenGL 294} // namespace OpenGL
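
OGLQuery follows the same move-only RAII pattern as the other OGL* wrappers: Create is a no-op when a handle already exists, Release deletes and zeroes it, and the destructor releases automatically. A usage sketch, assuming a loaded OpenGL 4.5 context (glCreateQueries is a DSA entry point); everything outside OGLQuery below is standard GL, not a yuzu helper:

    // Requires an active OpenGL 4.5 context and a function loader (e.g. glad).
    OpenGL::OGLQuery query;
    query.Create(GL_SAMPLES_PASSED);                     // glCreateQueries under the hood

    glBeginQuery(GL_SAMPLES_PASSED, query.handle);
    // ... issue the draw calls whose passing samples should be counted ...
    glEndQuery(GL_SAMPLES_PASSED);

    GLuint64 samples = 0;
    glGetQueryObjectui64v(query.handle, GL_QUERY_RESULT, &samples);  // blocks until ready

    // Leaving scope runs Release(), which calls glDeleteQueries on the handle.
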
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index c9d8aeca9..cf934b0d8 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -261,6 +261,13 @@ CachedSurface::~CachedSurface() = default;
261void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) { 261void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
262 MICROPROFILE_SCOPE(OpenGL_Texture_Download); 262 MICROPROFILE_SCOPE(OpenGL_Texture_Download);
263 263
264 if (params.IsBuffer()) {
265 glGetNamedBufferSubData(texture_buffer.handle, 0,
266 static_cast<GLsizeiptr>(params.GetHostSizeInBytes()),
267 staging_buffer.data());
268 return;
269 }
270
264 SCOPE_EXIT({ glPixelStorei(GL_PACK_ROW_LENGTH, 0); }); 271 SCOPE_EXIT({ glPixelStorei(GL_PACK_ROW_LENGTH, 0); });
265 272
266 for (u32 level = 0; level < params.emulated_levels; ++level) { 273 for (u32 level = 0; level < params.emulated_levels; ++level) {
@@ -399,24 +406,36 @@ CachedSurfaceView::CachedSurfaceView(CachedSurface& surface, const ViewParams& p
399CachedSurfaceView::~CachedSurfaceView() = default; 406CachedSurfaceView::~CachedSurfaceView() = default;
400 407
401void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const { 408void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
402 ASSERT(params.num_layers == 1 && params.num_levels == 1); 409 ASSERT(params.num_levels == 1);
403 410
404 const auto& owner_params = surface.GetSurfaceParams(); 411 const GLuint texture = surface.GetTexture();
412 if (params.num_layers > 1) {
413 // Layered framebuffer attachments
414 UNIMPLEMENTED_IF(params.base_layer != 0);
415
416 switch (params.target) {
417 case SurfaceTarget::Texture2DArray:
418 glFramebufferTexture(target, attachment, texture, params.base_level);
419 break;
420 default:
421 UNIMPLEMENTED();
422 }
423 return;
424 }
405 425
406 switch (owner_params.target) { 426 const GLenum view_target = surface.GetTarget();
427 switch (surface.GetSurfaceParams().target) {
407 case SurfaceTarget::Texture1D: 428 case SurfaceTarget::Texture1D:
408 glFramebufferTexture1D(target, attachment, surface.GetTarget(), surface.GetTexture(), 429 glFramebufferTexture1D(target, attachment, view_target, texture, params.base_level);
409 params.base_level);
410 break; 430 break;
411 case SurfaceTarget::Texture2D: 431 case SurfaceTarget::Texture2D:
412 glFramebufferTexture2D(target, attachment, surface.GetTarget(), surface.GetTexture(), 432 glFramebufferTexture2D(target, attachment, view_target, texture, params.base_level);
413 params.base_level);
414 break; 433 break;
415 case SurfaceTarget::Texture1DArray: 434 case SurfaceTarget::Texture1DArray:
416 case SurfaceTarget::Texture2DArray: 435 case SurfaceTarget::Texture2DArray:
417 case SurfaceTarget::TextureCubemap: 436 case SurfaceTarget::TextureCubemap:
418 case SurfaceTarget::TextureCubeArray: 437 case SurfaceTarget::TextureCubeArray:
419 glFramebufferTextureLayer(target, attachment, surface.GetTexture(), params.base_level, 438 glFramebufferTextureLayer(target, attachment, texture, params.base_level,
420 params.base_layer); 439 params.base_layer);
421 break; 440 break;
422 default: 441 default:
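
The Attach change above distinguishes layered views from single-layer ones: a view with num_layers > 1 is bound with glFramebufferTexture so the whole array level becomes a layered attachment, while single layers keep going through glFramebufferTexture1D/2D or glFramebufferTextureLayer. A reduced sketch of that decision as a hypothetical helper using plain GL calls (assumes an active context and GL headers):

    // `texture` is a 2D array texture handle owned elsewhere.
    void AttachColorView(GLuint texture, GLint level, GLint base_layer, GLsizei num_layers) {
        if (num_layers > 1) {
            // Layered attachment: all layers of the level are attached at once,
            // e.g. so a geometry shader can select gl_Layer per primitive.
            glFramebufferTexture(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture, level);
        } else {
            // Single layer of the array level.
            glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture,
                                      level, base_layer);
        }
    }
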
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 8c49c66a7..ef66dd141 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -165,7 +165,7 @@ struct FormatTuple {
165 {vk::Format::eUndefined, {}}, // ASTC_2D_5X4 165 {vk::Format::eUndefined, {}}, // ASTC_2D_5X4
166 {vk::Format::eUndefined, {}}, // BGRA8_SRGB 166 {vk::Format::eUndefined, {}}, // BGRA8_SRGB
167 {vk::Format::eBc1RgbaSrgbBlock, {}}, // DXT1_SRGB 167 {vk::Format::eBc1RgbaSrgbBlock, {}}, // DXT1_SRGB
168 {vk::Format::eUndefined, {}}, // DXT23_SRGB 168 {vk::Format::eBc2SrgbBlock, {}}, // DXT23_SRGB
169 {vk::Format::eBc3SrgbBlock, {}}, // DXT45_SRGB 169 {vk::Format::eBc3SrgbBlock, {}}, // DXT45_SRGB
170 {vk::Format::eBc7SrgbBlock, {}}, // BC7U_SRGB 170 {vk::Format::eBc7SrgbBlock, {}}, // BC7U_SRGB
171 {vk::Format::eR4G4B4A4UnormPack16, Attachable}, // R4G4B4A4U 171 {vk::Format::eR4G4B4A4UnormPack16, Attachable}, // R4G4B4A4U
@@ -364,6 +364,8 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
364 return vk::Format::eR8G8B8A8Uint; 364 return vk::Format::eR8G8B8A8Uint;
365 case Maxwell::VertexAttribute::Size::Size_32: 365 case Maxwell::VertexAttribute::Size::Size_32:
366 return vk::Format::eR32Uint; 366 return vk::Format::eR32Uint;
367 case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
368 return vk::Format::eR32G32B32A32Uint;
367 default: 369 default:
368 break; 370 break;
369 } 371 }
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index de712223e..886bde3b9 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -104,8 +104,11 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan
104 features.depthBiasClamp = true; 104 features.depthBiasClamp = true;
105 features.geometryShader = true; 105 features.geometryShader = true;
106 features.tessellationShader = true; 106 features.tessellationShader = true;
107 features.occlusionQueryPrecise = true;
107 features.fragmentStoresAndAtomics = true; 108 features.fragmentStoresAndAtomics = true;
108 features.shaderImageGatherExtended = true; 109 features.shaderImageGatherExtended = true;
110 features.shaderStorageImageReadWithoutFormat =
111 is_shader_storage_img_read_without_format_supported;
109 features.shaderStorageImageWriteWithoutFormat = true; 112 features.shaderStorageImageWriteWithoutFormat = true;
110 features.textureCompressionASTC_LDR = is_optimal_astc_supported; 113 features.textureCompressionASTC_LDR = is_optimal_astc_supported;
111 114
@@ -117,6 +120,10 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan
117 bit8_storage.uniformAndStorageBuffer8BitAccess = true; 120 bit8_storage.uniformAndStorageBuffer8BitAccess = true;
118 SetNext(next, bit8_storage); 121 SetNext(next, bit8_storage);
119 122
123 vk::PhysicalDeviceHostQueryResetFeaturesEXT host_query_reset;
124 host_query_reset.hostQueryReset = true;
125 SetNext(next, host_query_reset);
126
120 vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8; 127 vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
121 if (is_float16_supported) { 128 if (is_float16_supported) {
122 float16_int8.shaderFloat16 = true; 129 float16_int8.shaderFloat16 = true;
@@ -273,6 +280,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev
273 VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME, 280 VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
274 VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME, 281 VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
275 VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME, 282 VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
283 VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
276 }; 284 };
277 std::bitset<required_extensions.size()> available_extensions{}; 285 std::bitset<required_extensions.size()> available_extensions{};
278 286
@@ -340,6 +348,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev
340 std::make_pair(features.depthBiasClamp, "depthBiasClamp"), 348 std::make_pair(features.depthBiasClamp, "depthBiasClamp"),
341 std::make_pair(features.geometryShader, "geometryShader"), 349 std::make_pair(features.geometryShader, "geometryShader"),
342 std::make_pair(features.tessellationShader, "tessellationShader"), 350 std::make_pair(features.tessellationShader, "tessellationShader"),
351 std::make_pair(features.occlusionQueryPrecise, "occlusionQueryPrecise"),
343 std::make_pair(features.fragmentStoresAndAtomics, "fragmentStoresAndAtomics"), 352 std::make_pair(features.fragmentStoresAndAtomics, "fragmentStoresAndAtomics"),
344 std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"), 353 std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"),
345 std::make_pair(features.shaderStorageImageWriteWithoutFormat, 354 std::make_pair(features.shaderStorageImageWriteWithoutFormat,
@@ -376,7 +385,7 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami
376 } 385 }
377 }; 386 };
378 387
379 extensions.reserve(13); 388 extensions.reserve(14);
380 extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); 389 extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
381 extensions.push_back(VK_KHR_16BIT_STORAGE_EXTENSION_NAME); 390 extensions.push_back(VK_KHR_16BIT_STORAGE_EXTENSION_NAME);
382 extensions.push_back(VK_KHR_8BIT_STORAGE_EXTENSION_NAME); 391 extensions.push_back(VK_KHR_8BIT_STORAGE_EXTENSION_NAME);
@@ -384,6 +393,7 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami
384 extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); 393 extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME);
385 extensions.push_back(VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME); 394 extensions.push_back(VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME);
386 extensions.push_back(VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME); 395 extensions.push_back(VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME);
396 extensions.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
387 397
388 [[maybe_unused]] const bool nsight = 398 [[maybe_unused]] const bool nsight =
389 std::getenv("NVTX_INJECTION64_PATH") || std::getenv("NSIGHT_LAUNCHED"); 399 std::getenv("NVTX_INJECTION64_PATH") || std::getenv("NSIGHT_LAUNCHED");
@@ -457,6 +467,8 @@ void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceK
457 467
458void VKDevice::SetupFeatures(const vk::DispatchLoaderDynamic& dldi) { 468void VKDevice::SetupFeatures(const vk::DispatchLoaderDynamic& dldi) {
459 const auto supported_features{physical.getFeatures(dldi)}; 469 const auto supported_features{physical.getFeatures(dldi)};
470 is_shader_storage_img_read_without_format_supported =
471 supported_features.shaderStorageImageReadWithoutFormat;
460 is_optimal_astc_supported = IsOptimalAstcSupported(supported_features, dldi); 472 is_optimal_astc_supported = IsOptimalAstcSupported(supported_features, dldi);
461} 473}
462 474
@@ -531,6 +543,7 @@ std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperti
531 vk::Format::eBc6HUfloatBlock, 543 vk::Format::eBc6HUfloatBlock,
532 vk::Format::eBc6HSfloatBlock, 544 vk::Format::eBc6HSfloatBlock,
533 vk::Format::eBc1RgbaSrgbBlock, 545 vk::Format::eBc1RgbaSrgbBlock,
546 vk::Format::eBc2SrgbBlock,
534 vk::Format::eBc3SrgbBlock, 547 vk::Format::eBc3SrgbBlock,
535 vk::Format::eBc7SrgbBlock, 548 vk::Format::eBc7SrgbBlock,
536 vk::Format::eAstc4x4SrgbBlock, 549 vk::Format::eAstc4x4SrgbBlock,
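
vk_device.cpp now requires VK_EXT_host_query_reset and the occlusionQueryPrecise feature, so occlusion query slots can be reset from the CPU (vkResetQueryPoolEXT) instead of inside a command buffer. A raw-Vulkan sketch of the feature chaining, assuming the physical device already advertised the extension (real device creation also fills the queue and extension lists):

    #include <vulkan/vulkan.h>

    VkDeviceCreateInfo MakeDeviceCreateInfo(VkPhysicalDeviceHostQueryResetFeaturesEXT& reset,
                                            VkPhysicalDeviceFeatures& features) {
        reset = {};
        reset.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
        reset.hostQueryReset = VK_TRUE;            // enables vkResetQueryPoolEXT

        features = {};
        features.occlusionQueryPrecise = VK_TRUE;  // exact sample counts, not just any/none

        VkDeviceCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
        ci.pNext = &reset;                         // feature struct chained for vkCreateDevice
        ci.pEnabledFeatures = &features;
        // ppEnabledExtensionNames must also contain VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME.
        return ci;
    }
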
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index 72603f9f6..2c27ad730 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -122,6 +122,11 @@ public:
122 return properties.limits.maxPushConstantsSize; 122 return properties.limits.maxPushConstantsSize;
123 } 123 }
124 124
125 /// Returns true if the shaderStorageImageReadWithoutFormat feature is supported.
126 bool IsShaderStorageImageReadWithoutFormatSupported() const {
127 return is_shader_storage_img_read_without_format_supported;
128 }
129
125 /// Returns true if ASTC is natively supported. 130 /// Returns true if ASTC is natively supported.
126 bool IsOptimalAstcSupported() const { 131 bool IsOptimalAstcSupported() const {
127 return is_optimal_astc_supported; 132 return is_optimal_astc_supported;
@@ -227,6 +232,8 @@ private:
227 bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted. 232 bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted.
228 bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer. 233 bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
229 bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints. 234 bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints.
235 bool is_shader_storage_img_read_without_format_supported{}; ///< Support for shader storage
236 ///< image read without format
230 237
231 // Telemetry parameters 238 // Telemetry parameters
232 std::string vendor_name; ///< Device's driver name. 239 std::string vendor_name; ///< Device's driver name.
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
new file mode 100644
index 000000000..ffbf60dda
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -0,0 +1,122 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cstddef>
7#include <cstdint>
8#include <utility>
9#include <vector>
10
11#include "video_core/renderer_vulkan/declarations.h"
12#include "video_core/renderer_vulkan/vk_device.h"
13#include "video_core/renderer_vulkan/vk_query_cache.h"
14#include "video_core/renderer_vulkan/vk_resource_manager.h"
15#include "video_core/renderer_vulkan/vk_scheduler.h"
16
17namespace Vulkan {
18
19namespace {
20
21constexpr std::array QUERY_TARGETS = {vk::QueryType::eOcclusion};
22
23constexpr vk::QueryType GetTarget(VideoCore::QueryType type) {
24 return QUERY_TARGETS[static_cast<std::size_t>(type)];
25}
26
27} // Anonymous namespace
28
29QueryPool::QueryPool() : VKFencedPool{GROW_STEP} {}
30
31QueryPool::~QueryPool() = default;
32
33void QueryPool::Initialize(const VKDevice& device_, VideoCore::QueryType type_) {
34 device = &device_;
35 type = type_;
36}
37
38std::pair<vk::QueryPool, std::uint32_t> QueryPool::Commit(VKFence& fence) {
39 std::size_t index;
40 do {
41 index = CommitResource(fence);
42 } while (usage[index]);
43 usage[index] = true;
44
45 return {*pools[index / GROW_STEP], static_cast<std::uint32_t>(index % GROW_STEP)};
46}
47
48void QueryPool::Allocate(std::size_t begin, std::size_t end) {
49 usage.resize(end);
50
51 const auto dev = device->GetLogical();
52 const u32 size = static_cast<u32>(end - begin);
53 const vk::QueryPoolCreateInfo query_pool_ci({}, GetTarget(type), size, {});
54 pools.push_back(dev.createQueryPoolUnique(query_pool_ci, nullptr, device->GetDispatchLoader()));
55}
56
57void QueryPool::Reserve(std::pair<vk::QueryPool, std::uint32_t> query) {
58 const auto it =
59 std::find_if(std::begin(pools), std::end(pools),
60 [query_pool = query.first](auto& pool) { return query_pool == *pool; });
61 ASSERT(it != std::end(pools));
62
63 const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
64 usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
65}
66
67VKQueryCache::VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
68 const VKDevice& device, VKScheduler& scheduler)
69 : VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter,
70 QueryPool>{system, rasterizer},
71 device{device}, scheduler{scheduler} {
72 for (std::size_t i = 0; i < static_cast<std::size_t>(VideoCore::NumQueryTypes); ++i) {
73 query_pools[i].Initialize(device, static_cast<VideoCore::QueryType>(i));
74 }
75}
76
77VKQueryCache::~VKQueryCache() = default;
78
79std::pair<vk::QueryPool, std::uint32_t> VKQueryCache::AllocateQuery(VideoCore::QueryType type) {
80 return query_pools[static_cast<std::size_t>(type)].Commit(scheduler.GetFence());
81}
82
83void VKQueryCache::Reserve(VideoCore::QueryType type,
84 std::pair<vk::QueryPool, std::uint32_t> query) {
85 query_pools[static_cast<std::size_t>(type)].Reserve(query);
86}
87
88HostCounter::HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> dependency,
89 VideoCore::QueryType type)
90 : VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache},
91 type{type}, query{cache.AllocateQuery(type)}, ticks{cache.Scheduler().Ticks()} {
92 const auto dev = cache.Device().GetLogical();
93 cache.Scheduler().Record([dev, query = query](vk::CommandBuffer cmdbuf, auto& dld) {
94 dev.resetQueryPoolEXT(query.first, query.second, 1, dld);
95 cmdbuf.beginQuery(query.first, query.second, vk::QueryControlFlagBits::ePrecise, dld);
96 });
97}
98
99HostCounter::~HostCounter() {
100 cache.Reserve(type, query);
101}
102
103void HostCounter::EndQuery() {
104 cache.Scheduler().Record([query = query](auto cmdbuf, auto& dld) {
105 cmdbuf.endQuery(query.first, query.second, dld);
106 });
107}
108
109u64 HostCounter::BlockingQuery() const {
110 if (ticks >= cache.Scheduler().Ticks()) {
111 cache.Scheduler().Flush();
112 }
113
114 const auto dev = cache.Device().GetLogical();
115 const auto& dld = cache.Device().GetDispatchLoader();
116 u64 value;
117 dev.getQueryPoolResults(query.first, query.second, 1, sizeof(value), &value, sizeof(value),
118 vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait, dld);
119 return value;
120}
121
122} // namespace Vulkan
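
QueryPool grows its Vulkan pools in fixed blocks of GROW_STEP (512) queries and tracks per-slot usage in a flat bitmap, so a committed index maps to pool = index / GROW_STEP and slot = index % GROW_STEP; Reserve clears the bit again when a HostCounter is destroyed. A Vulkan-free model of that bookkeeping, with illustrative names:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    class SlotAllocator {
    public:
        static constexpr std::size_t GROW_STEP = 512;  // matches the patch

        // `flat_index` is what CommitResource would hand back from the fenced pool;
        // the real Commit() loops until it finds a slot that is not still in use.
        std::pair<std::size_t, std::uint32_t> Commit(std::size_t flat_index) {
            if (flat_index >= usage.size()) {
                usage.resize(flat_index + 1, false);   // Allocate() grows the bitmap
            }
            assert(!usage[flat_index]);
            usage[flat_index] = true;
            return {flat_index / GROW_STEP,
                    static_cast<std::uint32_t>(flat_index % GROW_STEP)};
        }

        void Reserve(std::pair<std::size_t, std::uint32_t> query) {
            usage[query.first * GROW_STEP + query.second] = false;  // slot can be reused
        }

    private:
        std::vector<bool> usage;  // one bit per query slot across all pools
    };
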
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
new file mode 100644
index 000000000..c3092ee96
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -0,0 +1,104 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <cstddef>
8#include <cstdint>
9#include <memory>
10#include <utility>
11#include <vector>
12
13#include "common/common_types.h"
14#include "video_core/query_cache.h"
15#include "video_core/renderer_vulkan/declarations.h"
16#include "video_core/renderer_vulkan/vk_resource_manager.h"
17
18namespace VideoCore {
19class RasterizerInterface;
20}
21
22namespace Vulkan {
23
24class CachedQuery;
25class HostCounter;
26class VKDevice;
27class VKQueryCache;
28class VKScheduler;
29
30using CounterStream = VideoCommon::CounterStreamBase<VKQueryCache, HostCounter>;
31
32class QueryPool final : public VKFencedPool {
33public:
34 explicit QueryPool();
35 ~QueryPool() override;
36
37 void Initialize(const VKDevice& device, VideoCore::QueryType type);
38
39 std::pair<vk::QueryPool, std::uint32_t> Commit(VKFence& fence);
40
41 void Reserve(std::pair<vk::QueryPool, std::uint32_t> query);
42
43protected:
44 void Allocate(std::size_t begin, std::size_t end) override;
45
46private:
47 static constexpr std::size_t GROW_STEP = 512;
48
49 const VKDevice* device = nullptr;
50 VideoCore::QueryType type = {};
51
52 std::vector<UniqueQueryPool> pools;
53 std::vector<bool> usage;
54};
55
56class VKQueryCache final
57 : public VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter,
58 QueryPool> {
59public:
60 explicit VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
61 const VKDevice& device, VKScheduler& scheduler);
62 ~VKQueryCache();
63
64 std::pair<vk::QueryPool, std::uint32_t> AllocateQuery(VideoCore::QueryType type);
65
66 void Reserve(VideoCore::QueryType type, std::pair<vk::QueryPool, std::uint32_t> query);
67
68 const VKDevice& Device() const noexcept {
69 return device;
70 }
71
72 VKScheduler& Scheduler() const noexcept {
73 return scheduler;
74 }
75
76private:
77 const VKDevice& device;
78 VKScheduler& scheduler;
79};
80
81class HostCounter final : public VideoCommon::HostCounterBase<VKQueryCache, HostCounter> {
82public:
83 explicit HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> dependency,
84 VideoCore::QueryType type);
85 ~HostCounter();
86
87 void EndQuery();
88
89private:
90 u64 BlockingQuery() const override;
91
92 VKQueryCache& cache;
93 const VideoCore::QueryType type;
94 const std::pair<vk::QueryPool, std::uint32_t> query;
95 const u64 ticks;
96};
97
98class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> {
99public:
100 explicit CachedQuery(VKQueryCache&, VideoCore::QueryType, VAddr cpu_addr, u8* host_ptr)
101 : VideoCommon::CachedQueryBase<HostCounter>{cpu_addr, host_ptr} {}
102};
103
104} // namespace Vulkan
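
A HostCounter begins its occlusion query when constructed (resetting the slot from the host first), EndQuery closes it, and BlockingQuery flushes the scheduler when the counter's command buffer has not been submitted yet before waiting on the result. A sketch of that final read in raw Vulkan (the project goes through Vulkan-Hpp and its dispatch loader instead):

    #include <cstdint>
    #include <vulkan/vulkan.h>

    // Waits for a single 64-bit occlusion result; VK_QUERY_RESULT_WAIT_BIT blocks
    // until the GPU has produced the value.
    std::uint64_t ReadOcclusionResult(VkDevice device, VkQueryPool pool, std::uint32_t query) {
        std::uint64_t value = 0;
        vkGetQueryPoolResults(device, pool, query, /*queryCount=*/1, sizeof(value), &value,
                              /*stride=*/sizeof(value),
                              VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
        return value;
    }
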
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index aada38702..3bf86da87 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -289,25 +289,19 @@ RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWind
289 staging_pool), 289 staging_pool),
290 pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue), 290 pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue),
291 buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool), 291 buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool),
292 sampler_cache(device) {} 292 sampler_cache(device), query_cache(system, *this, device, scheduler) {
293 293 scheduler.SetQueryCache(query_cache);
294RasterizerVulkan::~RasterizerVulkan() = default;
295
296bool RasterizerVulkan::DrawBatch(bool is_indexed) {
297 Draw(is_indexed, false);
298 return true;
299} 294}
300 295
301bool RasterizerVulkan::DrawMultiBatch(bool is_indexed) { 296RasterizerVulkan::~RasterizerVulkan() = default;
302 Draw(is_indexed, true);
303 return true;
304}
305 297
306void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { 298void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
307 MICROPROFILE_SCOPE(Vulkan_Drawing); 299 MICROPROFILE_SCOPE(Vulkan_Drawing);
308 300
309 FlushWork(); 301 FlushWork();
310 302
303 query_cache.UpdateCounters();
304
311 const auto& gpu = system.GPU().Maxwell3D(); 305 const auto& gpu = system.GPU().Maxwell3D();
312 GraphicsPipelineCacheKey key{GetFixedPipelineState(gpu.regs)}; 306 GraphicsPipelineCacheKey key{GetFixedPipelineState(gpu.regs)};
313 307
@@ -362,6 +356,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
362void RasterizerVulkan::Clear() { 356void RasterizerVulkan::Clear() {
363 MICROPROFILE_SCOPE(Vulkan_Clearing); 357 MICROPROFILE_SCOPE(Vulkan_Clearing);
364 358
359 query_cache.UpdateCounters();
360
365 const auto& gpu = system.GPU().Maxwell3D(); 361 const auto& gpu = system.GPU().Maxwell3D();
366 if (!system.GPU().Maxwell3D().ShouldExecute()) { 362 if (!system.GPU().Maxwell3D().ShouldExecute()) {
367 return; 363 return;
@@ -429,6 +425,8 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
429 sampled_views.clear(); 425 sampled_views.clear();
430 image_views.clear(); 426 image_views.clear();
431 427
428 query_cache.UpdateCounters();
429
432 const auto& launch_desc = system.GPU().KeplerCompute().launch_description; 430 const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
433 const ComputePipelineCacheKey key{ 431 const ComputePipelineCacheKey key{
434 code_addr, 432 code_addr,
@@ -471,17 +469,28 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
471 }); 469 });
472} 470}
473 471
472void RasterizerVulkan::ResetCounter(VideoCore::QueryType type) {
473 query_cache.ResetCounter(type);
474}
475
476void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
477 std::optional<u64> timestamp) {
478 query_cache.Query(gpu_addr, type, timestamp);
479}
480
474void RasterizerVulkan::FlushAll() {} 481void RasterizerVulkan::FlushAll() {}
475 482
476void RasterizerVulkan::FlushRegion(CacheAddr addr, u64 size) { 483void RasterizerVulkan::FlushRegion(CacheAddr addr, u64 size) {
477 texture_cache.FlushRegion(addr, size); 484 texture_cache.FlushRegion(addr, size);
478 buffer_cache.FlushRegion(addr, size); 485 buffer_cache.FlushRegion(addr, size);
486 query_cache.FlushRegion(addr, size);
479} 487}
480 488
481void RasterizerVulkan::InvalidateRegion(CacheAddr addr, u64 size) { 489void RasterizerVulkan::InvalidateRegion(CacheAddr addr, u64 size) {
482 texture_cache.InvalidateRegion(addr, size); 490 texture_cache.InvalidateRegion(addr, size);
483 pipeline_cache.InvalidateRegion(addr, size); 491 pipeline_cache.InvalidateRegion(addr, size);
484 buffer_cache.InvalidateRegion(addr, size); 492 buffer_cache.InvalidateRegion(addr, size);
493 query_cache.InvalidateRegion(addr, size);
485} 494}
486 495
487void RasterizerVulkan::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { 496void RasterizerVulkan::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
@@ -602,33 +611,34 @@ bool RasterizerVulkan::WalkAttachmentOverlaps(const CachedSurfaceView& attachmen
602std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffers( 611std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffers(
603 vk::RenderPass renderpass) { 612 vk::RenderPass renderpass) {
604 FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(), 613 FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
605 std::numeric_limits<u32>::max()}; 614 std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
606 615
607 const auto MarkAsModifiedAndPush = [&](const View& view) { 616 const auto try_push = [&](const View& view) {
608 if (view == nullptr) { 617 if (!view) {
609 return false; 618 return false;
610 } 619 }
611 key.views.push_back(view->GetHandle()); 620 key.views.push_back(view->GetHandle());
612 key.width = std::min(key.width, view->GetWidth()); 621 key.width = std::min(key.width, view->GetWidth());
613 key.height = std::min(key.height, view->GetHeight()); 622 key.height = std::min(key.height, view->GetHeight());
623 key.layers = std::min(key.layers, view->GetNumLayers());
614 return true; 624 return true;
615 }; 625 };
616 626
617 for (std::size_t index = 0; index < std::size(color_attachments); ++index) { 627 for (std::size_t index = 0; index < std::size(color_attachments); ++index) {
618 if (MarkAsModifiedAndPush(color_attachments[index])) { 628 if (try_push(color_attachments[index])) {
619 texture_cache.MarkColorBufferInUse(index); 629 texture_cache.MarkColorBufferInUse(index);
620 } 630 }
621 } 631 }
622 if (MarkAsModifiedAndPush(zeta_attachment)) { 632 if (try_push(zeta_attachment)) {
623 texture_cache.MarkDepthBufferInUse(); 633 texture_cache.MarkDepthBufferInUse();
624 } 634 }
625 635
626 const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key); 636 const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key);
627 auto& framebuffer = fbentry->second; 637 auto& framebuffer = fbentry->second;
628 if (is_cache_miss) { 638 if (is_cache_miss) {
629 const vk::FramebufferCreateInfo framebuffer_ci({}, key.renderpass, 639 const vk::FramebufferCreateInfo framebuffer_ci(
630 static_cast<u32>(key.views.size()), 640 {}, key.renderpass, static_cast<u32>(key.views.size()), key.views.data(), key.width,
631 key.views.data(), key.width, key.height, 1); 641 key.height, key.layers);
632 const auto dev = device.GetLogical(); 642 const auto dev = device.GetLogical();
633 const auto& dld = device.GetDispatchLoader(); 643 const auto& dld = device.GetDispatchLoader();
634 framebuffer = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld); 644 framebuffer = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 7be71e734..4dc8af6e8 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -24,6 +24,7 @@
24#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 24#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
25#include "video_core/renderer_vulkan/vk_memory_manager.h" 25#include "video_core/renderer_vulkan/vk_memory_manager.h"
26#include "video_core/renderer_vulkan/vk_pipeline_cache.h" 26#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
27#include "video_core/renderer_vulkan/vk_query_cache.h"
27#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 28#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
28#include "video_core/renderer_vulkan/vk_resource_manager.h" 29#include "video_core/renderer_vulkan/vk_resource_manager.h"
29#include "video_core/renderer_vulkan/vk_sampler_cache.h" 30#include "video_core/renderer_vulkan/vk_sampler_cache.h"
@@ -55,6 +56,7 @@ struct FramebufferCacheKey {
55 vk::RenderPass renderpass{}; 56 vk::RenderPass renderpass{};
56 u32 width = 0; 57 u32 width = 0;
57 u32 height = 0; 58 u32 height = 0;
59 u32 layers = 0;
58 ImageViewsPack views; 60 ImageViewsPack views;
59 61
60 std::size_t Hash() const noexcept { 62 std::size_t Hash() const noexcept {
@@ -65,12 +67,17 @@ struct FramebufferCacheKey {
65 } 67 }
66 boost::hash_combine(hash, width); 68 boost::hash_combine(hash, width);
67 boost::hash_combine(hash, height); 69 boost::hash_combine(hash, height);
70 boost::hash_combine(hash, layers);
68 return hash; 71 return hash;
69 } 72 }
70 73
71 bool operator==(const FramebufferCacheKey& rhs) const noexcept { 74 bool operator==(const FramebufferCacheKey& rhs) const noexcept {
72 return std::tie(renderpass, views, width, height) == 75 return std::tie(renderpass, views, width, height, layers) ==
73 std::tie(rhs.renderpass, rhs.views, rhs.width, rhs.height); 76 std::tie(rhs.renderpass, rhs.views, rhs.width, rhs.height, rhs.layers);
77 }
78
79 bool operator!=(const FramebufferCacheKey& rhs) const noexcept {
80 return !operator==(rhs);
74 } 81 }
75}; 82};
76 83
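
FramebufferCacheKey gains a layers field because layered render targets need vk::Framebuffer objects created with layers > 1; the rasterizer clamps width, height and layers to the minimum across the attached views, and the new field participates in both the hash and the equality check. A reduced sketch of the key (views omitted; boost::hash_combine as in the real header):

    #include <cstddef>
    #include <tuple>

    #include <boost/functional/hash.hpp>

    struct FramebufferKeySketch {
        std::size_t renderpass_id = 0;  // stand-in for the vk::RenderPass handle
        unsigned width = 0;
        unsigned height = 0;
        unsigned layers = 0;            // new field

        std::size_t Hash() const noexcept {
            std::size_t hash = 0;
            boost::hash_combine(hash, renderpass_id);
            boost::hash_combine(hash, width);
            boost::hash_combine(hash, height);
            boost::hash_combine(hash, layers);   // layers now contribute to the bucket
            return hash;
        }

        bool operator==(const FramebufferKeySketch& rhs) const noexcept {
            return std::tie(renderpass_id, width, height, layers) ==
                   std::tie(rhs.renderpass_id, rhs.width, rhs.height, rhs.layers);
        }
    };
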
@@ -96,7 +103,7 @@ struct ImageView {
96 vk::ImageLayout* layout = nullptr; 103 vk::ImageLayout* layout = nullptr;
97}; 104};
98 105
99class RasterizerVulkan : public VideoCore::RasterizerAccelerated { 106class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
100public: 107public:
101 explicit RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& render_window, 108 explicit RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& render_window,
102 VKScreenInfo& screen_info, const VKDevice& device, 109 VKScreenInfo& screen_info, const VKDevice& device,
@@ -104,10 +111,11 @@ public:
104 VKScheduler& scheduler); 111 VKScheduler& scheduler);
105 ~RasterizerVulkan() override; 112 ~RasterizerVulkan() override;
106 113
107 bool DrawBatch(bool is_indexed) override; 114 void Draw(bool is_indexed, bool is_instanced) override;
108 bool DrawMultiBatch(bool is_indexed) override;
109 void Clear() override; 115 void Clear() override;
110 void DispatchCompute(GPUVAddr code_addr) override; 116 void DispatchCompute(GPUVAddr code_addr) override;
117 void ResetCounter(VideoCore::QueryType type) override;
118 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
111 void FlushAll() override; 119 void FlushAll() override;
112 void FlushRegion(CacheAddr addr, u64 size) override; 120 void FlushRegion(CacheAddr addr, u64 size) override;
113 void InvalidateRegion(CacheAddr addr, u64 size) override; 121 void InvalidateRegion(CacheAddr addr, u64 size) override;
@@ -140,8 +148,6 @@ private:
140 148
141 static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8; 149 static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8;
142 150
143 void Draw(bool is_indexed, bool is_instanced);
144
145 void FlushWork(); 151 void FlushWork();
146 152
147 Texceptions UpdateAttachments(); 153 Texceptions UpdateAttachments();
@@ -247,6 +253,7 @@ private:
247 VKPipelineCache pipeline_cache; 253 VKPipelineCache pipeline_cache;
248 VKBufferCache buffer_cache; 254 VKBufferCache buffer_cache;
249 VKSamplerCache sampler_cache; 255 VKSamplerCache sampler_cache;
256 VKQueryCache query_cache;
250 257
251 std::array<View, Maxwell::NumRenderTargets> color_attachments; 258 std::array<View, Maxwell::NumRenderTargets> color_attachments;
252 View zeta_attachment; 259 View zeta_attachment;
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 0a8ec8398..204b7c39c 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -23,7 +23,14 @@ static std::optional<vk::BorderColor> TryConvertBorderColor(std::array<float, 4>
23 } else if (color == std::array<float, 4>{1, 1, 1, 1}) { 23 } else if (color == std::array<float, 4>{1, 1, 1, 1}) {
24 return vk::BorderColor::eFloatOpaqueWhite; 24 return vk::BorderColor::eFloatOpaqueWhite;
25 } else { 25 } else {
26 return {}; 26 if (color[0] + color[1] + color[2] > 1.35f) {
27 // If the color channels average brighter than roughly 0.5, use a white border
28 return vk::BorderColor::eFloatOpaqueWhite;
29 }
30 if (color[3] > 0.5f) {
31 return vk::BorderColor::eFloatOpaqueBlack;
32 }
33 return vk::BorderColor::eFloatTransparentBlack;
27 } 34 }
28} 35}
29 36
@@ -37,8 +44,6 @@ UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc)
37 44
38 const auto border_color{tsc.GetBorderColor()}; 45 const auto border_color{tsc.GetBorderColor()};
39 const auto vk_border_color{TryConvertBorderColor(border_color)}; 46 const auto vk_border_color{TryConvertBorderColor(border_color)};
40 UNIMPLEMENTED_IF_MSG(!vk_border_color, "Unimplemented border color {} {} {} {}",
41 border_color[0], border_color[1], border_color[2], border_color[3]);
42 47
43 constexpr bool unnormalized_coords{false}; 48 constexpr bool unnormalized_coords{false};
44 49
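
Instead of hitting UNIMPLEMENTED for arbitrary border colors, the sampler cache now approximates them with the closest fixed Vulkan border color: an RGB sum above 1.35 (roughly 0.45 average brightness) maps to opaque white, otherwise alpha above 0.5 picks opaque black and anything else transparent black. The heuristic isolated below, with an illustrative enum standing in for vk::BorderColor:

    #include <array>

    enum class Border { TransparentBlack, OpaqueBlack, OpaqueWhite };

    Border ApproximateBorderColor(const std::array<float, 4>& color) {
        if (color[0] + color[1] + color[2] > 1.35f) {
            return Border::OpaqueWhite;      // bright enough to pass as white
        }
        return color[3] > 0.5f ? Border::OpaqueBlack : Border::TransparentBlack;
    }
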
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index d66133ad1..92bd6c344 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -6,6 +6,7 @@
6#include "common/microprofile.h" 6#include "common/microprofile.h"
7#include "video_core/renderer_vulkan/declarations.h" 7#include "video_core/renderer_vulkan/declarations.h"
8#include "video_core/renderer_vulkan/vk_device.h" 8#include "video_core/renderer_vulkan/vk_device.h"
9#include "video_core/renderer_vulkan/vk_query_cache.h"
9#include "video_core/renderer_vulkan/vk_resource_manager.h" 10#include "video_core/renderer_vulkan/vk_resource_manager.h"
10#include "video_core/renderer_vulkan/vk_scheduler.h" 11#include "video_core/renderer_vulkan/vk_scheduler.h"
11 12
@@ -139,6 +140,8 @@ void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
139} 140}
140 141
141void VKScheduler::AllocateNewContext() { 142void VKScheduler::AllocateNewContext() {
143 ++ticks;
144
142 std::unique_lock lock{mutex}; 145 std::unique_lock lock{mutex};
143 current_fence = next_fence; 146 current_fence = next_fence;
144 next_fence = &resource_manager.CommitFence(); 147 next_fence = &resource_manager.CommitFence();
@@ -146,6 +149,10 @@ void VKScheduler::AllocateNewContext() {
146 current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence); 149 current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
147 current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, 150 current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit},
148 device.GetDispatchLoader()); 151 device.GetDispatchLoader());
152 // Enable counters once again. These are disabled when a command buffer is finished.
153 if (query_cache) {
154 query_cache->UpdateCounters();
155 }
149} 156}
150 157
151void VKScheduler::InvalidateState() { 158void VKScheduler::InvalidateState() {
@@ -159,6 +166,7 @@ void VKScheduler::InvalidateState() {
159} 166}
160 167
161void VKScheduler::EndPendingOperations() { 168void VKScheduler::EndPendingOperations() {
169 query_cache->DisableStreams();
162 EndRenderPass(); 170 EndRenderPass();
163} 171}
164 172
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index bcdffbba0..62fd7858b 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <atomic>
7#include <condition_variable> 8#include <condition_variable>
8#include <memory> 9#include <memory>
9#include <optional> 10#include <optional>
@@ -18,6 +19,7 @@ namespace Vulkan {
18 19
19class VKDevice; 20class VKDevice;
20class VKFence; 21class VKFence;
22class VKQueryCache;
21class VKResourceManager; 23class VKResourceManager;
22 24
23class VKFenceView { 25class VKFenceView {
@@ -67,6 +69,11 @@ public:
67 /// Binds a pipeline to the current execution context. 69 /// Binds a pipeline to the current execution context.
68 void BindGraphicsPipeline(vk::Pipeline pipeline); 70 void BindGraphicsPipeline(vk::Pipeline pipeline);
69 71
72 /// Assigns the query cache.
73 void SetQueryCache(VKQueryCache& query_cache_) {
74 query_cache = &query_cache_;
75 }
76
70 /// Returns true when viewports have been set in the current command buffer. 77 /// Returns true when viewports have been set in the current command buffer.
71 bool TouchViewports() { 78 bool TouchViewports() {
72 return std::exchange(state.viewports, true); 79 return std::exchange(state.viewports, true);
@@ -112,6 +119,11 @@ public:
112 return current_fence; 119 return current_fence;
113 } 120 }
114 121
122 /// Returns the current command buffer tick.
123 u64 Ticks() const {
124 return ticks;
125 }
126
115private: 127private:
116 class Command { 128 class Command {
117 public: 129 public:
@@ -205,6 +217,8 @@ private:
205 217
206 const VKDevice& device; 218 const VKDevice& device;
207 VKResourceManager& resource_manager; 219 VKResourceManager& resource_manager;
220 VKQueryCache* query_cache = nullptr;
221
208 vk::CommandBuffer current_cmdbuf; 222 vk::CommandBuffer current_cmdbuf;
209 VKFence* current_fence = nullptr; 223 VKFence* current_fence = nullptr;
210 VKFence* next_fence = nullptr; 224 VKFence* next_fence = nullptr;
@@ -227,6 +241,7 @@ private:
227 Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve; 241 Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
228 std::mutex mutex; 242 std::mutex mutex;
229 std::condition_variable cv; 243 std::condition_variable cv;
244 std::atomic<u64> ticks = 0;
230 bool quit = false; 245 bool quit = false;
231}; 246};
232 247
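
The scheduler's ticks counter increases once per command buffer (AllocateNewContext), and each HostCounter records the tick it was written in; BlockingQuery only forces a scheduler flush when that tick has not been submitted yet, because a query recorded in the still-open command buffer can never complete otherwise. A minimal model of that check, with illustrative names:

    #include <atomic>
    #include <cstdint>

    struct TickedScheduler {
        std::atomic<std::uint64_t> ticks{0};
        void AllocateNewContext() { ++ticks; }  // one tick per command buffer
        void Flush() { /* submit the command buffer under construction */ }
    };

    void WaitPolicy(TickedScheduler& scheduler, std::uint64_t counter_tick) {
        // The counter lives in the command buffer that is still being recorded:
        // submit it first, otherwise the blocking query result never arrives.
        if (counter_tick >= scheduler.ticks) {
            scheduler.Flush();
        }
        // ...then block on the result (see the ReadOcclusionResult sketch above).
    }
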
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 24a658dce..2da622d15 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -86,6 +86,7 @@ struct AttributeType {
86 86
87struct VertexIndices { 87struct VertexIndices {
88 std::optional<u32> position; 88 std::optional<u32> position;
89 std::optional<u32> layer;
89 std::optional<u32> viewport; 90 std::optional<u32> viewport;
90 std::optional<u32> point_size; 91 std::optional<u32> point_size;
91 std::optional<u32> clip_distances; 92 std::optional<u32> clip_distances;
@@ -275,21 +276,29 @@ public:
275 AddCapability(spv::Capability::ImageGatherExtended); 276 AddCapability(spv::Capability::ImageGatherExtended);
276 AddCapability(spv::Capability::SampledBuffer); 277 AddCapability(spv::Capability::SampledBuffer);
277 AddCapability(spv::Capability::StorageImageWriteWithoutFormat); 278 AddCapability(spv::Capability::StorageImageWriteWithoutFormat);
279 AddCapability(spv::Capability::DrawParameters);
278 AddCapability(spv::Capability::SubgroupBallotKHR); 280 AddCapability(spv::Capability::SubgroupBallotKHR);
279 AddCapability(spv::Capability::SubgroupVoteKHR); 281 AddCapability(spv::Capability::SubgroupVoteKHR);
280 AddExtension("SPV_KHR_shader_ballot"); 282 AddExtension("SPV_KHR_shader_ballot");
281 AddExtension("SPV_KHR_subgroup_vote"); 283 AddExtension("SPV_KHR_subgroup_vote");
282 AddExtension("SPV_KHR_storage_buffer_storage_class"); 284 AddExtension("SPV_KHR_storage_buffer_storage_class");
283 AddExtension("SPV_KHR_variable_pointers"); 285 AddExtension("SPV_KHR_variable_pointers");
286 AddExtension("SPV_KHR_shader_draw_parameters");
284 287
285 if (ir.UsesViewportIndex()) { 288 if (ir.UsesLayer() || ir.UsesViewportIndex()) {
286 AddCapability(spv::Capability::MultiViewport); 289 if (ir.UsesViewportIndex()) {
287 if (device.IsExtShaderViewportIndexLayerSupported()) { 290 AddCapability(spv::Capability::MultiViewport);
291 }
292 if (stage != ShaderType::Geometry && device.IsExtShaderViewportIndexLayerSupported()) {
288 AddExtension("SPV_EXT_shader_viewport_index_layer"); 293 AddExtension("SPV_EXT_shader_viewport_index_layer");
289 AddCapability(spv::Capability::ShaderViewportIndexLayerEXT); 294 AddCapability(spv::Capability::ShaderViewportIndexLayerEXT);
290 } 295 }
291 } 296 }
292 297
298 if (device.IsShaderStorageImageReadWithoutFormatSupported()) {
299 AddCapability(spv::Capability::StorageImageReadWithoutFormat);
300 }
301
293 if (device.IsFloat16Supported()) { 302 if (device.IsFloat16Supported()) {
294 AddCapability(spv::Capability::Float16); 303 AddCapability(spv::Capability::Float16);
295 } 304 }
@@ -492,9 +501,11 @@ private:
492 interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex"))); 501 interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex")));
493 502
494 // Declare input attributes 503 // Declare input attributes
495 vertex_index = DeclareInputBuiltIn(spv::BuiltIn::VertexIndex, t_in_uint, "vertex_index"); 504 vertex_index = DeclareInputBuiltIn(spv::BuiltIn::VertexIndex, t_in_int, "vertex_index");
496 instance_index = 505 instance_index =
497 DeclareInputBuiltIn(spv::BuiltIn::InstanceIndex, t_in_uint, "instance_index"); 506 DeclareInputBuiltIn(spv::BuiltIn::InstanceIndex, t_in_int, "instance_index");
507 base_vertex = DeclareInputBuiltIn(spv::BuiltIn::BaseVertex, t_in_int, "base_vertex");
508 base_instance = DeclareInputBuiltIn(spv::BuiltIn::BaseInstance, t_in_int, "base_instance");
498 } 509 }
499 510
500 void DeclareTessControl() { 511 void DeclareTessControl() {
@@ -920,13 +931,22 @@ private:
920 VertexIndices indices; 931 VertexIndices indices;
921 indices.position = AddBuiltIn(t_float4, spv::BuiltIn::Position, "position"); 932 indices.position = AddBuiltIn(t_float4, spv::BuiltIn::Position, "position");
922 933
934 if (ir.UsesLayer()) {
935 if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) {
936 indices.layer = AddBuiltIn(t_int, spv::BuiltIn::Layer, "layer");
937 } else {
938 LOG_ERROR(
939 Render_Vulkan,
940 "Shader requires Layer but it's not supported on this stage with this device.");
941 }
942 }
943
923 if (ir.UsesViewportIndex()) { 944 if (ir.UsesViewportIndex()) {
924 if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) { 945 if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) {
925 indices.viewport = AddBuiltIn(t_int, spv::BuiltIn::ViewportIndex, "viewport_index"); 946 indices.viewport = AddBuiltIn(t_int, spv::BuiltIn::ViewportIndex, "viewport_index");
926 } else { 947 } else {
927 LOG_ERROR(Render_Vulkan, 948 LOG_ERROR(Render_Vulkan, "Shader requires ViewportIndex but it's not supported on "
928 "Shader requires ViewportIndex but it's not supported on this " 949 "this stage with this device.");
929 "stage with this device.");
930 } 950 }
931 } 951 }
932 952
@@ -1068,9 +1088,12 @@ private:
1068 return {OpLoad(t_float, AccessElement(t_in_float, tess_coord, element)), 1088 return {OpLoad(t_float, AccessElement(t_in_float, tess_coord, element)),
1069 Type::Float}; 1089 Type::Float};
1070 case 2: 1090 case 2:
1071 return {OpLoad(t_uint, instance_index), Type::Uint}; 1091 return {
1092 OpISub(t_int, OpLoad(t_int, instance_index), OpLoad(t_int, base_instance)),
1093 Type::Int};
1072 case 3: 1094 case 3:
1073 return {OpLoad(t_uint, vertex_index), Type::Uint}; 1095 return {OpISub(t_int, OpLoad(t_int, vertex_index), OpLoad(t_int, base_vertex)),
1096 Type::Int};
1074 } 1097 }
1075 UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element); 1098 UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element);
1076 return {Constant(t_uint, 0U), Type::Uint}; 1099 return {Constant(t_uint, 0U), Type::Uint};
@@ -1285,6 +1308,13 @@ private:
1285 } 1308 }
1286 case Attribute::Index::LayerViewportPointSize: 1309 case Attribute::Index::LayerViewportPointSize:
1287 switch (element) { 1310 switch (element) {
1311 case 1: {
1312 if (!out_indices.layer) {
1313 return {};
1314 }
1315 const u32 index = out_indices.layer.value();
1316 return {AccessElement(t_out_int, out_vertex, index), Type::Int};
1317 }
1288 case 2: { 1318 case 2: {
1289 if (!out_indices.viewport) { 1319 if (!out_indices.viewport) {
1290 return {}; 1320 return {};
@@ -1355,6 +1385,11 @@ private:
1355 UNIMPLEMENTED(); 1385 UNIMPLEMENTED();
1356 } 1386 }
1357 1387
1388 if (!target.id) {
1389 // On failure the target has a null id; skip the store.
1390 return {};
1391 }
1392
1358 OpStore(target.id, As(Visit(src), target.type)); 1393 OpStore(target.id, As(Visit(src), target.type));
1359 return {}; 1394 return {};
1360 } 1395 }
@@ -1748,8 +1783,16 @@ private:
1748 } 1783 }
1749 1784
1750 Expression ImageLoad(Operation operation) { 1785 Expression ImageLoad(Operation operation) {
1751 UNIMPLEMENTED(); 1786 if (!device.IsShaderStorageImageReadWithoutFormatSupported()) {
1752 return {}; 1787 return {v_float_zero, Type::Float};
1788 }
1789
1790 const auto& meta{std::get<MetaImage>(operation.GetMeta())};
1791
1792 const Id coords = GetCoordinates(operation, Type::Int);
1793 const Id texel = OpImageRead(t_uint4, GetImage(operation), coords);
1794
1795 return {OpCompositeExtract(t_uint, texel, meta.element), Type::Uint};
1753 } 1796 }
1754 1797
1755 Expression ImageStore(Operation operation) { 1798 Expression ImageStore(Operation operation) {
@@ -2542,6 +2585,8 @@ private:
2542 2585
2543 Id instance_index{}; 2586 Id instance_index{};
2544 Id vertex_index{}; 2587 Id vertex_index{};
2588 Id base_instance{};
2589 Id base_vertex{};
2545 std::array<Id, Maxwell::NumRenderTargets> frag_colors{}; 2590 std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
2546 Id frag_depth{}; 2591 Id frag_depth{};
2547 Id frag_coord{}; 2592 Id frag_coord{};
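
Two decompiler changes are bundled in the diff above: ImageLoad now emits OpImageRead only when the device reports shaderStorageImageReadWithoutFormat (returning zero otherwise), and the vertex stage declares gl_BaseVertex/gl_BaseInstance through the DrawParameters capability so Maxwell's raw IDs can be recovered, since Vulkan's gl_VertexIndex and gl_InstanceIndex include the base offsets passed to the draw call. A scalar model of what the emitted OpISub computes (plain C++, not SPIR-V):

    #include <cstdint>

    // Elements 2/3 of TessCoordInstanceIDVertexID in the patch above.
    std::int32_t GuestInstanceId(std::int32_t instance_index, std::int32_t base_instance) {
        return instance_index - base_instance;  // OpISub(instance_index, base_instance)
    }

    std::int32_t GuestVertexId(std::int32_t vertex_index, std::int32_t base_vertex) {
        return vertex_index - base_vertex;      // OpISub(vertex_index, base_vertex)
    }
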
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index d3edbe80c..22e3d34de 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -151,6 +151,10 @@ public:
151 return params.GetMipHeight(base_level); 151 return params.GetMipHeight(base_level);
152 } 152 }
153 153
154 u32 GetNumLayers() const {
155 return num_layers;
156 }
157
154 bool IsBufferView() const { 158 bool IsBufferView() const {
155 return buffer_view; 159 return buffer_view;
156 } 160 }
diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp
index 0eeb75559..6ead42070 100644
--- a/src/video_core/shader/decode/conversion.cpp
+++ b/src/video_core/shader/decode/conversion.cpp
@@ -83,14 +83,14 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
83 83
84 const bool input_signed = instr.conversion.is_input_signed; 84 const bool input_signed = instr.conversion.is_input_signed;
85 85
86 if (instr.conversion.src_size == Register::Size::Byte) { 86 if (const u32 offset = static_cast<u32>(instr.conversion.int_src.selector); offset > 0) {
87 const u32 offset = static_cast<u32>(instr.conversion.int_src.selector) * 8; 87 ASSERT(instr.conversion.src_size == Register::Size::Byte ||
88 if (offset > 0) { 88 instr.conversion.src_size == Register::Size::Short);
89 value = SignedOperation(OperationCode::ILogicalShiftRight, input_signed, 89 if (instr.conversion.src_size == Register::Size::Short) {
90 std::move(value), Immediate(offset)); 90 ASSERT(offset == 0 || offset == 2);
91 } 91 }
92 } else { 92 value = SignedOperation(OperationCode::ILogicalShiftRight, input_signed,
93 UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0); 93 std::move(value), Immediate(offset * 8));
94 } 94 }
95 95
96 value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed); 96 value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed);
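
The conversion decoder now honours the sub-register selector for both Byte and Short sources: the selector is byte-granular, so the register is shifted right by selector * 8 bits before ConvertIntegerSize truncates and sign- or zero-extends it (only selectors 0 and 2 are legal for Short). A scalar approximation of the net effect, under the assumption that the shift plus extension behave as below:

    #include <cstdint>

    std::uint32_t ExtractConversionSource(std::uint32_t reg, unsigned selector_bytes,
                                          unsigned size_bits, bool is_signed) {
        const std::uint32_t shifted = reg >> (selector_bytes * 8);   // byte selector
        const std::uint32_t mask =
            size_bits >= 32 ? ~0u : ((1u << size_bits) - 1u);
        std::uint32_t value = shifted & mask;                        // truncate to src size
        if (is_signed && size_bits < 32 && (value & (1u << (size_bits - 1)))) {
            value |= ~mask;                                          // sign-extend
        }
        return value;
    }
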
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 351c8c2f1..bee7d8cad 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -522,68 +522,53 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
                                Node array, Node depth_compare, u32 bias_offset,
                                std::vector<Node> aoffi,
                                std::optional<Tegra::Shader::Register> bindless_reg) {
-    const auto is_array = static_cast<bool>(array);
-    const auto is_shadow = static_cast<bool>(depth_compare);
+    const bool is_array = array != nullptr;
+    const bool is_shadow = depth_compare != nullptr;
     const bool is_bindless = bindless_reg.has_value();
 
-    UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
-                             (texture_type == TextureType::TextureCube && is_array && is_shadow),
-                         "This method is not supported.");
+    UNIMPLEMENTED_IF(texture_type == TextureType::TextureCube && is_array && is_shadow);
+    ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow,
+               "Illegal texture type");
 
     const SamplerInfo info{texture_type, is_array, is_shadow, false};
-    Node index_var{};
+    Node index_var;
     const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, index_var, info)
                                          : GetSampler(instr.sampler, info);
-    Node4 values;
-    if (sampler == nullptr) {
-        for (u32 element = 0; element < values.size(); ++element) {
-            values[element] = Immediate(0);
-        }
-        return values;
+    if (!sampler) {
+        return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};
     }
 
     const bool lod_needed = process_mode == TextureProcessMode::LZ ||
                             process_mode == TextureProcessMode::LL ||
                             process_mode == TextureProcessMode::LLA;
-
-    // LOD selection (either via bias or explicit textureLod) not supported in GL for
-    // sampler2DArrayShadow and samplerCubeArrayShadow.
-    const bool gl_lod_supported =
-        !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
-          (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));
-
-    const OperationCode read_method =
-        (lod_needed && gl_lod_supported) ? OperationCode::TextureLod : OperationCode::Texture;
-
-    UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported);
+    const OperationCode opcode = lod_needed ? OperationCode::TextureLod : OperationCode::Texture;
 
     Node bias;
     Node lod;
-    if (process_mode != TextureProcessMode::None && gl_lod_supported) {
-        switch (process_mode) {
-        case TextureProcessMode::LZ:
-            lod = Immediate(0.0f);
-            break;
-        case TextureProcessMode::LB:
-            // If present, lod or bias are always stored in the register
-            // indexed by the gpr20 field with an offset depending on the
-            // usage of the other registers
-            bias = GetRegister(instr.gpr20.Value() + bias_offset);
-            break;
-        case TextureProcessMode::LL:
-            lod = GetRegister(instr.gpr20.Value() + bias_offset);
-            break;
-        default:
-            UNIMPLEMENTED_MSG("Unimplemented process mode={}", static_cast<u32>(process_mode));
-            break;
-        }
+    switch (process_mode) {
+    case TextureProcessMode::None:
+        break;
+    case TextureProcessMode::LZ:
+        lod = Immediate(0.0f);
+        break;
+    case TextureProcessMode::LB:
+        // If present, lod or bias are always stored in the register indexed by the gpr20 field with
+        // an offset depending on the usage of the other registers.
+        bias = GetRegister(instr.gpr20.Value() + bias_offset);
+        break;
+    case TextureProcessMode::LL:
+        lod = GetRegister(instr.gpr20.Value() + bias_offset);
+        break;
+    default:
+        UNIMPLEMENTED_MSG("Unimplemented process mode={}", static_cast<u32>(process_mode));
+        break;
     }
 
+    Node4 values;
     for (u32 element = 0; element < values.size(); ++element) {
-        auto copy_coords = coords;
         MetaTexture meta{*sampler, array, depth_compare, aoffi, {}, {}, bias,
                          lod, {}, element, index_var};
-        values[element] = Operation(read_method, meta, std::move(copy_coords));
+        values[element] = Operation(opcode, meta, coords);
     }
 
     return values;
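With the GL-specific gl_lod_supported carve-out gone, the opcode now follows only from the process mode: LZ/LL/LLA request an explicit LOD, LB requests a bias, and both LB and LL read their operand from gpr20 plus bias_offset. A reduced sketch of that classification (the enum values come from the hunk above; the helper and struct are hypothetical and exist only for illustration):

    enum class TextureProcessMode { None, LZ, LB, LL, LLA };

    struct LodSelection {
        bool explicit_lod; // emit TextureLod instead of Texture
        bool reads_gpr20;  // bias (LB) or lod (LL) comes from gpr20 + bias_offset
    };

    constexpr LodSelection ClassifyProcessMode(TextureProcessMode mode) {
        switch (mode) {
        case TextureProcessMode::LZ:
            return {true, false};  // lod forced to 0
        case TextureProcessMode::LB:
            return {false, true};  // bias register
        case TextureProcessMode::LL:
            return {true, true};   // explicit lod register
        case TextureProcessMode::LLA:
            return {true, false};  // selects TextureLod, but operands remain unimplemented above
        default:
            return {false, false}; // None
        }
    }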
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 84469b7ba..002df414f 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -277,6 +277,10 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
             SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
                         staging_buffer.data() + host_offset, level);
         }
+    } else if (params.IsBuffer()) {
+        // Buffers don't have pitch or any fancy layout property. We can just memcpy them to guest
+        // memory.
+        std::memcpy(host_ptr, staging_buffer.data(), guest_memory_size);
     } else {
         ASSERT(params.target == SurfaceTarget::Texture2D);
         ASSERT(params.num_levels == 1);
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 38b3a4ba8..f00839313 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -84,19 +84,16 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
     if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) {
         switch (params.pixel_format) {
         case PixelFormat::R16U:
-        case PixelFormat::R16F: {
+        case PixelFormat::R16F:
             params.pixel_format = PixelFormat::Z16;
             break;
-        }
-        case PixelFormat::R32F: {
+        case PixelFormat::R32F:
             params.pixel_format = PixelFormat::Z32F;
             break;
-        }
-        default: {
+        default:
             UNIMPLEMENTED_MSG("Unimplemented shadow convert format: {}",
                               static_cast<u32>(params.pixel_format));
         }
-        }
         params.type = GetFormatType(params.pixel_format);
     }
     params.type = GetFormatType(params.pixel_format);
@@ -168,27 +165,29 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
     return params;
 }
 
-SurfaceParams SurfaceParams::CreateForDepthBuffer(
-    Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
-    u32 block_width, u32 block_height, u32 block_depth,
-    Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
+SurfaceParams SurfaceParams::CreateForDepthBuffer(Core::System& system) {
+    const auto& regs = system.GPU().Maxwell3D().regs;
+    regs.zeta_width, regs.zeta_height, regs.zeta.format, regs.zeta.memory_layout.type;
     SurfaceParams params;
-    params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
+    params.is_tiled = regs.zeta.memory_layout.type ==
+                      Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
     params.srgb_conversion = false;
-    params.block_width = std::min(block_width, 5U);
-    params.block_height = std::min(block_height, 5U);
-    params.block_depth = std::min(block_depth, 5U);
+    params.block_width = std::min(regs.zeta.memory_layout.block_width.Value(), 5U);
+    params.block_height = std::min(regs.zeta.memory_layout.block_height.Value(), 5U);
+    params.block_depth = std::min(regs.zeta.memory_layout.block_depth.Value(), 5U);
     params.tile_width_spacing = 1;
-    params.pixel_format = PixelFormatFromDepthFormat(format);
+    params.pixel_format = PixelFormatFromDepthFormat(regs.zeta.format);
     params.type = GetFormatType(params.pixel_format);
-    params.width = zeta_width;
-    params.height = zeta_height;
-    params.target = SurfaceTarget::Texture2D;
-    params.depth = 1;
+    params.width = regs.zeta_width;
+    params.height = regs.zeta_height;
     params.pitch = 0;
     params.num_levels = 1;
     params.emulated_levels = 1;
-    params.is_layered = false;
+
+    const bool is_layered = regs.zeta_layers > 1 && params.block_depth == 0;
+    params.is_layered = is_layered;
+    params.target = is_layered ? SurfaceTarget::Texture2DArray : SurfaceTarget::Texture2D;
+    params.depth = is_layered ? regs.zeta_layers.Value() : 1U;
     return params;
 }
 
@@ -214,11 +213,13 @@ SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::siz
         params.width = params.pitch / bpp;
     }
     params.height = config.height;
-    params.depth = 1;
-    params.target = SurfaceTarget::Texture2D;
     params.num_levels = 1;
     params.emulated_levels = 1;
-    params.is_layered = false;
+
+    const bool is_layered = config.layers > 1 && params.block_depth == 0;
+    params.is_layered = is_layered;
+    params.depth = is_layered ? config.layers.Value() : 1;
+    params.target = is_layered ? SurfaceTarget::Texture2DArray : SurfaceTarget::Texture2D;
     return params;
 }
 
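Both factories now share the same layering rule: a render target becomes a 2D array when more than one layer is attached and the (already clamped) block depth is zero, otherwise it stays a plain 2D texture with depth 1. A compact sketch of that decision using stand-in types (the real SurfaceTarget enum and register structs live in the texture cache headers):

    #include <cstdint>

    enum class SurfaceTarget { Texture2D, Texture2DArray };

    struct LayeredChoice {
        SurfaceTarget target;
        std::uint32_t depth;
        bool is_layered;
    };

    // layers comes from regs.zeta_layers (depth buffers) or config.layers (framebuffers);
    // block_depth is the value already clamped with std::min(..., 5U).
    constexpr LayeredChoice ChooseTarget(std::uint32_t layers, std::uint32_t block_depth) {
        const bool is_layered = layers > 1 && block_depth == 0;
        return {is_layered ? SurfaceTarget::Texture2DArray : SurfaceTarget::Texture2D,
                is_layered ? layers : 1u, is_layered};
    }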
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
index 9256fd6d9..995cc3818 100644
--- a/src/video_core/texture_cache/surface_params.h
+++ b/src/video_core/texture_cache/surface_params.h
@@ -35,10 +35,7 @@ public:
                                         const VideoCommon::Shader::Image& entry);
 
     /// Creates SurfaceCachedParams for a depth buffer configuration.
-    static SurfaceParams CreateForDepthBuffer(
-        Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
-        u32 block_width, u32 block_height, u32 block_depth,
-        Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
+    static SurfaceParams CreateForDepthBuffer(Core::System& system);
 
     /// Creates SurfaceCachedParams from a framebuffer configuration.
     static SurfaceParams CreateForFramebuffer(Core::System& system, std::size_t index);
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index f4c015635..c70e4aec2 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -160,10 +160,7 @@ public:
             SetEmptyDepthBuffer();
             return {};
         }
-        const auto depth_params{SurfaceParams::CreateForDepthBuffer(
-            system, regs.zeta_width, regs.zeta_height, regs.zeta.format,
-            regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
-            regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
+        const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
         auto surface_view = GetSurface(gpu_addr, cache_addr, depth_params, preserve_contents, true);
         if (depth_buffer.target)
             depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
@@ -721,7 +718,6 @@ private:
     std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const CacheAddr cache_addr,
                                           const SurfaceParams& params, bool preserve_contents,
                                           bool is_render) {
-
         // Step 1
         // Check Level 1 Cache for a fast structural match. If candidate surface
         // matches at certain level we are pretty much done.
@@ -733,14 +729,18 @@ private:
                 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
                                       topological_result);
             }
+
             const auto struct_result = current_surface->MatchesStructure(params);
-            if (struct_result != MatchStructureResult::None &&
-                (params.target != SurfaceTarget::Texture3D ||
-                 current_surface->MatchTarget(params.target))) {
-                if (struct_result == MatchStructureResult::FullMatch) {
-                    return ManageStructuralMatch(current_surface, params, is_render);
-                } else {
-                    return RebuildSurface(current_surface, params, is_render);
+            if (struct_result != MatchStructureResult::None) {
+                const auto& old_params = current_surface->GetSurfaceParams();
+                const bool not_3d = params.target != SurfaceTarget::Texture3D &&
+                                    old_params.target != SurfaceTarget::Texture3D;
+                if (not_3d || current_surface->MatchTarget(params.target)) {
+                    if (struct_result == MatchStructureResult::FullMatch) {
+                        return ManageStructuralMatch(current_surface, params, is_render);
+                    } else {
+                        return RebuildSurface(current_surface, params, is_render);
+                    }
                 }
             }
         }
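The fast-path guard is now symmetric: a structural match can be reused (or rebuilt) whenever neither the requested surface nor the cached one is 3D, and 3D surfaces additionally have to agree on the exact target. A boolean sketch of that predicate with stand-in types (MatchTarget and the real SurfaceTarget enum belong to the cache and are not reproduced here):

    enum class SurfaceTarget { Texture2D, Texture2DArray, Texture3D };

    constexpr bool CanUseStructuralMatch(SurfaceTarget requested, SurfaceTarget cached,
                                         bool targets_match) {
        const bool not_3d =
            requested != SurfaceTarget::Texture3D && cached != SurfaceTarget::Texture3D;
        return not_3d || targets_match;
    }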
diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp
index 6683f459f..737ffe409 100644
--- a/src/web_service/web_backend.cpp
+++ b/src/web_service/web_backend.cpp
@@ -73,14 +73,12 @@ struct Client::Impl {
             if (!parsedUrl.GetPort(&port)) {
                 port = HTTP_PORT;
             }
-            cli = std::make_unique<httplib::Client>(parsedUrl.m_Host.c_str(), port,
-                                                    TIMEOUT_SECONDS);
+            cli = std::make_unique<httplib::Client>(parsedUrl.m_Host.c_str(), port);
         } else if (parsedUrl.m_Scheme == "https") {
             if (!parsedUrl.GetPort(&port)) {
                 port = HTTPS_PORT;
             }
-            cli = std::make_unique<httplib::SSLClient>(parsedUrl.m_Host.c_str(), port,
-                                                       TIMEOUT_SECONDS);
+            cli = std::make_unique<httplib::SSLClient>(parsedUrl.m_Host.c_str(), port);
         } else {
             LOG_ERROR(WebService, "Bad URL scheme {}", parsedUrl.m_Scheme);
             return Common::WebResult{Common::WebResult::Code::InvalidURL, "Bad URL scheme"};
@@ -90,6 +88,7 @@ struct Client::Impl {
             LOG_ERROR(WebService, "Invalid URL {}", host + path);
             return Common::WebResult{Common::WebResult::Code::InvalidURL, "Invalid URL"};
         }
+        cli->set_timeout_sec(TIMEOUT_SECONDS);
 
         httplib::Headers params;
         if (!jwt.empty()) {
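The timeout argument is dropped from the Client/SSLClient constructors and applied once on the constructed client instead; set_timeout_sec is the call shown in the hunk above. A minimal sketch of the new pattern (the host, port and helper name are placeholders for illustration):

    #include <ctime>
    #include <memory>
    #include <string>
    #include <httplib.h> // bundled with yuzu's externals

    std::unique_ptr<httplib::Client> MakeHttpClient(const std::string& host, int port,
                                                    time_t timeout_seconds) {
        auto cli = std::make_unique<httplib::Client>(host.c_str(), port);
        cli->set_timeout_sec(timeout_seconds); // SSLClient derives from Client, so the same call applies
        return cli;
    }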
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index cd94693c1..6209fff75 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -630,6 +630,7 @@ void Config::ReadRendererValues() {
     Settings::values.vulkan_device = ReadSetting(QStringLiteral("vulkan_device"), 0).toInt();
     Settings::values.resolution_factor =
         ReadSetting(QStringLiteral("resolution_factor"), 1.0).toFloat();
+    Settings::values.aspect_ratio = ReadSetting(QStringLiteral("aspect_ratio"), 0).toInt();
     Settings::values.use_frame_limit =
         ReadSetting(QStringLiteral("use_frame_limit"), true).toBool();
     Settings::values.frame_limit = ReadSetting(QStringLiteral("frame_limit"), 100).toInt();
@@ -1064,6 +1065,7 @@ void Config::SaveRendererValues() {
     WriteSetting(QStringLiteral("vulkan_device"), Settings::values.vulkan_device, 0);
     WriteSetting(QStringLiteral("resolution_factor"),
                  static_cast<double>(Settings::values.resolution_factor), 1.0);
+    WriteSetting(QStringLiteral("aspect_ratio"), Settings::values.aspect_ratio, 0);
     WriteSetting(QStringLiteral("use_frame_limit"), Settings::values.use_frame_limit, true);
     WriteSetting(QStringLiteral("frame_limit"), Settings::values.frame_limit, 100);
     WriteSetting(QStringLiteral("use_disk_shader_cache"), Settings::values.use_disk_shader_cache,
diff --git a/src/yuzu/configuration/configure_graphics.cpp b/src/yuzu/configuration/configure_graphics.cpp
index f57a24e36..ea899c080 100644
--- a/src/yuzu/configuration/configure_graphics.cpp
+++ b/src/yuzu/configuration/configure_graphics.cpp
@@ -97,6 +97,7 @@ void ConfigureGraphics::SetConfiguration() {
     ui->api->setCurrentIndex(static_cast<int>(Settings::values.renderer_backend));
     ui->resolution_factor_combobox->setCurrentIndex(
         static_cast<int>(FromResolutionFactor(Settings::values.resolution_factor)));
+    ui->aspect_ratio_combobox->setCurrentIndex(Settings::values.aspect_ratio);
     ui->use_disk_shader_cache->setEnabled(runtime_lock);
     ui->use_disk_shader_cache->setChecked(Settings::values.use_disk_shader_cache);
     ui->use_accurate_gpu_emulation->setChecked(Settings::values.use_accurate_gpu_emulation);
@@ -114,6 +115,7 @@ void ConfigureGraphics::ApplyConfiguration() {
     Settings::values.vulkan_device = vulkan_device;
     Settings::values.resolution_factor =
         ToResolutionFactor(static_cast<Resolution>(ui->resolution_factor_combobox->currentIndex()));
+    Settings::values.aspect_ratio = ui->aspect_ratio_combobox->currentIndex();
     Settings::values.use_disk_shader_cache = ui->use_disk_shader_cache->isChecked();
     Settings::values.use_accurate_gpu_emulation = ui->use_accurate_gpu_emulation->isChecked();
     Settings::values.use_asynchronous_gpu_emulation =
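The new combobox is wired symmetrically: SetConfiguration copies the stored integer into the current index and ApplyConfiguration writes the current index straight back, so the item order in configure_graphics.ui (Default 16:9, Force 4:3, Force 21:9, Stretch to Window) is exactly the on-disk encoding documented in default_ini.h. A minimal Qt sketch of that round trip (the free function names are placeholders):

    #include <QComboBox>

    // Stored value -> UI state.
    void LoadAspectRatio(QComboBox& combobox, int stored_value) {
        combobox.setCurrentIndex(stored_value);
    }

    // UI state -> stored value (persisted through WriteSetting elsewhere).
    int StoreAspectRatio(const QComboBox& combobox) {
        return combobox.currentIndex();
    }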
diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui
index e24372204..db60426ab 100644
--- a/src/yuzu/configuration/configure_graphics.ui
+++ b/src/yuzu/configuration/configure_graphics.ui
@@ -139,6 +139,41 @@
       </layout>
      </item>
      <item>
+      <layout class="QHBoxLayout" name="horizontalLayout_6">
+       <item>
+        <widget class="QLabel" name="ar_label">
+         <property name="text">
+          <string>Aspect Ratio:</string>
+         </property>
+        </widget>
+       </item>
+       <item>
+        <widget class="QComboBox" name="aspect_ratio_combobox">
+         <item>
+          <property name="text">
+           <string>Default (16:9)</string>
+          </property>
+         </item>
+         <item>
+          <property name="text">
+           <string>Force 4:3</string>
+          </property>
+         </item>
+         <item>
+          <property name="text">
+           <string>Force 21:9</string>
+          </property>
+         </item>
+         <item>
+          <property name="text">
+           <string>Stretch to Window</string>
+          </property>
+         </item>
+        </widget>
+       </item>
+      </layout>
+     </item>
+     <item>
       <layout class="QHBoxLayout" name="horizontalLayout_3">
        <item>
         <widget class="QLabel" name="bg_label">
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index b01a36023..96f1ce3af 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -379,6 +379,8 @@ void Config::ReadValues() {
 
     Settings::values.resolution_factor =
         static_cast<float>(sdl2_config->GetReal("Renderer", "resolution_factor", 1.0));
+    Settings::values.aspect_ratio =
+        static_cast<int>(sdl2_config->GetInteger("Renderer", "aspect_ratio", 0));
     Settings::values.use_frame_limit = sdl2_config->GetBoolean("Renderer", "use_frame_limit", true);
     Settings::values.frame_limit =
         static_cast<u16>(sdl2_config->GetInteger("Renderer", "frame_limit", 100));
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index 00fd88279..8a2b658cd 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -122,6 +122,10 @@ use_shader_jit =
 # factor for the Switch resolution
 resolution_factor =
 
+# Aspect ratio
+# 0: Default (16:9), 1: Force 4:3, 2: Force 21:9, 3: Stretch to Window
+aspect_ratio =
+
 # Whether to enable V-Sync (caps the framerate at 60FPS) or not.
 # 0 (default): Off, 1: On
 use_vsync =
diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp
index 84ab4d687..0ac93b62a 100644
--- a/src/yuzu_tester/config.cpp
+++ b/src/yuzu_tester/config.cpp
@@ -118,6 +118,8 @@ void Config::ReadValues() {
     // Renderer
     Settings::values.resolution_factor =
         static_cast<float>(sdl2_config->GetReal("Renderer", "resolution_factor", 1.0));
+    Settings::values.aspect_ratio =
+        static_cast<int>(sdl2_config->GetInteger("Renderer", "aspect_ratio", 0));
     Settings::values.use_frame_limit = false;
     Settings::values.frame_limit = 100;
     Settings::values.use_disk_shader_cache =
diff --git a/src/yuzu_tester/default_ini.h b/src/yuzu_tester/default_ini.h
index 9a3e86d68..8d93f7b88 100644
--- a/src/yuzu_tester/default_ini.h
+++ b/src/yuzu_tester/default_ini.h
@@ -26,6 +26,10 @@ use_shader_jit =
 # factor for the Switch resolution
 resolution_factor =
 
+# Aspect ratio
+# 0: Default (16:9), 1: Force 4:3, 2: Force 21:9, 3: Stretch to Window
+aspect_ratio =
+
 # Whether to enable V-Sync (caps the framerate at 60FPS) or not.
 # 0 (default): Off, 1: On
 use_vsync =
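Across the frontends the setting is read with a default of 0, so existing configurations keep the stock 16:9 behaviour. How the stored integer is consumed is outside this diff; purely as an illustration of the four documented values, a hypothetical mapping might look like this (not yuzu code):

    // 0: Default (16:9), 1: Force 4:3, 2: Force 21:9, 3: Stretch to Window
    constexpr float AspectRatioFromSetting(int aspect_ratio, float window_ratio) {
        switch (aspect_ratio) {
        case 1:
            return 4.0f / 3.0f;
        case 2:
            return 21.0f / 9.0f;
        case 3:
            return window_ratio; // stretch: follow the render window
        default:
            return 16.0f / 9.0f;
        }
    }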