path: root/src
author      James Rowe    2018-01-11 20:07:44 -0700
committer   James Rowe    2018-01-12 19:11:03 -0700
commit      1d28b2e142f845773e2b90e267d9632e196a99b9 (patch)
tree        027a3586a0fc927731afb3711c328c6dafc8551f /src
parent      Massive removal of unused modules (diff)
Remove references to PICA and rasterizers in video_core
Diffstat (limited to 'src')
-rw-r--r--  src/core/CMakeLists.txt                                  |   17
-rw-r--r--  src/core/hle/service/dsp_dsp.cpp                         |   17
-rw-r--r--  src/core/hle/service/dsp_dsp.h                           |   26
-rw-r--r--  src/core/hle/service/gsp_gpu.cpp                         |   11
-rw-r--r--  src/core/hle/service/gsp_gpu.h                           |  195
-rw-r--r--  src/core/hw/aes/arithmetic128.cpp                        |   47
-rw-r--r--  src/core/hw/aes/arithmetic128.h                          |   17
-rw-r--r--  src/core/hw/aes/ccm.h                                    |   40
-rw-r--r--  src/core/hw/aes/key.cpp                                  |  173
-rw-r--r--  src/core/hw/aes/key.h                                    |   37
-rw-r--r--  src/core/hw/gpu.cpp                                      |  573
-rw-r--r--  src/core/hw/gpu.h                                        |  334
-rw-r--r--  src/core/settings.cpp                                    |    6
-rw-r--r--  src/video_core/CMakeLists.txt                            |   75
-rw-r--r--  src/video_core/command_processor.cpp                     |  647
-rw-r--r--  src/video_core/command_processor.h                       |   41
-rw-r--r--  src/video_core/debug_utils/debug_utils.cpp               |  577
-rw-r--r--  src/video_core/debug_utils/debug_utils.h                 |  251
-rw-r--r--  src/video_core/geometry_pipeline.cpp                     |  274
-rw-r--r--  src/video_core/geometry_pipeline.h                       |   49
-rw-r--r--  src/video_core/gpu_debugger.h                            |   85
-rw-r--r--  src/video_core/pica.cpp                                  |   54
-rw-r--r--  src/video_core/pica.h                                    |   16
-rw-r--r--  src/video_core/pica_state.h                              |  159
-rw-r--r--  src/video_core/pica_types.h                              |  143
-rw-r--r--  src/video_core/primitive_assembly.cpp                    |   77
-rw-r--r--  src/video_core/primitive_assembly.h                      |   57
-rw-r--r--  src/video_core/rasterizer_interface.h                    |   67
-rw-r--r--  src/video_core/regs.cpp                                  |  488
-rw-r--r--  src/video_core/regs.h                                    |  149
-rw-r--r--  src/video_core/regs_framebuffer.h                        |  283
-rw-r--r--  src/video_core/regs_lighting.h                           |  321
-rw-r--r--  src/video_core/regs_pipeline.h                           |  269
-rw-r--r--  src/video_core/regs_rasterizer.h                         |  139
-rw-r--r--  src/video_core/regs_shader.h                             |  111
-rw-r--r--  src/video_core/regs_texturing.h                          |  452
-rw-r--r--  src/video_core/renderer_base.cpp                         |   15
-rw-r--r--  src/video_core/renderer_base.h                           |    7
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp         | 1686
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h           |  316
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp   |  799
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.h     |  239
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.cpp         | 1231
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.h           |  162
-rw-r--r--  src/video_core/renderer_opengl/pica_to_gl.h              |  235
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp       |    7
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.h         |    1
-rw-r--r--  src/video_core/shader/debug_data.h                       |  186
-rw-r--r--  src/video_core/shader/shader.cpp                         |  154
-rw-r--r--  src/video_core/shader/shader.h                           |  233
-rw-r--r--  src/video_core/shader/shader_interpreter.cpp             |  701
-rw-r--r--  src/video_core/shader/shader_interpreter.h               |   32
-rw-r--r--  src/video_core/shader/shader_jit_x64.cpp                 |   48
-rw-r--r--  src/video_core/shader/shader_jit_x64.h                   |   30
-rw-r--r--  src/video_core/shader/shader_jit_x64_compiler.cpp        |  942
-rw-r--r--  src/video_core/shader/shader_jit_x64_compiler.h          |  127
-rw-r--r--  src/video_core/swrasterizer/clipper.cpp                  |  197
-rw-r--r--  src/video_core/swrasterizer/clipper.h                    |   21
-rw-r--r--  src/video_core/swrasterizer/framebuffer.cpp              |  360
-rw-r--r--  src/video_core/swrasterizer/framebuffer.h                |   29
-rw-r--r--  src/video_core/swrasterizer/lighting.cpp                 |  308
-rw-r--r--  src/video_core/swrasterizer/lighting.h                   |   19
-rw-r--r--  src/video_core/swrasterizer/proctex.cpp                  |  223
-rw-r--r--  src/video_core/swrasterizer/proctex.h                    |   16
-rw-r--r--  src/video_core/swrasterizer/rasterizer.cpp               |  853
-rw-r--r--  src/video_core/swrasterizer/rasterizer.h                 |   48
-rw-r--r--  src/video_core/swrasterizer/swrasterizer.cpp             |   15
-rw-r--r--  src/video_core/swrasterizer/swrasterizer.h               |   27
-rw-r--r--  src/video_core/swrasterizer/texturing.cpp                |  244
-rw-r--r--  src/video_core/swrasterizer/texturing.h                  |   28
-rw-r--r--  src/video_core/texture/etc1.cpp                          |  122
-rw-r--r--  src/video_core/texture/etc1.h                            |   16
-rw-r--r--  src/video_core/texture/texture_decode.cpp                |  227
-rw-r--r--  src/video_core/texture/texture_decode.h                  |   60
-rw-r--r--  src/video_core/vertex_loader.cpp                         |  160
-rw-r--r--  src/video_core/vertex_loader.h                           |   42
-rw-r--r--  src/video_core/video_core.cpp                            |    5
77 files changed, 4 insertions(+), 16444 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 90772d0db..021e2f152 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -41,8 +41,6 @@ set(SRCS
     hle/service/am/applet_oe.cpp
     hle/service/aoc/aoc_u.cpp
     hle/service/apm/apm.cpp
-    hle/service/dsp_dsp.cpp
-    hle/service/gsp_gpu.cpp
     hle/service/hid/hid.cpp
     hle/service/lm/lm.cpp
     hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -58,10 +56,6 @@ set(SRCS
     hle/service/vi/vi.cpp
     hle/service/vi/vi_m.cpp
     hle/shared_page.cpp
-    hw/aes/arithmetic128.cpp
-    hw/aes/ccm.cpp
-    hw/aes/key.cpp
-    hw/gpu.cpp
     hw/hw.cpp
     hw/lcd.cpp
     loader/elf.cpp
@@ -130,8 +124,6 @@ set(HEADERS
     hle/service/am/applet_oe.h
     hle/service/aoc/aoc_u.h
     hle/service/apm/apm.h
-    hle/service/dsp_dsp.h
-    hle/service/gsp_gpu.h
     hle/service/hid/hid.h
     hle/service/lm/lm.h
     hle/service/nvdrv/devices/nvdevice.h
@@ -148,10 +140,6 @@ set(HEADERS
     hle/service/vi/vi.h
     hle/service/vi/vi_m.h
     hle/shared_page.h
-    hw/aes/arithmetic128.h
-    hw/aes/ccm.h
-    hw/aes/key.h
-    hw/gpu.h
     hw/hw.h
     hw/lcd.h
     loader/elf.h
@@ -171,8 +159,5 @@ set(HEADERS
 
 create_directory_groups(${SRCS} ${HEADERS})
 add_library(core STATIC ${SRCS} ${HEADERS})
-target_link_libraries(core PUBLIC common PRIVATE audio_core dynarmic network video_core)
+target_link_libraries(core PUBLIC common PRIVATE dynarmic video_core)
 target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt lz4_static unicorn)
-if (ENABLE_WEB_SERVICE)
-    target_link_libraries(core PUBLIC json-headers web_service)
-endif()
diff --git a/src/core/hle/service/dsp_dsp.cpp b/src/core/hle/service/dsp_dsp.cpp
deleted file mode 100644
index a8958373a..000000000
--- a/src/core/hle/service/dsp_dsp.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "audio_core/hle/pipe.h"
6#include "core/hle/service/dsp_dsp.h"
7
8using DspPipe = DSP::HLE::DspPipe;
9
10namespace Service {
11namespace DSP_DSP {
12
13void SignalPipeInterrupt(DspPipe pipe) {
14}
15
16} // namespace DSP_DSP
17} // namespace Service
diff --git a/src/core/hle/service/dsp_dsp.h b/src/core/hle/service/dsp_dsp.h
deleted file mode 100644
index 18ac76d9a..000000000
--- a/src/core/hle/service/dsp_dsp.h
+++ /dev/null
@@ -1,26 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <string>
8#include "core/hle/service/service.h"
9
10namespace DSP {
11namespace HLE {
12enum class DspPipe;
13}
14}
15
16namespace Service {
17namespace DSP_DSP {
18
19/**
20 * Signal a specific DSP related interrupt of type == InterruptType::Pipe, pipe == pipe.
21 * @param pipe The DSP pipe for which to signal an interrupt for.
22 */
23void SignalPipeInterrupt(DSP::HLE::DspPipe pipe);
24
25} // namespace DSP_DSP
26} // namespace Service
diff --git a/src/core/hle/service/gsp_gpu.cpp b/src/core/hle/service/gsp_gpu.cpp
deleted file mode 100644
index 8f29e04a5..000000000
--- a/src/core/hle/service/gsp_gpu.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/service/gsp_gpu.h"
6
7namespace Service {
8namespace GSP {
9
10} // namespace GSP
11} // namespace Service
diff --git a/src/core/hle/service/gsp_gpu.h b/src/core/hle/service/gsp_gpu.h
deleted file mode 100644
index 3c97763d9..000000000
--- a/src/core/hle/service/gsp_gpu.h
+++ /dev/null
@@ -1,195 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <cstddef>
8#include <string>
9#include "common/bit_field.h"
10#include "common/common_types.h"
11#include "core/hle/result.h"
12#include "core/hle/service/service.h"
13
14namespace Service {
15namespace GSP {
16
17/// GSP interrupt ID
18enum class InterruptId : u8 {
19 PSC0 = 0x00,
20 PSC1 = 0x01,
21 PDC0 = 0x02, // Seems called every vertical screen line
22 PDC1 = 0x03, // Seems called every frame
23 PPF = 0x04,
24 P3D = 0x05,
25 DMA = 0x06,
26};
27
28/// GSP command ID
29enum class CommandId : u32 {
30 REQUEST_DMA = 0x00,
31 /// Submits a commandlist for execution by the GPU.
32 SUBMIT_GPU_CMDLIST = 0x01,
33
34 // Fills a given memory range with a particular value
35 SET_MEMORY_FILL = 0x02,
36
37 // Copies an image and optionally performs color-conversion or scaling.
38 // This is highly similar to the GameCube's EFB copy feature
39 SET_DISPLAY_TRANSFER = 0x03,
40
41 // Conceptionally similar to SET_DISPLAY_TRANSFER and presumable uses the same hardware path
42 SET_TEXTURE_COPY = 0x04,
43 /// Flushes up to 3 cache regions in a single command.
44 CACHE_FLUSH = 0x05,
45};
46
47/// GSP thread interrupt relay queue
48struct InterruptRelayQueue {
49 // Index of last interrupt in the queue
50 u8 index;
51 // Number of interrupts remaining to be processed by the userland code
52 u8 number_interrupts;
53 // Error code - zero on success, otherwise an error has occurred
54 u8 error_code;
55 u8 padding1;
56
57 u32 missed_PDC0;
58 u32 missed_PDC1;
59
60 InterruptId slot[0x34]; ///< Interrupt ID slots
61};
62static_assert(sizeof(InterruptRelayQueue) == 0x40, "InterruptRelayQueue struct has incorrect size");
63
64struct FrameBufferInfo {
65 BitField<0, 1, u32> active_fb; // 0 = first, 1 = second
66
67 u32 address_left;
68 u32 address_right;
69 u32 stride; // maps to 0x1EF00X90 ?
70 u32 format; // maps to 0x1EF00X70 ?
71 u32 shown_fb; // maps to 0x1EF00X78 ?
72 u32 unknown;
73};
74static_assert(sizeof(FrameBufferInfo) == 0x1c, "Struct has incorrect size");
75
76struct FrameBufferUpdate {
77 BitField<0, 1, u8> index; // Index used for GSP::SetBufferSwap
78 BitField<0, 1, u8> is_dirty; // true if GSP should update GPU framebuffer registers
79 u16 pad1;
80
81 FrameBufferInfo framebuffer_info[2];
82
83 u32 pad2;
84};
85static_assert(sizeof(FrameBufferUpdate) == 0x40, "Struct has incorrect size");
86// TODO: Not sure if this padding is correct.
87// Chances are the second block is stored at offset 0x24 rather than 0x20.
88#ifndef _MSC_VER
89static_assert(offsetof(FrameBufferUpdate, framebuffer_info[1]) == 0x20,
90 "FrameBufferInfo element has incorrect alignment");
91#endif
92
93/// GSP command
94struct Command {
95 BitField<0, 8, CommandId> id;
96
97 union {
98 struct {
99 u32 source_address;
100 u32 dest_address;
101 u32 size;
102 } dma_request;
103
104 struct {
105 u32 address;
106 u32 size;
107 u32 flags;
108 u32 unused[3];
109 u32 do_flush;
110 } submit_gpu_cmdlist;
111
112 struct {
113 u32 start1;
114 u32 value1;
115 u32 end1;
116
117 u32 start2;
118 u32 value2;
119 u32 end2;
120
121 u16 control1;
122 u16 control2;
123 } memory_fill;
124
125 struct {
126 u32 in_buffer_address;
127 u32 out_buffer_address;
128 u32 in_buffer_size;
129 u32 out_buffer_size;
130 u32 flags;
131 } display_transfer;
132
133 struct {
134 u32 in_buffer_address;
135 u32 out_buffer_address;
136 u32 size;
137 u32 in_width_gap;
138 u32 out_width_gap;
139 u32 flags;
140 } texture_copy;
141
142 struct {
143 struct {
144 u32 address;
145 u32 size;
146 } regions[3];
147 } cache_flush;
148
149 u8 raw_data[0x1C];
150 };
151};
152static_assert(sizeof(Command) == 0x20, "Command struct has incorrect size");
153
154/// GSP shared memory GX command buffer header
155struct CommandBuffer {
156 union {
157 u32 hex;
158
159 // Current command index. This index is updated by GSP module after loading the command
160 // data, right before the command is processed. When this index is updated by GSP module,
161 // the total commands field is decreased by one as well.
162 BitField<0, 8, u32> index;
163
164 // Total commands to process, must not be value 0 when GSP module handles commands. This
165 // must be <=15 when writing a command to shared memory. This is incremented by the
166 // application when writing a command to shared memory, after increasing this value
167 // TriggerCmdReqQueue is only used if this field is value 1.
168 BitField<8, 8, u32> number_commands;
169 };
170
171 u32 unk[7];
172
173 Command commands[0xF];
174};
175static_assert(sizeof(CommandBuffer) == 0x200, "CommandBuffer struct has incorrect size");
176
177/**
178 * Signals that the specified interrupt type has occurred to userland code
179 * @param interrupt_id ID of interrupt that is being signalled
180 */
181void SignalInterrupt(InterruptId interrupt_id);
182
183ResultCode SetBufferSwap(u32 screen_id, const FrameBufferInfo& info);
184
185/**
186 * Retrieves the framebuffer info stored in the GSP shared memory for the
187 * specified screen index and thread id.
188 * @param thread_id GSP thread id of the process that accesses the structure that we are requesting.
189 * @param screen_index Index of the screen we are requesting (Top = 0, Bottom = 1).
190 * @returns FramebufferUpdate Information about the specified framebuffer.
191 */
192FrameBufferUpdate* GetFrameBufferInfo(u32 thread_id, u32 screen_index);
193
194} // namespace GSP
195} // namespace Service
diff --git a/src/core/hw/aes/arithmetic128.cpp b/src/core/hw/aes/arithmetic128.cpp
deleted file mode 100644
index 55b954a52..000000000
--- a/src/core/hw/aes/arithmetic128.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <functional>
7#include "core/hw/aes/arithmetic128.h"
8
9namespace HW {
10namespace AES {
11
12AESKey Lrot128(const AESKey& in, u32 rot) {
13 AESKey out;
14 rot %= 128;
15 const u32 byte_shift = rot / 8;
16 const u32 bit_shift = rot % 8;
17
18 for (u32 i = 0; i < 16; i++) {
19 const u32 wrap_index_a = (i + byte_shift) % 16;
20 const u32 wrap_index_b = (i + byte_shift + 1) % 16;
21 out[i] = ((in[wrap_index_a] << bit_shift) | (in[wrap_index_b] >> (8 - bit_shift))) & 0xFF;
22 }
23 return out;
24}
25
26AESKey Add128(const AESKey& a, const AESKey& b) {
27 AESKey out;
28 u32 carry = 0;
29 u32 sum = 0;
30
31 for (int i = 15; i >= 0; i--) {
32 sum = a[i] + b[i] + carry;
33 carry = sum >> 8;
34 out[i] = static_cast<u8>(sum & 0xff);
35 }
36
37 return out;
38}
39
40AESKey Xor128(const AESKey& a, const AESKey& b) {
41 AESKey out;
42 std::transform(a.cbegin(), a.cend(), b.cbegin(), out.begin(), std::bit_xor<>());
43 return out;
44}
45
46} // namespace AES
47} // namespace HW
diff --git a/src/core/hw/aes/arithmetic128.h b/src/core/hw/aes/arithmetic128.h
deleted file mode 100644
index d670e2ce2..000000000
--- a/src/core/hw/aes/arithmetic128.h
+++ /dev/null
@@ -1,17 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8#include "core/hw/aes/key.h"
9
10namespace HW {
11namespace AES {
12AESKey Lrot128(const AESKey& in, u32 rot);
13AESKey Add128(const AESKey& a, const AESKey& b);
14AESKey Xor128(const AESKey& a, const AESKey& b);
15
16} // namspace AES
17} // namespace HW
diff --git a/src/core/hw/aes/ccm.h b/src/core/hw/aes/ccm.h
deleted file mode 100644
index bf4146e80..000000000
--- a/src/core/hw/aes/ccm.h
+++ /dev/null
@@ -1,40 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include <vector>
10#include "common/common_types.h"
11
12namespace HW {
13namespace AES {
14
15constexpr size_t CCM_NONCE_SIZE = 12;
16constexpr size_t CCM_MAC_SIZE = 16;
17
18using CCMNonce = std::array<u8, CCM_NONCE_SIZE>;
19
20/**
21 * Encrypts and adds a MAC to the given data using AES-CCM algorithm.
22 * @param pdata The plain text data to encrypt
23 * @param nonce The nonce data to use for encryption
24 * @param slot_id The slot ID of the key to use for encryption
25 * @returns a vector of u8 containing the encrypted data with MAC at the end
26 */
27std::vector<u8> EncryptSignCCM(const std::vector<u8>& pdata, const CCMNonce& nonce, size_t slot_id);
28
29/**
30 * Decrypts and verify the MAC of the given data using AES-CCM algorithm.
31 * @param cipher The cipher text data to decrypt, with MAC at the end to verify
32 * @param nonce The nonce data to use for decryption
33 * @param slot_id The slot ID of the key to use for decryption
34 * @returns a vector of u8 containing the decrypted data; an empty vector if the verification fails
35 */
36std::vector<u8> DecryptVerifyCCM(const std::vector<u8>& cipher, const CCMNonce& nonce,
37 size_t slot_id);
38
39} // namespace AES
40} // namespace HW
diff --git a/src/core/hw/aes/key.cpp b/src/core/hw/aes/key.cpp
deleted file mode 100644
index 4e8a8a59a..000000000
--- a/src/core/hw/aes/key.cpp
+++ /dev/null
@@ -1,173 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <exception>
7#include <sstream>
8#include <boost/optional.hpp>
9#include "common/common_paths.h"
10#include "common/file_util.h"
11#include "common/logging/log.h"
12#include "common/string_util.h"
13#include "core/hw/aes/arithmetic128.h"
14#include "core/hw/aes/key.h"
15
16namespace HW {
17namespace AES {
18
19namespace {
20
21boost::optional<AESKey> generator_constant;
22
23struct KeySlot {
24 boost::optional<AESKey> x;
25 boost::optional<AESKey> y;
26 boost::optional<AESKey> normal;
27
28 void SetKeyX(const AESKey& key) {
29 x = key;
30 if (y && generator_constant) {
31 GenerateNormalKey();
32 }
33 }
34
35 void SetKeyY(const AESKey& key) {
36 y = key;
37 if (x && generator_constant) {
38 GenerateNormalKey();
39 }
40 }
41
42 void SetNormalKey(const AESKey& key) {
43 normal = key;
44 }
45
46 void GenerateNormalKey() {
47 normal = Lrot128(Add128(Xor128(Lrot128(*x, 2), *y), *generator_constant), 87);
48 }
49
50 void Clear() {
51 x.reset();
52 y.reset();
53 normal.reset();
54 }
55};
56
57std::array<KeySlot, KeySlotID::MaxKeySlotID> key_slots;
58
59void ClearAllKeys() {
60 for (KeySlot& slot : key_slots) {
61 slot.Clear();
62 }
63 generator_constant.reset();
64}
65
66AESKey HexToKey(const std::string& hex) {
67 if (hex.size() < 32) {
68 throw std::invalid_argument("hex string is too short");
69 }
70
71 AESKey key;
72 for (size_t i = 0; i < key.size(); ++i) {
73 key[i] = static_cast<u8>(std::stoi(hex.substr(i * 2, 2), 0, 16));
74 }
75
76 return key;
77}
78
79void LoadPresetKeys() {
80 const std::string filepath = FileUtil::GetUserPath(D_SYSDATA_IDX) + AES_KEYS;
81 FileUtil::CreateFullPath(filepath); // Create path if not already created
82 std::ifstream file;
83 OpenFStream(file, filepath, std::ios_base::in);
84 if (!file) {
85 return;
86 }
87
88 while (!file.eof()) {
89 std::string line;
90 std::getline(file, line);
91 std::vector<std::string> parts;
92 Common::SplitString(line, '=', parts);
93 if (parts.size() != 2) {
94 LOG_ERROR(HW_AES, "Failed to parse %s", line.c_str());
95 continue;
96 }
97
98 const std::string& name = parts[0];
99 AESKey key;
100 try {
101 key = HexToKey(parts[1]);
102 } catch (const std::logic_error& e) {
103 LOG_ERROR(HW_AES, "Invalid key %s: %s", parts[1].c_str(), e.what());
104 continue;
105 }
106
107 if (name == "generator") {
108 generator_constant = key;
109 continue;
110 }
111
112 size_t slot_id;
113 char key_type;
114 if (std::sscanf(name.c_str(), "slot0x%zXKey%c", &slot_id, &key_type) != 2) {
115 LOG_ERROR(HW_AES, "Invalid key name %s", name.c_str());
116 continue;
117 }
118
119 if (slot_id >= MaxKeySlotID) {
120 LOG_ERROR(HW_AES, "Out of range slot ID 0x%zX", slot_id);
121 continue;
122 }
123
124 switch (key_type) {
125 case 'X':
126 key_slots.at(slot_id).SetKeyX(key);
127 break;
128 case 'Y':
129 key_slots.at(slot_id).SetKeyY(key);
130 break;
131 case 'N':
132 key_slots.at(slot_id).SetNormalKey(key);
133 break;
134 default:
135 LOG_ERROR(HW_AES, "Invalid key type %c", key_type);
136 break;
137 }
138 }
139}
140
141} // namespace
142
143void InitKeys() {
144 ClearAllKeys();
145 LoadPresetKeys();
146}
147
148void SetGeneratorConstant(const AESKey& key) {
149 generator_constant = key;
150}
151
152void SetKeyX(size_t slot_id, const AESKey& key) {
153 key_slots.at(slot_id).SetKeyX(key);
154}
155
156void SetKeyY(size_t slot_id, const AESKey& key) {
157 key_slots.at(slot_id).SetKeyY(key);
158}
159
160void SetNormalKey(size_t slot_id, const AESKey& key) {
161 key_slots.at(slot_id).SetNormalKey(key);
162}
163
164bool IsNormalKeyAvailable(size_t slot_id) {
165 return key_slots.at(slot_id).normal.is_initialized();
166}
167
168AESKey GetNormalKey(size_t slot_id) {
169 return key_slots.at(slot_id).normal.value_or(AESKey{});
170}
171
172} // namespace AES
173} // namespace HW
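
The removed key module above derives each slot's normal key from its KeyX/KeyY halves with the fixed scrambler in GenerateNormalKey: N = Lrot128(Add128(Xor128(Lrot128(X, 2), Y), C), 87), where C is the generator constant loaded from the "generator" entry of the key file. Below is a self-contained sketch of that composition with the same helper semantics as the removed arithmetic128.cpp; it is an editorial illustration only, and DeriveNormalKey is a hypothetical name rather than part of the codebase.

// Editorial sketch (not part of this commit): the 3DS normal-key scrambler
// applied by KeySlot::GenerateNormalKey above. Helper bodies mirror the
// removed arithmetic128.cpp; DeriveNormalKey is a hypothetical name.
#include <array>
#include <cstdint>

using AESKey = std::array<std::uint8_t, 16>;

// Rotate a 128-bit big-endian value left by 'rot' bits.
static AESKey Lrot128(const AESKey& in, unsigned rot) {
    AESKey out{};
    rot %= 128;
    const unsigned byte_shift = rot / 8;
    const unsigned bit_shift = rot % 8;
    for (unsigned i = 0; i < 16; ++i) {
        const unsigned a = (i + byte_shift) % 16;
        const unsigned b = (i + byte_shift + 1) % 16;
        out[i] = static_cast<std::uint8_t>((in[a] << bit_shift) | (in[b] >> (8 - bit_shift)));
    }
    return out;
}

// Add two 128-bit big-endian values, discarding the final carry.
static AESKey Add128(const AESKey& a, const AESKey& b) {
    AESKey out{};
    unsigned carry = 0;
    for (int i = 15; i >= 0; --i) {
        const unsigned sum = a[i] + b[i] + carry;
        carry = sum >> 8;
        out[i] = static_cast<std::uint8_t>(sum & 0xFF);
    }
    return out;
}

// Byte-wise XOR of two 128-bit values.
static AESKey Xor128(const AESKey& a, const AESKey& b) {
    AESKey out{};
    for (unsigned i = 0; i < 16; ++i)
        out[i] = static_cast<std::uint8_t>(a[i] ^ b[i]);
    return out;
}

// N = Lrot128(Add128(Xor128(Lrot128(X, 2), Y), C), 87)
static AESKey DeriveNormalKey(const AESKey& x, const AESKey& y, const AESKey& generator) {
    return Lrot128(Add128(Xor128(Lrot128(x, 2), y), generator), 87);
}
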
diff --git a/src/core/hw/aes/key.h b/src/core/hw/aes/key.h
deleted file mode 100644
index c9f1342f4..000000000
--- a/src/core/hw/aes/key.h
+++ /dev/null
@@ -1,37 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include "common/common_types.h"
10
11namespace HW {
12namespace AES {
13
14enum KeySlotID : size_t {
15 // AES Keyslot used to generate the UDS data frame CCMP key.
16 UDSDataKey = 0x2D,
17 APTWrap = 0x31,
18
19 MaxKeySlotID = 0x40,
20};
21
22constexpr size_t AES_BLOCK_SIZE = 16;
23
24using AESKey = std::array<u8, AES_BLOCK_SIZE>;
25
26void InitKeys();
27
28void SetGeneratorConstant(const AESKey& key);
29void SetKeyX(size_t slot_id, const AESKey& key);
30void SetKeyY(size_t slot_id, const AESKey& key);
31void SetNormalKey(size_t slot_id, const AESKey& key);
32
33bool IsNormalKeyAvailable(size_t slot_id);
34AESKey GetNormalKey(size_t slot_id);
35
36} // namspace AES
37} // namespace HW
diff --git a/src/core/hw/gpu.cpp b/src/core/hw/gpu.cpp
deleted file mode 100644
index 47ab14ae9..000000000
--- a/src/core/hw/gpu.cpp
+++ /dev/null
@@ -1,573 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <cstring>
6#include <numeric>
7#include <type_traits>
8#include "common/alignment.h"
9#include "common/color.h"
10#include "common/common_types.h"
11#include "common/logging/log.h"
12#include "common/microprofile.h"
13#include "common/vector_math.h"
14#include "core/core_timing.h"
15#include "core/hle/service/gsp_gpu.h"
16#include "core/hw/gpu.h"
17#include "core/hw/hw.h"
18#include "core/memory.h"
19#include "core/tracer/recorder.h"
20#include "video_core/command_processor.h"
21#include "video_core/debug_utils/debug_utils.h"
22#include "video_core/rasterizer_interface.h"
23#include "video_core/renderer_base.h"
24#include "video_core/utils.h"
25#include "video_core/video_core.h"
26
27namespace GPU {
28
29Regs g_regs;
30
31/// 268MHz CPU clocks / 60Hz frames per second
32const u64 frame_ticks = static_cast<u64>(BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
33/// Event id for CoreTiming
34static CoreTiming::EventType* vblank_event;
35
36template <typename T>
37inline void Read(T& var, const u32 raw_addr) {
38 u32 addr = raw_addr - HW::VADDR_GPU;
39 u32 index = addr / 4;
40
41 // Reads other than u32 are untested, so I'd rather have them abort than silently fail
42 if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
43 LOG_ERROR(HW_GPU, "unknown Read%lu @ 0x%08X", sizeof(var) * 8, addr);
44 return;
45 }
46
47 var = g_regs[addr / 4];
48}
49
50static Math::Vec4<u8> DecodePixel(Regs::PixelFormat input_format, const u8* src_pixel) {
51 switch (input_format) {
52 case Regs::PixelFormat::RGBA8:
53 return Color::DecodeRGBA8(src_pixel);
54
55 case Regs::PixelFormat::RGB8:
56 return Color::DecodeRGB8(src_pixel);
57
58 case Regs::PixelFormat::RGB565:
59 return Color::DecodeRGB565(src_pixel);
60
61 case Regs::PixelFormat::RGB5A1:
62 return Color::DecodeRGB5A1(src_pixel);
63
64 case Regs::PixelFormat::RGBA4:
65 return Color::DecodeRGBA4(src_pixel);
66
67 default:
68 LOG_ERROR(HW_GPU, "Unknown source framebuffer format %x", input_format);
69 return {0, 0, 0, 0};
70 }
71}
72
73MICROPROFILE_DEFINE(GPU_DisplayTransfer, "GPU", "DisplayTransfer", MP_RGB(100, 100, 255));
74MICROPROFILE_DEFINE(GPU_CmdlistProcessing, "GPU", "Cmdlist Processing", MP_RGB(100, 255, 100));
75
76static void MemoryFill(const Regs::MemoryFillConfig& config) {
77 const PAddr start_addr = config.GetStartAddress();
78 const PAddr end_addr = config.GetEndAddress();
79
80 // TODO: do hwtest with these cases
81 if (!Memory::IsValidPhysicalAddress(start_addr)) {
82 LOG_CRITICAL(HW_GPU, "invalid start address 0x%08X", start_addr);
83 return;
84 }
85
86 if (!Memory::IsValidPhysicalAddress(end_addr)) {
87 LOG_CRITICAL(HW_GPU, "invalid end address 0x%08X", end_addr);
88 return;
89 }
90
91 if (end_addr <= start_addr) {
92 LOG_CRITICAL(HW_GPU, "invalid memory range from 0x%08X to 0x%08X", start_addr, end_addr);
93 return;
94 }
95
96 u8* start = Memory::GetPhysicalPointer(start_addr);
97 u8* end = Memory::GetPhysicalPointer(end_addr);
98
99 // TODO: Consider always accelerating and returning vector of
100 // regions that the accelerated fill did not cover to
101 // reduce/eliminate the fill that the cpu has to do.
102 // This would also mean that the flush below is not needed.
103 // Fill should first flush all surfaces that touch but are
104 // not completely within the fill range.
105 // Then fill all completely covered surfaces, and return the
106 // regions that were between surfaces or within the touching
107 // ones for cpu to manually fill here.
108 if (VideoCore::g_renderer->Rasterizer()->AccelerateFill(config))
109 return;
110
111 Memory::RasterizerFlushAndInvalidateRegion(config.GetStartAddress(),
112 config.GetEndAddress() - config.GetStartAddress());
113
114 if (config.fill_24bit) {
115 // fill with 24-bit values
116 for (u8* ptr = start; ptr < end; ptr += 3) {
117 ptr[0] = config.value_24bit_r;
118 ptr[1] = config.value_24bit_g;
119 ptr[2] = config.value_24bit_b;
120 }
121 } else if (config.fill_32bit) {
122 // fill with 32-bit values
123 if (end > start) {
124 u32 value = config.value_32bit;
125 size_t len = (end - start) / sizeof(u32);
126 for (size_t i = 0; i < len; ++i)
127 memcpy(&start[i * sizeof(u32)], &value, sizeof(u32));
128 }
129 } else {
130 // fill with 16-bit values
131 u16 value_16bit = config.value_16bit.Value();
132 for (u8* ptr = start; ptr < end; ptr += sizeof(u16))
133 memcpy(ptr, &value_16bit, sizeof(u16));
134 }
135}
136
137static void DisplayTransfer(const Regs::DisplayTransferConfig& config) {
138 const PAddr src_addr = config.GetPhysicalInputAddress();
139 const PAddr dst_addr = config.GetPhysicalOutputAddress();
140
141 // TODO: do hwtest with these cases
142 if (!Memory::IsValidPhysicalAddress(src_addr)) {
143 LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
144 return;
145 }
146
147 if (!Memory::IsValidPhysicalAddress(dst_addr)) {
148 LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
149 return;
150 }
151
152 if (config.input_width == 0) {
153 LOG_CRITICAL(HW_GPU, "zero input width");
154 return;
155 }
156
157 if (config.input_height == 0) {
158 LOG_CRITICAL(HW_GPU, "zero input height");
159 return;
160 }
161
162 if (config.output_width == 0) {
163 LOG_CRITICAL(HW_GPU, "zero output width");
164 return;
165 }
166
167 if (config.output_height == 0) {
168 LOG_CRITICAL(HW_GPU, "zero output height");
169 return;
170 }
171
172 if (VideoCore::g_renderer->Rasterizer()->AccelerateDisplayTransfer(config))
173 return;
174
175 u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
176 u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);
177
178 if (config.scaling > config.ScaleXY) {
179 LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u",
180 config.scaling.Value());
181 UNIMPLEMENTED();
182 return;
183 }
184
185 if (config.input_linear && config.scaling != config.NoScale) {
186 LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
187 UNIMPLEMENTED();
188 return;
189 }
190
191 int horizontal_scale = config.scaling != config.NoScale ? 1 : 0;
192 int vertical_scale = config.scaling == config.ScaleXY ? 1 : 0;
193
194 u32 output_width = config.output_width >> horizontal_scale;
195 u32 output_height = config.output_height >> vertical_scale;
196
197 u32 input_size =
198 config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
199 u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);
200
201 Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(), input_size);
202 Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(), output_size);
203
204 for (u32 y = 0; y < output_height; ++y) {
205 for (u32 x = 0; x < output_width; ++x) {
206 Math::Vec4<u8> src_color;
207
208 // Calculate the [x,y] position of the input image
209 // based on the current output position and the scale
210 u32 input_x = x << horizontal_scale;
211 u32 input_y = y << vertical_scale;
212
213 u32 output_y;
214 if (config.flip_vertically) {
215 // Flip the y value of the output data,
216 // we do this after calculating the [x,y] position of the input image
217 // to account for the scaling options.
218 output_y = output_height - y - 1;
219 } else {
220 output_y = y;
221 }
222
223 u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
224 u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
225 u32 src_offset;
226 u32 dst_offset;
227
228 if (config.input_linear) {
229 if (!config.dont_swizzle) {
230 // Interpret the input as linear and the output as tiled
231 u32 coarse_y = output_y & ~7;
232 u32 stride = output_width * dst_bytes_per_pixel;
233
234 src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
235 dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
236 coarse_y * stride;
237 } else {
238 // Both input and output are linear
239 src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
240 dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
241 }
242 } else {
243 if (!config.dont_swizzle) {
244 // Interpret the input as tiled and the output as linear
245 u32 coarse_y = input_y & ~7;
246 u32 stride = config.input_width * src_bytes_per_pixel;
247
248 src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
249 coarse_y * stride;
250 dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
251 } else {
252 // Both input and output are tiled
253 u32 out_coarse_y = output_y & ~7;
254 u32 out_stride = output_width * dst_bytes_per_pixel;
255
256 u32 in_coarse_y = input_y & ~7;
257 u32 in_stride = config.input_width * src_bytes_per_pixel;
258
259 src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
260 in_coarse_y * in_stride;
261 dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
262 out_coarse_y * out_stride;
263 }
264 }
265
266 const u8* src_pixel = src_pointer + src_offset;
267 src_color = DecodePixel(config.input_format, src_pixel);
268 if (config.scaling == config.ScaleX) {
269 Math::Vec4<u8> pixel =
270 DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
271 src_color = ((src_color + pixel) / 2).Cast<u8>();
272 } else if (config.scaling == config.ScaleXY) {
273 Math::Vec4<u8> pixel1 =
274 DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
275 Math::Vec4<u8> pixel2 =
276 DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
277 Math::Vec4<u8> pixel3 =
278 DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
279 src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
280 }
281
282 u8* dst_pixel = dst_pointer + dst_offset;
283 switch (config.output_format) {
284 case Regs::PixelFormat::RGBA8:
285 Color::EncodeRGBA8(src_color, dst_pixel);
286 break;
287
288 case Regs::PixelFormat::RGB8:
289 Color::EncodeRGB8(src_color, dst_pixel);
290 break;
291
292 case Regs::PixelFormat::RGB565:
293 Color::EncodeRGB565(src_color, dst_pixel);
294 break;
295
296 case Regs::PixelFormat::RGB5A1:
297 Color::EncodeRGB5A1(src_color, dst_pixel);
298 break;
299
300 case Regs::PixelFormat::RGBA4:
301 Color::EncodeRGBA4(src_color, dst_pixel);
302 break;
303
304 default:
305 LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x",
306 config.output_format.Value());
307 break;
308 }
309 }
310 }
311}
312
313static void TextureCopy(const Regs::DisplayTransferConfig& config) {
314 const PAddr src_addr = config.GetPhysicalInputAddress();
315 const PAddr dst_addr = config.GetPhysicalOutputAddress();
316
317 // TODO: do hwtest with invalid addresses
318 if (!Memory::IsValidPhysicalAddress(src_addr)) {
319 LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
320 return;
321 }
322
323 if (!Memory::IsValidPhysicalAddress(dst_addr)) {
324 LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
325 return;
326 }
327
328 if (VideoCore::g_renderer->Rasterizer()->AccelerateTextureCopy(config))
329 return;
330
331 u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
332 u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);
333
334 u32 remaining_size = Common::AlignDown(config.texture_copy.size, 16);
335
336 if (remaining_size == 0) {
337 LOG_CRITICAL(HW_GPU, "zero size. Real hardware freezes on this.");
338 return;
339 }
340
341 u32 input_gap = config.texture_copy.input_gap * 16;
342 u32 output_gap = config.texture_copy.output_gap * 16;
343
344 // Zero gap means contiguous input/output even if width = 0. To avoid infinite loop below, width
345 // is assigned with the total size if gap = 0.
346 u32 input_width = input_gap == 0 ? remaining_size : config.texture_copy.input_width * 16;
347 u32 output_width = output_gap == 0 ? remaining_size : config.texture_copy.output_width * 16;
348
349 if (input_width == 0) {
350 LOG_CRITICAL(HW_GPU, "zero input width. Real hardware freezes on this.");
351 return;
352 }
353
354 if (output_width == 0) {
355 LOG_CRITICAL(HW_GPU, "zero output width. Real hardware freezes on this.");
356 return;
357 }
358
359 size_t contiguous_input_size =
360 config.texture_copy.size / input_width * (input_width + input_gap);
361 Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(),
362 static_cast<u32>(contiguous_input_size));
363
364 size_t contiguous_output_size =
365 config.texture_copy.size / output_width * (output_width + output_gap);
366 Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(),
367 static_cast<u32>(contiguous_output_size));
368
369 u32 remaining_input = input_width;
370 u32 remaining_output = output_width;
371 while (remaining_size > 0) {
372 u32 copy_size = std::min({remaining_input, remaining_output, remaining_size});
373
374 std::memcpy(dst_pointer, src_pointer, copy_size);
375 src_pointer += copy_size;
376 dst_pointer += copy_size;
377
378 remaining_input -= copy_size;
379 remaining_output -= copy_size;
380 remaining_size -= copy_size;
381
382 if (remaining_input == 0) {
383 remaining_input = input_width;
384 src_pointer += input_gap;
385 }
386 if (remaining_output == 0) {
387 remaining_output = output_width;
388 dst_pointer += output_gap;
389 }
390 }
391}
392
393template <typename T>
394inline void Write(u32 addr, const T data) {
395 addr -= HW::VADDR_GPU;
396 u32 index = addr / 4;
397
398 // Writes other than u32 are untested, so I'd rather have them abort than silently fail
399 if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
400 LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
401 return;
402 }
403
404 g_regs[index] = static_cast<u32>(data);
405
406 switch (index) {
407
408 // Memory fills are triggered once the fill value is written.
409 case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
410 case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3): {
411 const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
412 auto& config = g_regs.memory_fill_config[is_second_filler];
413
414 if (config.trigger) {
415 MemoryFill(config);
416 LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(),
417 config.GetEndAddress());
418
419 // It seems that it won't signal interrupt if "address_start" is zero.
420 // TODO: hwtest this
421 if (config.GetStartAddress() != 0) {
422 if (!is_second_filler) {
423 //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC0);
424 } else {
425 //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC1);
426 }
427 }
428
429 // Reset "trigger" flag and set the "finish" flag
430 // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
431 config.trigger.Assign(0);
432 config.finished.Assign(1);
433 }
434 break;
435 }
436
437 case GPU_REG_INDEX(display_transfer_config.trigger): {
438 MICROPROFILE_SCOPE(GPU_DisplayTransfer);
439
440 const auto& config = g_regs.display_transfer_config;
441 if (config.trigger & 1) {
442
443 if (Pica::g_debug_context)
444 Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer,
445 nullptr);
446
447 if (config.is_texture_copy) {
448 TextureCopy(config);
449 LOG_TRACE(HW_GPU, "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> "
450 "0x%08X(%u+%u), flags 0x%08X",
451 config.texture_copy.size, config.GetPhysicalInputAddress(),
452 config.texture_copy.input_width * 16, config.texture_copy.input_gap * 16,
453 config.GetPhysicalOutputAddress(), config.texture_copy.output_width * 16,
454 config.texture_copy.output_gap * 16, config.flags);
455 } else {
456 DisplayTransfer(config);
457 LOG_TRACE(HW_GPU, "DisplayTransfer: 0x%08x(%ux%u)-> "
458 "0x%08x(%ux%u), dst format %x, flags 0x%08X",
459 config.GetPhysicalInputAddress(), config.input_width.Value(),
460 config.input_height.Value(), config.GetPhysicalOutputAddress(),
461 config.output_width.Value(), config.output_height.Value(),
462 config.output_format.Value(), config.flags);
463 }
464
465 g_regs.display_transfer_config.trigger = 0;
466 //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PPF);
467 }
468 break;
469 }
470
471 // Seems like writing to this register triggers processing
472 case GPU_REG_INDEX(command_processor_config.trigger): {
473 const auto& config = g_regs.command_processor_config;
474 if (config.trigger & 1) {
475 MICROPROFILE_SCOPE(GPU_CmdlistProcessing);
476
477 u32* buffer = (u32*)Memory::GetPhysicalPointer(config.GetPhysicalAddress());
478
479 if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
480 Pica::g_debug_context->recorder->MemoryAccessed((u8*)buffer, config.size,
481 config.GetPhysicalAddress());
482 }
483
484 Pica::CommandProcessor::ProcessCommandList(buffer, config.size);
485
486 g_regs.command_processor_config.trigger = 0;
487 }
488 break;
489 }
490
491 default:
492 break;
493 }
494
495 // Notify tracer about the register write
496 // This is happening *after* handling the write to make sure we properly catch all memory reads.
497 if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
498 // addr + GPU VBase - IO VBase + IO PBase
499 Pica::g_debug_context->recorder->RegisterWritten<T>(
500 addr + 0x1EF00000 - 0x1EC00000 + 0x10100000, data);
501 }
502}
503
504// Explicitly instantiate template functions because we aren't defining this in the header:
505
506template void Read<u64>(u64& var, const u32 addr);
507template void Read<u32>(u32& var, const u32 addr);
508template void Read<u16>(u16& var, const u32 addr);
509template void Read<u8>(u8& var, const u32 addr);
510
511template void Write<u64>(u32 addr, const u64 data);
512template void Write<u32>(u32 addr, const u32 data);
513template void Write<u16>(u32 addr, const u16 data);
514template void Write<u8>(u32 addr, const u8 data);
515
516/// Update hardware
517static void VBlankCallback(u64 userdata, int cycles_late) {
518 //VideoCore::g_renderer->SwapBuffers();
519
520 //// Signal to GSP that GPU interrupt has occurred
521 //// TODO(yuriks): hwtest to determine if PDC0 is for the Top screen and PDC1 for the Sub
522 //// screen, or if both use the same interrupts and these two instead determine the
523 //// beginning and end of the VBlank period. If needed, split the interrupt firing into
524 //// two different intervals.
525 //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC0);
526 //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC1);
527
528 // Reschedule recurrent event
529 CoreTiming::ScheduleEvent(frame_ticks - cycles_late, vblank_event);
530}
531
532/// Initialize hardware
533void Init() {
534 memset(&g_regs, 0, sizeof(g_regs));
535
536 auto& framebuffer_top = g_regs.framebuffer_config[0];
537 auto& framebuffer_sub = g_regs.framebuffer_config[1];
538
539 // Setup default framebuffer addresses (located in VRAM)
540 // .. or at least these are the ones used by system applets.
541 // There's probably a smarter way to come up with addresses
542 // like this which does not require hardcoding.
543 framebuffer_top.address_left1 = 0x181E6000;
544 framebuffer_top.address_left2 = 0x1822C800;
545 framebuffer_top.address_right1 = 0x18273000;
546 framebuffer_top.address_right2 = 0x182B9800;
547 framebuffer_sub.address_left1 = 0x1848F000;
548 framebuffer_sub.address_left2 = 0x184C7800;
549
550 framebuffer_top.width.Assign(240);
551 framebuffer_top.height.Assign(400);
552 framebuffer_top.stride = 3 * 240;
553 framebuffer_top.color_format.Assign(Regs::PixelFormat::RGB8);
554 framebuffer_top.active_fb = 0;
555
556 framebuffer_sub.width.Assign(240);
557 framebuffer_sub.height.Assign(320);
558 framebuffer_sub.stride = 3 * 240;
559 framebuffer_sub.color_format.Assign(Regs::PixelFormat::RGB8);
560 framebuffer_sub.active_fb = 0;
561
562 vblank_event = CoreTiming::RegisterEvent("GPU::VBlankCallback", VBlankCallback);
563 CoreTiming::ScheduleEvent(frame_ticks, vblank_event);
564
565 LOG_DEBUG(HW_GPU, "initialized OK");
566}
567
568/// Shutdown hardware
569void Shutdown() {
570 LOG_DEBUG(HW_GPU, "shutdown OK");
571}
572
573} // namespace
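
The tiled branches of the removed DisplayTransfer above compute offsets as VideoCore::GetMortonOffset(x, y, bpp) plus a coarse_y * stride term, i.e. the framebuffer is split into 8x8-pixel tiles laid out row-major, with pixels Z-ordered (Morton order) inside each tile. The following self-contained sketch shows that calculation; it is an editorial illustration under the commonly documented PICA tiling (x bits in the even positions of the Morton index), not a copy of the removed utils.h, and TiledOffset/MortonInterleave8 are hypothetical names.

// Editorial sketch (not part of this commit): byte offset of pixel (x, y) in a
// surface of 'width' pixels stored as 8x8 tiles with Morton-ordered texels.
#include <cstdint>

// Interleave the low 3 bits of x and y: result bits are y2 x2 y1 x1 y0 x0.
static std::uint32_t MortonInterleave8(std::uint32_t x, std::uint32_t y) {
    std::uint32_t i = 0;
    for (unsigned bit = 0; bit < 3; ++bit) {
        i |= ((x >> bit) & 1u) << (2 * bit);     // x occupies the even bit positions
        i |= ((y >> bit) & 1u) << (2 * bit + 1); // y occupies the odd bit positions
    }
    return i;
}

static std::uint32_t TiledOffset(std::uint32_t x, std::uint32_t y, std::uint32_t width,
                                 std::uint32_t bytes_per_pixel) {
    const std::uint32_t coarse_x = x & ~7u;               // first column of this tile
    const std::uint32_t coarse_y = y & ~7u;               // first row of this tile
    const std::uint32_t stride = width * bytes_per_pixel; // bytes per pixel row
    const std::uint32_t within_tile = MortonInterleave8(x & 7u, y & 7u);
    // Each tile spans 8 rows, so skipping 'coarse_x' columns advances coarse_x * 8 texels;
    // whole tile rows are skipped via coarse_y * stride, as in the removed code.
    return (within_tile + coarse_x * 8u) * bytes_per_pixel + coarse_y * stride;
}
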
diff --git a/src/core/hw/gpu.h b/src/core/hw/gpu.h
deleted file mode 100644
index e3d0a0e08..000000000
--- a/src/core/hw/gpu.h
+++ /dev/null
@@ -1,334 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <cstddef>
8#include <type_traits>
9#include "common/assert.h"
10#include "common/bit_field.h"
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13
14namespace GPU {
15
16constexpr float SCREEN_REFRESH_RATE = 60;
17
18// Returns index corresponding to the Regs member labeled by field_name
19// TODO: Due to Visual studio bug 209229, offsetof does not return constant expressions
20// when used with array elements (e.g. GPU_REG_INDEX(memory_fill_config[0])).
21// For details cf.
22// https://connect.microsoft.com/VisualStudio/feedback/details/209229/offsetof-does-not-produce-a-constant-expression-for-array-members
23// Hopefully, this will be fixed sometime in the future.
24// For lack of better alternatives, we currently hardcode the offsets when constant
25// expressions are needed via GPU_REG_INDEX_WORKAROUND (on sane compilers, static_asserts
26// will then make sure the offsets indeed match the automatically calculated ones).
27#define GPU_REG_INDEX(field_name) (offsetof(GPU::Regs, field_name) / sizeof(u32))
28#if defined(_MSC_VER)
29#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) (backup_workaround_index)
30#else
31// NOTE: Yeah, hacking in a static_assert here just to workaround the lacking MSVC compiler
32// really is this annoying. This macro just forwards its first argument to GPU_REG_INDEX
33// and then performs a (no-op) cast to size_t iff the second argument matches the expected
34// field offset. Otherwise, the compiler will fail to compile this code.
35#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) \
36 ((typename std::enable_if<backup_workaround_index == GPU_REG_INDEX(field_name), size_t>::type) \
37 GPU_REG_INDEX(field_name))
38#endif
39
40// MMIO region 0x1EFxxxxx
41struct Regs {
42
43// helper macro to make sure the defined structures are of the expected size.
44#if defined(_MSC_VER)
45// TODO: MSVC does not support using sizeof() on non-static data members even though this
46// is technically allowed since C++11. This macro should be enabled once MSVC adds
47// support for that.
48#define ASSERT_MEMBER_SIZE(name, size_in_bytes)
49#else
50#define ASSERT_MEMBER_SIZE(name, size_in_bytes) \
51 static_assert(sizeof(name) == size_in_bytes, \
52 "Structure size and register block length don't match")
53#endif
54
55 // Components are laid out in reverse byte order, most significant bits first.
56 enum class PixelFormat : u32 {
57 RGBA8 = 0,
58 RGB8 = 1,
59 RGB565 = 2,
60 RGB5A1 = 3,
61 RGBA4 = 4,
62 };
63
64 /**
65 * Returns the number of bytes per pixel.
66 */
67 static int BytesPerPixel(PixelFormat format) {
68 switch (format) {
69 case PixelFormat::RGBA8:
70 return 4;
71 case PixelFormat::RGB8:
72 return 3;
73 case PixelFormat::RGB565:
74 case PixelFormat::RGB5A1:
75 case PixelFormat::RGBA4:
76 return 2;
77 }
78
79 UNREACHABLE();
80 }
81
82 INSERT_PADDING_WORDS(0x4);
83
84 struct MemoryFillConfig {
85 u32 address_start;
86 u32 address_end;
87
88 union {
89 u32 value_32bit;
90
91 BitField<0, 16, u32> value_16bit;
92
93 // TODO: Verify component order
94 BitField<0, 8, u32> value_24bit_r;
95 BitField<8, 8, u32> value_24bit_g;
96 BitField<16, 8, u32> value_24bit_b;
97 };
98
99 union {
100 u32 control;
101
102 // Setting this field to 1 triggers the memory fill.
103 // This field also acts as a status flag, and gets reset to 0 upon completion.
104 BitField<0, 1, u32> trigger;
105
106 // Set to 1 upon completion.
107 BitField<1, 1, u32> finished;
108
109 // If both of these bits are unset, then it will fill the memory with a 16 bit value
110 // 1: fill with 24-bit wide values
111 BitField<8, 1, u32> fill_24bit;
112 // 1: fill with 32-bit wide values
113 BitField<9, 1, u32> fill_32bit;
114 };
115
116 inline u32 GetStartAddress() const {
117 return DecodeAddressRegister(address_start);
118 }
119
120 inline u32 GetEndAddress() const {
121 return DecodeAddressRegister(address_end);
122 }
123 } memory_fill_config[2];
124 ASSERT_MEMBER_SIZE(memory_fill_config[0], 0x10);
125
126 INSERT_PADDING_WORDS(0x10b);
127
128 struct FramebufferConfig {
129 union {
130 u32 size;
131
132 BitField<0, 16, u32> width;
133 BitField<16, 16, u32> height;
134 };
135
136 INSERT_PADDING_WORDS(0x2);
137
138 u32 address_left1;
139 u32 address_left2;
140
141 union {
142 u32 format;
143
144 BitField<0, 3, PixelFormat> color_format;
145 };
146
147 INSERT_PADDING_WORDS(0x1);
148
149 union {
150 u32 active_fb;
151
152 // 0: Use parameters ending with "1"
153 // 1: Use parameters ending with "2"
154 BitField<0, 1, u32> second_fb_active;
155 };
156
157 INSERT_PADDING_WORDS(0x5);
158
159 // Distance between two pixel rows, in bytes
160 u32 stride;
161
162 u32 address_right1;
163 u32 address_right2;
164
165 INSERT_PADDING_WORDS(0x30);
166 } framebuffer_config[2];
167 ASSERT_MEMBER_SIZE(framebuffer_config[0], 0x100);
168
169 INSERT_PADDING_WORDS(0x169);
170
171 struct DisplayTransferConfig {
172 u32 input_address;
173 u32 output_address;
174
175 inline u32 GetPhysicalInputAddress() const {
176 return DecodeAddressRegister(input_address);
177 }
178
179 inline u32 GetPhysicalOutputAddress() const {
180 return DecodeAddressRegister(output_address);
181 }
182
183 union {
184 u32 output_size;
185
186 BitField<0, 16, u32> output_width;
187 BitField<16, 16, u32> output_height;
188 };
189
190 union {
191 u32 input_size;
192
193 BitField<0, 16, u32> input_width;
194 BitField<16, 16, u32> input_height;
195 };
196
197 enum ScalingMode : u32 {
198 NoScale = 0, // Doesn't scale the image
199 ScaleX = 1, // Downscales the image in half in the X axis and applies a box filter
200 ScaleXY =
201 2, // Downscales the image in half in both the X and Y axes and applies a box filter
202 };
203
204 union {
205 u32 flags;
206
207 BitField<0, 1, u32> flip_vertically; // flips input data vertically
208 BitField<1, 1, u32> input_linear; // Converts from linear to tiled format
209 BitField<2, 1, u32> crop_input_lines;
210 BitField<3, 1, u32> is_texture_copy; // Copies the data without performing any
211 // processing and respecting texture copy fields
212 BitField<5, 1, u32> dont_swizzle;
213 BitField<8, 3, PixelFormat> input_format;
214 BitField<12, 3, PixelFormat> output_format;
215 /// Uses some kind of 32x32 block swizzling mode, instead of the usual 8x8 one.
216 BitField<16, 1, u32> block_32; // TODO(yuriks): unimplemented
217 BitField<24, 2, ScalingMode> scaling; // Determines the scaling mode of the transfer
218 };
219
220 INSERT_PADDING_WORDS(0x1);
221
222 // it seems that writing to this field triggers the display transfer
223 u32 trigger;
224
225 INSERT_PADDING_WORDS(0x1);
226
227 struct {
228 u32 size; // The lower 4 bits are ignored
229
230 union {
231 u32 input_size;
232
233 BitField<0, 16, u32> input_width;
234 BitField<16, 16, u32> input_gap;
235 };
236
237 union {
238 u32 output_size;
239
240 BitField<0, 16, u32> output_width;
241 BitField<16, 16, u32> output_gap;
242 };
243 } texture_copy;
244 } display_transfer_config;
245 ASSERT_MEMBER_SIZE(display_transfer_config, 0x2c);
246
247 INSERT_PADDING_WORDS(0x32D);
248
249 struct {
250 // command list size (in bytes)
251 u32 size;
252
253 INSERT_PADDING_WORDS(0x1);
254
255 // command list address
256 u32 address;
257
258 INSERT_PADDING_WORDS(0x1);
259
260 // it seems that writing to this field triggers command list processing
261 u32 trigger;
262
263 inline u32 GetPhysicalAddress() const {
264 return DecodeAddressRegister(address);
265 }
266 } command_processor_config;
267 ASSERT_MEMBER_SIZE(command_processor_config, 0x14);
268
269 INSERT_PADDING_WORDS(0x9c3);
270
271 static constexpr size_t NumIds() {
272 return sizeof(Regs) / sizeof(u32);
273 }
274
275 const u32& operator[](int index) const {
276 const u32* content = reinterpret_cast<const u32*>(this);
277 return content[index];
278 }
279
280 u32& operator[](int index) {
281 u32* content = reinterpret_cast<u32*>(this);
282 return content[index];
283 }
284
285#undef ASSERT_MEMBER_SIZE
286
287private:
288 /*
289 * Most physical addresses which GPU registers refer to are 8-byte aligned.
290 * This function should be used to get the address from a raw register value.
291 */
292 static inline u32 DecodeAddressRegister(u32 register_value) {
293 return register_value * 8;
294 }
295};
296static_assert(std::is_standard_layout<Regs>::value, "Structure does not use standard layout");
297
298// TODO: MSVC does not support using offsetof() on non-static data members even though this
299// is technically allowed since C++11. This macro should be enabled once MSVC adds
300// support for that.
301#ifndef _MSC_VER
302#define ASSERT_REG_POSITION(field_name, position) \
303 static_assert(offsetof(Regs, field_name) == position * 4, \
304 "Field " #field_name " has invalid position")
305
306ASSERT_REG_POSITION(memory_fill_config[0], 0x00004);
307ASSERT_REG_POSITION(memory_fill_config[1], 0x00008);
308ASSERT_REG_POSITION(framebuffer_config[0], 0x00117);
309ASSERT_REG_POSITION(framebuffer_config[1], 0x00157);
310ASSERT_REG_POSITION(display_transfer_config, 0x00300);
311ASSERT_REG_POSITION(command_processor_config, 0x00638);
312
313#undef ASSERT_REG_POSITION
314#endif // !defined(_MSC_VER)
315
316// The total number of registers is chosen arbitrarily, but let's make sure it's not some odd value
317// anyway.
318static_assert(sizeof(Regs) == 0x1000 * sizeof(u32), "Invalid total size of register set");
319
320extern Regs g_regs;
321
322template <typename T>
323void Read(T& var, const u32 addr);
324
325template <typename T>
326void Write(u32 addr, const T data);
327
328/// Initialize hardware
329void Init();
330
331/// Shutdown hardware
332void Shutdown();
333
334} // namespace
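
As DecodeAddressRegister in the removed header shows, GPU address registers store a physical address divided by 8, so only 8-byte-aligned addresses are representable. A quick illustration using the default top-screen framebuffer address written by the removed Init() (editorial sketch; EncodeAddressRegister is a hypothetical helper added only for the round-trip check):

// Editorial sketch (not part of this commit): the *8 / /8 address encoding
// used by the removed GPU register block.
#include <cstdint>

constexpr std::uint32_t DecodeAddressRegister(std::uint32_t reg) {
    return reg * 8; // register value -> physical address
}
constexpr std::uint32_t EncodeAddressRegister(std::uint32_t paddr) {
    return paddr / 8; // physical address -> register value (hypothetical helper)
}

// 0x181E6000 is framebuffer_top.address_left1 from the removed Init();
// it would be stored in the register as 0x181E6000 / 8 = 0x0303CC00.
static_assert(EncodeAddressRegister(0x181E6000) == 0x0303CC00, "register encoding");
static_assert(DecodeAddressRegister(0x0303CC00) == 0x181E6000, "round-trip");
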
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index 012f7e6c3..444bcc387 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -2,7 +2,6 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include "audio_core/audio_core.h"
 #include "core/gdbstub/gdbstub.h"
 #include "core/hle/service/hid/hid.h"
 #include "core/settings.h"
@@ -19,8 +18,6 @@ void Apply() {
     GDBStub::SetServerPort(values.gdbstub_port);
     GDBStub::ToggleServer(values.use_gdbstub);
 
-    VideoCore::g_hw_renderer_enabled = values.use_hw_renderer;
-    VideoCore::g_shader_jit_enabled = values.use_shader_jit;
     VideoCore::g_toggle_framelimit_enabled = values.toggle_framelimit;
 
     if (VideoCore::g_emu_window) {
@@ -28,9 +25,6 @@ void Apply() {
         VideoCore::g_emu_window->UpdateCurrentFramebufferLayout(layout.width, layout.height);
     }
 
-    AudioCore::SelectSink(values.sink_id);
-    AudioCore::EnableStretching(values.enable_audio_stretching);
-
     Service::HID::ReloadInputDevices();
 }
 
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 82f47d8a9..3fd177c46 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -1,96 +1,23 @@
 set(SRCS
-    command_processor.cpp
-    debug_utils/debug_utils.cpp
-    geometry_pipeline.cpp
-    pica.cpp
-    primitive_assembly.cpp
-    regs.cpp
     renderer_base.cpp
-    renderer_opengl/gl_rasterizer.cpp
-    renderer_opengl/gl_rasterizer_cache.cpp
-    renderer_opengl/gl_shader_gen.cpp
     renderer_opengl/gl_shader_util.cpp
     renderer_opengl/gl_state.cpp
     renderer_opengl/renderer_opengl.cpp
-    shader/shader.cpp
-    shader/shader_interpreter.cpp
-    swrasterizer/clipper.cpp
-    swrasterizer/framebuffer.cpp
-    swrasterizer/lighting.cpp
-    swrasterizer/proctex.cpp
-    swrasterizer/rasterizer.cpp
-    swrasterizer/swrasterizer.cpp
-    swrasterizer/texturing.cpp
-    texture/etc1.cpp
-    texture/texture_decode.cpp
-    vertex_loader.cpp
     video_core.cpp
     )
 
 set(HEADERS
-    command_processor.h
-    debug_utils/debug_utils.h
-    geometry_pipeline.h
-    gpu_debugger.h
-    pica.h
-    pica_state.h
-    pica_types.h
-    primitive_assembly.h
-    rasterizer_interface.h
-    regs.h
-    regs_framebuffer.h
-    regs_lighting.h
-    regs_pipeline.h
-    regs_rasterizer.h
-    regs_shader.h
-    regs_texturing.h
     renderer_base.h
-    renderer_opengl/gl_rasterizer.h
-    renderer_opengl/gl_rasterizer_cache.h
     renderer_opengl/gl_resource_manager.h
-    renderer_opengl/gl_shader_gen.h
     renderer_opengl/gl_shader_util.h
     renderer_opengl/gl_state.h
-    renderer_opengl/pica_to_gl.h
     renderer_opengl/renderer_opengl.h
-    shader/debug_data.h
-    shader/shader.h
-    shader/shader_interpreter.h
-    swrasterizer/clipper.h
-    swrasterizer/framebuffer.h
-    swrasterizer/lighting.h
-    swrasterizer/proctex.h
-    swrasterizer/rasterizer.h
-    swrasterizer/swrasterizer.h
-    swrasterizer/texturing.h
-    texture/etc1.h
-    texture/texture_decode.h
     utils.h
-    vertex_loader.h
     video_core.h
     )
 
-if(ARCHITECTURE_x86_64)
-    set(SRCS ${SRCS}
-        shader/shader_jit_x64.cpp
-        shader/shader_jit_x64_compiler.cpp)
-
-    set(HEADERS ${HEADERS}
-        shader/shader_jit_x64.h
-        shader/shader_jit_x64_compiler.h)
-endif()
-
 create_directory_groups(${SRCS} ${HEADERS})
 
 add_library(video_core STATIC ${SRCS} ${HEADERS})
 target_link_libraries(video_core PUBLIC common core)
-target_link_libraries(video_core PRIVATE glad nihstro-headers)
+target_link_libraries(video_core PRIVATE glad)
-
-if (ARCHITECTURE_x86_64)
-    target_link_libraries(video_core PRIVATE xbyak)
-endif()
-
-if (PNG_FOUND)
-    target_link_libraries(video_core PRIVATE PNG::PNG)
-    target_compile_definitions(video_core PRIVATE HAVE_PNG)
-endif()
diff --git a/src/video_core/command_processor.cpp b/src/video_core/command_processor.cpp
deleted file mode 100644
index b6fbc5d80..000000000
--- a/src/video_core/command_processor.cpp
+++ /dev/null
@@ -1,647 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <array>
6#include <cstddef>
7#include <memory>
8#include <utility>
9#include "common/assert.h"
10#include "common/logging/log.h"
11#include "common/microprofile.h"
12#include "common/vector_math.h"
13#include "core/hle/service/gsp_gpu.h"
14#include "core/hw/gpu.h"
15#include "core/memory.h"
16#include "core/tracer/recorder.h"
17#include "video_core/command_processor.h"
18#include "video_core/debug_utils/debug_utils.h"
19#include "video_core/pica_state.h"
20#include "video_core/pica_types.h"
21#include "video_core/primitive_assembly.h"
22#include "video_core/rasterizer_interface.h"
23#include "video_core/regs.h"
24#include "video_core/regs_pipeline.h"
25#include "video_core/regs_texturing.h"
26#include "video_core/renderer_base.h"
27#include "video_core/shader/shader.h"
28#include "video_core/vertex_loader.h"
29#include "video_core/video_core.h"
30
31namespace Pica {
32
33namespace CommandProcessor {
34
35static int vs_float_regs_counter = 0;
36static u32 vs_uniform_write_buffer[4];
37
38static int gs_float_regs_counter = 0;
39static u32 gs_uniform_write_buffer[4];
40
41static int default_attr_counter = 0;
42static u32 default_attr_write_buffer[3];
43
44// Expand a 4-bit mask to 4-byte mask, e.g. 0b0101 -> 0x00FF00FF
45static const u32 expand_bits_to_bytes[] = {
46 0x00000000, 0x000000ff, 0x0000ff00, 0x0000ffff, 0x00ff0000, 0x00ff00ff, 0x00ffff00, 0x00ffffff,
47 0xff000000, 0xff0000ff, 0xff00ff00, 0xff00ffff, 0xffff0000, 0xffff00ff, 0xffffff00, 0xffffffff,
48};
49
50MICROPROFILE_DEFINE(GPU_Drawing, "GPU", "Drawing", MP_RGB(50, 50, 240));
51
52static const char* GetShaderSetupTypeName(Shader::ShaderSetup& setup) {
53 if (&setup == &g_state.vs) {
54 return "vertex shader";
55 }
56 if (&setup == &g_state.gs) {
57 return "geometry shader";
58 }
59 return "unknown shader";
60}
61
62static void WriteUniformBoolReg(Shader::ShaderSetup& setup, u32 value) {
63 for (unsigned i = 0; i < setup.uniforms.b.size(); ++i)
64 setup.uniforms.b[i] = (value & (1 << i)) != 0;
65}
66
67static void WriteUniformIntReg(Shader::ShaderSetup& setup, unsigned index,
68 const Math::Vec4<u8>& values) {
69 ASSERT(index < setup.uniforms.i.size());
70 setup.uniforms.i[index] = values;
71 LOG_TRACE(HW_GPU, "Set %s integer uniform %d to %02x %02x %02x %02x",
72 GetShaderSetupTypeName(setup), index, values.x, values.y, values.z, values.w);
73}
74
75static void WriteUniformFloatReg(ShaderRegs& config, Shader::ShaderSetup& setup,
76 int& float_regs_counter, u32 uniform_write_buffer[4], u32 value) {
77 auto& uniform_setup = config.uniform_setup;
78
79 // TODO: Does actual hardware indeed keep an intermediate buffer or does
80 // it directly write the values?
81 uniform_write_buffer[float_regs_counter++] = value;
82
83 // Uniforms are written in a packed format such that four float24 values are encoded in
84 // three 32-bit numbers. We write to internal memory once a full such vector is
85 // written.
86 if ((float_regs_counter >= 4 && uniform_setup.IsFloat32()) ||
87 (float_regs_counter >= 3 && !uniform_setup.IsFloat32())) {
88 float_regs_counter = 0;
89
90 auto& uniform = setup.uniforms.f[uniform_setup.index];
91
92 if (uniform_setup.index >= 96) {
93 LOG_ERROR(HW_GPU, "Invalid %s float uniform index %d", GetShaderSetupTypeName(setup),
94 (int)uniform_setup.index);
95 } else {
96
97 // NOTE: The destination component order indeed is "backwards"
98 if (uniform_setup.IsFloat32()) {
99 for (auto i : {0, 1, 2, 3})
100 uniform[3 - i] = float24::FromFloat32(*(float*)(&uniform_write_buffer[i]));
101 } else {
102 // TODO: Untested
103 uniform.w = float24::FromRaw(uniform_write_buffer[0] >> 8);
104 uniform.z = float24::FromRaw(((uniform_write_buffer[0] & 0xFF) << 16) |
105 ((uniform_write_buffer[1] >> 16) & 0xFFFF));
106 uniform.y = float24::FromRaw(((uniform_write_buffer[1] & 0xFFFF) << 8) |
107 ((uniform_write_buffer[2] >> 24) & 0xFF));
108 uniform.x = float24::FromRaw(uniform_write_buffer[2] & 0xFFFFFF);
109 }
110
111 LOG_TRACE(HW_GPU, "Set %s float uniform %x to (%f %f %f %f)",
112 GetShaderSetupTypeName(setup), (int)uniform_setup.index,
113 uniform.x.ToFloat32(), uniform.y.ToFloat32(), uniform.z.ToFloat32(),
114 uniform.w.ToFloat32());
115
116 // TODO: Verify that this actually modifies the register!
117 uniform_setup.index.Assign(uniform_setup.index + 1);
118 }
119 }
120}
121
122static void LoadDefaultVertexAttributes(u32 register_value) {
123 auto& regs = g_state.regs;
124
125 // TODO: Does actual hardware indeed keep an intermediate buffer or does
126 // it directly write the values?
127 default_attr_write_buffer[default_attr_counter++] = register_value;
128
129 // Default attributes are written in a packed format such that four float24 values are encoded
130 // in three 32-bit numbers.
131 // We write to internal memory once a full such vector is written.
132 if (default_attr_counter >= 3) {
133 default_attr_counter = 0;
134
135 auto& setup = regs.pipeline.vs_default_attributes_setup;
136
137 if (setup.index >= 16) {
138 LOG_ERROR(HW_GPU, "Invalid VS default attribute index %d", (int)setup.index);
139 return;
140 }
141
142 Math::Vec4<float24> attribute;
143
144 // NOTE: The destination component order indeed is "backwards"
145 attribute.w = float24::FromRaw(default_attr_write_buffer[0] >> 8);
146 attribute.z = float24::FromRaw(((default_attr_write_buffer[0] & 0xFF) << 16) |
147 ((default_attr_write_buffer[1] >> 16) & 0xFFFF));
148 attribute.y = float24::FromRaw(((default_attr_write_buffer[1] & 0xFFFF) << 8) |
149 ((default_attr_write_buffer[2] >> 24) & 0xFF));
150 attribute.x = float24::FromRaw(default_attr_write_buffer[2] & 0xFFFFFF);
151
152 LOG_TRACE(HW_GPU, "Set default VS attribute %x to (%f %f %f %f)", (int)setup.index,
153 attribute.x.ToFloat32(), attribute.y.ToFloat32(), attribute.z.ToFloat32(),
154 attribute.w.ToFloat32());
155
156 // TODO: Verify that this actually modifies the register!
157 if (setup.index < 15) {
158 g_state.input_default_attributes.attr[setup.index] = attribute;
159 setup.index++;
160 } else {
161 // Put each attribute into an immediate input buffer. When all specified immediate
162 // attributes are present, the Vertex Shader is invoked and everything is sent to
163 // the primitive assembler.
164
165 auto& immediate_input = g_state.immediate.input_vertex;
166 auto& immediate_attribute_id = g_state.immediate.current_attribute;
167
168 immediate_input.attr[immediate_attribute_id] = attribute;
169
170 if (immediate_attribute_id < regs.pipeline.max_input_attrib_index) {
171 immediate_attribute_id += 1;
172 } else {
173 MICROPROFILE_SCOPE(GPU_Drawing);
174 immediate_attribute_id = 0;
175
176 auto* shader_engine = Shader::GetEngine();
177 shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
178
179 // Send to vertex shader
180 if (g_debug_context)
181 g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
182 static_cast<void*>(&immediate_input));
183 Shader::UnitState shader_unit;
184 Shader::AttributeBuffer output{};
185
186 shader_unit.LoadInput(regs.vs, immediate_input);
187 shader_engine->Run(g_state.vs, shader_unit);
188 shader_unit.WriteOutput(regs.vs, output);
189
190 // Send to geometry pipeline
191 if (g_state.immediate.reset_geometry_pipeline) {
192 g_state.geometry_pipeline.Reconfigure();
193 g_state.immediate.reset_geometry_pipeline = false;
194 }
195 ASSERT(!g_state.geometry_pipeline.NeedIndexInput());
196 g_state.geometry_pipeline.Setup(shader_engine);
197 g_state.geometry_pipeline.SubmitVertex(output);
198
199 // TODO: If drawing after every immediate mode triangle kills performance,
200 // change it to flush triangles whenever a drawing config register changes
201 // See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
202 VideoCore::g_renderer->Rasterizer()->DrawTriangles();
203 if (g_debug_context) {
204 g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
205 }
206 }
207 }
208 }
209}
210
211static void Draw(u32 command_id) {
212 MICROPROFILE_SCOPE(GPU_Drawing);
213 auto& regs = g_state.regs;
214
215#if PICA_LOG_TEV
216 DebugUtils::DumpTevStageConfig(regs.GetTevStages());
217#endif
218 if (g_debug_context)
219 g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);
220
221 // Processes information about internal vertex attributes to figure out how a vertex is
222 // loaded.
223 // Later, these can be compiled and cached.
224 const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
225 VertexLoader loader(regs.pipeline);
226
227 // Load vertices
228 bool is_indexed = (command_id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
229
230 const auto& index_info = regs.pipeline.index_array;
231 const u8* index_address_8 = Memory::GetPhysicalPointer(base_address + index_info.offset);
232 const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
233 bool index_u16 = index_info.format != 0;
234
235 PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;
236
237 if (g_debug_context && g_debug_context->recorder) {
238 for (int i = 0; i < 3; ++i) {
239 const auto texture = regs.texturing.GetTextures()[i];
240 if (!texture.enabled)
241 continue;
242
243 u8* texture_data = Memory::GetPhysicalPointer(texture.config.GetPhysicalAddress());
244 g_debug_context->recorder->MemoryAccessed(
245 texture_data, Pica::TexturingRegs::NibblesPerPixel(texture.format) *
246 texture.config.width / 2 * texture.config.height,
247 texture.config.GetPhysicalAddress());
248 }
249 }
250
251 DebugUtils::MemoryAccessTracker memory_accesses;
252
253 // Simple circular-replacement vertex cache
254 // The size has been tuned for optimal balance between hit-rate and the cost of lookup
255 const size_t VERTEX_CACHE_SIZE = 32;
256 std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
257 std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
258 Shader::AttributeBuffer vs_output;
259
260 unsigned int vertex_cache_pos = 0;
261 vertex_cache_ids.fill(-1);
262
263 auto* shader_engine = Shader::GetEngine();
264 Shader::UnitState shader_unit;
265
266 shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
267
268 g_state.geometry_pipeline.Reconfigure();
269 g_state.geometry_pipeline.Setup(shader_engine);
270 if (g_state.geometry_pipeline.NeedIndexInput())
271 ASSERT(is_indexed);
272
273 for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
274 // Indexed rendering doesn't use the start offset
275 unsigned int vertex = is_indexed
276 ? (index_u16 ? index_address_16[index] : index_address_8[index])
277 : (index + regs.pipeline.vertex_offset);
278
279 // -1 is a common special value used for primitive restart. Since it's unknown if
280 // the PICA supports it, and it would mess up the caching, guard against it here.
281 ASSERT(vertex != -1);
282
283 bool vertex_cache_hit = false;
284
285 if (is_indexed) {
286 if (g_state.geometry_pipeline.NeedIndexInput()) {
287 g_state.geometry_pipeline.SubmitIndex(vertex);
288 continue;
289 }
290
291 if (g_debug_context && Pica::g_debug_context->recorder) {
292 int size = index_u16 ? 2 : 1;
293 memory_accesses.AddAccess(base_address + index_info.offset + size * index, size);
294 }
295
296 for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
297 if (vertex == vertex_cache_ids[i]) {
298 vs_output = vertex_cache[i];
299 vertex_cache_hit = true;
300 break;
301 }
302 }
303 }
304
305 if (!vertex_cache_hit) {
306 // Initialize data for the current vertex
307 Shader::AttributeBuffer input;
308 loader.LoadVertex(base_address, index, vertex, input, memory_accesses);
309
310 // Send to vertex shader
311 if (g_debug_context)
312 g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
313 (void*)&input);
314 shader_unit.LoadInput(regs.vs, input);
315 shader_engine->Run(g_state.vs, shader_unit);
316 shader_unit.WriteOutput(regs.vs, vs_output);
317
318 if (is_indexed) {
319 vertex_cache[vertex_cache_pos] = vs_output;
320 vertex_cache_ids[vertex_cache_pos] = vertex;
321 vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
322 }
323 }
324
325 // Send to geometry pipeline
326 g_state.geometry_pipeline.SubmitVertex(vs_output);
327 }
328
329 for (auto& range : memory_accesses.ranges) {
330 g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
331 range.second, range.first);
332 }
333
334 VideoCore::g_renderer->Rasterizer()->DrawTriangles();
335 if (g_debug_context) {
336 g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
337 }
338}
339
340static void WritePicaReg(u32 id, u32 value, u32 mask) {
341 auto& regs = g_state.regs;
342
343 if (id >= Regs::NUM_REGS) {
344 LOG_ERROR(HW_GPU,
345 "Commandlist tried to write to invalid register 0x%03X (value: %08X, mask: %X)",
346 id, value, mask);
347 return;
348 }
349
350 // TODO: Figure out how register masking acts on e.g. vs.uniform_setup.set_value
351 u32 old_value = regs.reg_array[id];
352
353 const u32 write_mask = expand_bits_to_bytes[mask];
354
355 regs.reg_array[id] = (old_value & ~write_mask) | (value & write_mask);
356
357 // Double check for is_pica_tracing to avoid call overhead
358 if (DebugUtils::IsPicaTracing()) {
359 DebugUtils::OnPicaRegWrite({(u16)id, (u16)mask, regs.reg_array[id]});
360 }
361
362 if (g_debug_context)
363 g_debug_context->OnEvent(DebugContext::Event::PicaCommandLoaded,
364 reinterpret_cast<void*>(&id));
365
366 switch (id) {
367 // Trigger IRQ
368 case PICA_REG_INDEX(trigger_irq):
369 //Service::GSP::SignalInterrupt(Service::GSP::InterruptId::P3D);
370 break;
371
372 case PICA_REG_INDEX(pipeline.triangle_topology):
373 g_state.primitive_assembler.Reconfigure(regs.pipeline.triangle_topology);
374 break;
375
376 case PICA_REG_INDEX(pipeline.restart_primitive):
377 g_state.primitive_assembler.Reset();
378 break;
379
380 case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.index):
381 g_state.immediate.current_attribute = 0;
382 g_state.immediate.reset_geometry_pipeline = true;
383 default_attr_counter = 0;
384 break;
385
386 // Load default vertex input attributes
387 case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[0], 0x233):
388 case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[1], 0x234):
389 case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235):
390 LoadDefaultVertexAttributes(value);
391 break;
392
393 case PICA_REG_INDEX(pipeline.gpu_mode):
394 // This register likely just enables vertex processing and doesn't need any special handling
395 break;
396
397 case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[0], 0x23c):
398 case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[1], 0x23d): {
399 unsigned index =
400 static_cast<unsigned>(id - PICA_REG_INDEX(pipeline.command_buffer.trigger[0]));
401 u32* head_ptr = (u32*)Memory::GetPhysicalPointer(
402 regs.pipeline.command_buffer.GetPhysicalAddress(index));
403 g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = head_ptr;
404 g_state.cmd_list.length = regs.pipeline.command_buffer.GetSize(index) / sizeof(u32);
405 break;
406 }
407
408 // It seems like these trigger vertex rendering
409 case PICA_REG_INDEX(pipeline.trigger_draw):
410 case PICA_REG_INDEX(pipeline.trigger_draw_indexed):
411 Draw(id);
412 break;
413
414 case PICA_REG_INDEX(gs.bool_uniforms):
415 WriteUniformBoolReg(g_state.gs, g_state.regs.gs.bool_uniforms.Value());
416 break;
417
418 case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[0], 0x281):
419 case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[1], 0x282):
420 case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[2], 0x283):
421 case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[3], 0x284): {
422 unsigned index = (id - PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[0], 0x281));
423 auto values = regs.gs.int_uniforms[index];
424 WriteUniformIntReg(g_state.gs, index,
425 Math::Vec4<u8>(values.x, values.y, values.z, values.w));
426 break;
427 }
428
429 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[0], 0x291):
430 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[1], 0x292):
431 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[2], 0x293):
432 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[3], 0x294):
433 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[4], 0x295):
434 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[5], 0x296):
435 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[6], 0x297):
436 case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[7], 0x298): {
437 WriteUniformFloatReg(g_state.regs.gs, g_state.gs, gs_float_regs_counter,
438 gs_uniform_write_buffer, value);
439 break;
440 }
441
442 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[0], 0x29c):
443 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[1], 0x29d):
444 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[2], 0x29e):
445 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[3], 0x29f):
446 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[4], 0x2a0):
447 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[5], 0x2a1):
448 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[6], 0x2a2):
449 case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[7], 0x2a3): {
450 u32& offset = g_state.regs.gs.program.offset;
451 if (offset >= 4096) {
452 LOG_ERROR(HW_GPU, "Invalid GS program offset %u", offset);
453 } else {
454 g_state.gs.program_code[offset] = value;
455 offset++;
456 }
457 break;
458 }
459
460 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[0], 0x2a6):
461 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[1], 0x2a7):
462 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[2], 0x2a8):
463 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[3], 0x2a9):
464 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[4], 0x2aa):
465 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[5], 0x2ab):
466 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[6], 0x2ac):
467 case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[7], 0x2ad): {
468 u32& offset = g_state.regs.gs.swizzle_patterns.offset;
469 if (offset >= g_state.gs.swizzle_data.size()) {
470 LOG_ERROR(HW_GPU, "Invalid GS swizzle pattern offset %u", offset);
471 } else {
472 g_state.gs.swizzle_data[offset] = value;
473 offset++;
474 }
475 break;
476 }
477
478 case PICA_REG_INDEX(vs.bool_uniforms):
479 // TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
480 WriteUniformBoolReg(g_state.vs, g_state.regs.vs.bool_uniforms.Value());
481 break;
482
483 case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[0], 0x2b1):
484 case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[1], 0x2b2):
485 case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[2], 0x2b3):
486 case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[3], 0x2b4): {
487 // TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
488 unsigned index = (id - PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[0], 0x2b1));
489 auto values = regs.vs.int_uniforms[index];
490 WriteUniformIntReg(g_state.vs, index,
491 Math::Vec4<u8>(values.x, values.y, values.z, values.w));
492 break;
493 }
494
495 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[0], 0x2c1):
496 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[1], 0x2c2):
497 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[2], 0x2c3):
498 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[3], 0x2c4):
499 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[4], 0x2c5):
500 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[5], 0x2c6):
501 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[6], 0x2c7):
502 case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[7], 0x2c8): {
503 // TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
504 WriteUniformFloatReg(g_state.regs.vs, g_state.vs, vs_float_regs_counter,
505 vs_uniform_write_buffer, value);
506 break;
507 }
508
509 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[0], 0x2cc):
510 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[1], 0x2cd):
511 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[2], 0x2ce):
512 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[3], 0x2cf):
513 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[4], 0x2d0):
514 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[5], 0x2d1):
515 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[6], 0x2d2):
516 case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[7], 0x2d3): {
517 u32& offset = g_state.regs.vs.program.offset;
518 if (offset >= 512) {
519 LOG_ERROR(HW_GPU, "Invalid VS program offset %u", offset);
520 } else {
521 g_state.vs.program_code[offset] = value;
522 if (!g_state.regs.pipeline.gs_unit_exclusive_configuration) {
523 g_state.gs.program_code[offset] = value;
524 }
525 offset++;
526 }
527 break;
528 }
529
530 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[0], 0x2d6):
531 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[1], 0x2d7):
532 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[2], 0x2d8):
533 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[3], 0x2d9):
534 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[4], 0x2da):
535 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[5], 0x2db):
536 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[6], 0x2dc):
537 case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[7], 0x2dd): {
538 u32& offset = g_state.regs.vs.swizzle_patterns.offset;
539 if (offset >= g_state.vs.swizzle_data.size()) {
540 LOG_ERROR(HW_GPU, "Invalid VS swizzle pattern offset %u", offset);
541 } else {
542 g_state.vs.swizzle_data[offset] = value;
543 if (!g_state.regs.pipeline.gs_unit_exclusive_configuration) {
544 g_state.gs.swizzle_data[offset] = value;
545 }
546 offset++;
547 }
548 break;
549 }
550
551 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[0], 0x1c8):
552 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[1], 0x1c9):
553 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[2], 0x1ca):
554 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[3], 0x1cb):
555 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[4], 0x1cc):
556 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[5], 0x1cd):
557 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[6], 0x1ce):
558 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[7], 0x1cf): {
559 auto& lut_config = regs.lighting.lut_config;
560
561 ASSERT_MSG(lut_config.index < 256, "lut_config.index exceeded maximum value of 255!");
562
563 g_state.lighting.luts[lut_config.type][lut_config.index].raw = value;
564 lut_config.index.Assign(lut_config.index + 1);
565 break;
566 }
567
568 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[0], 0xe8):
569 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[1], 0xe9):
570 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[2], 0xea):
571 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[3], 0xeb):
572 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[4], 0xec):
573 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[5], 0xed):
574 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[6], 0xee):
575 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[7], 0xef): {
576 g_state.fog.lut[regs.texturing.fog_lut_offset % 128].raw = value;
577 regs.texturing.fog_lut_offset.Assign(regs.texturing.fog_lut_offset + 1);
578 break;
579 }
580
581 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[0], 0xb0):
582 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[1], 0xb1):
583 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[2], 0xb2):
584 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[3], 0xb3):
585 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[4], 0xb4):
586 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[5], 0xb5):
587 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[6], 0xb6):
588 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[7], 0xb7): {
589 auto& index = regs.texturing.proctex_lut_config.index;
590 auto& pt = g_state.proctex;
591
592 switch (regs.texturing.proctex_lut_config.ref_table.Value()) {
593 case TexturingRegs::ProcTexLutTable::Noise:
594 pt.noise_table[index % pt.noise_table.size()].raw = value;
595 break;
596 case TexturingRegs::ProcTexLutTable::ColorMap:
597 pt.color_map_table[index % pt.color_map_table.size()].raw = value;
598 break;
599 case TexturingRegs::ProcTexLutTable::AlphaMap:
600 pt.alpha_map_table[index % pt.alpha_map_table.size()].raw = value;
601 break;
602 case TexturingRegs::ProcTexLutTable::Color:
603 pt.color_table[index % pt.color_table.size()].raw = value;
604 break;
605 case TexturingRegs::ProcTexLutTable::ColorDiff:
606 pt.color_diff_table[index % pt.color_diff_table.size()].raw = value;
607 break;
608 }
609 index.Assign(index + 1);
610 break;
611 }
612 default:
613 break;
614 }
615
616 VideoCore::g_renderer->Rasterizer()->NotifyPicaRegisterChanged(id);
617
618 if (g_debug_context)
619 g_debug_context->OnEvent(DebugContext::Event::PicaCommandProcessed,
620 reinterpret_cast<void*>(&id));
621}
622
623void ProcessCommandList(const u32* list, u32 size) {
624 g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = list;
625 g_state.cmd_list.length = size / sizeof(u32);
626
627 while (g_state.cmd_list.current_ptr < g_state.cmd_list.head_ptr + g_state.cmd_list.length) {
628
629 // Align read pointer to 8 bytes
630 if ((g_state.cmd_list.head_ptr - g_state.cmd_list.current_ptr) % 2 != 0)
631 ++g_state.cmd_list.current_ptr;
632
633 u32 value = *g_state.cmd_list.current_ptr++;
634 const CommandHeader header = {*g_state.cmd_list.current_ptr++};
635
636 WritePicaReg(header.cmd_id, value, header.parameter_mask);
637
638 for (unsigned i = 0; i < header.extra_data_length; ++i) {
639 u32 cmd = header.cmd_id + (header.group_commands ? i + 1 : 0);
640 WritePicaReg(cmd, *g_state.cmd_list.current_ptr++, header.parameter_mask);
641 }
642 }
643}
644
645} // namespace CommandProcessor
646
647} // namespace Pica
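
For reference, the packed float24 format decoded by WriteUniformFloatReg and LoadDefaultVertexAttributes above can be sketched in isolation: three 32-bit words carry four 24-bit float values, with the highest component written first. The helper below is an illustration distilled from the removed code, not part of any file in the tree.

#include <array>
#include <cstdint>

// Unpack three 32-bit words into four raw 24-bit float values (x, y, z, w).
// Note the "backwards" destination order: w comes from the first word.
std::array<uint32_t, 4> UnpackFloat24x4(const uint32_t words[3]) {
    return {
        words[2] & 0xFFFFFF,                                     // x: low 24 bits of word 2
        ((words[1] & 0xFFFF) << 8) | ((words[2] >> 24) & 0xFF),  // y
        ((words[0] & 0xFF) << 16) | ((words[1] >> 16) & 0xFFFF), // z
        words[0] >> 8,                                           // w: high 24 bits of word 0
    };
}
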
diff --git a/src/video_core/command_processor.h b/src/video_core/command_processor.h
deleted file mode 100644
index 62ad2d3f3..000000000
--- a/src/video_core/command_processor.h
+++ /dev/null
@@ -1,41 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <type_traits>
8#include "common/bit_field.h"
9#include "common/common_types.h"
10
11namespace Pica {
12
13namespace CommandProcessor {
14
15union CommandHeader {
16 u32 hex;
17
18 BitField<0, 16, u32> cmd_id;
19
20 // parameter_mask:
21 // Mask applied to the input value to make it possible to update
22 // parts of a register without overwriting its other fields.
23 // first bit: 0x000000FF
24 // second bit: 0x0000FF00
25 // third bit: 0x00FF0000
26 // fourth bit: 0xFF000000
27 BitField<16, 4, u32> parameter_mask;
28
29 BitField<20, 11, u32> extra_data_length;
30
31 BitField<31, 1, u32> group_commands;
32};
33static_assert(std::is_standard_layout<CommandHeader>::value == true,
34 "CommandHeader does not use standard layout");
35static_assert(sizeof(CommandHeader) == sizeof(u32), "CommandHeader has incorrect size!");
36
37void ProcessCommandList(const u32* list, u32 size);
38
39} // namespace
40
41} // namespace
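
To make the parameter_mask comment above concrete: the 4-bit mask selects which bytes of the 32-bit value are written, mirroring the expand_bits_to_bytes table and the masked write in the removed command_processor.cpp. The function names below are illustrative only.

#include <cstdint>

// Expand a 4-bit mask to a 4-byte mask, e.g. 0b0101 -> 0x00FF00FF.
uint32_t ExpandMaskToBytes(uint32_t mask4) {
    uint32_t expanded = 0;
    for (int i = 0; i < 4; ++i) {
        if (mask4 & (1u << i))
            expanded |= 0xFFu << (8 * i);
    }
    return expanded;
}

// Only the bytes selected by the mask are replaced; the rest of the register keeps its old value.
uint32_t MaskedRegisterWrite(uint32_t old_value, uint32_t value, uint32_t mask4) {
    const uint32_t write_mask = ExpandMaskToBytes(mask4);
    return (old_value & ~write_mask) | (value & write_mask);
}
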
diff --git a/src/video_core/debug_utils/debug_utils.cpp b/src/video_core/debug_utils/debug_utils.cpp
deleted file mode 100644
index 47dbc8cc8..000000000
--- a/src/video_core/debug_utils/debug_utils.cpp
+++ /dev/null
@@ -1,577 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <condition_variable>
7#include <cstdint>
8#include <cstring>
9#include <fstream>
10#include <map>
11#include <mutex>
12#include <stdexcept>
13#include <string>
14
15#ifdef HAVE_PNG
16#include <png.h>
17#include <setjmp.h>
18#endif
19
20#include <nihstro/bit_field.h>
21#include <nihstro/float24.h>
22#include <nihstro/shader_binary.h>
23#include "common/assert.h"
24#include "common/bit_field.h"
25#include "common/color.h"
26#include "common/common_types.h"
27#include "common/file_util.h"
28#include "common/logging/log.h"
29#include "common/math_util.h"
30#include "common/vector_math.h"
31#include "video_core/debug_utils/debug_utils.h"
32#include "video_core/pica_state.h"
33#include "video_core/pica_types.h"
34#include "video_core/rasterizer_interface.h"
35#include "video_core/regs_rasterizer.h"
36#include "video_core/regs_shader.h"
37#include "video_core/regs_texturing.h"
38#include "video_core/renderer_base.h"
39#include "video_core/shader/shader.h"
40#include "video_core/texture/texture_decode.h"
41#include "video_core/utils.h"
42#include "video_core/video_core.h"
43
44using nihstro::DVLBHeader;
45using nihstro::DVLEHeader;
46using nihstro::DVLPHeader;
47
48namespace Pica {
49
50void DebugContext::DoOnEvent(Event event, void* data) {
51 {
52 std::unique_lock<std::mutex> lock(breakpoint_mutex);
53
54 // Commit the rasterizer's caches so framebuffers, render targets, etc. will show on debug
55 // widgets
56 VideoCore::g_renderer->Rasterizer()->FlushAll();
57
58 // TODO: Should stop the CPU thread here once we multithread emulation.
59
60 active_breakpoint = event;
61 at_breakpoint = true;
62
63 // Tell all observers that we hit a breakpoint
64 for (auto& breakpoint_observer : breakpoint_observers) {
65 breakpoint_observer->OnPicaBreakPointHit(event, data);
66 }
67
68 // Wait until another thread tells us to Resume()
69 resume_from_breakpoint.wait(lock, [&] { return !at_breakpoint; });
70 }
71}
72
73void DebugContext::Resume() {
74 {
75 std::lock_guard<std::mutex> lock(breakpoint_mutex);
76
77 // Tell all observers that we are about to resume
78 for (auto& breakpoint_observer : breakpoint_observers) {
79 breakpoint_observer->OnPicaResume();
80 }
81
82 // Resume the waiting thread (i.e. OnEvent())
83 at_breakpoint = false;
84 }
85
86 resume_from_breakpoint.notify_one();
87}
88
89std::shared_ptr<DebugContext> g_debug_context; // TODO: Get rid of this global
90
91namespace DebugUtils {
92
93void DumpShader(const std::string& filename, const ShaderRegs& config,
94 const Shader::ShaderSetup& setup,
95 const RasterizerRegs::VSOutputAttributes* output_attributes) {
96 struct StuffToWrite {
97 const u8* pointer;
98 u32 size;
99 };
100 std::vector<StuffToWrite> writing_queue;
101 u32 write_offset = 0;
102
103 auto QueueForWriting = [&writing_queue, &write_offset](const u8* pointer, u32 size) {
104 writing_queue.push_back({pointer, size});
105 u32 old_write_offset = write_offset;
106 write_offset += size;
107 return old_write_offset;
108 };
109
110 // First off, try to translate Pica state (one enum for output attribute type and component)
111 // into shbin format (separate type and component mask).
112 union OutputRegisterInfo {
113 enum Type : u64 {
114 POSITION = 0,
115 QUATERNION = 1,
116 COLOR = 2,
117 TEXCOORD0 = 3,
118 TEXCOORD1 = 5,
119 TEXCOORD2 = 6,
120
121 VIEW = 8,
122 };
123
124 BitField<0, 64, u64> hex;
125
126 BitField<0, 16, Type> type;
127 BitField<16, 16, u64> id;
128 BitField<32, 4, u64> component_mask;
129 };
130
131 // This is put into a try-catch block to make sure we notice unknown configurations.
132 std::vector<OutputRegisterInfo> output_info_table;
133 for (unsigned i = 0; i < 7; ++i) {
134 using OutputAttributes = Pica::RasterizerRegs::VSOutputAttributes;
135
136 // TODO: It's still unclear how the attribute components map to the register!
137 // Once we know that, this code probably will not make much sense anymore.
138 std::map<OutputAttributes::Semantic, std::pair<OutputRegisterInfo::Type, u32>> map = {
139 {OutputAttributes::POSITION_X, {OutputRegisterInfo::POSITION, 1}},
140 {OutputAttributes::POSITION_Y, {OutputRegisterInfo::POSITION, 2}},
141 {OutputAttributes::POSITION_Z, {OutputRegisterInfo::POSITION, 4}},
142 {OutputAttributes::POSITION_W, {OutputRegisterInfo::POSITION, 8}},
143 {OutputAttributes::QUATERNION_X, {OutputRegisterInfo::QUATERNION, 1}},
144 {OutputAttributes::QUATERNION_Y, {OutputRegisterInfo::QUATERNION, 2}},
145 {OutputAttributes::QUATERNION_Z, {OutputRegisterInfo::QUATERNION, 4}},
146 {OutputAttributes::QUATERNION_W, {OutputRegisterInfo::QUATERNION, 8}},
147 {OutputAttributes::COLOR_R, {OutputRegisterInfo::COLOR, 1}},
148 {OutputAttributes::COLOR_G, {OutputRegisterInfo::COLOR, 2}},
149 {OutputAttributes::COLOR_B, {OutputRegisterInfo::COLOR, 4}},
150 {OutputAttributes::COLOR_A, {OutputRegisterInfo::COLOR, 8}},
151 {OutputAttributes::TEXCOORD0_U, {OutputRegisterInfo::TEXCOORD0, 1}},
152 {OutputAttributes::TEXCOORD0_V, {OutputRegisterInfo::TEXCOORD0, 2}},
153 {OutputAttributes::TEXCOORD1_U, {OutputRegisterInfo::TEXCOORD1, 1}},
154 {OutputAttributes::TEXCOORD1_V, {OutputRegisterInfo::TEXCOORD1, 2}},
155 {OutputAttributes::TEXCOORD2_U, {OutputRegisterInfo::TEXCOORD2, 1}},
156 {OutputAttributes::TEXCOORD2_V, {OutputRegisterInfo::TEXCOORD2, 2}},
157 {OutputAttributes::VIEW_X, {OutputRegisterInfo::VIEW, 1}},
158 {OutputAttributes::VIEW_Y, {OutputRegisterInfo::VIEW, 2}},
159 {OutputAttributes::VIEW_Z, {OutputRegisterInfo::VIEW, 4}},
160 };
161
162 for (const auto& semantic : std::vector<OutputAttributes::Semantic>{
163 output_attributes[i].map_x, output_attributes[i].map_y, output_attributes[i].map_z,
164 output_attributes[i].map_w}) {
165 if (semantic == OutputAttributes::INVALID)
166 continue;
167
168 try {
169 OutputRegisterInfo::Type type = map.at(semantic).first;
170 u32 component_mask = map.at(semantic).second;
171
172 auto it = std::find_if(output_info_table.begin(), output_info_table.end(),
173 [&i, &type](const OutputRegisterInfo& info) {
174 return info.id == i && info.type == type;
175 });
176
177 if (it == output_info_table.end()) {
178 output_info_table.emplace_back();
179 output_info_table.back().type.Assign(type);
180 output_info_table.back().component_mask.Assign(component_mask);
181 output_info_table.back().id.Assign(i);
182 } else {
183 it->component_mask.Assign(it->component_mask | component_mask);
184 }
185 } catch (const std::out_of_range&) {
186 DEBUG_ASSERT_MSG(false, "Unknown output attribute mapping");
187 LOG_ERROR(HW_GPU, "Unknown output attribute mapping: %03x, %03x, %03x, %03x",
188 (int)output_attributes[i].map_x.Value(),
189 (int)output_attributes[i].map_y.Value(),
190 (int)output_attributes[i].map_z.Value(),
191 (int)output_attributes[i].map_w.Value());
192 }
193 }
194 }
195
196 struct {
197 DVLBHeader header;
198 u32 dvle_offset;
199 } dvlb{{DVLBHeader::MAGIC_WORD, 1}}; // 1 DVLE
200
201 DVLPHeader dvlp{DVLPHeader::MAGIC_WORD};
202 DVLEHeader dvle{DVLEHeader::MAGIC_WORD};
203
204 QueueForWriting(reinterpret_cast<const u8*>(&dvlb), sizeof(dvlb));
205 u32 dvlp_offset = QueueForWriting(reinterpret_cast<const u8*>(&dvlp), sizeof(dvlp));
206 dvlb.dvle_offset = QueueForWriting(reinterpret_cast<const u8*>(&dvle), sizeof(dvle));
207
208 // TODO: Reduce the amount of binary code written to relevant portions
209 dvlp.binary_offset = write_offset - dvlp_offset;
210 dvlp.binary_size_words = static_cast<uint32_t>(setup.program_code.size());
211 QueueForWriting(reinterpret_cast<const u8*>(setup.program_code.data()),
212 static_cast<u32>(setup.program_code.size()) * sizeof(u32));
213
214 dvlp.swizzle_info_offset = write_offset - dvlp_offset;
215 dvlp.swizzle_info_num_entries = static_cast<uint32_t>(setup.swizzle_data.size());
216 u32 dummy = 0;
217 for (unsigned int i = 0; i < setup.swizzle_data.size(); ++i) {
218 QueueForWriting(reinterpret_cast<const u8*>(&setup.swizzle_data[i]),
219 sizeof(setup.swizzle_data[i]));
220 QueueForWriting(reinterpret_cast<const u8*>(&dummy), sizeof(dummy));
221 }
222
223 dvle.main_offset_words = config.main_offset;
224 dvle.output_register_table_offset = write_offset - dvlb.dvle_offset;
225 dvle.output_register_table_size = static_cast<u32>(output_info_table.size());
226 QueueForWriting(reinterpret_cast<const u8*>(output_info_table.data()),
227 static_cast<u32>(output_info_table.size() * sizeof(OutputRegisterInfo)));
228
229 // TODO: Create a label table for "main"
230
231 std::vector<nihstro::ConstantInfo> constant_table;
232 for (unsigned i = 0; i < setup.uniforms.b.size(); ++i) {
233 nihstro::ConstantInfo constant;
234 memset(&constant, 0, sizeof(constant));
235 constant.type = nihstro::ConstantInfo::Bool;
236 constant.regid = i;
237 constant.b = setup.uniforms.b[i];
238 constant_table.emplace_back(constant);
239 }
240 for (unsigned i = 0; i < setup.uniforms.i.size(); ++i) {
241 nihstro::ConstantInfo constant;
242 memset(&constant, 0, sizeof(constant));
243 constant.type = nihstro::ConstantInfo::Int;
244 constant.regid = i;
245 constant.i.x = setup.uniforms.i[i].x;
246 constant.i.y = setup.uniforms.i[i].y;
247 constant.i.z = setup.uniforms.i[i].z;
248 constant.i.w = setup.uniforms.i[i].w;
249 constant_table.emplace_back(constant);
250 }
251 for (unsigned i = 0; i < sizeof(setup.uniforms.f) / sizeof(setup.uniforms.f[0]); ++i) {
252 nihstro::ConstantInfo constant;
253 memset(&constant, 0, sizeof(constant));
254 constant.type = nihstro::ConstantInfo::Float;
255 constant.regid = i;
256 constant.f.x = nihstro::to_float24(setup.uniforms.f[i].x.ToFloat32());
257 constant.f.y = nihstro::to_float24(setup.uniforms.f[i].y.ToFloat32());
258 constant.f.z = nihstro::to_float24(setup.uniforms.f[i].z.ToFloat32());
259 constant.f.w = nihstro::to_float24(setup.uniforms.f[i].w.ToFloat32());
260
261 // Store constant if it's different from zero..
262 if (setup.uniforms.f[i].x.ToFloat32() != 0.0 || setup.uniforms.f[i].y.ToFloat32() != 0.0 ||
263 setup.uniforms.f[i].z.ToFloat32() != 0.0 || setup.uniforms.f[i].w.ToFloat32() != 0.0)
264 constant_table.emplace_back(constant);
265 }
266 dvle.constant_table_offset = write_offset - dvlb.dvle_offset;
267 dvle.constant_table_size = static_cast<uint32_t>(constant_table.size());
268 for (const auto& constant : constant_table) {
269 QueueForWriting(reinterpret_cast<const u8*>(&constant), sizeof(constant));
270 }
271
272 // Write data to file
273 std::ofstream file(filename, std::ios_base::out | std::ios_base::binary);
274
275 for (const auto& chunk : writing_queue) {
276 file.write(reinterpret_cast<const char*>(chunk.pointer), chunk.size);
277 }
278}
279
280static std::unique_ptr<PicaTrace> pica_trace;
281static std::mutex pica_trace_mutex;
282bool g_is_pica_tracing = false;
283
284void StartPicaTracing() {
285 if (g_is_pica_tracing) {
286 LOG_WARNING(HW_GPU, "StartPicaTracing called even though tracing already running!");
287 return;
288 }
289
290 std::lock_guard<std::mutex> lock(pica_trace_mutex);
291 pica_trace = std::make_unique<PicaTrace>();
292
293 g_is_pica_tracing = true;
294}
295
296void OnPicaRegWrite(PicaTrace::Write write) {
297 std::lock_guard<std::mutex> lock(pica_trace_mutex);
298
299 if (!g_is_pica_tracing)
300 return;
301
302 pica_trace->writes.push_back(write);
303}
304
305std::unique_ptr<PicaTrace> FinishPicaTracing() {
306 if (!g_is_pica_tracing) {
307 LOG_WARNING(HW_GPU, "FinishPicaTracing called even though tracing isn't running!");
308 return {};
309 }
310
311 // signalize that no further tracing should be performed
312 g_is_pica_tracing = false;
313
314 // Wait until running tracing is finished
315 std::lock_guard<std::mutex> lock(pica_trace_mutex);
316 std::unique_ptr<PicaTrace> ret(std::move(pica_trace));
317
318 return ret;
319}
320
321#ifdef HAVE_PNG
322// Adapter functions to libpng to write/flush to File::IOFile instances.
323static void WriteIOFile(png_structp png_ptr, png_bytep data, png_size_t length) {
324 auto* fp = static_cast<FileUtil::IOFile*>(png_get_io_ptr(png_ptr));
325 if (!fp->WriteBytes(data, length))
326 png_error(png_ptr, "Failed to write to output PNG file.");
327}
328
329static void FlushIOFile(png_structp png_ptr) {
330 auto* fp = static_cast<FileUtil::IOFile*>(png_get_io_ptr(png_ptr));
331 if (!fp->Flush())
332 png_error(png_ptr, "Failed to flush to output PNG file.");
333}
334#endif
335
336void DumpTexture(const TexturingRegs::TextureConfig& texture_config, u8* data) {
337#ifndef HAVE_PNG
338 return;
339#else
340 if (!data)
341 return;
342
343 // Write data to file
344 static int dump_index = 0;
345 std::string filename =
346 std::string("texture_dump") + std::to_string(++dump_index) + std::string(".png");
347 u32 row_stride = texture_config.width * 3;
348
349 u8* buf;
350
351 char title[] = "Citra texture dump";
352 char title_key[] = "Title";
353 png_structp png_ptr = nullptr;
354 png_infop info_ptr = nullptr;
355
356 // Open file for writing (binary mode)
357 FileUtil::IOFile fp(filename, "wb");
358
359 // Initialize write structure
360 png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
361 if (png_ptr == nullptr) {
362 LOG_ERROR(Debug_GPU, "Could not allocate write struct");
363 goto finalise;
364 }
365
366 // Initialize info structure
367 info_ptr = png_create_info_struct(png_ptr);
368 if (info_ptr == nullptr) {
369 LOG_ERROR(Debug_GPU, "Could not allocate info struct");
370 goto finalise;
371 }
372
373 // Setup Exception handling
374 if (setjmp(png_jmpbuf(png_ptr))) {
375 LOG_ERROR(Debug_GPU, "Error during png creation");
376 goto finalise;
377 }
378
379 png_set_write_fn(png_ptr, static_cast<void*>(&fp), WriteIOFile, FlushIOFile);
380
381 // Write header (8 bit color depth)
382 png_set_IHDR(png_ptr, info_ptr, texture_config.width, texture_config.height, 8,
383 PNG_COLOR_TYPE_RGB /*_ALPHA*/, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE,
384 PNG_FILTER_TYPE_BASE);
385
386 png_text title_text;
387 title_text.compression = PNG_TEXT_COMPRESSION_NONE;
388 title_text.key = title_key;
389 title_text.text = title;
390 png_set_text(png_ptr, info_ptr, &title_text, 1);
391
392 png_write_info(png_ptr, info_ptr);
393
394 buf = new u8[row_stride * texture_config.height];
395 for (unsigned y = 0; y < texture_config.height; ++y) {
396 for (unsigned x = 0; x < texture_config.width; ++x) {
397 Pica::Texture::TextureInfo info;
398 info.width = texture_config.width;
399 info.height = texture_config.height;
400 info.stride = row_stride;
401 info.format = g_state.regs.texturing.texture0_format;
402 Math::Vec4<u8> texture_color = Pica::Texture::LookupTexture(data, x, y, info);
403 buf[3 * x + y * row_stride] = texture_color.r();
404 buf[3 * x + y * row_stride + 1] = texture_color.g();
405 buf[3 * x + y * row_stride + 2] = texture_color.b();
406 }
407 }
408
409 // Write image data
410 for (unsigned y = 0; y < texture_config.height; ++y) {
411 u8* row_ptr = (u8*)buf + y * row_stride;
412 png_write_row(png_ptr, row_ptr);
413 }
414
415 delete[] buf;
416
417 // End write
418 png_write_end(png_ptr, nullptr);
419
420finalise:
421 if (info_ptr != nullptr)
422 png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
423 if (png_ptr != nullptr)
424 png_destroy_write_struct(&png_ptr, (png_infopp) nullptr);
425#endif
426}
427
428static std::string ReplacePattern(const std::string& input, const std::string& pattern,
429 const std::string& replacement) {
430 size_t start = input.find(pattern);
431 if (start == std::string::npos)
432 return input;
433
434 std::string ret = input;
435 ret.replace(start, pattern.length(), replacement);
436 return ret;
437}
438
439static std::string GetTevStageConfigSourceString(
440 const TexturingRegs::TevStageConfig::Source& source) {
441
442 using Source = TexturingRegs::TevStageConfig::Source;
443 static const std::map<Source, std::string> source_map = {
444 {Source::PrimaryColor, "PrimaryColor"},
445 {Source::PrimaryFragmentColor, "PrimaryFragmentColor"},
446 {Source::SecondaryFragmentColor, "SecondaryFragmentColor"},
447 {Source::Texture0, "Texture0"},
448 {Source::Texture1, "Texture1"},
449 {Source::Texture2, "Texture2"},
450 {Source::Texture3, "Texture3"},
451 {Source::PreviousBuffer, "PreviousBuffer"},
452 {Source::Constant, "Constant"},
453 {Source::Previous, "Previous"},
454 };
455
456 const auto src_it = source_map.find(source);
457 if (src_it == source_map.end())
458 return "Unknown";
459
460 return src_it->second;
461}
462
463static std::string GetTevStageConfigColorSourceString(
464 const TexturingRegs::TevStageConfig::Source& source,
465 const TexturingRegs::TevStageConfig::ColorModifier modifier) {
466
467 using ColorModifier = TexturingRegs::TevStageConfig::ColorModifier;
468 static const std::map<ColorModifier, std::string> color_modifier_map = {
469 {ColorModifier::SourceColor, "%source.rgb"},
470 {ColorModifier::OneMinusSourceColor, "(1.0 - %source.rgb)"},
471 {ColorModifier::SourceAlpha, "%source.aaa"},
472 {ColorModifier::OneMinusSourceAlpha, "(1.0 - %source.aaa)"},
473 {ColorModifier::SourceRed, "%source.rrr"},
474 {ColorModifier::OneMinusSourceRed, "(1.0 - %source.rrr)"},
475 {ColorModifier::SourceGreen, "%source.ggg"},
476 {ColorModifier::OneMinusSourceGreen, "(1.0 - %source.ggg)"},
477 {ColorModifier::SourceBlue, "%source.bbb"},
478 {ColorModifier::OneMinusSourceBlue, "(1.0 - %source.bbb)"},
479 };
480
481 auto src_str = GetTevStageConfigSourceString(source);
482 auto modifier_it = color_modifier_map.find(modifier);
483 std::string modifier_str = "%source.????";
484 if (modifier_it != color_modifier_map.end())
485 modifier_str = modifier_it->second;
486
487 return ReplacePattern(modifier_str, "%source", src_str);
488}
489
490static std::string GetTevStageConfigAlphaSourceString(
491 const TexturingRegs::TevStageConfig::Source& source,
492 const TexturingRegs::TevStageConfig::AlphaModifier modifier) {
493
494 using AlphaModifier = TexturingRegs::TevStageConfig::AlphaModifier;
495 static const std::map<AlphaModifier, std::string> alpha_modifier_map = {
496 {AlphaModifier::SourceAlpha, "%source.a"},
497 {AlphaModifier::OneMinusSourceAlpha, "(1.0 - %source.a)"},
498 {AlphaModifier::SourceRed, "%source.r"},
499 {AlphaModifier::OneMinusSourceRed, "(1.0 - %source.r)"},
500 {AlphaModifier::SourceGreen, "%source.g"},
501 {AlphaModifier::OneMinusSourceGreen, "(1.0 - %source.g)"},
502 {AlphaModifier::SourceBlue, "%source.b"},
503 {AlphaModifier::OneMinusSourceBlue, "(1.0 - %source.b)"},
504 };
505
506 auto src_str = GetTevStageConfigSourceString(source);
507 auto modifier_it = alpha_modifier_map.find(modifier);
508 std::string modifier_str = "%source.????";
509 if (modifier_it != alpha_modifier_map.end())
510 modifier_str = modifier_it->second;
511
512 return ReplacePattern(modifier_str, "%source", src_str);
513}
514
515static std::string GetTevStageConfigOperationString(
516 const TexturingRegs::TevStageConfig::Operation& operation) {
517
518 using Operation = TexturingRegs::TevStageConfig::Operation;
519 static const std::map<Operation, std::string> combiner_map = {
520 {Operation::Replace, "%source1"},
521 {Operation::Modulate, "(%source1 * %source2)"},
522 {Operation::Add, "(%source1 + %source2)"},
523 {Operation::AddSigned, "(%source1 + %source2) - 0.5"},
524 {Operation::Lerp, "lerp(%source1, %source2, %source3)"},
525 {Operation::Subtract, "(%source1 - %source2)"},
526 {Operation::Dot3_RGB, "dot(%source1, %source2)"},
527 {Operation::MultiplyThenAdd, "((%source1 * %source2) + %source3)"},
528 {Operation::AddThenMultiply, "((%source1 + %source2) * %source3)"},
529 };
530
531 const auto op_it = combiner_map.find(operation);
532 if (op_it == combiner_map.end())
533 return "Unknown op (%source1, %source2, %source3)";
534
535 return op_it->second;
536}
537
538std::string GetTevStageConfigColorCombinerString(const TexturingRegs::TevStageConfig& tev_stage) {
539 auto op_str = GetTevStageConfigOperationString(tev_stage.color_op);
540 op_str = ReplacePattern(
541 op_str, "%source1",
542 GetTevStageConfigColorSourceString(tev_stage.color_source1, tev_stage.color_modifier1));
543 op_str = ReplacePattern(
544 op_str, "%source2",
545 GetTevStageConfigColorSourceString(tev_stage.color_source2, tev_stage.color_modifier2));
546 return ReplacePattern(
547 op_str, "%source3",
548 GetTevStageConfigColorSourceString(tev_stage.color_source3, tev_stage.color_modifier3));
549}
550
551std::string GetTevStageConfigAlphaCombinerString(const TexturingRegs::TevStageConfig& tev_stage) {
552 auto op_str = GetTevStageConfigOperationString(tev_stage.alpha_op);
553 op_str = ReplacePattern(
554 op_str, "%source1",
555 GetTevStageConfigAlphaSourceString(tev_stage.alpha_source1, tev_stage.alpha_modifier1));
556 op_str = ReplacePattern(
557 op_str, "%source2",
558 GetTevStageConfigAlphaSourceString(tev_stage.alpha_source2, tev_stage.alpha_modifier2));
559 return ReplacePattern(
560 op_str, "%source3",
561 GetTevStageConfigAlphaSourceString(tev_stage.alpha_source3, tev_stage.alpha_modifier3));
562}
563
564void DumpTevStageConfig(const std::array<TexturingRegs::TevStageConfig, 6>& stages) {
565 std::string stage_info = "Tev setup:\n";
566 for (size_t index = 0; index < stages.size(); ++index) {
567 const auto& tev_stage = stages[index];
568 stage_info += "Stage " + std::to_string(index) + ": " +
569 GetTevStageConfigColorCombinerString(tev_stage) + " " +
570 GetTevStageConfigAlphaCombinerString(tev_stage) + "\n";
571 }
572 LOG_TRACE(HW_GPU, "%s", stage_info.c_str());
573}
574
575} // namespace
576
577} // namespace
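
For orientation, the combiner-string helpers above render each TEV stage as a readable expression. For example, a stage whose color_op is Modulate with color_source1 = Texture0 (SourceColor) and color_source2 = PrimaryColor (SourceColor) is printed as "(Texture0.rgb * PrimaryColor.rgb)". This example is constructed from the lookup tables in the removed file rather than taken from an actual trace.
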
diff --git a/src/video_core/debug_utils/debug_utils.h b/src/video_core/debug_utils/debug_utils.h
deleted file mode 100644
index c1f29c527..000000000
--- a/src/video_core/debug_utils/debug_utils.h
+++ /dev/null
@@ -1,251 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <algorithm>
8#include <array>
9#include <condition_variable>
10#include <iterator>
11#include <list>
12#include <map>
13#include <memory>
14#include <mutex>
15#include <string>
16#include <utility>
17#include <vector>
18#include "common/common_types.h"
19#include "common/vector_math.h"
20#include "video_core/regs_rasterizer.h"
21#include "video_core/regs_shader.h"
22#include "video_core/regs_texturing.h"
23
24namespace CiTrace {
25class Recorder;
26}
27
28namespace Pica {
29
30namespace Shader {
31struct ShaderSetup;
32}
33
34class DebugContext {
35public:
36 enum class Event {
37 FirstEvent = 0,
38
39 PicaCommandLoaded = FirstEvent,
40 PicaCommandProcessed,
41 IncomingPrimitiveBatch,
42 FinishedPrimitiveBatch,
43 VertexShaderInvocation,
44 IncomingDisplayTransfer,
45 GSPCommandProcessed,
46 BufferSwapped,
47
48 NumEvents
49 };
50
51 /**
52 * Inherit from this class to be notified of events registered to some debug context.
53 * Most importantly this is used for our debugger GUI.
54 *
55 * To implement event handling, override the OnPicaBreakPointHit and OnPicaResume methods.
56 * @warning All BreakPointObservers need to be on the same thread to guarantee thread-safe state
57 * access
58 * @todo Evaluate an alternative interface, in which there is only one managing observer and
59 * multiple child observers running (by design) on the same thread.
60 */
61 class BreakPointObserver {
62 public:
63 /// Constructs the object such that it observes events of the given DebugContext.
64 BreakPointObserver(std::shared_ptr<DebugContext> debug_context)
65 : context_weak(debug_context) {
66 std::unique_lock<std::mutex> lock(debug_context->breakpoint_mutex);
67 debug_context->breakpoint_observers.push_back(this);
68 }
69
70 virtual ~BreakPointObserver() {
71 auto context = context_weak.lock();
72 if (context) {
73 std::unique_lock<std::mutex> lock(context->breakpoint_mutex);
74 context->breakpoint_observers.remove(this);
75
76 // If we are the last observer to be destroyed, tell the debugger context that
77 // it is free to continue. In particular, this is required for a proper Citra
78 // shutdown, when the emulation thread is waiting at a breakpoint.
79 if (context->breakpoint_observers.empty())
80 context->Resume();
81 }
82 }
83
84 /**
85 * Action to perform when a breakpoint was reached.
86 * @param event Type of event which triggered the breakpoint
87 * @param data Optional data pointer (if unused, this is a nullptr)
88 * @note This function will perform nothing unless it is overridden in the child class.
89 */
90 virtual void OnPicaBreakPointHit(Event event, void* data) {}
91
92 /**
93 * Action to perform when emulation is resumed from a breakpoint.
94 * @note This function will perform nothing unless it is overridden in the child class.
95 */
96 virtual void OnPicaResume() {}
97
98 protected:
99 /**
100 * Weak context pointer. This need not be valid, so when requesting a shared_ptr via
101 * context_weak.lock(), always compare the result against nullptr.
102 */
103 std::weak_ptr<DebugContext> context_weak;
104 };
105
106 /**
107 * Simple structure defining a breakpoint state
108 */
109 struct BreakPoint {
110 bool enabled = false;
111 };
112
113 /**
114 * Static constructor used to create a shared_ptr of a DebugContext.
115 */
116 static std::shared_ptr<DebugContext> Construct() {
117 return std::shared_ptr<DebugContext>(new DebugContext);
118 }
119
120 /**
121 * Used by the emulation core when a given event has happened. If a breakpoint has been set
122 * for this event, OnEvent calls the event handlers of the registered breakpoint observers.
123 * The current thread then is halted until Resume() is called from another thread (or until
124 * emulation is stopped).
125 * @param event Event which has happened
126 * @param data Optional data pointer (pass nullptr if unused). Needs to remain valid until
127 * Resume() is called.
128 */
129 void OnEvent(Event event, void* data) {
130 // This check is left in the header to allow the compiler to inline it.
131 if (!breakpoints[(int)event].enabled)
132 return;
133 // For the rest of event handling, call a separate function.
134 DoOnEvent(event, data);
135 }
136
137 void DoOnEvent(Event event, void* data);
138
139 /**
140 * Resume from the current breakpoint.
141 * @warning Calling this from the same thread that OnEvent was called in will cause a deadlock.
142 * Calling from any other thread is safe.
143 */
144 void Resume();
145
146 /**
147 * Delete all set breakpoints and resume emulation.
148 */
149 void ClearBreakpoints() {
150 for (auto& bp : breakpoints) {
151 bp.enabled = false;
152 }
153 Resume();
154 }
155
156 // TODO: Evaluate if access to these members should be hidden behind a public interface.
157 std::array<BreakPoint, (int)Event::NumEvents> breakpoints;
158 Event active_breakpoint;
159 bool at_breakpoint = false;
160
161 std::shared_ptr<CiTrace::Recorder> recorder = nullptr;
162
163private:
164 /**
165 * Private default constructor to make sure people always construct this through Construct()
166 * instead.
167 */
168 DebugContext() = default;
169
170 /// Mutex protecting current breakpoint state and the observer list.
171 std::mutex breakpoint_mutex;
172
173 /// Used by OnEvent to wait for resumption.
174 std::condition_variable resume_from_breakpoint;
175
176 /// List of registered observers
177 std::list<BreakPointObserver*> breakpoint_observers;
178};
179
180extern std::shared_ptr<DebugContext> g_debug_context; // TODO: Get rid of this global
181
182namespace DebugUtils {
183
184#define PICA_DUMP_TEXTURES 0
185#define PICA_LOG_TEV 0
186
187void DumpShader(const std::string& filename, const ShaderRegs& config,
188 const Shader::ShaderSetup& setup,
189 const RasterizerRegs::VSOutputAttributes* output_attributes);
190
191// Utility class to log Pica commands.
192struct PicaTrace {
193 struct Write {
194 u16 cmd_id;
195 u16 mask;
196 u32 value;
197 };
198 std::vector<Write> writes;
199};
200
201extern bool g_is_pica_tracing;
202
203void StartPicaTracing();
204inline bool IsPicaTracing() {
205 return g_is_pica_tracing;
206}
207void OnPicaRegWrite(PicaTrace::Write write);
208std::unique_ptr<PicaTrace> FinishPicaTracing();
209
210void DumpTexture(const TexturingRegs::TextureConfig& texture_config, u8* data);
211
212std::string GetTevStageConfigColorCombinerString(const TexturingRegs::TevStageConfig& tev_stage);
213std::string GetTevStageConfigAlphaCombinerString(const TexturingRegs::TevStageConfig& tev_stage);
214
215/// Dumps the Tev stage config to log at trace level
216void DumpTevStageConfig(const std::array<TexturingRegs::TevStageConfig, 6>& stages);
217
218/**
219 * Used in the vertex loader to merge access records. TODO: Investigate if actually useful.
220 */
221class MemoryAccessTracker {
222 /// Combine overlapping and close ranges
223 void SimplifyRanges() {
224 for (auto it = ranges.begin(); it != ranges.end(); ++it) {
225 // NOTE: We add 32 to the range end address to make sure "close" ranges are combined,
226 // too
227 auto it2 = std::next(it);
228 while (it2 != ranges.end() && it->first + it->second + 32 >= it2->first) {
229 it->second = std::max(it->second, it2->first + it2->second - it->first);
230 it2 = ranges.erase(it2);
231 }
232 }
233 }
234
235public:
236 /// Record a particular memory access in the list
237 void AddAccess(u32 paddr, u32 size) {
238 // Create new range or extend existing one
239 ranges[paddr] = std::max(ranges[paddr], size);
240
241 // Simplify ranges...
242 SimplifyRanges();
243 }
244
245 /// Map of accessed ranges (mapping start address to range size)
246 std::map<u32, u32> ranges;
247};
248
249} // namespace
250
251} // namespace
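MemoryAccessTracker::AddAccess above records each access as a start/size pair, and SimplifyRanges merges ranges that overlap or lie within 32 bytes of each other. A standalone sketch of the same merge over a std::map, with a small usage example (the 32-byte slack mirrors the constant in SimplifyRanges):

// Standalone sketch of the range merging performed by MemoryAccessTracker.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <map>

static void AddAccess(std::map<uint32_t, uint32_t>& ranges, uint32_t addr, uint32_t size) {
    // Create a new range or extend an existing one, then merge neighbours.
    ranges[addr] = std::max(ranges[addr], size);
    for (auto it = ranges.begin(); it != ranges.end(); ++it) {
        auto next = std::next(it);
        while (next != ranges.end() && it->first + it->second + 32 >= next->first) {
            it->second = std::max(it->second, next->first + next->second - it->first);
            next = ranges.erase(next);
        }
    }
}

int main() {
    std::map<uint32_t, uint32_t> ranges;
    AddAccess(ranges, 0x1000, 0x10);
    AddAccess(ranges, 0x1020, 0x10); // within 32 bytes of the previous range: merged into one
    AddAccess(ranges, 0x2000, 0x40); // far away: kept as a separate range
    for (const auto& range : ranges)
        std::printf("0x%08X + 0x%X\n", range.first, range.second); // prints two ranges
}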
diff --git a/src/video_core/geometry_pipeline.cpp b/src/video_core/geometry_pipeline.cpp
deleted file mode 100644
index 98ff2ccd3..000000000
--- a/src/video_core/geometry_pipeline.cpp
+++ /dev/null
@@ -1,274 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "video_core/geometry_pipeline.h"
6#include "video_core/pica_state.h"
7#include "video_core/regs.h"
8#include "video_core/renderer_base.h"
9#include "video_core/video_core.h"
10
11namespace Pica {
12
13/// An attribute buffering interface for different pipeline modes
14class GeometryPipelineBackend {
15public:
16 virtual ~GeometryPipelineBackend() = default;
17
18 /// Checks if there is no incomplete data transfer
19 virtual bool IsEmpty() const = 0;
20
21 /// Checks if the pipeline needs a direct input from index buffer
22 virtual bool NeedIndexInput() const = 0;
23
24 /// Submits an index from index buffer
25 virtual void SubmitIndex(unsigned int val) = 0;
26
27 /**
28 * Submits vertex attributes
29 * @param input attributes of a vertex output from vertex shader
30 * @return if the buffer is full and the geometry shader should be invoked
31 */
32 virtual bool SubmitVertex(const Shader::AttributeBuffer& input) = 0;
33};
34
35// In Point mode, vertex attributes are sent to the input registers of the geometry shader unit.
36// The sizes of the vertex shader output and the geometry shader input are constant. The geometry
37// shader is invoked once the input buffer has been filled by vertex shader outputs. For example,
38// if a geometry shader takes 6 inputs and the vertex shader outputs 2 attributes, it takes 3
39// vertices for one geometry shader invocation.
40// TODO: what happens when the input size is not divisible by the output size?
41class GeometryPipeline_Point : public GeometryPipelineBackend {
42public:
43 GeometryPipeline_Point(const Regs& regs, Shader::GSUnitState& unit) : regs(regs), unit(unit) {
44 ASSERT(regs.pipeline.variable_primitive == 0);
45 ASSERT(regs.gs.input_to_uniform == 0);
46 vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
47 size_t gs_input_num = regs.gs.max_input_attribute_index + 1;
48 ASSERT(gs_input_num % vs_output_num == 0);
49 buffer_cur = attribute_buffer.attr;
50 buffer_end = attribute_buffer.attr + gs_input_num;
51 }
52
53 bool IsEmpty() const override {
54 return buffer_cur == attribute_buffer.attr;
55 }
56
57 bool NeedIndexInput() const override {
58 return false;
59 }
60
61 void SubmitIndex(unsigned int val) override {
62 UNREACHABLE();
63 }
64
65 bool SubmitVertex(const Shader::AttributeBuffer& input) override {
66 buffer_cur = std::copy(input.attr, input.attr + vs_output_num, buffer_cur);
67 if (buffer_cur == buffer_end) {
68 buffer_cur = attribute_buffer.attr;
69 unit.LoadInput(regs.gs, attribute_buffer);
70 return true;
71 }
72 return false;
73 }
74
75private:
76 const Regs& regs;
77 Shader::GSUnitState& unit;
78 Shader::AttributeBuffer attribute_buffer;
79 Math::Vec4<float24>* buffer_cur;
80 Math::Vec4<float24>* buffer_end;
81 unsigned int vs_output_num;
82};
83
84// In VariablePrimitive mode, vertex attributes are buffered into the uniform registers of the
85// geometry shader unit. The number of vertices is variable and is specified by the first index
86// value in the batch. This mode is typically used for subdivision.
87class GeometryPipeline_VariablePrimitive : public GeometryPipelineBackend {
88public:
89 GeometryPipeline_VariablePrimitive(const Regs& regs, Shader::ShaderSetup& setup)
90 : regs(regs), setup(setup) {
91 ASSERT(regs.pipeline.variable_primitive == 1);
92 ASSERT(regs.gs.input_to_uniform == 1);
93 vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
94 }
95
96 bool IsEmpty() const override {
97 return need_index;
98 }
99
100 bool NeedIndexInput() const override {
101 return need_index;
102 }
103
104 void SubmitIndex(unsigned int val) override {
105 DEBUG_ASSERT(need_index);
106
107        // The number of input vertices is written to the first uniform register
108 float24 vertex_num = float24::FromFloat32(static_cast<float>(val));
109 setup.uniforms.f[0] = Math::MakeVec(vertex_num, vertex_num, vertex_num, vertex_num);
110
111        // The uniform registers from the second one onwards receive the input vertices
112 buffer_cur = setup.uniforms.f + 1;
113
114 main_vertex_num = regs.pipeline.variable_vertex_main_num_minus_1 + 1;
115 total_vertex_num = val;
116 need_index = false;
117 }
118
119 bool SubmitVertex(const Shader::AttributeBuffer& input) override {
120 DEBUG_ASSERT(!need_index);
121 if (main_vertex_num != 0) {
122 // For main vertices, receive all attributes
123 buffer_cur = std::copy(input.attr, input.attr + vs_output_num, buffer_cur);
124 --main_vertex_num;
125 } else {
126 // For other vertices, only receive the first attribute (usually the position)
127 *(buffer_cur++) = input.attr[0];
128 }
129 --total_vertex_num;
130
131 if (total_vertex_num == 0) {
132 need_index = true;
133 return true;
134 }
135
136 return false;
137 }
138
139private:
140 bool need_index = true;
141 const Regs& regs;
142 Shader::ShaderSetup& setup;
143 unsigned int main_vertex_num;
144 unsigned int total_vertex_num;
145 Math::Vec4<float24>* buffer_cur;
146 unsigned int vs_output_num;
147};
148
149// In FixedPrimitive mode, vertex attributes are buffered into the uniform registers of the
150// geometry shader unit. The number of vertices per shader invocation is constant. This mode is
151// typically used for particle systems.
152class GeometryPipeline_FixedPrimitive : public GeometryPipelineBackend {
153public:
154 GeometryPipeline_FixedPrimitive(const Regs& regs, Shader::ShaderSetup& setup)
155 : regs(regs), setup(setup) {
156 ASSERT(regs.pipeline.variable_primitive == 0);
157 ASSERT(regs.gs.input_to_uniform == 1);
158 vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
159 ASSERT(vs_output_num == regs.pipeline.gs_config.stride_minus_1 + 1);
160 size_t vertex_num = regs.pipeline.gs_config.fixed_vertex_num_minus_1 + 1;
161 buffer_cur = buffer_begin = setup.uniforms.f + regs.pipeline.gs_config.start_index;
162 buffer_end = buffer_begin + vs_output_num * vertex_num;
163 }
164
165 bool IsEmpty() const override {
166 return buffer_cur == buffer_begin;
167 }
168
169 bool NeedIndexInput() const override {
170 return false;
171 }
172
173 void SubmitIndex(unsigned int val) override {
174 UNREACHABLE();
175 }
176
177 bool SubmitVertex(const Shader::AttributeBuffer& input) override {
178 buffer_cur = std::copy(input.attr, input.attr + vs_output_num, buffer_cur);
179 if (buffer_cur == buffer_end) {
180 buffer_cur = buffer_begin;
181 return true;
182 }
183 return false;
184 }
185
186private:
187 const Regs& regs;
188 Shader::ShaderSetup& setup;
189 Math::Vec4<float24>* buffer_begin;
190 Math::Vec4<float24>* buffer_cur;
191 Math::Vec4<float24>* buffer_end;
192 unsigned int vs_output_num;
193};
194
195GeometryPipeline::GeometryPipeline(State& state) : state(state) {}
196
197GeometryPipeline::~GeometryPipeline() = default;
198
199void GeometryPipeline::SetVertexHandler(Shader::VertexHandler vertex_handler) {
200 this->vertex_handler = vertex_handler;
201}
202
203void GeometryPipeline::Setup(Shader::ShaderEngine* shader_engine) {
204 if (!backend)
205 return;
206
207 this->shader_engine = shader_engine;
208 shader_engine->SetupBatch(state.gs, state.regs.gs.main_offset);
209}
210
211void GeometryPipeline::Reconfigure() {
212 ASSERT(!backend || backend->IsEmpty());
213
214 if (state.regs.pipeline.use_gs == PipelineRegs::UseGS::No) {
215 backend = nullptr;
216 return;
217 }
218
219 ASSERT(state.regs.pipeline.use_gs == PipelineRegs::UseGS::Yes);
220
221    // The following assumes that when the geometry shader is in use, shader unit 3 is
222    // configured as a geometry shader unit.
223 // TODO: what happens if this is not true?
224 ASSERT(state.regs.pipeline.gs_unit_exclusive_configuration == 1);
225 ASSERT(state.regs.gs.shader_mode == ShaderRegs::ShaderMode::GS);
226
227 state.gs_unit.ConfigOutput(state.regs.gs);
228
229 ASSERT(state.regs.pipeline.vs_outmap_total_minus_1_a ==
230 state.regs.pipeline.vs_outmap_total_minus_1_b);
231
232 switch (state.regs.pipeline.gs_config.mode) {
233 case PipelineRegs::GSMode::Point:
234 backend = std::make_unique<GeometryPipeline_Point>(state.regs, state.gs_unit);
235 break;
236 case PipelineRegs::GSMode::VariablePrimitive:
237 backend = std::make_unique<GeometryPipeline_VariablePrimitive>(state.regs, state.gs);
238 break;
239 case PipelineRegs::GSMode::FixedPrimitive:
240 backend = std::make_unique<GeometryPipeline_FixedPrimitive>(state.regs, state.gs);
241 break;
242 default:
243 UNREACHABLE();
244 }
245}
246
247bool GeometryPipeline::NeedIndexInput() const {
248 if (!backend)
249 return false;
250 return backend->NeedIndexInput();
251}
252
253void GeometryPipeline::SubmitIndex(unsigned int val) {
254 backend->SubmitIndex(val);
255}
256
257void GeometryPipeline::SubmitVertex(const Shader::AttributeBuffer& input) {
258 if (!backend) {
259 // No backend means the geometry shader is disabled, so we send the vertex shader output
260 // directly to the primitive assembler.
261 vertex_handler(input);
262 } else {
263 if (backend->SubmitVertex(input)) {
264 shader_engine->Run(state.gs, state.gs_unit);
265
266            // The uniform b15 is set to true after every geometry shader invocation. If the
267            // program sets b15 to false beforehand, the shader can use it to detect the first
268            // invocation in a batch.
269 state.gs.uniforms.b[15] = true;
270 }
271 }
272}
273
274} // namespace Pica
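For the Point-mode backend above, the assertion gs_input_num % vs_output_num == 0 implies that each geometry shader invocation consumes exactly gs_input_num / vs_output_num vertices. A tiny sketch of that arithmetic (the register values are assumed example numbers, not read from real Regs fields):

// Sketch of the Point-mode buffering arithmetic: 6 geometry shader inputs fed by a
// vertex shader emitting 2 attributes means one invocation per 3 vertices.
#include <cassert>
#include <cstdio>

int main() {
    const unsigned vs_output_num = 2; // corresponds to vs_outmap_total_minus_1_a + 1 (assumed)
    const unsigned gs_input_num = 6;  // corresponds to max_input_attribute_index + 1 (assumed)
    assert(gs_input_num % vs_output_num == 0);

    const unsigned vertices_per_invocation = gs_input_num / vs_output_num;
    std::printf("geometry shader runs once per %u vertices\n", vertices_per_invocation); // 3
}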
diff --git a/src/video_core/geometry_pipeline.h b/src/video_core/geometry_pipeline.h
deleted file mode 100644
index 91fdd3192..000000000
--- a/src/video_core/geometry_pipeline.h
+++ /dev/null
@@ -1,49 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include "video_core/shader/shader.h"
9
10namespace Pica {
11
12struct State;
13
14class GeometryPipelineBackend;
15
16/// A pipeline receiving from vertex shader and sending to geometry shader and primitive assembler
17class GeometryPipeline {
18public:
19 explicit GeometryPipeline(State& state);
20 ~GeometryPipeline();
21
22 /// Sets the handler for receiving vertex outputs from vertex shader
23 void SetVertexHandler(Shader::VertexHandler vertex_handler);
24
25 /**
26 * Setup the geometry shader unit if it is in use
27 * @param shader_engine the shader engine for the geometry shader to run
28 */
29 void Setup(Shader::ShaderEngine* shader_engine);
30
31 /// Reconfigures the pipeline according to current register settings
32 void Reconfigure();
33
34 /// Checks if the pipeline needs a direct input from index buffer
35 bool NeedIndexInput() const;
36
37 /// Submits an index from index buffer. Call this only when NeedIndexInput returns true
38 void SubmitIndex(unsigned int val);
39
40 /// Submits vertex attributes output from vertex shader
41 void SubmitVertex(const Shader::AttributeBuffer& input);
42
43private:
44 Shader::VertexHandler vertex_handler;
45 Shader::ShaderEngine* shader_engine;
46 std::unique_ptr<GeometryPipelineBackend> backend;
47 State& state;
48};
49} // namespace Pica
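Callers of this interface are expected to query NeedIndexInput before each element: in VariablePrimitive mode the first element of a batch is a vertex count, after which vertex shader outputs are submitted. A standalone mock of that calling pattern (MockPipeline is illustrative only, not the real Pica::GeometryPipeline):

// Standalone mock of the driving loop implied by the interface above.
#include <cstdio>

class MockPipeline {
public:
    bool NeedIndexInput() const { return need_index; }
    void SubmitIndex(unsigned count) { remaining = count; need_index = false; }
    void SubmitVertex() {
        if (--remaining == 0) {
            std::printf("geometry shader invoked\n");
            need_index = true; // the next batch starts with a count again
        }
    }

private:
    bool need_index = true;
    unsigned remaining = 0;
};

int main() {
    MockPipeline pipeline;
    const unsigned batch[] = {3, 0, 0, 0}; // a count of 3 followed by three vertices
    for (unsigned element : batch) {
        if (pipeline.NeedIndexInput())
            pipeline.SubmitIndex(element); // VariablePrimitive mode reads the count first
        else
            pipeline.SubmitVertex();       // afterwards, feed vertex shader outputs
    }
}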
diff --git a/src/video_core/gpu_debugger.h b/src/video_core/gpu_debugger.h
deleted file mode 100644
index c1f9b43c2..000000000
--- a/src/video_core/gpu_debugger.h
+++ /dev/null
@@ -1,85 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <algorithm>
8#include <functional>
9#include <vector>
10#include "core/hle/service/gsp_gpu.h"
11
12class GraphicsDebugger {
13public:
14 // Base class for all objects which need to be notified about GPU events
15 class DebuggerObserver {
16 public:
17 DebuggerObserver() : observed(nullptr) {}
18
19 virtual ~DebuggerObserver() {
20 if (observed)
21 observed->UnregisterObserver(this);
22 }
23
24 /**
25         * Called when a GX command has been processed and is ready to be
26 * read via GraphicsDebugger::ReadGXCommandHistory.
27 * @param total_command_count Total number of commands in the GX history
28 * @note All methods in this class are called from the GSP thread
29 */
30 virtual void GXCommandProcessed(int total_command_count) {
31 const Service::GSP::Command& cmd =
32 observed->ReadGXCommandHistory(total_command_count - 1);
33 LOG_TRACE(Debug_GPU, "Received command: id=%x", (int)cmd.id.Value());
34 }
35
36 protected:
37 const GraphicsDebugger* GetDebugger() const {
38 return observed;
39 }
40
41 private:
42 GraphicsDebugger* observed;
43
44 friend class GraphicsDebugger;
45 };
46
47 void GXCommandProcessed(u8* command_data) {
48 if (observers.empty())
49 return;
50
51 gx_command_history.emplace_back();
52 Service::GSP::Command& cmd = gx_command_history.back();
53
54 memcpy(&cmd, command_data, sizeof(Service::GSP::Command));
55
56 ForEachObserver([this](DebuggerObserver* observer) {
57 observer->GXCommandProcessed(static_cast<int>(this->gx_command_history.size()));
58 });
59 }
60
61 const Service::GSP::Command& ReadGXCommandHistory(int index) const {
62 // TODO: Is this thread-safe?
63 return gx_command_history[index];
64 }
65
66 void RegisterObserver(DebuggerObserver* observer) {
67 // TODO: Check for duplicates
68 observers.push_back(observer);
69 observer->observed = this;
70 }
71
72 void UnregisterObserver(DebuggerObserver* observer) {
73 observers.erase(std::remove(observers.begin(), observers.end(), observer), observers.end());
74 observer->observed = nullptr;
75 }
76
77private:
78 void ForEachObserver(std::function<void(DebuggerObserver*)> func) {
79 std::for_each(observers.begin(), observers.end(), func);
80 }
81
82 std::vector<DebuggerObserver*> observers;
83
84 std::vector<Service::GSP::Command> gx_command_history;
85};
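The observer list above relies on DebuggerObserver unregistering itself in its destructor if it is still attached. A standalone sketch of that register/unregister-on-destruction pattern, with the command payload reduced to an int (all names are illustrative, not the GSP types):

// Standalone sketch of the observer pattern used by GraphicsDebugger.
#include <algorithm>
#include <cstdio>
#include <vector>

class Subject;

class Observer {
public:
    virtual ~Observer();
    virtual void OnCommand(int value) = 0;
    Subject* observed = nullptr;
};

class Subject {
public:
    void Register(Observer* o) {
        observers.push_back(o);
        o->observed = this;
    }
    void Unregister(Observer* o) {
        observers.erase(std::remove(observers.begin(), observers.end(), o), observers.end());
        o->observed = nullptr;
    }
    void Emit(int value) {
        for (Observer* o : observers)
            o->OnCommand(value);
    }

private:
    std::vector<Observer*> observers;
};

Observer::~Observer() {
    if (observed)
        observed->Unregister(this); // mirrors DebuggerObserver::~DebuggerObserver
}

struct PrintingObserver : Observer {
    void OnCommand(int value) override { std::printf("command %d\n", value); }
};

int main() {
    Subject debugger;
    PrintingObserver printer;
    debugger.Register(&printer);
    debugger.Emit(42); // prints "command 42"
}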
diff --git a/src/video_core/pica.cpp b/src/video_core/pica.cpp
deleted file mode 100644
index 218e06883..000000000
--- a/src/video_core/pica.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <cstring>
6#include "video_core/geometry_pipeline.h"
7#include "video_core/pica.h"
8#include "video_core/pica_state.h"
9#include "video_core/renderer_base.h"
10#include "video_core/video_core.h"
11
12namespace Pica {
13
14State g_state;
15
16void Init() {
17 g_state.Reset();
18}
19
20void Shutdown() {
21 Shader::Shutdown();
22}
23
24template <typename T>
25void Zero(T& o) {
26 memset(&o, 0, sizeof(o));
27}
28
29State::State() : geometry_pipeline(*this) {
30 auto SubmitVertex = [this](const Shader::AttributeBuffer& vertex) {
31 using Pica::Shader::OutputVertex;
32 auto AddTriangle = [this](const OutputVertex& v0, const OutputVertex& v1,
33 const OutputVertex& v2) {
34 VideoCore::g_renderer->Rasterizer()->AddTriangle(v0, v1, v2);
35 };
36 primitive_assembler.SubmitVertex(
37 Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, vertex), AddTriangle);
38 };
39
40 auto SetWinding = [this]() { primitive_assembler.SetWinding(); };
41
42 g_state.gs_unit.SetVertexHandler(SubmitVertex, SetWinding);
43 g_state.geometry_pipeline.SetVertexHandler(SubmitVertex);
44}
45
46void State::Reset() {
47 Zero(regs);
48 Zero(vs);
49 Zero(gs);
50 Zero(cmd_list);
51 Zero(immediate);
52 primitive_assembler.Reconfigure(PipelineRegs::TriangleTopology::List);
53}
54}
diff --git a/src/video_core/pica.h b/src/video_core/pica.h
deleted file mode 100644
index dc8aa6670..000000000
--- a/src/video_core/pica.h
+++ /dev/null
@@ -1,16 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "video_core/regs_texturing.h"
8namespace Pica {
9
10/// Initialize Pica state
11void Init();
12
13/// Shutdown Pica state
14void Shutdown();
15
16} // namespace
diff --git a/src/video_core/pica_state.h b/src/video_core/pica_state.h
deleted file mode 100644
index c6634a0bc..000000000
--- a/src/video_core/pica_state.h
+++ /dev/null
@@ -1,159 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include "common/bit_field.h"
9#include "common/common_types.h"
10#include "common/vector_math.h"
11#include "video_core/geometry_pipeline.h"
12#include "video_core/primitive_assembly.h"
13#include "video_core/regs.h"
14#include "video_core/shader/shader.h"
15
16namespace Pica {
17
18/// Struct used to describe current Pica state
19struct State {
20 State();
21 void Reset();
22
23 /// Pica registers
24 Regs regs;
25
26 Shader::ShaderSetup vs;
27 Shader::ShaderSetup gs;
28
29 Shader::AttributeBuffer input_default_attributes;
30
31 struct ProcTex {
32 union ValueEntry {
33 u32 raw;
34
35 // LUT value, encoded as 12-bit fixed point, with 12 fraction bits
36 BitField<0, 12, u32> value; // 0.0.12 fixed point
37
38 // Difference between two entry values. Used for efficient interpolation.
39 // 0.0.12 fixed point with two's complement. The range is [-0.5, 0.5).
40 // Note: the type of this is different from the one of lighting LUT
41 BitField<12, 12, s32> difference;
42
43 float ToFloat() const {
44 return static_cast<float>(value) / 4095.f;
45 }
46
47 float DiffToFloat() const {
48 return static_cast<float>(difference) / 4095.f;
49 }
50 };
51
52 union ColorEntry {
53 u32 raw;
54 BitField<0, 8, u32> r;
55 BitField<8, 8, u32> g;
56 BitField<16, 8, u32> b;
57 BitField<24, 8, u32> a;
58
59 Math::Vec4<u8> ToVector() const {
60 return {static_cast<u8>(r), static_cast<u8>(g), static_cast<u8>(b),
61 static_cast<u8>(a)};
62 }
63 };
64
65 union ColorDifferenceEntry {
66 u32 raw;
67 BitField<0, 8, s32> r; // half of the difference between two ColorEntry
68 BitField<8, 8, s32> g;
69 BitField<16, 8, s32> b;
70 BitField<24, 8, s32> a;
71
72 Math::Vec4<s32> ToVector() const {
73 return Math::Vec4<s32>{r, g, b, a} * 2;
74 }
75 };
76
77 std::array<ValueEntry, 128> noise_table;
78 std::array<ValueEntry, 128> color_map_table;
79 std::array<ValueEntry, 128> alpha_map_table;
80 std::array<ColorEntry, 256> color_table;
81 std::array<ColorDifferenceEntry, 256> color_diff_table;
82 } proctex;
83
84 struct Lighting {
85 union LutEntry {
86 // Used for raw access
87 u32 raw;
88
89 // LUT value, encoded as 12-bit fixed point, with 12 fraction bits
90 BitField<0, 12, u32> value; // 0.0.12 fixed point
91
92 // Used for efficient interpolation.
93 BitField<12, 11, u32> difference; // 0.0.11 fixed point
94 BitField<23, 1, u32> neg_difference;
95
96 float ToFloat() const {
97 return static_cast<float>(value) / 4095.f;
98 }
99
100 float DiffToFloat() const {
101 float diff = static_cast<float>(difference) / 2047.f;
102 return neg_difference ? -diff : diff;
103 }
104 };
105
106 std::array<std::array<LutEntry, 256>, 24> luts;
107 } lighting;
108
109 struct {
110 union LutEntry {
111 // Used for raw access
112 u32 raw;
113
114 BitField<0, 13, s32> difference; // 1.1.11 fixed point
115 BitField<13, 11, u32> value; // 0.0.11 fixed point
116
117 float ToFloat() const {
118 return static_cast<float>(value) / 2047.0f;
119 }
120
121 float DiffToFloat() const {
122 return static_cast<float>(difference) / 2047.0f;
123 }
124 };
125
126 std::array<LutEntry, 128> lut;
127 } fog;
128
129 /// Current Pica command list
130 struct {
131 const u32* head_ptr;
132 const u32* current_ptr;
133 u32 length;
134 } cmd_list;
135
136 /// Struct used to describe immediate mode rendering state
137 struct ImmediateModeState {
138 // Used to buffer partial vertices for immediate-mode rendering.
139 Shader::AttributeBuffer input_vertex;
140 // Index of the next attribute to be loaded into `input_vertex`.
141 u32 current_attribute = 0;
142        // Indicates immediate mode has just started and the geometry pipeline needs reconfiguring
143 bool reset_geometry_pipeline = true;
144 } immediate;
145
146    // The geometry shader unit needs to be kept in the global state because some shaders rely
147    // on register values being preserved across shader invocations.
148 // TODO: also bring the three vertex shader units here and implement the shader scheduler.
149 Shader::GSUnitState gs_unit;
150
151 GeometryPipeline geometry_pipeline;
152
153 // This is constructed with a dummy triangle topology
154 PrimitiveAssembler<Shader::OutputVertex> primitive_assembler;
155};
156
157extern State g_state; ///< Current Pica state
158
159} // namespace
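The ProcTex and lighting LUT entries above pack a 0.0.12 fixed-point value together with a signed difference used to interpolate between neighbouring entries. A standalone sketch of that decoding and interpolation (the bit layout mirrors ProcTex::ValueEntry; the raw value and interpolation factor are made-up example numbers):

// Standalone sketch of decoding a 0.0.12 fixed-point LUT entry with a signed
// 12-bit difference field, as used for interpolation between adjacent entries.
#include <cstdint>
#include <cstdio>

static float LutValue(uint32_t raw) {
    return static_cast<float>(raw & 0xFFF) / 4095.f;
}

static float LutDifference(uint32_t raw) {
    // Bits 12..23, sign-extended from 12 bits (two's complement).
    int32_t diff = static_cast<int32_t>((raw >> 12) & 0xFFF);
    if (diff & 0x800)
        diff -= 0x1000;
    return static_cast<float>(diff) / 4095.f;
}

int main() {
    const uint32_t raw = (0xFFAu << 12) | 0x800u; // value ~0.5, difference = -6/4095
    const float t = 0.25f;                        // fractional position between two entries
    std::printf("interpolated = %f\n", LutValue(raw) + t * LutDifference(raw));
}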
diff --git a/src/video_core/pica_types.h b/src/video_core/pica_types.h
deleted file mode 100644
index 2eafa7e9e..000000000
--- a/src/video_core/pica_types.h
+++ /dev/null
@@ -1,143 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <cmath>
8#include <cstring>
9#include "common/common_types.h"
10
11namespace Pica {
12
13/**
14 * Template class for converting arbitrary Pica float types to IEEE 754 32-bit single-precision
15 * floating point.
16 *
17 * When decoding, format is as follows:
18 * - The first `M` bits are the mantissa
19 * - The next `E` bits are the exponent
20 * - The last bit is the sign bit
21 *
22 * @todo Verify on HW if this conversion is sufficiently accurate.
23 */
24template <unsigned M, unsigned E>
25struct Float {
26public:
27 static Float<M, E> FromFloat32(float val) {
28 Float<M, E> ret;
29 ret.value = val;
30 return ret;
31 }
32
33 static Float<M, E> FromRaw(u32 hex) {
34 Float<M, E> res;
35
36 const int width = M + E + 1;
37 const int bias = 128 - (1 << (E - 1));
38 const int exponent = (hex >> M) & ((1 << E) - 1);
39 const unsigned mantissa = hex & ((1 << M) - 1);
40
41 if (hex & ((1 << (width - 1)) - 1))
42 hex = ((hex >> (E + M)) << 31) | (mantissa << (23 - M)) | ((exponent + bias) << 23);
43 else
44 hex = ((hex >> (E + M)) << 31);
45
46 std::memcpy(&res.value, &hex, sizeof(float));
47
48 return res;
49 }
50
51 static Float<M, E> Zero() {
52 return FromFloat32(0.f);
53 }
54
55 // Not recommended for anything but logging
56 float ToFloat32() const {
57 return value;
58 }
59
60 Float<M, E> operator*(const Float<M, E>& flt) const {
61 float result = value * flt.ToFloat32();
62 // PICA gives 0 instead of NaN when multiplying by inf
63 if (!std::isnan(value) && !std::isnan(flt.ToFloat32()))
64 if (std::isnan(result))
65 result = 0.f;
66 return Float<M, E>::FromFloat32(result);
67 }
68
69 Float<M, E> operator/(const Float<M, E>& flt) const {
70 return Float<M, E>::FromFloat32(ToFloat32() / flt.ToFloat32());
71 }
72
73 Float<M, E> operator+(const Float<M, E>& flt) const {
74 return Float<M, E>::FromFloat32(ToFloat32() + flt.ToFloat32());
75 }
76
77 Float<M, E> operator-(const Float<M, E>& flt) const {
78 return Float<M, E>::FromFloat32(ToFloat32() - flt.ToFloat32());
79 }
80
81 Float<M, E>& operator*=(const Float<M, E>& flt) {
82 value = operator*(flt).value;
83 return *this;
84 }
85
86 Float<M, E>& operator/=(const Float<M, E>& flt) {
87 value /= flt.ToFloat32();
88 return *this;
89 }
90
91 Float<M, E>& operator+=(const Float<M, E>& flt) {
92 value += flt.ToFloat32();
93 return *this;
94 }
95
96 Float<M, E>& operator-=(const Float<M, E>& flt) {
97 value -= flt.ToFloat32();
98 return *this;
99 }
100
101 Float<M, E> operator-() const {
102 return Float<M, E>::FromFloat32(-ToFloat32());
103 }
104
105 bool operator<(const Float<M, E>& flt) const {
106 return ToFloat32() < flt.ToFloat32();
107 }
108
109 bool operator>(const Float<M, E>& flt) const {
110 return ToFloat32() > flt.ToFloat32();
111 }
112
113 bool operator>=(const Float<M, E>& flt) const {
114 return ToFloat32() >= flt.ToFloat32();
115 }
116
117 bool operator<=(const Float<M, E>& flt) const {
118 return ToFloat32() <= flt.ToFloat32();
119 }
120
121 bool operator==(const Float<M, E>& flt) const {
122 return ToFloat32() == flt.ToFloat32();
123 }
124
125 bool operator!=(const Float<M, E>& flt) const {
126 return ToFloat32() != flt.ToFloat32();
127 }
128
129private:
130 static const unsigned MASK = (1 << (M + E + 1)) - 1;
131 static const unsigned MANTISSA_MASK = (1 << M) - 1;
132 static const unsigned EXPONENT_MASK = (1 << E) - 1;
133
134 // Stored as a regular float, merely for convenience
135 // TODO: Perform proper arithmetic on this!
136 float value;
137};
138
139using float24 = Float<16, 7>;
140using float20 = Float<12, 7>;
141using float16 = Float<10, 5>;
142
143} // namespace Pica
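FromRaw above rebases the exponent by bias = 128 - (1 << (E - 1)) and shifts the mantissa up to IEEE 754's 23 bits. A standalone restatement of that conversion for float24 (M = 16, E = 7) with one worked value; it mirrors the bit manipulation above but is not the original class:

// Standalone restatement of Float<16, 7>::FromRaw with a worked example.
#include <cstdint>
#include <cstdio>
#include <cstring>

static float DecodeFloat24(uint32_t hex) {
    constexpr int M = 16, E = 7;
    const int bias = 128 - (1 << (E - 1));            // 64 for float24
    const int exponent = (hex >> M) & ((1 << E) - 1);
    const uint32_t mantissa = hex & ((1u << M) - 1);

    uint32_t ieee;
    if (hex & ((1u << (M + E)) - 1))                  // non-zero magnitude
        ieee = ((hex >> (E + M)) << 31) | (mantissa << (23 - M)) | ((exponent + bias) << 23);
    else                                              // signed zero
        ieee = (hex >> (E + M)) << 31;

    float out;
    std::memcpy(&out, &ieee, sizeof(out));
    return out;
}

int main() {
    // 0x3F0000: sign 0, exponent 63, mantissa 0. With the bias of 64 this becomes an
    // IEEE exponent field of 127, i.e. the value 1.0f.
    std::printf("0x3F0000 -> %f\n", DecodeFloat24(0x3F0000)); // prints 1.000000
}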
diff --git a/src/video_core/primitive_assembly.cpp b/src/video_core/primitive_assembly.cpp
deleted file mode 100644
index 9c3dd4cab..000000000
--- a/src/video_core/primitive_assembly.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/logging/log.h"
6#include "video_core/primitive_assembly.h"
7#include "video_core/regs_pipeline.h"
8#include "video_core/shader/shader.h"
9
10namespace Pica {
11
12template <typename VertexType>
13PrimitiveAssembler<VertexType>::PrimitiveAssembler(PipelineRegs::TriangleTopology topology)
14 : topology(topology), buffer_index(0) {}
15
16template <typename VertexType>
17void PrimitiveAssembler<VertexType>::SubmitVertex(const VertexType& vtx,
18 TriangleHandler triangle_handler) {
19 switch (topology) {
20 case PipelineRegs::TriangleTopology::List:
21 case PipelineRegs::TriangleTopology::Shader:
22 if (buffer_index < 2) {
23 buffer[buffer_index++] = vtx;
24 } else {
25 buffer_index = 0;
26 if (topology == PipelineRegs::TriangleTopology::Shader && winding) {
27 triangle_handler(buffer[1], buffer[0], vtx);
28 winding = false;
29 } else {
30 triangle_handler(buffer[0], buffer[1], vtx);
31 }
32 }
33 break;
34
35 case PipelineRegs::TriangleTopology::Strip:
36 case PipelineRegs::TriangleTopology::Fan:
37 if (strip_ready)
38 triangle_handler(buffer[0], buffer[1], vtx);
39
40 buffer[buffer_index] = vtx;
41
42 strip_ready |= (buffer_index == 1);
43
44 if (topology == PipelineRegs::TriangleTopology::Strip)
45 buffer_index = !buffer_index;
46 else if (topology == PipelineRegs::TriangleTopology::Fan)
47 buffer_index = 1;
48 break;
49
50 default:
51 LOG_ERROR(HW_GPU, "Unknown triangle topology %x:", (int)topology);
52 break;
53 }
54}
55
56template <typename VertexType>
57void PrimitiveAssembler<VertexType>::SetWinding() {
58 winding = true;
59}
60
61template <typename VertexType>
62void PrimitiveAssembler<VertexType>::Reset() {
63 buffer_index = 0;
64 strip_ready = false;
65 winding = false;
66}
67
68template <typename VertexType>
69void PrimitiveAssembler<VertexType>::Reconfigure(PipelineRegs::TriangleTopology topology) {
70 Reset();
71 this->topology = topology;
72}
73
74// explicitly instantiate use cases
75template struct PrimitiveAssembler<Shader::OutputVertex>;
76
77} // namespace
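In the Strip case above, the two most recent vertices are kept while buffer_index alternates, so every vertex after the second emits one triangle. A standalone trace of that logic with vertices reduced to integers (this is not the templated class, just its Strip branch):

// Standalone trace of the Strip topology handling: five vertices yield three triangles.
#include <cstdio>

int main() {
    int buffer[2] = {};
    int buffer_index = 0;
    bool strip_ready = false;

    for (int vtx = 0; vtx < 5; ++vtx) {
        if (strip_ready)
            std::printf("triangle %d %d %d\n", buffer[0], buffer[1], vtx);
        buffer[buffer_index] = vtx;
        strip_ready |= (buffer_index == 1);
        buffer_index = !buffer_index;
    }
    // Prints: 0 1 2 / 2 1 3 / 2 3 4 -- the vertex order alternates as expected for a strip.
}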
diff --git a/src/video_core/primitive_assembly.h b/src/video_core/primitive_assembly.h
deleted file mode 100644
index 12de8e3b9..000000000
--- a/src/video_core/primitive_assembly.h
+++ /dev/null
@@ -1,57 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <functional>
8#include "video_core/regs_pipeline.h"
9
10namespace Pica {
11
12/*
13 * Utility class to build triangles from a series of vertices,
14 * according to a given triangle topology.
15 */
16template <typename VertexType>
17struct PrimitiveAssembler {
18 using TriangleHandler =
19 std::function<void(const VertexType& v0, const VertexType& v1, const VertexType& v2)>;
20
21 PrimitiveAssembler(
22 PipelineRegs::TriangleTopology topology = PipelineRegs::TriangleTopology::List);
23
24 /*
25 * Queues a vertex, builds primitives from the vertex queue according to the given
26 * triangle topology, and calls triangle_handler for each generated primitive.
27 * NOTE: We could specify the triangle handler in the constructor, but this way we can
28 * keep event and handler code next to each other.
29 */
30 void SubmitVertex(const VertexType& vtx, TriangleHandler triangle_handler);
31
32 /**
33 * Invert the vertex order of the next triangle. Called by geometry shader emitter.
34 * This only takes effect for TriangleTopology::Shader.
35 */
36 void SetWinding();
37
38 /**
39 * Resets the internal state of the PrimitiveAssembler.
40 */
41 void Reset();
42
43 /**
44 * Reconfigures the PrimitiveAssembler to use a different triangle topology.
45 */
46 void Reconfigure(PipelineRegs::TriangleTopology topology);
47
48private:
49 PipelineRegs::TriangleTopology topology;
50
51 int buffer_index;
52 VertexType buffer[2];
53 bool strip_ready = false;
54 bool winding = false;
55};
56
57} // namespace
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
deleted file mode 100644
index 4b099bc55..000000000
--- a/src/video_core/rasterizer_interface.h
+++ /dev/null
@@ -1,67 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8#include "core/hw/gpu.h"
9
10struct ScreenInfo;
11
12namespace Pica {
13namespace Shader {
14struct OutputVertex;
15}
16}
17
18namespace VideoCore {
19
20class RasterizerInterface {
21public:
22 virtual ~RasterizerInterface() {}
23
24 /// Queues the primitive formed by the given vertices for rendering
25 virtual void AddTriangle(const Pica::Shader::OutputVertex& v0,
26 const Pica::Shader::OutputVertex& v1,
27 const Pica::Shader::OutputVertex& v2) = 0;
28
29 /// Draw the current batch of triangles
30 virtual void DrawTriangles() = 0;
31
32 /// Notify rasterizer that the specified PICA register has been changed
33 virtual void NotifyPicaRegisterChanged(u32 id) = 0;
34
35 /// Notify rasterizer that all caches should be flushed to 3DS memory
36 virtual void FlushAll() = 0;
37
38 /// Notify rasterizer that any caches of the specified region should be flushed to 3DS memory
39 virtual void FlushRegion(PAddr addr, u64 size) = 0;
40
41 /// Notify rasterizer that any caches of the specified region should be flushed to 3DS memory
42 /// and invalidated
43 virtual void FlushAndInvalidateRegion(PAddr addr, u64 size) = 0;
44
45 /// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
46 virtual bool AccelerateDisplayTransfer(const GPU::Regs::DisplayTransferConfig& config) {
47 return false;
48 }
49
50 /// Attempt to use a faster method to perform a display transfer with is_texture_copy = 1
51 virtual bool AccelerateTextureCopy(const GPU::Regs::DisplayTransferConfig& config) {
52 return false;
53 }
54
55 /// Attempt to use a faster method to fill a region
56 virtual bool AccelerateFill(const GPU::Regs::MemoryFillConfig& config) {
57 return false;
58 }
59
60 /// Attempt to use a faster method to display the framebuffer to screen
61 virtual bool AccelerateDisplay(const GPU::Regs::FramebufferConfig& config,
62 PAddr framebuffer_addr, u32 pixel_stride,
63 ScreenInfo& screen_info) {
64 return false;
65 }
66};
67}
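The Accelerate* hooks above default to returning false, so a backend only overrides the paths it can actually serve and callers take the software path otherwise. A minimal sketch of that "decline and fall back" pattern (types and names here are stand-ins, not the real interface):

// Minimal sketch of the "return false to fall back" pattern used by the Accelerate* hooks.
#include <cstdio>

struct AcceleratorInterface {
    virtual ~AcceleratorInterface() = default;
    // The default implementation declines, letting the caller take the slow path.
    virtual bool AccelerateFill() { return false; }
};

struct HardwareAccelerator : AcceleratorInterface {
    bool AccelerateFill() override { return true; } // a backend that can serve the request
};

static void DoFill(AcceleratorInterface& backend) {
    if (backend.AccelerateFill())
        std::printf("hardware fill\n");
    else
        std::printf("software fallback\n");
}

int main() {
    AcceleratorInterface none;
    HardwareAccelerator hw;
    DoFill(none); // software fallback
    DoFill(hw);   // hardware fill
}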
diff --git a/src/video_core/regs.cpp b/src/video_core/regs.cpp
deleted file mode 100644
index 2699e710a..000000000
--- a/src/video_core/regs.cpp
+++ /dev/null
@@ -1,488 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <iterator>
7#include <utility>
8
9#include "common/common_types.h"
10#include "video_core/regs.h"
11
12namespace Pica {
13
14static const std::pair<u16, const char*> register_names[] = {
15 {0x010, "GPUREG_FINALIZE"},
16
17 {0x040, "GPUREG_FACECULLING_CONFIG"},
18 {0x041, "GPUREG_VIEWPORT_WIDTH"},
19 {0x042, "GPUREG_VIEWPORT_INVW"},
20 {0x043, "GPUREG_VIEWPORT_HEIGHT"},
21 {0x044, "GPUREG_VIEWPORT_INVH"},
22
23 {0x047, "GPUREG_FRAGOP_CLIP"},
24 {0x048, "GPUREG_FRAGOP_CLIP_DATA0"},
25 {0x049, "GPUREG_FRAGOP_CLIP_DATA1"},
26 {0x04A, "GPUREG_FRAGOP_CLIP_DATA2"},
27 {0x04B, "GPUREG_FRAGOP_CLIP_DATA3"},
28
29 {0x04D, "GPUREG_DEPTHMAP_SCALE"},
30 {0x04E, "GPUREG_DEPTHMAP_OFFSET"},
31 {0x04F, "GPUREG_SH_OUTMAP_TOTAL"},
32 {0x050, "GPUREG_SH_OUTMAP_O0"},
33 {0x051, "GPUREG_SH_OUTMAP_O1"},
34 {0x052, "GPUREG_SH_OUTMAP_O2"},
35 {0x053, "GPUREG_SH_OUTMAP_O3"},
36 {0x054, "GPUREG_SH_OUTMAP_O4"},
37 {0x055, "GPUREG_SH_OUTMAP_O5"},
38 {0x056, "GPUREG_SH_OUTMAP_O6"},
39
40 {0x061, "GPUREG_EARLYDEPTH_FUNC"},
41 {0x062, "GPUREG_EARLYDEPTH_TEST1"},
42 {0x063, "GPUREG_EARLYDEPTH_CLEAR"},
43 {0x064, "GPUREG_SH_OUTATTR_MODE"},
44 {0x065, "GPUREG_SCISSORTEST_MODE"},
45 {0x066, "GPUREG_SCISSORTEST_POS"},
46 {0x067, "GPUREG_SCISSORTEST_DIM"},
47 {0x068, "GPUREG_VIEWPORT_XY"},
48
49 {0x06A, "GPUREG_EARLYDEPTH_DATA"},
50
51 {0x06D, "GPUREG_DEPTHMAP_ENABLE"},
52 {0x06E, "GPUREG_RENDERBUF_DIM"},
53 {0x06F, "GPUREG_SH_OUTATTR_CLOCK"},
54
55 {0x080, "GPUREG_TEXUNIT_CONFIG"},
56 {0x081, "GPUREG_TEXUNIT0_BORDER_COLOR"},
57 {0x082, "GPUREG_TEXUNIT0_DIM"},
58 {0x083, "GPUREG_TEXUNIT0_PARAM"},
59 {0x084, "GPUREG_TEXUNIT0_LOD"},
60 {0x085, "GPUREG_TEXUNIT0_ADDR1"},
61 {0x086, "GPUREG_TEXUNIT0_ADDR2"},
62 {0x087, "GPUREG_TEXUNIT0_ADDR3"},
63 {0x088, "GPUREG_TEXUNIT0_ADDR4"},
64 {0x089, "GPUREG_TEXUNIT0_ADDR5"},
65 {0x08A, "GPUREG_TEXUNIT0_ADDR6"},
66 {0x08B, "GPUREG_TEXUNIT0_SHADOW"},
67
68 {0x08E, "GPUREG_TEXUNIT0_TYPE"},
69 {0x08F, "GPUREG_LIGHTING_ENABLE0"},
70
71 {0x091, "GPUREG_TEXUNIT1_BORDER_COLOR"},
72 {0x092, "GPUREG_TEXUNIT1_DIM"},
73 {0x093, "GPUREG_TEXUNIT1_PARAM"},
74 {0x094, "GPUREG_TEXUNIT1_LOD"},
75 {0x095, "GPUREG_TEXUNIT1_ADDR"},
76 {0x096, "GPUREG_TEXUNIT1_TYPE"},
77
78 {0x099, "GPUREG_TEXUNIT2_BORDER_COLOR"},
79 {0x09A, "GPUREG_TEXUNIT2_DIM"},
80 {0x09B, "GPUREG_TEXUNIT2_PARAM"},
81 {0x09C, "GPUREG_TEXUNIT2_LOD"},
82 {0x09D, "GPUREG_TEXUNIT2_ADDR"},
83 {0x09E, "GPUREG_TEXUNIT2_TYPE"},
84
85 {0x0A8, "GPUREG_TEXUNIT3_PROCTEX0"},
86 {0x0A9, "GPUREG_TEXUNIT3_PROCTEX1"},
87 {0x0AA, "GPUREG_TEXUNIT3_PROCTEX2"},
88 {0x0AB, "GPUREG_TEXUNIT3_PROCTEX3"},
89 {0x0AC, "GPUREG_TEXUNIT3_PROCTEX4"},
90 {0x0AD, "GPUREG_TEXUNIT3_PROCTEX5"},
91
92 {0x0AF, "GPUREG_PROCTEX_LUT"},
93 {0x0B0, "GPUREG_PROCTEX_LUT_DATA0"},
94 {0x0B1, "GPUREG_PROCTEX_LUT_DATA1"},
95 {0x0B2, "GPUREG_PROCTEX_LUT_DATA2"},
96 {0x0B3, "GPUREG_PROCTEX_LUT_DATA3"},
97 {0x0B4, "GPUREG_PROCTEX_LUT_DATA4"},
98 {0x0B5, "GPUREG_PROCTEX_LUT_DATA5"},
99 {0x0B6, "GPUREG_PROCTEX_LUT_DATA6"},
100 {0x0B7, "GPUREG_PROCTEX_LUT_DATA7"},
101
102 {0x0C0, "GPUREG_TEXENV0_SOURCE"},
103 {0x0C1, "GPUREG_TEXENV0_OPERAND"},
104 {0x0C2, "GPUREG_TEXENV0_COMBINER"},
105 {0x0C3, "GPUREG_TEXENV0_COLOR"},
106 {0x0C4, "GPUREG_TEXENV0_SCALE"},
107
108 {0x0C8, "GPUREG_TEXENV1_SOURCE"},
109 {0x0C9, "GPUREG_TEXENV1_OPERAND"},
110 {0x0CA, "GPUREG_TEXENV1_COMBINER"},
111 {0x0CB, "GPUREG_TEXENV1_COLOR"},
112 {0x0CC, "GPUREG_TEXENV1_SCALE"},
113
114 {0x0D0, "GPUREG_TEXENV2_SOURCE"},
115 {0x0D1, "GPUREG_TEXENV2_OPERAND"},
116 {0x0D2, "GPUREG_TEXENV2_COMBINER"},
117 {0x0D3, "GPUREG_TEXENV2_COLOR"},
118 {0x0D4, "GPUREG_TEXENV2_SCALE"},
119
120 {0x0D8, "GPUREG_TEXENV3_SOURCE"},
121 {0x0D9, "GPUREG_TEXENV3_OPERAND"},
122 {0x0DA, "GPUREG_TEXENV3_COMBINER"},
123 {0x0DB, "GPUREG_TEXENV3_COLOR"},
124 {0x0DC, "GPUREG_TEXENV3_SCALE"},
125
126 {0x0E0, "GPUREG_TEXENV_UPDATE_BUFFER"},
127 {0x0E1, "GPUREG_FOG_COLOR"},
128
129 {0x0E4, "GPUREG_GAS_ATTENUATION"},
130 {0x0E5, "GPUREG_GAS_ACCMAX"},
131 {0x0E6, "GPUREG_FOG_LUT_INDEX"},
132
133 {0x0E8, "GPUREG_FOG_LUT_DATA0"},
134 {0x0E9, "GPUREG_FOG_LUT_DATA1"},
135 {0x0EA, "GPUREG_FOG_LUT_DATA2"},
136 {0x0EB, "GPUREG_FOG_LUT_DATA3"},
137 {0x0EC, "GPUREG_FOG_LUT_DATA4"},
138 {0x0ED, "GPUREG_FOG_LUT_DATA5"},
139 {0x0EE, "GPUREG_FOG_LUT_DATA6"},
140 {0x0EF, "GPUREG_FOG_LUT_DATA7"},
141 {0x0F0, "GPUREG_TEXENV4_SOURCE"},
142 {0x0F1, "GPUREG_TEXENV4_OPERAND"},
143 {0x0F2, "GPUREG_TEXENV4_COMBINER"},
144 {0x0F3, "GPUREG_TEXENV4_COLOR"},
145 {0x0F4, "GPUREG_TEXENV4_SCALE"},
146
147 {0x0F8, "GPUREG_TEXENV5_SOURCE"},
148 {0x0F9, "GPUREG_TEXENV5_OPERAND"},
149 {0x0FA, "GPUREG_TEXENV5_COMBINER"},
150 {0x0FB, "GPUREG_TEXENV5_COLOR"},
151 {0x0FC, "GPUREG_TEXENV5_SCALE"},
152 {0x0FD, "GPUREG_TEXENV_BUFFER_COLOR"},
153
154 {0x100, "GPUREG_COLOR_OPERATION"},
155 {0x101, "GPUREG_BLEND_FUNC"},
156 {0x102, "GPUREG_LOGIC_OP"},
157 {0x103, "GPUREG_BLEND_COLOR"},
158 {0x104, "GPUREG_FRAGOP_ALPHA_TEST"},
159 {0x105, "GPUREG_STENCIL_TEST"},
160 {0x106, "GPUREG_STENCIL_OP"},
161 {0x107, "GPUREG_DEPTH_COLOR_MASK"},
162
163 {0x110, "GPUREG_FRAMEBUFFER_INVALIDATE"},
164 {0x111, "GPUREG_FRAMEBUFFER_FLUSH"},
165 {0x112, "GPUREG_COLORBUFFER_READ"},
166 {0x113, "GPUREG_COLORBUFFER_WRITE"},
167 {0x114, "GPUREG_DEPTHBUFFER_READ"},
168 {0x115, "GPUREG_DEPTHBUFFER_WRITE"},
169 {0x116, "GPUREG_DEPTHBUFFER_FORMAT"},
170 {0x117, "GPUREG_COLORBUFFER_FORMAT"},
171 {0x118, "GPUREG_EARLYDEPTH_TEST2"},
172
173 {0x11B, "GPUREG_FRAMEBUFFER_BLOCK32"},
174 {0x11C, "GPUREG_DEPTHBUFFER_LOC"},
175 {0x11D, "GPUREG_COLORBUFFER_LOC"},
176 {0x11E, "GPUREG_FRAMEBUFFER_DIM"},
177
178 {0x120, "GPUREG_GAS_LIGHT_XY"},
179 {0x121, "GPUREG_GAS_LIGHT_Z"},
180 {0x122, "GPUREG_GAS_LIGHT_Z_COLOR"},
181 {0x123, "GPUREG_GAS_LUT_INDEX"},
182 {0x124, "GPUREG_GAS_LUT_DATA"},
183
184 {0x126, "GPUREG_GAS_DELTAZ_DEPTH"},
185
186 {0x130, "GPUREG_FRAGOP_SHADOW"},
187
188 {0x140, "GPUREG_LIGHT0_SPECULAR0"},
189 {0x141, "GPUREG_LIGHT0_SPECULAR1"},
190 {0x142, "GPUREG_LIGHT0_DIFFUSE"},
191 {0x143, "GPUREG_LIGHT0_AMBIENT"},
192 {0x144, "GPUREG_LIGHT0_XY"},
193 {0x145, "GPUREG_LIGHT0_Z"},
194 {0x146, "GPUREG_LIGHT0_SPOTDIR_XY"},
195 {0x147, "GPUREG_LIGHT0_SPOTDIR_Z"},
196
197 {0x149, "GPUREG_LIGHT0_CONFIG"},
198 {0x14A, "GPUREG_LIGHT0_ATTENUATION_BIAS"},
199 {0x14B, "GPUREG_LIGHT0_ATTENUATION_SCALE"},
200
201 {0x150, "GPUREG_LIGHT1_SPECULAR0"},
202 {0x151, "GPUREG_LIGHT1_SPECULAR1"},
203 {0x152, "GPUREG_LIGHT1_DIFFUSE"},
204 {0x153, "GPUREG_LIGHT1_AMBIENT"},
205 {0x154, "GPUREG_LIGHT1_XY"},
206 {0x155, "GPUREG_LIGHT1_Z"},
207 {0x156, "GPUREG_LIGHT1_SPOTDIR_XY"},
208 {0x157, "GPUREG_LIGHT1_SPOTDIR_Z"},
209
210 {0x159, "GPUREG_LIGHT1_CONFIG"},
211 {0x15A, "GPUREG_LIGHT1_ATTENUATION_BIAS"},
212 {0x15B, "GPUREG_LIGHT1_ATTENUATION_SCALE"},
213
214 {0x160, "GPUREG_LIGHT2_SPECULAR0"},
215 {0x161, "GPUREG_LIGHT2_SPECULAR1"},
216 {0x162, "GPUREG_LIGHT2_DIFFUSE"},
217 {0x163, "GPUREG_LIGHT2_AMBIENT"},
218 {0x164, "GPUREG_LIGHT2_XY"},
219 {0x165, "GPUREG_LIGHT2_Z"},
220 {0x166, "GPUREG_LIGHT2_SPOTDIR_XY"},
221 {0x167, "GPUREG_LIGHT2_SPOTDIR_Z"},
222
223 {0x169, "GPUREG_LIGHT2_CONFIG"},
224 {0x16A, "GPUREG_LIGHT2_ATTENUATION_BIAS"},
225 {0x16B, "GPUREG_LIGHT2_ATTENUATION_SCALE"},
226
227 {0x170, "GPUREG_LIGHT3_SPECULAR0"},
228 {0x171, "GPUREG_LIGHT3_SPECULAR1"},
229 {0x172, "GPUREG_LIGHT3_DIFFUSE"},
230 {0x173, "GPUREG_LIGHT3_AMBIENT"},
231 {0x174, "GPUREG_LIGHT3_XY"},
232 {0x175, "GPUREG_LIGHT3_Z"},
233 {0x176, "GPUREG_LIGHT3_SPOTDIR_XY"},
234 {0x177, "GPUREG_LIGHT3_SPOTDIR_Z"},
235
236 {0x179, "GPUREG_LIGHT3_CONFIG"},
237 {0x17A, "GPUREG_LIGHT3_ATTENUATION_BIAS"},
238 {0x17B, "GPUREG_LIGHT3_ATTENUATION_SCALE"},
239
240 {0x180, "GPUREG_LIGHT4_SPECULAR0"},
241 {0x181, "GPUREG_LIGHT4_SPECULAR1"},
242 {0x182, "GPUREG_LIGHT4_DIFFUSE"},
243 {0x183, "GPUREG_LIGHT4_AMBIENT"},
244 {0x184, "GPUREG_LIGHT4_XY"},
245 {0x185, "GPUREG_LIGHT4_Z"},
246 {0x186, "GPUREG_LIGHT4_SPOTDIR_XY"},
247 {0x187, "GPUREG_LIGHT4_SPOTDIR_Z"},
248
249 {0x189, "GPUREG_LIGHT4_CONFIG"},
250 {0x18A, "GPUREG_LIGHT4_ATTENUATION_BIAS"},
251 {0x18B, "GPUREG_LIGHT4_ATTENUATION_SCALE"},
252
253 {0x190, "GPUREG_LIGHT5_SPECULAR0"},
254 {0x191, "GPUREG_LIGHT5_SPECULAR1"},
255 {0x192, "GPUREG_LIGHT5_DIFFUSE"},
256 {0x193, "GPUREG_LIGHT5_AMBIENT"},
257 {0x194, "GPUREG_LIGHT5_XY"},
258 {0x195, "GPUREG_LIGHT5_Z"},
259 {0x196, "GPUREG_LIGHT5_SPOTDIR_XY"},
260 {0x197, "GPUREG_LIGHT5_SPOTDIR_Z"},
261
262 {0x199, "GPUREG_LIGHT5_CONFIG"},
263 {0x19A, "GPUREG_LIGHT5_ATTENUATION_BIAS"},
264 {0x19B, "GPUREG_LIGHT5_ATTENUATION_SCALE"},
265
266 {0x1A0, "GPUREG_LIGHT6_SPECULAR0"},
267 {0x1A1, "GPUREG_LIGHT6_SPECULAR1"},
268 {0x1A2, "GPUREG_LIGHT6_DIFFUSE"},
269 {0x1A3, "GPUREG_LIGHT6_AMBIENT"},
270 {0x1A4, "GPUREG_LIGHT6_XY"},
271 {0x1A5, "GPUREG_LIGHT6_Z"},
272 {0x1A6, "GPUREG_LIGHT6_SPOTDIR_XY"},
273 {0x1A7, "GPUREG_LIGHT6_SPOTDIR_Z"},
274
275 {0x1A9, "GPUREG_LIGHT6_CONFIG"},
276 {0x1AA, "GPUREG_LIGHT6_ATTENUATION_BIAS"},
277 {0x1AB, "GPUREG_LIGHT6_ATTENUATION_SCALE"},
278
279 {0x1B0, "GPUREG_LIGHT7_SPECULAR0"},
280 {0x1B1, "GPUREG_LIGHT7_SPECULAR1"},
281 {0x1B2, "GPUREG_LIGHT7_DIFFUSE"},
282 {0x1B3, "GPUREG_LIGHT7_AMBIENT"},
283 {0x1B4, "GPUREG_LIGHT7_XY"},
284 {0x1B5, "GPUREG_LIGHT7_Z"},
285 {0x1B6, "GPUREG_LIGHT7_SPOTDIR_XY"},
286 {0x1B7, "GPUREG_LIGHT7_SPOTDIR_Z"},
287
288 {0x1B9, "GPUREG_LIGHT7_CONFIG"},
289 {0x1BA, "GPUREG_LIGHT7_ATTENUATION_BIAS"},
290 {0x1BB, "GPUREG_LIGHT7_ATTENUATION_SCALE"},
291
292 {0x1C0, "GPUREG_LIGHTING_AMBIENT"},
293
294 {0x1C2, "GPUREG_LIGHTING_NUM_LIGHTS"},
295 {0x1C3, "GPUREG_LIGHTING_CONFIG0"},
296 {0x1C4, "GPUREG_LIGHTING_CONFIG1"},
297 {0x1C5, "GPUREG_LIGHTING_LUT_INDEX"},
298 {0x1C6, "GPUREG_LIGHTING_ENABLE1"},
299
300 {0x1C8, "GPUREG_LIGHTING_LUT_DATA0"},
301 {0x1C9, "GPUREG_LIGHTING_LUT_DATA1"},
302 {0x1CA, "GPUREG_LIGHTING_LUT_DATA2"},
303 {0x1CB, "GPUREG_LIGHTING_LUT_DATA3"},
304 {0x1CC, "GPUREG_LIGHTING_LUT_DATA4"},
305 {0x1CD, "GPUREG_LIGHTING_LUT_DATA5"},
306 {0x1CE, "GPUREG_LIGHTING_LUT_DATA6"},
307 {0x1CF, "GPUREG_LIGHTING_LUT_DATA7"},
308 {0x1D0, "GPUREG_LIGHTING_LUTINPUT_ABS"},
309 {0x1D1, "GPUREG_LIGHTING_LUTINPUT_SELECT"},
310 {0x1D2, "GPUREG_LIGHTING_LUTINPUT_SCALE"},
311
312 {0x1D9, "GPUREG_LIGHTING_LIGHT_PERMUTATION"},
313
314 {0x200, "GPUREG_ATTRIBBUFFERS_LOC"},
315 {0x201, "GPUREG_ATTRIBBUFFERS_FORMAT_LOW"},
316 {0x202, "GPUREG_ATTRIBBUFFERS_FORMAT_HIGH"},
317 {0x203, "GPUREG_ATTRIBBUFFER0_OFFSET"},
318 {0x204, "GPUREG_ATTRIBBUFFER0_CONFIG1"},
319 {0x205, "GPUREG_ATTRIBBUFFER0_CONFIG2"},
320 {0x206, "GPUREG_ATTRIBBUFFER1_OFFSET"},
321 {0x207, "GPUREG_ATTRIBBUFFER1_CONFIG1"},
322 {0x208, "GPUREG_ATTRIBBUFFER1_CONFIG2"},
323 {0x209, "GPUREG_ATTRIBBUFFER2_OFFSET"},
324 {0x20A, "GPUREG_ATTRIBBUFFER2_CONFIG1"},
325 {0x20B, "GPUREG_ATTRIBBUFFER2_CONFIG2"},
326 {0x20C, "GPUREG_ATTRIBBUFFER3_OFFSET"},
327 {0x20D, "GPUREG_ATTRIBBUFFER3_CONFIG1"},
328 {0x20E, "GPUREG_ATTRIBBUFFER3_CONFIG2"},
329 {0x20F, "GPUREG_ATTRIBBUFFER4_OFFSET"},
330 {0x210, "GPUREG_ATTRIBBUFFER4_CONFIG1"},
331 {0x211, "GPUREG_ATTRIBBUFFER4_CONFIG2"},
332 {0x212, "GPUREG_ATTRIBBUFFER5_OFFSET"},
333 {0x213, "GPUREG_ATTRIBBUFFER5_CONFIG1"},
334 {0x214, "GPUREG_ATTRIBBUFFER5_CONFIG2"},
335 {0x215, "GPUREG_ATTRIBBUFFER6_OFFSET"},
336 {0x216, "GPUREG_ATTRIBBUFFER6_CONFIG1"},
337 {0x217, "GPUREG_ATTRIBBUFFER6_CONFIG2"},
338 {0x218, "GPUREG_ATTRIBBUFFER7_OFFSET"},
339 {0x219, "GPUREG_ATTRIBBUFFER7_CONFIG1"},
340 {0x21A, "GPUREG_ATTRIBBUFFER7_CONFIG2"},
341 {0x21B, "GPUREG_ATTRIBBUFFER8_OFFSET"},
342 {0x21C, "GPUREG_ATTRIBBUFFER8_CONFIG1"},
343 {0x21D, "GPUREG_ATTRIBBUFFER8_CONFIG2"},
344 {0x21E, "GPUREG_ATTRIBBUFFER9_OFFSET"},
345 {0x21F, "GPUREG_ATTRIBBUFFER9_CONFIG1"},
346 {0x220, "GPUREG_ATTRIBBUFFER9_CONFIG2"},
347 {0x221, "GPUREG_ATTRIBBUFFER10_OFFSET"},
348 {0x222, "GPUREG_ATTRIBBUFFER10_CONFIG1"},
349 {0x223, "GPUREG_ATTRIBBUFFER10_CONFIG2"},
350 {0x224, "GPUREG_ATTRIBBUFFER11_OFFSET"},
351 {0x225, "GPUREG_ATTRIBBUFFER11_CONFIG1"},
352 {0x226, "GPUREG_ATTRIBBUFFER11_CONFIG2"},
353 {0x227, "GPUREG_INDEXBUFFER_CONFIG"},
354 {0x228, "GPUREG_NUMVERTICES"},
355 {0x229, "GPUREG_GEOSTAGE_CONFIG"},
356 {0x22A, "GPUREG_VERTEX_OFFSET"},
357
358 {0x22D, "GPUREG_POST_VERTEX_CACHE_NUM"},
359 {0x22E, "GPUREG_DRAWARRAYS"},
360 {0x22F, "GPUREG_DRAWELEMENTS"},
361
362 {0x231, "GPUREG_VTX_FUNC"},
363 {0x232, "GPUREG_FIXEDATTRIB_INDEX"},
364 {0x233, "GPUREG_FIXEDATTRIB_DATA0"},
365 {0x234, "GPUREG_FIXEDATTRIB_DATA1"},
366 {0x235, "GPUREG_FIXEDATTRIB_DATA2"},
367
368 {0x238, "GPUREG_CMDBUF_SIZE0"},
369 {0x239, "GPUREG_CMDBUF_SIZE1"},
370 {0x23A, "GPUREG_CMDBUF_ADDR0"},
371 {0x23B, "GPUREG_CMDBUF_ADDR1"},
372 {0x23C, "GPUREG_CMDBUF_JUMP0"},
373 {0x23D, "GPUREG_CMDBUF_JUMP1"},
374
375 {0x242, "GPUREG_VSH_NUM_ATTR"},
376
377 {0x244, "GPUREG_VSH_COM_MODE"},
378 {0x245, "GPUREG_START_DRAW_FUNC0"},
379
380 {0x24A, "GPUREG_VSH_OUTMAP_TOTAL1"},
381
382 {0x251, "GPUREG_VSH_OUTMAP_TOTAL2"},
383 {0x252, "GPUREG_GSH_MISC0"},
384 {0x253, "GPUREG_GEOSTAGE_CONFIG2"},
385 {0x254, "GPUREG_GSH_MISC1"},
386
387 {0x25E, "GPUREG_PRIMITIVE_CONFIG"},
388 {0x25F, "GPUREG_RESTART_PRIMITIVE"},
389
390 {0x280, "GPUREG_GSH_BOOLUNIFORM"},
391 {0x281, "GPUREG_GSH_INTUNIFORM_I0"},
392 {0x282, "GPUREG_GSH_INTUNIFORM_I1"},
393 {0x283, "GPUREG_GSH_INTUNIFORM_I2"},
394 {0x284, "GPUREG_GSH_INTUNIFORM_I3"},
395
396 {0x289, "GPUREG_GSH_INPUTBUFFER_CONFIG"},
397 {0x28A, "GPUREG_GSH_ENTRYPOINT"},
398 {0x28B, "GPUREG_GSH_ATTRIBUTES_PERMUTATION_LOW"},
399 {0x28C, "GPUREG_GSH_ATTRIBUTES_PERMUTATION_HIGH"},
400 {0x28D, "GPUREG_GSH_OUTMAP_MASK"},
401
402 {0x28F, "GPUREG_GSH_CODETRANSFER_END"},
403 {0x290, "GPUREG_GSH_FLOATUNIFORM_INDEX"},
404 {0x291, "GPUREG_GSH_FLOATUNIFORM_DATA0"},
405 {0x292, "GPUREG_GSH_FLOATUNIFORM_DATA1"},
406 {0x293, "GPUREG_GSH_FLOATUNIFORM_DATA2"},
407 {0x294, "GPUREG_GSH_FLOATUNIFORM_DATA3"},
408 {0x295, "GPUREG_GSH_FLOATUNIFORM_DATA4"},
409 {0x296, "GPUREG_GSH_FLOATUNIFORM_DATA5"},
410 {0x297, "GPUREG_GSH_FLOATUNIFORM_DATA6"},
411 {0x298, "GPUREG_GSH_FLOATUNIFORM_DATA7"},
412
413 {0x29B, "GPUREG_GSH_CODETRANSFER_INDEX"},
414 {0x29C, "GPUREG_GSH_CODETRANSFER_DATA0"},
415 {0x29D, "GPUREG_GSH_CODETRANSFER_DATA1"},
416 {0x29E, "GPUREG_GSH_CODETRANSFER_DATA2"},
417 {0x29F, "GPUREG_GSH_CODETRANSFER_DATA3"},
418 {0x2A0, "GPUREG_GSH_CODETRANSFER_DATA4"},
419 {0x2A1, "GPUREG_GSH_CODETRANSFER_DATA5"},
420 {0x2A2, "GPUREG_GSH_CODETRANSFER_DATA6"},
421 {0x2A3, "GPUREG_GSH_CODETRANSFER_DATA7"},
422
423 {0x2A5, "GPUREG_GSH_OPDESCS_INDEX"},
424 {0x2A6, "GPUREG_GSH_OPDESCS_DATA0"},
425 {0x2A7, "GPUREG_GSH_OPDESCS_DATA1"},
426 {0x2A8, "GPUREG_GSH_OPDESCS_DATA2"},
427 {0x2A9, "GPUREG_GSH_OPDESCS_DATA3"},
428 {0x2AA, "GPUREG_GSH_OPDESCS_DATA4"},
429 {0x2AB, "GPUREG_GSH_OPDESCS_DATA5"},
430 {0x2AC, "GPUREG_GSH_OPDESCS_DATA6"},
431 {0x2AD, "GPUREG_GSH_OPDESCS_DATA7"},
432
433 {0x2B0, "GPUREG_VSH_BOOLUNIFORM"},
434 {0x2B1, "GPUREG_VSH_INTUNIFORM_I0"},
435 {0x2B2, "GPUREG_VSH_INTUNIFORM_I1"},
436 {0x2B3, "GPUREG_VSH_INTUNIFORM_I2"},
437 {0x2B4, "GPUREG_VSH_INTUNIFORM_I3"},
438
439 {0x2B9, "GPUREG_VSH_INPUTBUFFER_CONFIG"},
440 {0x2BA, "GPUREG_VSH_ENTRYPOINT"},
441 {0x2BB, "GPUREG_VSH_ATTRIBUTES_PERMUTATION_LOW"},
442 {0x2BC, "GPUREG_VSH_ATTRIBUTES_PERMUTATION_HIGH"},
443 {0x2BD, "GPUREG_VSH_OUTMAP_MASK"},
444
445 {0x2BF, "GPUREG_VSH_CODETRANSFER_END"},
446 {0x2C0, "GPUREG_VSH_FLOATUNIFORM_INDEX"},
447 {0x2C1, "GPUREG_VSH_FLOATUNIFORM_DATA0"},
448 {0x2C2, "GPUREG_VSH_FLOATUNIFORM_DATA1"},
449 {0x2C3, "GPUREG_VSH_FLOATUNIFORM_DATA2"},
450 {0x2C4, "GPUREG_VSH_FLOATUNIFORM_DATA3"},
451 {0x2C5, "GPUREG_VSH_FLOATUNIFORM_DATA4"},
452 {0x2C6, "GPUREG_VSH_FLOATUNIFORM_DATA5"},
453 {0x2C7, "GPUREG_VSH_FLOATUNIFORM_DATA6"},
454 {0x2C8, "GPUREG_VSH_FLOATUNIFORM_DATA7"},
455
456 {0x2CB, "GPUREG_VSH_CODETRANSFER_INDEX"},
457 {0x2CC, "GPUREG_VSH_CODETRANSFER_DATA0"},
458 {0x2CD, "GPUREG_VSH_CODETRANSFER_DATA1"},
459 {0x2CE, "GPUREG_VSH_CODETRANSFER_DATA2"},
460 {0x2CF, "GPUREG_VSH_CODETRANSFER_DATA3"},
461 {0x2D0, "GPUREG_VSH_CODETRANSFER_DATA4"},
462 {0x2D1, "GPUREG_VSH_CODETRANSFER_DATA5"},
463 {0x2D2, "GPUREG_VSH_CODETRANSFER_DATA6"},
464 {0x2D3, "GPUREG_VSH_CODETRANSFER_DATA7"},
465
466 {0x2D5, "GPUREG_VSH_OPDESCS_INDEX"},
467 {0x2D6, "GPUREG_VSH_OPDESCS_DATA0"},
468 {0x2D7, "GPUREG_VSH_OPDESCS_DATA1"},
469 {0x2D8, "GPUREG_VSH_OPDESCS_DATA2"},
470 {0x2D9, "GPUREG_VSH_OPDESCS_DATA3"},
471 {0x2DA, "GPUREG_VSH_OPDESCS_DATA4"},
472 {0x2DB, "GPUREG_VSH_OPDESCS_DATA5"},
473 {0x2DC, "GPUREG_VSH_OPDESCS_DATA6"},
474 {0x2DD, "GPUREG_VSH_OPDESCS_DATA7"},
475};
476
477const char* Regs::GetRegisterName(u16 index) {
478 auto found = std::lower_bound(std::begin(register_names), std::end(register_names), index,
479 [](auto p, auto i) { return p.first < i; });
480 if (found->first == index) {
481 return found->second;
482 } else {
483 // Return empty string if no match is found
484 return "";
485 }
486}
487
488} // namespace Pica
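GetRegisterName above relies on register_names being sorted by index and performs a binary search with std::lower_bound; note that, as written, it dereferences the result without checking for the end iterator, so an index greater than the last table entry would read past the array. A standalone sketch of the same lookup with that end() check added (the three-entry table is a reduced example):

// Standalone sketch of the sorted-table register-name lookup, with an end() check.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <utility>

static const std::pair<uint16_t, const char*> names[] = {
    {0x010, "GPUREG_FINALIZE"},
    {0x040, "GPUREG_FACECULLING_CONFIG"},
    {0x228, "GPUREG_NUMVERTICES"},
};

static const char* LookUp(uint16_t index) {
    auto found = std::lower_bound(std::begin(names), std::end(names), index,
                                  [](const auto& entry, uint16_t i) { return entry.first < i; });
    if (found != std::end(names) && found->first == index)
        return found->second;
    return ""; // unknown register: return an empty string, as above
}

int main() {
    std::printf("0x228 -> %s\n", LookUp(0x228));     // GPUREG_NUMVERTICES
    std::printf("0x2FF -> \"%s\"\n", LookUp(0x2FF)); // empty: not in the table
}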
diff --git a/src/video_core/regs.h b/src/video_core/regs.h
deleted file mode 100644
index 6d5f98cac..000000000
--- a/src/video_core/regs.h
+++ /dev/null
@@ -1,149 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include <string>
10#ifndef _MSC_VER
11#include <type_traits> // for std::enable_if
12#endif
13
14#include "common/common_funcs.h"
15#include "common/common_types.h"
16#include "video_core/regs_framebuffer.h"
17#include "video_core/regs_lighting.h"
18#include "video_core/regs_pipeline.h"
19#include "video_core/regs_rasterizer.h"
20#include "video_core/regs_shader.h"
21#include "video_core/regs_texturing.h"
22
23namespace Pica {
24
25// Returns index corresponding to the Regs member labeled by field_name
26// TODO: Due to Visual studio bug 209229, offsetof does not return constant expressions
27// when used with array elements (e.g. PICA_REG_INDEX(vs_uniform_setup.set_value[1])).
28// For details cf.
29// https://connect.microsoft.com/VisualStudio/feedback/details/209229/offsetof-does-not-produce-a-constant-expression-for-array-members
30// Hopefully, this will be fixed sometime in the future.
31// For lack of better alternatives, we currently hardcode the offsets when constant
32// expressions are needed via PICA_REG_INDEX_WORKAROUND (on sane compilers, static_asserts
33// will then make sure the offsets indeed match the automatically calculated ones).
34#define PICA_REG_INDEX(field_name) (offsetof(Pica::Regs, field_name) / sizeof(u32))
35#if defined(_MSC_VER)
36#define PICA_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) (backup_workaround_index)
37#else
38// NOTE: Yeah, hacking in a static_assert here just to workaround the lacking MSVC compiler
39// really is this annoying. This macro just forwards its first argument to PICA_REG_INDEX
40// and then performs a (no-op) cast to size_t iff the second argument matches the expected
41// field offset. Otherwise, the compiler will fail to compile this code.
42#define PICA_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) \
43 ((typename std::enable_if<backup_workaround_index == PICA_REG_INDEX(field_name), \
44 size_t>::type)PICA_REG_INDEX(field_name))
45#endif // _MSC_VER
46
47struct Regs {
48 static constexpr size_t NUM_REGS = 0x300;
49
50 union {
51 struct {
52 INSERT_PADDING_WORDS(0x10);
53 u32 trigger_irq;
54 INSERT_PADDING_WORDS(0x2f);
55 RasterizerRegs rasterizer;
56 TexturingRegs texturing;
57 FramebufferRegs framebuffer;
58 LightingRegs lighting;
59 PipelineRegs pipeline;
60 ShaderRegs gs;
61 ShaderRegs vs;
62 INSERT_PADDING_WORDS(0x20);
63 };
64 std::array<u32, NUM_REGS> reg_array;
65 };
66
67 /// Map register indices to names readable by humans
68 static const char* GetRegisterName(u16 index);
69};
70
71static_assert(sizeof(Regs) == Regs::NUM_REGS * sizeof(u32), "Regs struct has wrong size");
72
73// TODO: MSVC does not support using offsetof() on non-static data members even though this
74// is technically allowed since C++11. This macro should be enabled once MSVC adds
75// support for that.
76#ifndef _MSC_VER
77#define ASSERT_REG_POSITION(field_name, position) \
78 static_assert(offsetof(Regs, field_name) == position * 4, \
79 "Field " #field_name " has invalid position")
80
81ASSERT_REG_POSITION(trigger_irq, 0x10);
82
83ASSERT_REG_POSITION(rasterizer, 0x40);
84ASSERT_REG_POSITION(rasterizer.cull_mode, 0x40);
85ASSERT_REG_POSITION(rasterizer.viewport_size_x, 0x41);
86ASSERT_REG_POSITION(rasterizer.viewport_size_y, 0x43);
87ASSERT_REG_POSITION(rasterizer.viewport_depth_range, 0x4d);
88ASSERT_REG_POSITION(rasterizer.viewport_depth_near_plane, 0x4e);
89ASSERT_REG_POSITION(rasterizer.vs_output_attributes[0], 0x50);
90ASSERT_REG_POSITION(rasterizer.vs_output_attributes[1], 0x51);
91ASSERT_REG_POSITION(rasterizer.scissor_test, 0x65);
92ASSERT_REG_POSITION(rasterizer.viewport_corner, 0x68);
93ASSERT_REG_POSITION(rasterizer.depthmap_enable, 0x6D);
94
95ASSERT_REG_POSITION(texturing, 0x80);
96ASSERT_REG_POSITION(texturing.main_config, 0x80);
97ASSERT_REG_POSITION(texturing.texture0, 0x81);
98ASSERT_REG_POSITION(texturing.texture0_format, 0x8e);
99ASSERT_REG_POSITION(texturing.fragment_lighting_enable, 0x8f);
100ASSERT_REG_POSITION(texturing.texture1, 0x91);
101ASSERT_REG_POSITION(texturing.texture1_format, 0x96);
102ASSERT_REG_POSITION(texturing.texture2, 0x99);
103ASSERT_REG_POSITION(texturing.texture2_format, 0x9e);
104ASSERT_REG_POSITION(texturing.proctex, 0xa8);
105ASSERT_REG_POSITION(texturing.proctex_noise_u, 0xa9);
106ASSERT_REG_POSITION(texturing.proctex_noise_v, 0xaa);
107ASSERT_REG_POSITION(texturing.proctex_noise_frequency, 0xab);
108ASSERT_REG_POSITION(texturing.proctex_lut, 0xac);
109ASSERT_REG_POSITION(texturing.proctex_lut_offset, 0xad);
110ASSERT_REG_POSITION(texturing.proctex_lut_config, 0xaf);
111ASSERT_REG_POSITION(texturing.tev_stage0, 0xc0);
112ASSERT_REG_POSITION(texturing.tev_stage1, 0xc8);
113ASSERT_REG_POSITION(texturing.tev_stage2, 0xd0);
114ASSERT_REG_POSITION(texturing.tev_stage3, 0xd8);
115ASSERT_REG_POSITION(texturing.tev_combiner_buffer_input, 0xe0);
116ASSERT_REG_POSITION(texturing.fog_mode, 0xe0);
117ASSERT_REG_POSITION(texturing.fog_color, 0xe1);
118ASSERT_REG_POSITION(texturing.fog_lut_offset, 0xe6);
119ASSERT_REG_POSITION(texturing.fog_lut_data, 0xe8);
120ASSERT_REG_POSITION(texturing.tev_stage4, 0xf0);
121ASSERT_REG_POSITION(texturing.tev_stage5, 0xf8);
122ASSERT_REG_POSITION(texturing.tev_combiner_buffer_color, 0xfd);
123
124ASSERT_REG_POSITION(framebuffer, 0x100);
125ASSERT_REG_POSITION(framebuffer.output_merger, 0x100);
126ASSERT_REG_POSITION(framebuffer.framebuffer, 0x110);
127
128ASSERT_REG_POSITION(lighting, 0x140);
129
130ASSERT_REG_POSITION(pipeline, 0x200);
131ASSERT_REG_POSITION(pipeline.vertex_attributes, 0x200);
132ASSERT_REG_POSITION(pipeline.index_array, 0x227);
133ASSERT_REG_POSITION(pipeline.num_vertices, 0x228);
134ASSERT_REG_POSITION(pipeline.vertex_offset, 0x22a);
135ASSERT_REG_POSITION(pipeline.trigger_draw, 0x22e);
136ASSERT_REG_POSITION(pipeline.trigger_draw_indexed, 0x22f);
137ASSERT_REG_POSITION(pipeline.vs_default_attributes_setup, 0x232);
138ASSERT_REG_POSITION(pipeline.command_buffer, 0x238);
139ASSERT_REG_POSITION(pipeline.gpu_mode, 0x245);
140ASSERT_REG_POSITION(pipeline.triangle_topology, 0x25e);
141ASSERT_REG_POSITION(pipeline.restart_primitive, 0x25f);
142
143ASSERT_REG_POSITION(gs, 0x280);
144ASSERT_REG_POSITION(vs, 0x2b0);
145
146#undef ASSERT_REG_POSITION
147#endif // !defined(_MSC_VER)
148
149} // namespace Pica
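PICA_REG_INDEX above converts a Regs field into its word index by dividing its byte offset by sizeof(u32), which is exactly what the ASSERT_REG_POSITION checks verify against the hardcoded positions. A tiny standalone sketch of the same offsetof-to-index computation on a mock struct (MockRegs is illustrative, not Pica::Regs):

// Standalone sketch of the offsetof-to-register-index computation behind PICA_REG_INDEX.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct MockRegs {
    uint32_t padding[0x10];
    uint32_t trigger_irq; // expected at word index 0x10
};

static_assert(offsetof(MockRegs, trigger_irq) / sizeof(uint32_t) == 0x10,
              "trigger_irq has an unexpected register index");

int main() {
    std::printf("trigger_irq index = 0x%zX\n",
                offsetof(MockRegs, trigger_irq) / sizeof(uint32_t));
}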
diff --git a/src/video_core/regs_framebuffer.h b/src/video_core/regs_framebuffer.h
deleted file mode 100644
index 7b565f911..000000000
--- a/src/video_core/regs_framebuffer.h
+++ /dev/null
@@ -1,283 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include "common/assert.h"
10#include "common/bit_field.h"
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13#include "common/logging/log.h"
14
15namespace Pica {
16
17struct FramebufferRegs {
18 enum class LogicOp : u32 {
19 Clear = 0,
20 And = 1,
21 AndReverse = 2,
22 Copy = 3,
23 Set = 4,
24 CopyInverted = 5,
25 NoOp = 6,
26 Invert = 7,
27 Nand = 8,
28 Or = 9,
29 Nor = 10,
30 Xor = 11,
31 Equiv = 12,
32 AndInverted = 13,
33 OrReverse = 14,
34 OrInverted = 15,
35 };
36
37 enum class BlendEquation : u32 {
38 Add = 0,
39 Subtract = 1,
40 ReverseSubtract = 2,
41 Min = 3,
42 Max = 4,
43 };
44
45 enum class BlendFactor : u32 {
46 Zero = 0,
47 One = 1,
48 SourceColor = 2,
49 OneMinusSourceColor = 3,
50 DestColor = 4,
51 OneMinusDestColor = 5,
52 SourceAlpha = 6,
53 OneMinusSourceAlpha = 7,
54 DestAlpha = 8,
55 OneMinusDestAlpha = 9,
56 ConstantColor = 10,
57 OneMinusConstantColor = 11,
58 ConstantAlpha = 12,
59 OneMinusConstantAlpha = 13,
60 SourceAlphaSaturate = 14,
61 };
62
63 enum class CompareFunc : u32 {
64 Never = 0,
65 Always = 1,
66 Equal = 2,
67 NotEqual = 3,
68 LessThan = 4,
69 LessThanOrEqual = 5,
70 GreaterThan = 6,
71 GreaterThanOrEqual = 7,
72 };
73
74 enum class StencilAction : u32 {
75 Keep = 0,
76 Zero = 1,
77 Replace = 2,
78 Increment = 3,
79 Decrement = 4,
80 Invert = 5,
81 IncrementWrap = 6,
82 DecrementWrap = 7,
83 };
84
85 struct {
86 union {
87 // If false, logic blending is used
88 BitField<8, 1, u32> alphablend_enable;
89 };
90
91 union {
92 BitField<0, 3, BlendEquation> blend_equation_rgb;
93 BitField<8, 3, BlendEquation> blend_equation_a;
94
95 BitField<16, 4, BlendFactor> factor_source_rgb;
96 BitField<20, 4, BlendFactor> factor_dest_rgb;
97
98 BitField<24, 4, BlendFactor> factor_source_a;
99 BitField<28, 4, BlendFactor> factor_dest_a;
100 } alpha_blending;
101
102 union {
103 BitField<0, 4, LogicOp> logic_op;
104 };
105
106 union {
107 u32 raw;
108 BitField<0, 8, u32> r;
109 BitField<8, 8, u32> g;
110 BitField<16, 8, u32> b;
111 BitField<24, 8, u32> a;
112 } blend_const;
113
114 union {
115 BitField<0, 1, u32> enable;
116 BitField<4, 3, CompareFunc> func;
117 BitField<8, 8, u32> ref;
118 } alpha_test;
119
120 struct {
121 union {
122 // Raw value of this register
123 u32 raw_func;
124
125 // If true, enable stencil testing
126 BitField<0, 1, u32> enable;
127
128 // Comparison operation for stencil testing
129 BitField<4, 3, CompareFunc> func;
130
131 // Mask used to control writing to the stencil buffer
132 BitField<8, 8, u32> write_mask;
133
134 // Value to compare against for stencil testing
135 BitField<16, 8, u32> reference_value;
136
137 // Mask to apply on stencil test inputs
138 BitField<24, 8, u32> input_mask;
139 };
140
141 union {
142 // Raw value of this register
143 u32 raw_op;
144
145 // Action to perform when the stencil test fails
146 BitField<0, 3, StencilAction> action_stencil_fail;
147
148 // Action to perform when stencil testing passed but depth testing fails
149 BitField<4, 3, StencilAction> action_depth_fail;
150
151 // Action to perform when both stencil and depth testing pass
152 BitField<8, 3, StencilAction> action_depth_pass;
153 };
154 } stencil_test;
155
156 union {
157 BitField<0, 1, u32> depth_test_enable;
158 BitField<4, 3, CompareFunc> depth_test_func;
159 BitField<8, 1, u32> red_enable;
160 BitField<9, 1, u32> green_enable;
161 BitField<10, 1, u32> blue_enable;
162 BitField<11, 1, u32> alpha_enable;
163 BitField<12, 1, u32> depth_write_enable;
164 };
165
166 INSERT_PADDING_WORDS(0x8);
167 } output_merger;
168
169 // Components are laid out in reverse byte order, most significant bits first.
170 enum class ColorFormat : u32 {
171 RGBA8 = 0,
172 RGB8 = 1,
173 RGB5A1 = 2,
174 RGB565 = 3,
175 RGBA4 = 4,
176 };
177
178 enum class DepthFormat : u32 {
179 D16 = 0,
180 D24 = 2,
181 D24S8 = 3,
182 };
183
184 // Returns the number of bytes in the specified color format
185 static unsigned BytesPerColorPixel(ColorFormat format) {
186 switch (format) {
187 case ColorFormat::RGBA8:
188 return 4;
189 case ColorFormat::RGB8:
190 return 3;
191 case ColorFormat::RGB5A1:
192 case ColorFormat::RGB565:
193 case ColorFormat::RGBA4:
194 return 2;
195 default:
196 LOG_CRITICAL(HW_GPU, "Unknown color format %u", format);
197 UNIMPLEMENTED();
198 }
199 }
200
201 struct FramebufferConfig {
202 INSERT_PADDING_WORDS(0x3);
203
204 union {
205 BitField<0, 4, u32> allow_color_write; // 0 = disable, else enable
206 };
207
208 INSERT_PADDING_WORDS(0x1);
209
210 union {
211 BitField<0, 2, u32> allow_depth_stencil_write; // 0 = disable, else enable
212 };
213
214 BitField<0, 2, DepthFormat> depth_format;
215
216 BitField<16, 3, ColorFormat> color_format;
217
218 INSERT_PADDING_WORDS(0x4);
219
220 BitField<0, 28, u32> depth_buffer_address;
221 BitField<0, 28, u32> color_buffer_address;
222
223 union {
224 // Apparently, the framebuffer width is stored as expected,
225 // while the height is stored as the actual height minus one.
226 // Hence, don't access these fields directly but use the accessors
227 // GetWidth() and GetHeight() instead.
228 BitField<0, 11, u32> width;
229 BitField<12, 10, u32> height;
230 };
231
232 INSERT_PADDING_WORDS(0x1);
233
234 inline PAddr GetColorBufferPhysicalAddress() const {
235 return color_buffer_address * 8;
236 }
237 inline PAddr GetDepthBufferPhysicalAddress() const {
238 return depth_buffer_address * 8;
239 }
240
241 inline u32 GetWidth() const {
242 return width;
243 }
244
245 inline u32 GetHeight() const {
246 return height + 1;
247 }
248 } framebuffer;
249
250 // Returns the number of bytes in the specified depth format
251 static u32 BytesPerDepthPixel(DepthFormat format) {
252 switch (format) {
253 case DepthFormat::D16:
254 return 2;
255 case DepthFormat::D24:
256 return 3;
257 case DepthFormat::D24S8:
258 return 4;
259 }
260
261 ASSERT_MSG(false, "Unknown depth format %u", format);
262 }
263
264 // Returns the number of bits per depth component of the specified depth format
265 static u32 DepthBitsPerPixel(DepthFormat format) {
266 switch (format) {
267 case DepthFormat::D16:
268 return 16;
269 case DepthFormat::D24:
270 case DepthFormat::D24S8:
271 return 24;
272 }
273
274 ASSERT_MSG(false, "Unknown depth format %u", format);
275 }
276
277 INSERT_PADDING_WORDS(0x20);
278};
279
280static_assert(sizeof(FramebufferRegs) == 0x40 * sizeof(u32),
281 "FramebufferRegs struct has incorrect size");
282
283} // namespace Pica
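A minimal standalone sketch of how the encodings in the removed FramebufferRegs fit together: the width field is stored as-is, the height field is stored as height minus one, buffer addresses are stored divided by 8, and the per-pixel byte counts match BytesPerColorPixel() above. The helper and struct names below are illustrative only, not part of the deleted header.

#include <cstdint>

// Byte size per pixel for the removed FramebufferRegs::ColorFormat values.
static unsigned BytesPerColorPixel(unsigned color_format) {
    switch (color_format) {
    case 0: return 4; // RGBA8
    case 1: return 3; // RGB8
    case 2:           // RGB5A1
    case 3:           // RGB565
    case 4: return 2; // RGBA4
    default: return 0;
    }
}

struct ColorBufferInfo {
    std::uint32_t physical_address;
    std::uint32_t size_in_bytes;
};

// Hypothetical decoder for the raw color buffer register fields.
static ColorBufferInfo DecodeColorBuffer(std::uint32_t color_buffer_address_reg,
                                         std::uint32_t width_field, std::uint32_t height_field,
                                         unsigned color_format) {
    const std::uint32_t width = width_field;       // stored as the actual width
    const std::uint32_t height = height_field + 1; // stored as the actual height minus one
    return {color_buffer_address_reg * 8,          // addresses are stored divided by 8
            width * height * BytesPerColorPixel(color_format)};
}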
diff --git a/src/video_core/regs_lighting.h b/src/video_core/regs_lighting.h
deleted file mode 100644
index b89709cfe..000000000
--- a/src/video_core/regs_lighting.h
+++ /dev/null
@@ -1,321 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include "common/assert.h"
10#include "common/bit_field.h"
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13#include "common/vector_math.h"
14
15namespace Pica {
16
17struct LightingRegs {
18 enum class LightingSampler {
19 Distribution0 = 0,
20 Distribution1 = 1,
21 Fresnel = 3,
22 ReflectBlue = 4,
23 ReflectGreen = 5,
24 ReflectRed = 6,
25 SpotlightAttenuation = 8,
26 DistanceAttenuation = 16,
27 };
28
29 static constexpr unsigned NumLightingSampler = 24;
30
31 static LightingSampler SpotlightAttenuationSampler(unsigned index) {
32 return static_cast<LightingSampler>(
33 static_cast<unsigned>(LightingSampler::SpotlightAttenuation) + index);
34 }
35
36 static LightingSampler DistanceAttenuationSampler(unsigned index) {
37 return static_cast<LightingSampler>(
38 static_cast<unsigned>(LightingSampler::DistanceAttenuation) + index);
39 }
40
41 /**
42 * Pica fragment lighting supports using different LUTs for each lighting component: Reflectance
43 * R, G, and B channels, distribution function for specular components 0 and 1, fresnel factor,
44 * and spotlight attenuation. Furthermore, which LUTs are used for each channel (or whether a
45 * channel is enabled at all) is specified by various pre-defined lighting configurations. With
46 * configurations that require more LUTs, more cycles are required on HW to perform lighting
47 * computations.
48 */
49 enum class LightingConfig : u32 {
50 Config0 = 0, ///< Reflect Red, Distribution 0, Spotlight
51 Config1 = 1, ///< Reflect Red, Fresnel, Spotlight
52 Config2 = 2, ///< Reflect Red, Distribution 0/1
53 Config3 = 3, ///< Distribution 0/1, Fresnel
54 Config4 = 4, ///< Reflect Red/Green/Blue, Distribution 0/1, Spotlight
55 Config5 = 5, ///< Reflect Red/Green/Blue, Distribution 0, Fresnel, Spotlight
56 Config6 = 6, ///< Reflect Red, Distribution 0/1, Fresnel, Spotlight
57
58 Config7 = 8, ///< Reflect Red/Green/Blue, Distribution 0/1, Fresnel, Spotlight
59 ///< NOTE: '8' is intentional, '7' does not appear to be a valid configuration
60 };
61
62 /// Selects which lighting components are affected by fresnel
63 enum class LightingFresnelSelector : u32 {
64 None = 0, ///< Fresnel is disabled
65 PrimaryAlpha = 1, ///< Primary (diffuse) lighting alpha is affected by fresnel
66 SecondaryAlpha = 2, ///< Secondary (specular) lighting alpha is affected by fresnel
67 Both =
68 PrimaryAlpha |
69 SecondaryAlpha, ///< Both primary and secondary lighting alphas are affected by fresnel
70 };
71
72 /// Factor used to scale the output of a lighting LUT
73 enum class LightingScale : u32 {
74 Scale1 = 0, ///< Scale is 1x
75 Scale2 = 1, ///< Scale is 2x
76 Scale4 = 2, ///< Scale is 4x
77 Scale8 = 3, ///< Scale is 8x
78
79 Scale1_4 = 6, ///< Scale is 0.25x
80 Scale1_2 = 7, ///< Scale is 0.5x
81 };
82
83 enum class LightingLutInput : u32 {
84 NH = 0, // Cosine of the angle between the normal and half-angle vectors
85 VH = 1, // Cosine of the angle between the view and half-angle vectors
86 NV = 2, // Cosine of the angle between the normal and the view vector
87 LN = 3, // Cosine of the angle between the light and the normal vectors
88 SP = 4, // Cosine of the angle between the light and the inverse spotlight vectors
89 CP = 5, // Cosine of the angle between the tangent and projection of half-angle vectors
90 };
91
92 enum class LightingBumpMode : u32 {
93 None = 0,
94 NormalMap = 1,
95 TangentMap = 2,
96 };
97
98 union LightColor {
99 BitField<0, 10, u32> b;
100 BitField<10, 10, u32> g;
101 BitField<20, 10, u32> r;
102
103 Math::Vec3f ToVec3f() const {
104 // These fields are 10 bits wide, however 255 corresponds to 1.0f for each color
105 // component
106 return Math::MakeVec((f32)r / 255.f, (f32)g / 255.f, (f32)b / 255.f);
107 }
108 };
109
110 /// Returns true if the specified lighting sampler is supported by the current Pica lighting
111 /// configuration
112 static bool IsLightingSamplerSupported(LightingConfig config, LightingSampler sampler) {
113 switch (sampler) {
114 case LightingSampler::Distribution0:
115 return (config != LightingConfig::Config1);
116
117 case LightingSampler::Distribution1:
118 return (config != LightingConfig::Config0) && (config != LightingConfig::Config1) &&
119 (config != LightingConfig::Config5);
120
121 case LightingSampler::SpotlightAttenuation:
122 return (config != LightingConfig::Config2) && (config != LightingConfig::Config3);
123
124 case LightingSampler::Fresnel:
125 return (config != LightingConfig::Config0) && (config != LightingConfig::Config2) &&
126 (config != LightingConfig::Config4);
127
128 case LightingSampler::ReflectRed:
129 return (config != LightingConfig::Config3);
130
131 case LightingSampler::ReflectGreen:
132 case LightingSampler::ReflectBlue:
133 return (config == LightingConfig::Config4) || (config == LightingConfig::Config5) ||
134 (config == LightingConfig::Config7);
135 default:
136 UNREACHABLE_MSG("Regs::IsLightingSamplerSupported: Reached unreachable section, "
137 "sampler should be one of Distribution0, Distribution1, "
138 "SpotlightAttenuation, Fresnel, ReflectRed, ReflectGreen or "
139 "ReflectBlue, instead got %i",
140 static_cast<int>(config));
141 }
142 }
143
144 struct LightSrc {
145 LightColor specular_0; // material.specular_0 * light.specular_0
146 LightColor specular_1; // material.specular_1 * light.specular_1
147 LightColor diffuse; // material.diffuse * light.diffuse
148 LightColor ambient; // material.ambient * light.ambient
149
150 // Encoded as 16-bit floating point
151 union {
152 BitField<0, 16, u32> x;
153 BitField<16, 16, u32> y;
154 };
155 union {
156 BitField<0, 16, u32> z;
157 };
158
159 // inverse spotlight direction vector, encoded as fixed1.1.11
160 union {
161 BitField<0, 13, s32> spot_x;
162 BitField<16, 13, s32> spot_y;
163 };
164 union {
165 BitField<0, 13, s32> spot_z;
166 };
167
168 INSERT_PADDING_WORDS(0x1);
169
170 union {
171 BitField<0, 1, u32> directional;
172 BitField<1, 1, u32> two_sided_diffuse; // When disabled, clamp dot-product to 0
173 BitField<2, 1, u32> geometric_factor_0;
174 BitField<3, 1, u32> geometric_factor_1;
175 } config;
176
177 BitField<0, 20, u32> dist_atten_bias;
178 BitField<0, 20, u32> dist_atten_scale;
179
180 INSERT_PADDING_WORDS(0x4);
181 };
182 static_assert(sizeof(LightSrc) == 0x10 * sizeof(u32), "LightSrc structure must be 0x10 words");
183
184 LightSrc light[8];
185 LightColor global_ambient; // Emission + (material.ambient * lighting.ambient)
186 INSERT_PADDING_WORDS(0x1);
187 BitField<0, 3, u32> max_light_index; // Number of enabled lights - 1
188
189 union {
190 BitField<2, 2, LightingFresnelSelector> fresnel_selector;
191 BitField<4, 4, LightingConfig> config;
192 BitField<22, 2, u32> bump_selector; // 0: Texture 0, 1: Texture 1, 2: Texture 2
193 BitField<27, 1, u32> clamp_highlights;
194 BitField<28, 2, LightingBumpMode> bump_mode;
195 BitField<30, 1, u32> disable_bump_renorm;
196 } config0;
197
198 union {
199 u32 raw;
200
201        // Each set bit disables spotlight attenuation for the corresponding
202        // light.
203 BitField<8, 8, u32> disable_spot_atten;
204
205 BitField<16, 1, u32> disable_lut_d0;
206 BitField<17, 1, u32> disable_lut_d1;
207        // Note: intuitively, BitField<18, 1, u32> should be disable_lut_sp, but it is actually a
208        // dummy bit which is always set to 1.
209 BitField<19, 1, u32> disable_lut_fr;
210 BitField<20, 1, u32> disable_lut_rr;
211 BitField<21, 1, u32> disable_lut_rg;
212 BitField<22, 1, u32> disable_lut_rb;
213
214        // Each set bit disables distance attenuation for the corresponding
215        // light.
216 BitField<24, 8, u32> disable_dist_atten;
217 } config1;
218
219 bool IsDistAttenDisabled(unsigned index) const {
220 return (config1.disable_dist_atten & (1 << index)) != 0;
221 }
222
223 bool IsSpotAttenDisabled(unsigned index) const {
224 return (config1.disable_spot_atten & (1 << index)) != 0;
225 }
226
227 union {
228 BitField<0, 8, u32> index; ///< Index at which to set data in the LUT
229 BitField<8, 5, u32> type; ///< Type of LUT for which to set data
230 } lut_config;
231
232 BitField<0, 1, u32> disable;
233 INSERT_PADDING_WORDS(0x1);
234
235    // When data is written to any of these registers, it gets written to the lookup table of the
236    // selected type at the selected index, both specified above in the `lut_config` register. With
237    // each write, `lut_config.index` is incremented. It does not matter which of these registers is
238    // written to; the behavior is the same.
239 u32 lut_data[8];
240
241 // These are used to specify if absolute (abs) value should be used for each LUT index. When
242 // abs mode is disabled, LUT indexes are in the range of (-1.0, 1.0). Otherwise, they are in
243 // the range of (0.0, 1.0).
244 union {
245 BitField<1, 1, u32> disable_d0;
246 BitField<5, 1, u32> disable_d1;
247 BitField<9, 1, u32> disable_sp;
248 BitField<13, 1, u32> disable_fr;
249 BitField<17, 1, u32> disable_rb;
250 BitField<21, 1, u32> disable_rg;
251 BitField<25, 1, u32> disable_rr;
252 } abs_lut_input;
253
254 union {
255 BitField<0, 3, LightingLutInput> d0;
256 BitField<4, 3, LightingLutInput> d1;
257 BitField<8, 3, LightingLutInput> sp;
258 BitField<12, 3, LightingLutInput> fr;
259 BitField<16, 3, LightingLutInput> rb;
260 BitField<20, 3, LightingLutInput> rg;
261 BitField<24, 3, LightingLutInput> rr;
262 } lut_input;
263
264 union {
265 BitField<0, 3, LightingScale> d0;
266 BitField<4, 3, LightingScale> d1;
267 BitField<8, 3, LightingScale> sp;
268 BitField<12, 3, LightingScale> fr;
269 BitField<16, 3, LightingScale> rb;
270 BitField<20, 3, LightingScale> rg;
271 BitField<24, 3, LightingScale> rr;
272
273 static float GetScale(LightingScale scale) {
274 switch (scale) {
275 case LightingScale::Scale1:
276 return 1.0f;
277 case LightingScale::Scale2:
278 return 2.0f;
279 case LightingScale::Scale4:
280 return 4.0f;
281 case LightingScale::Scale8:
282 return 8.0f;
283 case LightingScale::Scale1_4:
284 return 0.25f;
285 case LightingScale::Scale1_2:
286 return 0.5f;
287 }
288 return 0.0f;
289 }
290 } lut_scale;
291
292 INSERT_PADDING_WORDS(0x6);
293
294 union {
295 // There are 8 light enable "slots", corresponding to the total number of lights supported
296        // by Pica. For N enabled lights (specified by register 0x1c2, `max_light_index` above), the
297 // first N slots below will be set to integers within the range of 0-7, corresponding to the
298 // actual light that is enabled for each slot.
299
300 BitField<0, 3, u32> slot_0;
301 BitField<4, 3, u32> slot_1;
302 BitField<8, 3, u32> slot_2;
303 BitField<12, 3, u32> slot_3;
304 BitField<16, 3, u32> slot_4;
305 BitField<20, 3, u32> slot_5;
306 BitField<24, 3, u32> slot_6;
307 BitField<28, 3, u32> slot_7;
308
309 unsigned GetNum(unsigned index) const {
310 const unsigned enable_slots[] = {slot_0, slot_1, slot_2, slot_3,
311 slot_4, slot_5, slot_6, slot_7};
312 return enable_slots[index];
313 }
314 } light_enable;
315
316 INSERT_PADDING_WORDS(0x26);
317};
318
319static_assert(sizeof(LightingRegs) == 0xC0 * sizeof(u32), "LightingRegs struct has incorrect size");
320
321} // namespace Pica
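To make the lut_config / lut_data write behaviour described in the removed header concrete (each write to a lut_data register stores into the selected table at the current index and then increments the index), here is a small software model; the class and member names are hypothetical:

#include <array>
#include <cstdint>

struct LightingLutModel {
    // 24 tables of 256 entries, matching NumLightingSampler and the 8-bit index field.
    std::array<std::array<std::uint32_t, 256>, 24> tables{};
    unsigned type = 0;
    unsigned index = 0;

    // Models a write to the lut_config register: select table and start index.
    void WriteLutConfig(unsigned new_type, unsigned new_index) {
        type = new_type % 24;
        index = new_index & 0xFF;
    }

    // Models a write to any of the eight lut_data registers.
    void WriteLutData(std::uint32_t value) {
        tables[type][index] = value;
        index = (index + 1) & 0xFF; // auto-increment, as noted in the deleted comment
    }
};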
diff --git a/src/video_core/regs_pipeline.h b/src/video_core/regs_pipeline.h
deleted file mode 100644
index e78c3e331..000000000
--- a/src/video_core/regs_pipeline.h
+++ /dev/null
@@ -1,269 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include "common/assert.h"
10#include "common/bit_field.h"
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13
14namespace Pica {
15
16struct PipelineRegs {
17 enum class VertexAttributeFormat : u32 {
18 BYTE = 0,
19 UBYTE = 1,
20 SHORT = 2,
21 FLOAT = 3,
22 };
23
24 struct {
25 BitField<1, 28, u32> base_address;
26
27 PAddr GetPhysicalBaseAddress() const {
28 return base_address * 16;
29 }
30
31 // Descriptor for internal vertex attributes
32 union {
33 BitField<0, 2, VertexAttributeFormat> format0; // size of one element
34 BitField<2, 2, u32> size0; // number of elements minus 1
35 BitField<4, 2, VertexAttributeFormat> format1;
36 BitField<6, 2, u32> size1;
37 BitField<8, 2, VertexAttributeFormat> format2;
38 BitField<10, 2, u32> size2;
39 BitField<12, 2, VertexAttributeFormat> format3;
40 BitField<14, 2, u32> size3;
41 BitField<16, 2, VertexAttributeFormat> format4;
42 BitField<18, 2, u32> size4;
43 BitField<20, 2, VertexAttributeFormat> format5;
44 BitField<22, 2, u32> size5;
45 BitField<24, 2, VertexAttributeFormat> format6;
46 BitField<26, 2, u32> size6;
47 BitField<28, 2, VertexAttributeFormat> format7;
48 BitField<30, 2, u32> size7;
49 };
50
51 union {
52 BitField<0, 2, VertexAttributeFormat> format8;
53 BitField<2, 2, u32> size8;
54 BitField<4, 2, VertexAttributeFormat> format9;
55 BitField<6, 2, u32> size9;
56 BitField<8, 2, VertexAttributeFormat> format10;
57 BitField<10, 2, u32> size10;
58 BitField<12, 2, VertexAttributeFormat> format11;
59 BitField<14, 2, u32> size11;
60
61 BitField<16, 12, u32> attribute_mask;
62
63 // number of total attributes minus 1
64 BitField<28, 4, u32> max_attribute_index;
65 };
66
67 inline VertexAttributeFormat GetFormat(int n) const {
68 VertexAttributeFormat formats[] = {format0, format1, format2, format3,
69 format4, format5, format6, format7,
70 format8, format9, format10, format11};
71 return formats[n];
72 }
73
74 inline int GetNumElements(int n) const {
75 u32 sizes[] = {size0, size1, size2, size3, size4, size5,
76 size6, size7, size8, size9, size10, size11};
77 return (int)sizes[n] + 1;
78 }
79
80 inline int GetElementSizeInBytes(int n) const {
81 return (GetFormat(n) == VertexAttributeFormat::FLOAT)
82 ? 4
83 : (GetFormat(n) == VertexAttributeFormat::SHORT) ? 2 : 1;
84 }
85
86 inline int GetStride(int n) const {
87 return GetNumElements(n) * GetElementSizeInBytes(n);
88 }
89
90 inline bool IsDefaultAttribute(int id) const {
91 return (id >= 12) || (attribute_mask & (1ULL << id)) != 0;
92 }
93
94 inline int GetNumTotalAttributes() const {
95 return (int)max_attribute_index + 1;
96 }
97
98        // Attribute loaders map the source vertex data to input attributes.
99        // This allows, for example, loading different attributes from different memory locations.
100 struct {
101 // Source attribute data offset from the base address
102 BitField<0, 28, u32> data_offset;
103
104 union {
105 BitField<0, 4, u32> comp0;
106 BitField<4, 4, u32> comp1;
107 BitField<8, 4, u32> comp2;
108 BitField<12, 4, u32> comp3;
109 BitField<16, 4, u32> comp4;
110 BitField<20, 4, u32> comp5;
111 BitField<24, 4, u32> comp6;
112 BitField<28, 4, u32> comp7;
113 };
114
115 union {
116 BitField<0, 4, u32> comp8;
117 BitField<4, 4, u32> comp9;
118 BitField<8, 4, u32> comp10;
119 BitField<12, 4, u32> comp11;
120
121 // bytes for a single vertex in this loader
122 BitField<16, 8, u32> byte_count;
123
124 BitField<28, 4, u32> component_count;
125 };
126
127 inline int GetComponent(int n) const {
128 u32 components[] = {comp0, comp1, comp2, comp3, comp4, comp5,
129 comp6, comp7, comp8, comp9, comp10, comp11};
130 return (int)components[n];
131 }
132 } attribute_loaders[12];
133 } vertex_attributes;
134
135 struct {
136 enum IndexFormat : u32 {
137 BYTE = 0,
138 SHORT = 1,
139 };
140
141 union {
142 BitField<0, 31, u32> offset; // relative to base attribute address
143 BitField<31, 1, IndexFormat> format;
144 };
145 } index_array;
146
147 // Number of vertices to render
148 u32 num_vertices;
149
150 enum class UseGS : u32 {
151 No = 0,
152 Yes = 2,
153 };
154
155 union {
156 BitField<0, 2, UseGS> use_gs;
157 BitField<31, 1, u32> variable_primitive;
158 };
159
160 // The index of the first vertex to render
161 u32 vertex_offset;
162
163 INSERT_PADDING_WORDS(0x3);
164
165 // These two trigger rendering of triangles
166 u32 trigger_draw;
167 u32 trigger_draw_indexed;
168
169 INSERT_PADDING_WORDS(0x2);
170
171    // These registers are used to set up the default "fall-back" vertex shader attributes
172 struct {
173 // Index of the current default attribute
174 u32 index;
175
176 // Writing to these registers sets the "current" default attribute.
177 u32 set_value[3];
178 } vs_default_attributes_setup;
179
180 INSERT_PADDING_WORDS(0x2);
181
182 struct {
183 // There are two channels that can be used to configure the next command buffer, which can
184 // be then executed by writing to the "trigger" registers. There are two reasons why a game
185 // might use this feature:
186 // 1) With this, an arbitrary number of additional command buffers may be executed in
187 // sequence without requiring any intervention of the CPU after the initial one is
188 // kicked off.
189 // 2) Games can configure these registers to provide a command list subroutine mechanism.
190
191        // TODO: verify the bit lengths of these two fields
192        // According to 3dbrew, they are 21 and 29 bits, respectively
193 BitField<0, 20, u32> size[2]; ///< Size (in bytes / 8) of each channel's command buffer
194 BitField<0, 28, u32> addr[2]; ///< Physical address / 8 of each channel's command buffer
195 u32 trigger[2]; ///< Triggers execution of the channel's command buffer when written to
196
197 unsigned GetSize(unsigned index) const {
198 ASSERT(index < 2);
199 return 8 * size[index];
200 }
201
202 PAddr GetPhysicalAddress(unsigned index) const {
203 ASSERT(index < 2);
204 return (PAddr)(8 * addr[index]);
205 }
206 } command_buffer;
207
208 INSERT_PADDING_WORDS(4);
209
210 /// Number of input attributes to the vertex shader minus 1
211 BitField<0, 4, u32> max_input_attrib_index;
212
213 INSERT_PADDING_WORDS(1);
214
215    // Shader unit 3, which can be used for both the vertex and geometry shader, gets its
216    // configuration depending on this register. If this is not set, unit 3 shares some
217    // configuration with the other units. It is known that program code and swizzle patterns uploaded
218    // via regs.vs are also uploaded to unit 3 if this is not set. Although very likely, it is
219    // still unclear whether uniforms and other configuration are shared as well.
220 BitField<0, 1, u32> gs_unit_exclusive_configuration;
221
222 enum class GPUMode : u32 {
223 Drawing = 0,
224 Configuring = 1,
225 };
226
227 GPUMode gpu_mode;
228
229 INSERT_PADDING_WORDS(0x4);
230 BitField<0, 4, u32> vs_outmap_total_minus_1_a;
231 INSERT_PADDING_WORDS(0x6);
232 BitField<0, 4, u32> vs_outmap_total_minus_1_b;
233
234 enum class GSMode : u32 {
235 Point = 0,
236 VariablePrimitive = 1,
237 FixedPrimitive = 2,
238 };
239
240 union {
241 BitField<0, 8, GSMode> mode;
242 BitField<8, 4, u32> fixed_vertex_num_minus_1;
243 BitField<12, 4, u32> stride_minus_1;
244 BitField<16, 4, u32> start_index;
245 } gs_config;
246
247 INSERT_PADDING_WORDS(0x1);
248
249 u32 variable_vertex_main_num_minus_1;
250
251 INSERT_PADDING_WORDS(0x9);
252
253 enum class TriangleTopology : u32 {
254 List = 0,
255 Strip = 1,
256 Fan = 2,
257 Shader = 3, // Programmable setup unit implemented in a geometry shader
258 };
259
260 BitField<8, 2, TriangleTopology> triangle_topology;
261
262 u32 restart_primitive;
263
264 INSERT_PADDING_WORDS(0x20);
265};
266
267static_assert(sizeof(PipelineRegs) == 0x80 * sizeof(u32), "PipelineRegs struct has incorrect size");
268
269} // namespace Pica
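As a hedged illustration of the command-buffer channel encoding above (both the size and the address registers hold their value divided by 8, mirroring GetSize() and GetPhysicalAddress() in the removed header), a standalone sketch with hypothetical names:

#include <cstdint>

struct CommandBufferChannel {
    std::uint32_t size_div8; // size register: command list length in bytes / 8
    std::uint32_t addr_div8; // addr register: physical address / 8
};

// Returns the values the GPU would use once the channel's trigger register is written.
static void DecodeChannel(const CommandBufferChannel& ch, std::uint32_t& physical_address,
                          std::uint32_t& length_in_bytes) {
    physical_address = ch.addr_div8 * 8;
    length_in_bytes = ch.size_div8 * 8;
}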
diff --git a/src/video_core/regs_rasterizer.h b/src/video_core/regs_rasterizer.h
deleted file mode 100644
index 4fef00d76..000000000
--- a/src/video_core/regs_rasterizer.h
+++ /dev/null
@@ -1,139 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include "common/bit_field.h"
9#include "common/common_funcs.h"
10#include "common/common_types.h"
11#include "video_core/pica_types.h"
12
13namespace Pica {
14
15struct RasterizerRegs {
16 enum class CullMode : u32 {
17 // Select which polygons are considered to be "frontfacing".
18 KeepAll = 0,
19 KeepClockWise = 1,
20 KeepCounterClockWise = 2,
21 // TODO: What does the third value imply?
22 };
23
24 union {
25 BitField<0, 2, CullMode> cull_mode;
26 };
27
28 BitField<0, 24, u32> viewport_size_x;
29
30 INSERT_PADDING_WORDS(0x1);
31
32 BitField<0, 24, u32> viewport_size_y;
33
34 INSERT_PADDING_WORDS(0x3);
35
36 BitField<0, 1, u32> clip_enable;
37 BitField<0, 24, u32> clip_coef[4]; // float24
38
39 Math::Vec4<float24> GetClipCoef() const {
40 return {float24::FromRaw(clip_coef[0]), float24::FromRaw(clip_coef[1]),
41 float24::FromRaw(clip_coef[2]), float24::FromRaw(clip_coef[3])};
42 }
43
44 INSERT_PADDING_WORDS(0x1);
45
46 BitField<0, 24, u32> viewport_depth_range; // float24
47 BitField<0, 24, u32> viewport_depth_near_plane; // float24
48
49 BitField<0, 3, u32> vs_output_total;
50
51 union VSOutputAttributes {
52 // Maps components of output vertex attributes to semantics
53 enum Semantic : u32 {
54 POSITION_X = 0,
55 POSITION_Y = 1,
56 POSITION_Z = 2,
57 POSITION_W = 3,
58
59 QUATERNION_X = 4,
60 QUATERNION_Y = 5,
61 QUATERNION_Z = 6,
62 QUATERNION_W = 7,
63
64 COLOR_R = 8,
65 COLOR_G = 9,
66 COLOR_B = 10,
67 COLOR_A = 11,
68
69 TEXCOORD0_U = 12,
70 TEXCOORD0_V = 13,
71 TEXCOORD1_U = 14,
72 TEXCOORD1_V = 15,
73
74 TEXCOORD0_W = 16,
75
76 VIEW_X = 18,
77 VIEW_Y = 19,
78 VIEW_Z = 20,
79
80 TEXCOORD2_U = 22,
81 TEXCOORD2_V = 23,
82
83 INVALID = 31,
84 };
85
86 BitField<0, 5, Semantic> map_x;
87 BitField<8, 5, Semantic> map_y;
88 BitField<16, 5, Semantic> map_z;
89 BitField<24, 5, Semantic> map_w;
90 } vs_output_attributes[7];
91
92 INSERT_PADDING_WORDS(0xe);
93
94 enum class ScissorMode : u32 {
95 Disabled = 0,
96 Exclude = 1, // Exclude pixels inside the scissor box
97
98 Include = 3 // Exclude pixels outside the scissor box
99 };
100
101 struct {
102 BitField<0, 2, ScissorMode> mode;
103
104 union {
105 BitField<0, 10, u32> x1;
106 BitField<16, 10, u32> y1;
107 };
108
109 union {
110 BitField<0, 10, u32> x2;
111 BitField<16, 10, u32> y2;
112 };
113 } scissor_test;
114
115 union {
116 BitField<0, 10, s32> x;
117 BitField<16, 10, s32> y;
118 } viewport_corner;
119
120 INSERT_PADDING_WORDS(0x1);
121
122 // TODO: early depth
123 INSERT_PADDING_WORDS(0x1);
124
125 INSERT_PADDING_WORDS(0x2);
126
127 enum DepthBuffering : u32 {
128 WBuffering = 0,
129 ZBuffering = 1,
130 };
131 BitField<0, 1, DepthBuffering> depthmap_enable;
132
133 INSERT_PADDING_WORDS(0x12);
134};
135
136static_assert(sizeof(RasterizerRegs) == 0x40 * sizeof(u32),
137 "RasterizerRegs struct has incorrect size");
138
139} // namespace Pica
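A small sketch of decoding the scissor registers above into a pixel rectangle. It assumes x2/y2 are inclusive bounds, which is consistent with the deleted OpenGL rasterizer adding +1 to them further down in this diff; the names are illustrative:

#include <cstdint>

struct ScissorRect {
    std::uint32_t x, y, width, height;
};

// Decodes the 10-bit scissor fields (x1, y1) .. (x2, y2) into origin + extent,
// treating the upper bounds as inclusive.
static ScissorRect DecodeScissor(std::uint32_t x1, std::uint32_t y1, std::uint32_t x2,
                                 std::uint32_t y2) {
    return {x1, y1, (x2 - x1) + 1, (y2 - y1) + 1};
}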
diff --git a/src/video_core/regs_shader.h b/src/video_core/regs_shader.h
deleted file mode 100644
index c15d4d162..000000000
--- a/src/video_core/regs_shader.h
+++ /dev/null
@@ -1,111 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include "common/bit_field.h"
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12
13namespace Pica {
14
15struct ShaderRegs {
16 BitField<0, 16, u32> bool_uniforms;
17
18 union {
19 BitField<0, 8, u32> x;
20 BitField<8, 8, u32> y;
21 BitField<16, 8, u32> z;
22 BitField<24, 8, u32> w;
23 } int_uniforms[4];
24
25 INSERT_PADDING_WORDS(0x4);
26
27 enum ShaderMode {
28 GS = 0x08,
29 VS = 0xA0,
30 };
31
32 union {
33 // Number of input attributes to shader unit - 1
34 BitField<0, 4, u32> max_input_attribute_index;
35 BitField<8, 8, u32> input_to_uniform;
36 BitField<24, 8, ShaderMode> shader_mode;
37 };
38
39 // Offset to shader program entry point (in words)
40 BitField<0, 16, u32> main_offset;
41
42 /// Maps input attributes to registers. 4-bits per attribute, specifying a register index
43 u32 input_attribute_to_register_map_low;
44 u32 input_attribute_to_register_map_high;
45
46 unsigned int GetRegisterForAttribute(unsigned int attribute_index) const {
47 u64 map = ((u64)input_attribute_to_register_map_high << 32) |
48 (u64)input_attribute_to_register_map_low;
49 return (map >> (attribute_index * 4)) & 0b1111;
50 }
51
52 BitField<0, 16, u32> output_mask;
53
54 // 0x28E, CODETRANSFER_END
55 INSERT_PADDING_WORDS(0x2);
56
57 struct {
58 enum Format : u32 {
59 FLOAT24 = 0,
60 FLOAT32 = 1,
61 };
62
63 bool IsFloat32() const {
64 return format == FLOAT32;
65 }
66
67 union {
68 // Index of the next uniform to write to
69 // TODO: ctrulib uses 8 bits for this, however that seems to yield lots of invalid
70 // indices
71 // TODO: Maybe the uppermost index is for the geometry shader? Investigate!
72 BitField<0, 7, u32> index;
73
74 BitField<31, 1, Format> format;
75 };
76
77 // Writing to these registers sets the current uniform.
78 u32 set_value[8];
79
80 } uniform_setup;
81
82 INSERT_PADDING_WORDS(0x2);
83
84 struct {
85 // Offset of the next instruction to write code to.
86 // Incremented with each instruction write.
87 u32 offset;
88
89 // Writing to these registers sets the "current" word in the shader program.
90 u32 set_word[8];
91 } program;
92
93 INSERT_PADDING_WORDS(0x1);
94
95 // This register group is used to load an internal table of swizzling patterns,
96 // which are indexed by each shader instruction to specify vector component swizzling.
97 struct {
98        // Offset of the next swizzle pattern to write to.
99        // Incremented with each write.
100 u32 offset;
101
102 // Writing to these registers sets the current swizzle pattern in the table.
103 u32 set_word[8];
104 } swizzle_patterns;
105
106 INSERT_PADDING_WORDS(0x2);
107};
108
109static_assert(sizeof(ShaderRegs) == 0x30 * sizeof(u32), "ShaderRegs struct has incorrect size");
110
111} // namespace Pica
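The two input_attribute_to_register_map words pack one 4-bit register index per input attribute; the standalone sketch below performs the same extraction as GetRegisterForAttribute() in the removed header:

#include <cstdint>

// Low word holds attributes 0-7, high word holds attributes 8-15; each
// attribute occupies 4 bits of the combined 64-bit map.
static unsigned RegisterForAttribute(std::uint32_t map_low, std::uint32_t map_high,
                                     unsigned attribute_index) {
    const std::uint64_t map = (static_cast<std::uint64_t>(map_high) << 32) | map_low;
    return static_cast<unsigned>((map >> (attribute_index * 4)) & 0xF);
}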
diff --git a/src/video_core/regs_texturing.h b/src/video_core/regs_texturing.h
deleted file mode 100644
index 0b09f2299..000000000
--- a/src/video_core/regs_texturing.h
+++ /dev/null
@@ -1,452 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include "common/assert.h"
10#include "common/bit_field.h"
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13
14namespace Pica {
15
16struct TexturingRegs {
17 struct TextureConfig {
18 enum TextureType : u32 {
19 Texture2D = 0,
20 TextureCube = 1,
21 Shadow2D = 2,
22 Projection2D = 3,
23 ShadowCube = 4,
24 Disabled = 5,
25 };
26
27 enum WrapMode : u32 {
28 ClampToEdge = 0,
29 ClampToBorder = 1,
30 Repeat = 2,
31 MirroredRepeat = 3,
32            // Modes 4-7 produce odd results and may simply be invalid:
33 ClampToEdge2 = 4, // Positive coord: clamp to edge; negative coord: repeat
34 ClampToBorder2 = 5, // Positive coord: clamp to border; negative coord: repeat
35 Repeat2 = 6, // Same as Repeat
36 Repeat3 = 7, // Same as Repeat
37 };
38
39 enum TextureFilter : u32 {
40 Nearest = 0,
41 Linear = 1,
42 };
43
44 union {
45 u32 raw;
46 BitField<0, 8, u32> r;
47 BitField<8, 8, u32> g;
48 BitField<16, 8, u32> b;
49 BitField<24, 8, u32> a;
50 } border_color;
51
52 union {
53 BitField<0, 11, u32> height;
54 BitField<16, 11, u32> width;
55 };
56
57 union {
58 BitField<1, 1, TextureFilter> mag_filter;
59 BitField<2, 1, TextureFilter> min_filter;
60 BitField<8, 3, WrapMode> wrap_t;
61 BitField<12, 3, WrapMode> wrap_s;
62 /// @note Only valid for texture 0 according to 3DBrew.
63 BitField<28, 3, TextureType> type;
64 };
65
66 INSERT_PADDING_WORDS(0x1);
67
68 BitField<0, 28, u32> address;
69
70 PAddr GetPhysicalAddress() const {
71 return address * 8;
72 }
73
74 // texture1 and texture2 store the texture format directly after the address
75        // whereas texture0 inserts some additional flags in between.
76 // Hence, we store the format separately so that all other parameters can be described
77 // in a single structure.
78 };
79
80 enum class TextureFormat : u32 {
81 RGBA8 = 0,
82 RGB8 = 1,
83 RGB5A1 = 2,
84 RGB565 = 3,
85 RGBA4 = 4,
86 IA8 = 5,
87 RG8 = 6, ///< @note Also called HILO8 in 3DBrew.
88 I8 = 7,
89 A8 = 8,
90 IA4 = 9,
91 I4 = 10,
92 A4 = 11,
93 ETC1 = 12, // compressed
94 ETC1A4 = 13, // compressed
95 };
96
97 static unsigned NibblesPerPixel(TextureFormat format) {
98 switch (format) {
99 case TextureFormat::RGBA8:
100 return 8;
101
102 case TextureFormat::RGB8:
103 return 6;
104
105 case TextureFormat::RGB5A1:
106 case TextureFormat::RGB565:
107 case TextureFormat::RGBA4:
108 case TextureFormat::IA8:
109 case TextureFormat::RG8:
110 return 4;
111
112 case TextureFormat::I4:
113 case TextureFormat::A4:
114 return 1;
115
116 case TextureFormat::I8:
117 case TextureFormat::A8:
118 case TextureFormat::IA4:
119
120 default: // placeholder for yet unknown formats
121 UNIMPLEMENTED();
122 return 0;
123 }
124 }
125
126 union {
127 BitField<0, 1, u32> texture0_enable;
128 BitField<1, 1, u32> texture1_enable;
129 BitField<2, 1, u32> texture2_enable;
130 BitField<8, 2, u32> texture3_coordinates;
131 BitField<10, 1, u32> texture3_enable;
132 BitField<13, 1, u32> texture2_use_coord1;
133 BitField<16, 1, u32> clear_texture_cache; // TODO: unimplemented
134 } main_config;
135 TextureConfig texture0;
136
137 enum class CubeFace {
138 PositiveX = 0,
139 NegativeX = 1,
140 PositiveY = 2,
141 NegativeY = 3,
142 PositiveZ = 4,
143 NegativeZ = 5,
144 };
145
146 BitField<0, 22, u32> cube_address[5];
147
148 PAddr GetCubePhysicalAddress(CubeFace face) const {
149 PAddr address = texture0.address;
150 if (face != CubeFace::PositiveX) {
151            // Bits [22:27] of the main texture address are shared with all additional cubemap
152            // addresses.
153 auto& face_addr = cube_address[static_cast<size_t>(face) - 1];
154 address &= ~face_addr.mask;
155 address |= face_addr;
156 }
157 // A multiplier of 8 is also needed in the same way as the main address.
158 return address * 8;
159 }
160
161 INSERT_PADDING_WORDS(0x3);
162 BitField<0, 4, TextureFormat> texture0_format;
163 BitField<0, 1, u32> fragment_lighting_enable;
164 INSERT_PADDING_WORDS(0x1);
165 TextureConfig texture1;
166 BitField<0, 4, TextureFormat> texture1_format;
167 INSERT_PADDING_WORDS(0x2);
168 TextureConfig texture2;
169 BitField<0, 4, TextureFormat> texture2_format;
170 INSERT_PADDING_WORDS(0x9);
171
172 struct FullTextureConfig {
173 const bool enabled;
174 const TextureConfig config;
175 const TextureFormat format;
176 };
177 const std::array<FullTextureConfig, 3> GetTextures() const {
178 return {{
179 {main_config.texture0_enable.ToBool(), texture0, texture0_format},
180 {main_config.texture1_enable.ToBool(), texture1, texture1_format},
181 {main_config.texture2_enable.ToBool(), texture2, texture2_format},
182 }};
183 }
184
185 // 0xa8-0xad: ProcTex Config
186 enum class ProcTexClamp : u32 {
187 ToZero = 0,
188 ToEdge = 1,
189 SymmetricalRepeat = 2,
190 MirroredRepeat = 3,
191 Pulse = 4,
192 };
193
194 enum class ProcTexCombiner : u32 {
195 U = 0, // u
196 U2 = 1, // u * u
197 V = 2, // v
198 V2 = 3, // v * v
199 Add = 4, // (u + v) / 2
200 Add2 = 5, // (u * u + v * v) / 2
201 SqrtAdd2 = 6, // sqrt(u * u + v * v)
202 Min = 7, // min(u, v)
203 Max = 8, // max(u, v)
204 RMax = 9, // Average of Max and SqrtAdd2
205 };
206
207 enum class ProcTexShift : u32 {
208 None = 0,
209 Odd = 1,
210 Even = 2,
211 };
212
213 union {
214 BitField<0, 3, ProcTexClamp> u_clamp;
215 BitField<3, 3, ProcTexClamp> v_clamp;
216 BitField<6, 4, ProcTexCombiner> color_combiner;
217 BitField<10, 4, ProcTexCombiner> alpha_combiner;
218 BitField<14, 1, u32> separate_alpha;
219 BitField<15, 1, u32> noise_enable;
220 BitField<16, 2, ProcTexShift> u_shift;
221 BitField<18, 2, ProcTexShift> v_shift;
222 BitField<20, 8, u32> bias_low; // float16 TODO: unimplemented
223 } proctex;
224
225 union ProcTexNoiseConfig {
226 BitField<0, 16, s32> amplitude; // fixed1.3.12
227 BitField<16, 16, u32> phase; // float16
228 };
229
230 ProcTexNoiseConfig proctex_noise_u;
231 ProcTexNoiseConfig proctex_noise_v;
232
233 union {
234 BitField<0, 16, u32> u; // float16
235 BitField<16, 16, u32> v; // float16
236 } proctex_noise_frequency;
237
238 enum class ProcTexFilter : u32 {
239 Nearest = 0,
240 Linear = 1,
241 NearestMipmapNearest = 2,
242 LinearMipmapNearest = 3,
243 NearestMipmapLinear = 4,
244 LinearMipmapLinear = 5,
245 };
246
247 union {
248 BitField<0, 3, ProcTexFilter> filter;
249 BitField<11, 8, u32> width;
250 BitField<19, 8, u32> bias_high; // TODO: unimplemented
251 } proctex_lut;
252
253 BitField<0, 8, u32> proctex_lut_offset;
254
255 INSERT_PADDING_WORDS(0x1);
256
257 // 0xaf-0xb7: ProcTex LUT
258 enum class ProcTexLutTable : u32 {
259 Noise = 0,
260 ColorMap = 2,
261 AlphaMap = 3,
262 Color = 4,
263 ColorDiff = 5,
264 };
265
266 union {
267 BitField<0, 8, u32> index;
268 BitField<8, 4, ProcTexLutTable> ref_table;
269 } proctex_lut_config;
270
271 u32 proctex_lut_data[8];
272
273 INSERT_PADDING_WORDS(0x8);
274
275 // 0xc0-0xff: Texture Combiner (akin to glTexEnv)
276 struct TevStageConfig {
277 enum class Source : u32 {
278 PrimaryColor = 0x0,
279 PrimaryFragmentColor = 0x1,
280 SecondaryFragmentColor = 0x2,
281
282 Texture0 = 0x3,
283 Texture1 = 0x4,
284 Texture2 = 0x5,
285 Texture3 = 0x6,
286
287 PreviousBuffer = 0xd,
288 Constant = 0xe,
289 Previous = 0xf,
290 };
291
292 enum class ColorModifier : u32 {
293 SourceColor = 0x0,
294 OneMinusSourceColor = 0x1,
295 SourceAlpha = 0x2,
296 OneMinusSourceAlpha = 0x3,
297 SourceRed = 0x4,
298 OneMinusSourceRed = 0x5,
299
300 SourceGreen = 0x8,
301 OneMinusSourceGreen = 0x9,
302
303 SourceBlue = 0xc,
304 OneMinusSourceBlue = 0xd,
305 };
306
307 enum class AlphaModifier : u32 {
308 SourceAlpha = 0x0,
309 OneMinusSourceAlpha = 0x1,
310 SourceRed = 0x2,
311 OneMinusSourceRed = 0x3,
312 SourceGreen = 0x4,
313 OneMinusSourceGreen = 0x5,
314 SourceBlue = 0x6,
315 OneMinusSourceBlue = 0x7,
316 };
317
318 enum class Operation : u32 {
319 Replace = 0,
320 Modulate = 1,
321 Add = 2,
322 AddSigned = 3,
323 Lerp = 4,
324 Subtract = 5,
325 Dot3_RGB = 6,
326 Dot3_RGBA = 7,
327 MultiplyThenAdd = 8,
328 AddThenMultiply = 9,
329 };
330
331 union {
332 u32 sources_raw;
333 BitField<0, 4, Source> color_source1;
334 BitField<4, 4, Source> color_source2;
335 BitField<8, 4, Source> color_source3;
336 BitField<16, 4, Source> alpha_source1;
337 BitField<20, 4, Source> alpha_source2;
338 BitField<24, 4, Source> alpha_source3;
339 };
340
341 union {
342 u32 modifiers_raw;
343 BitField<0, 4, ColorModifier> color_modifier1;
344 BitField<4, 4, ColorModifier> color_modifier2;
345 BitField<8, 4, ColorModifier> color_modifier3;
346 BitField<12, 3, AlphaModifier> alpha_modifier1;
347 BitField<16, 3, AlphaModifier> alpha_modifier2;
348 BitField<20, 3, AlphaModifier> alpha_modifier3;
349 };
350
351 union {
352 u32 ops_raw;
353 BitField<0, 4, Operation> color_op;
354 BitField<16, 4, Operation> alpha_op;
355 };
356
357 union {
358 u32 const_color;
359 BitField<0, 8, u32> const_r;
360 BitField<8, 8, u32> const_g;
361 BitField<16, 8, u32> const_b;
362 BitField<24, 8, u32> const_a;
363 };
364
365 union {
366 u32 scales_raw;
367 BitField<0, 2, u32> color_scale;
368 BitField<16, 2, u32> alpha_scale;
369 };
370
371 inline unsigned GetColorMultiplier() const {
372 return (color_scale < 3) ? (1 << color_scale) : 1;
373 }
374
375 inline unsigned GetAlphaMultiplier() const {
376 return (alpha_scale < 3) ? (1 << alpha_scale) : 1;
377 }
378 };
379
380 TevStageConfig tev_stage0;
381 INSERT_PADDING_WORDS(0x3);
382 TevStageConfig tev_stage1;
383 INSERT_PADDING_WORDS(0x3);
384 TevStageConfig tev_stage2;
385 INSERT_PADDING_WORDS(0x3);
386 TevStageConfig tev_stage3;
387 INSERT_PADDING_WORDS(0x3);
388
389 enum class FogMode : u32 {
390 None = 0,
391 Fog = 5,
392 Gas = 7,
393 };
394
395 union {
396 BitField<0, 3, FogMode> fog_mode;
397 BitField<16, 1, u32> fog_flip;
398
399 union {
400            // Tev stages 0-3 write their output to the combiner buffer if the corresponding bit in
401            // these masks is set
402 BitField<8, 4, u32> update_mask_rgb;
403 BitField<12, 4, u32> update_mask_a;
404
405 bool TevStageUpdatesCombinerBufferColor(unsigned stage_index) const {
406 return (stage_index < 4) && (update_mask_rgb & (1 << stage_index));
407 }
408
409 bool TevStageUpdatesCombinerBufferAlpha(unsigned stage_index) const {
410 return (stage_index < 4) && (update_mask_a & (1 << stage_index));
411 }
412 } tev_combiner_buffer_input;
413 };
414
415 union {
416 u32 raw;
417 BitField<0, 8, u32> r;
418 BitField<8, 8, u32> g;
419 BitField<16, 8, u32> b;
420 } fog_color;
421
422 INSERT_PADDING_WORDS(0x4);
423
424 BitField<0, 16, u32> fog_lut_offset;
425
426 INSERT_PADDING_WORDS(0x1);
427
428 u32 fog_lut_data[8];
429
430 TevStageConfig tev_stage4;
431 INSERT_PADDING_WORDS(0x3);
432 TevStageConfig tev_stage5;
433
434 union {
435 u32 raw;
436 BitField<0, 8, u32> r;
437 BitField<8, 8, u32> g;
438 BitField<16, 8, u32> b;
439 BitField<24, 8, u32> a;
440 } tev_combiner_buffer_color;
441
442 INSERT_PADDING_WORDS(0x2);
443
444 const std::array<TevStageConfig, 6> GetTevStages() const {
445 return {{tev_stage0, tev_stage1, tev_stage2, tev_stage3, tev_stage4, tev_stage5}};
446 };
447};
448
449static_assert(sizeof(TexturingRegs) == 0x80 * sizeof(u32),
450 "TexturingRegs struct has incorrect size");
451
452} // namespace Pica
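Because texel sizes above are expressed in nibbles, the byte footprint of a texture follows as width * height * nibbles / 2. A minimal sketch (the helper name is illustrative):

#include <cstdint>

// Derives the total size in bytes of a width x height texture from its
// nibbles-per-pixel value (see TexturingRegs::NibblesPerPixel in the removed header).
static std::uint32_t TextureSizeInBytes(std::uint32_t width, std::uint32_t height,
                                        unsigned nibbles_per_pixel) {
    return width * height * nibbles_per_pixel / 2;
}
// Example: a 128x128 RGBA8 texture (8 nibbles per texel) is 128 * 128 * 8 / 2 = 65536 bytes.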
diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp
index f6ece5c4b..51e1d45f9 100644
--- a/src/video_core/renderer_base.cpp
+++ b/src/video_core/renderer_base.cpp
@@ -5,19 +5,6 @@
5#include <atomic> 5#include <atomic>
6#include <memory> 6#include <memory>
7#include "video_core/renderer_base.h" 7#include "video_core/renderer_base.h"
8#include "video_core/renderer_opengl/gl_rasterizer.h"
9#include "video_core/swrasterizer/swrasterizer.h"
10#include "video_core/video_core.h" 8#include "video_core/video_core.h"
11 9
12void RendererBase::RefreshRasterizerSetting() { 10void RendererBase::RefreshRasterizerSetting() {}
13 bool hw_renderer_enabled = VideoCore::g_hw_renderer_enabled;
14 if (rasterizer == nullptr || opengl_rasterizer_active != hw_renderer_enabled) {
15 opengl_rasterizer_active = hw_renderer_enabled;
16
17 if (hw_renderer_enabled) {
18 rasterizer = std::make_unique<RasterizerOpenGL>();
19 } else {
20 rasterizer = std::make_unique<VideoCore::SWRasterizer>();
21 }
22 }
23}
diff --git a/src/video_core/renderer_base.h b/src/video_core/renderer_base.h
index 28015aba9..d15db6c8c 100644
--- a/src/video_core/renderer_base.h
+++ b/src/video_core/renderer_base.h
@@ -5,8 +5,8 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include "common/assert.h"
8#include "common/common_types.h" 9#include "common/common_types.h"
9#include "video_core/rasterizer_interface.h"
10 10
11class EmuWindow; 11class EmuWindow;
12 12
@@ -72,14 +72,9 @@ public:
72 return m_current_frame; 72 return m_current_frame;
73 } 73 }
74 74
75 VideoCore::RasterizerInterface* Rasterizer() const {
76 return rasterizer.get();
77 }
78
79 void RefreshRasterizerSetting(); 75 void RefreshRasterizerSetting();
80 76
81protected: 77protected:
82 std::unique_ptr<VideoCore::RasterizerInterface> rasterizer;
83 f32 m_current_fps = 0.0f; ///< Current framerate, should be set by the renderer 78 f32 m_current_fps = 0.0f; ///< Current framerate, should be set by the renderer
84 int m_current_frame = 0; ///< Current frame, should be set by the renderer 79 int m_current_frame = 0; ///< Current frame, should be set by the renderer
85 80
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
deleted file mode 100644
index becaf7bde..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ /dev/null
@@ -1,1686 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <memory>
6#include <string>
7#include <tuple>
8#include <utility>
9#include <glad/glad.h>
10#include "common/assert.h"
11#include "common/color.h"
12#include "common/logging/log.h"
13#include "common/math_util.h"
14#include "common/microprofile.h"
15#include "common/vector_math.h"
16#include "core/hw/gpu.h"
17#include "video_core/pica_state.h"
18#include "video_core/regs_framebuffer.h"
19#include "video_core/regs_rasterizer.h"
20#include "video_core/regs_texturing.h"
21#include "video_core/renderer_opengl/gl_rasterizer.h"
22#include "video_core/renderer_opengl/gl_shader_gen.h"
23#include "video_core/renderer_opengl/pica_to_gl.h"
24#include "video_core/renderer_opengl/renderer_opengl.h"
25
26MICROPROFILE_DEFINE(OpenGL_Drawing, "OpenGL", "Drawing", MP_RGB(128, 128, 192));
27MICROPROFILE_DEFINE(OpenGL_Blits, "OpenGL", "Blits", MP_RGB(100, 100, 255));
28MICROPROFILE_DEFINE(OpenGL_CacheManagement, "OpenGL", "Cache Mgmt", MP_RGB(100, 255, 100));
29
30RasterizerOpenGL::RasterizerOpenGL() : shader_dirty(true) {
31 // Clipping plane 0 is always enabled for PICA fixed clip plane z <= 0
32 state.clip_distance[0] = true;
33
34 // Create sampler objects
35 for (size_t i = 0; i < texture_samplers.size(); ++i) {
36 texture_samplers[i].Create();
37 state.texture_units[i].sampler = texture_samplers[i].sampler.handle;
38 }
39
40 // Generate VBO, VAO and UBO
41 vertex_buffer.Create();
42 vertex_array.Create();
43 uniform_buffer.Create();
44
45 state.draw.vertex_array = vertex_array.handle;
46 state.draw.vertex_buffer = vertex_buffer.handle;
47 state.draw.uniform_buffer = uniform_buffer.handle;
48 state.Apply();
49
50 // Bind the UBO to binding point 0
51 glBindBufferBase(GL_UNIFORM_BUFFER, 0, uniform_buffer.handle);
52
53 uniform_block_data.dirty = true;
54
55 uniform_block_data.lut_dirty.fill(true);
56
57 uniform_block_data.fog_lut_dirty = true;
58
59 uniform_block_data.proctex_noise_lut_dirty = true;
60 uniform_block_data.proctex_color_map_dirty = true;
61 uniform_block_data.proctex_alpha_map_dirty = true;
62 uniform_block_data.proctex_lut_dirty = true;
63 uniform_block_data.proctex_diff_lut_dirty = true;
64
65 // Set vertex attributes
66 glVertexAttribPointer(GLShader::ATTRIBUTE_POSITION, 4, GL_FLOAT, GL_FALSE,
67 sizeof(HardwareVertex), (GLvoid*)offsetof(HardwareVertex, position));
68 glEnableVertexAttribArray(GLShader::ATTRIBUTE_POSITION);
69
70 glVertexAttribPointer(GLShader::ATTRIBUTE_COLOR, 4, GL_FLOAT, GL_FALSE, sizeof(HardwareVertex),
71 (GLvoid*)offsetof(HardwareVertex, color));
72 glEnableVertexAttribArray(GLShader::ATTRIBUTE_COLOR);
73
74 glVertexAttribPointer(GLShader::ATTRIBUTE_TEXCOORD0, 2, GL_FLOAT, GL_FALSE,
75 sizeof(HardwareVertex), (GLvoid*)offsetof(HardwareVertex, tex_coord0));
76 glVertexAttribPointer(GLShader::ATTRIBUTE_TEXCOORD1, 2, GL_FLOAT, GL_FALSE,
77 sizeof(HardwareVertex), (GLvoid*)offsetof(HardwareVertex, tex_coord1));
78 glVertexAttribPointer(GLShader::ATTRIBUTE_TEXCOORD2, 2, GL_FLOAT, GL_FALSE,
79 sizeof(HardwareVertex), (GLvoid*)offsetof(HardwareVertex, tex_coord2));
80 glEnableVertexAttribArray(GLShader::ATTRIBUTE_TEXCOORD0);
81 glEnableVertexAttribArray(GLShader::ATTRIBUTE_TEXCOORD1);
82 glEnableVertexAttribArray(GLShader::ATTRIBUTE_TEXCOORD2);
83
84 glVertexAttribPointer(GLShader::ATTRIBUTE_TEXCOORD0_W, 1, GL_FLOAT, GL_FALSE,
85 sizeof(HardwareVertex), (GLvoid*)offsetof(HardwareVertex, tex_coord0_w));
86 glEnableVertexAttribArray(GLShader::ATTRIBUTE_TEXCOORD0_W);
87
88 glVertexAttribPointer(GLShader::ATTRIBUTE_NORMQUAT, 4, GL_FLOAT, GL_FALSE,
89 sizeof(HardwareVertex), (GLvoid*)offsetof(HardwareVertex, normquat));
90 glEnableVertexAttribArray(GLShader::ATTRIBUTE_NORMQUAT);
91
92 glVertexAttribPointer(GLShader::ATTRIBUTE_VIEW, 3, GL_FLOAT, GL_FALSE, sizeof(HardwareVertex),
93 (GLvoid*)offsetof(HardwareVertex, view));
94 glEnableVertexAttribArray(GLShader::ATTRIBUTE_VIEW);
95
96 // Create render framebuffer
97 framebuffer.Create();
98
99 // Allocate and bind lighting lut textures
100 lighting_lut.Create();
101 state.lighting_lut.texture_buffer = lighting_lut.handle;
102 state.Apply();
103 lighting_lut_buffer.Create();
104 glBindBuffer(GL_TEXTURE_BUFFER, lighting_lut_buffer.handle);
105 glBufferData(GL_TEXTURE_BUFFER,
106 sizeof(GLfloat) * 2 * 256 * Pica::LightingRegs::NumLightingSampler, nullptr,
107 GL_DYNAMIC_DRAW);
108 glActiveTexture(TextureUnits::LightingLUT.Enum());
109 glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, lighting_lut_buffer.handle);
110
111 // Setup the LUT for the fog
112 fog_lut.Create();
113 state.fog_lut.texture_buffer = fog_lut.handle;
114 state.Apply();
115 fog_lut_buffer.Create();
116 glBindBuffer(GL_TEXTURE_BUFFER, fog_lut_buffer.handle);
117 glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat) * 2 * 128, nullptr, GL_DYNAMIC_DRAW);
118 glActiveTexture(TextureUnits::FogLUT.Enum());
119 glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, fog_lut_buffer.handle);
120
121 // Setup the noise LUT for proctex
122 proctex_noise_lut.Create();
123 state.proctex_noise_lut.texture_buffer = proctex_noise_lut.handle;
124 state.Apply();
125 proctex_noise_lut_buffer.Create();
126 glBindBuffer(GL_TEXTURE_BUFFER, proctex_noise_lut_buffer.handle);
127 glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat) * 2 * 128, nullptr, GL_DYNAMIC_DRAW);
128 glActiveTexture(TextureUnits::ProcTexNoiseLUT.Enum());
129 glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, proctex_noise_lut_buffer.handle);
130
131 // Setup the color map for proctex
132 proctex_color_map.Create();
133 state.proctex_color_map.texture_buffer = proctex_color_map.handle;
134 state.Apply();
135 proctex_color_map_buffer.Create();
136 glBindBuffer(GL_TEXTURE_BUFFER, proctex_color_map_buffer.handle);
137 glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat) * 2 * 128, nullptr, GL_DYNAMIC_DRAW);
138 glActiveTexture(TextureUnits::ProcTexColorMap.Enum());
139 glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, proctex_color_map_buffer.handle);
140
141 // Setup the alpha map for proctex
142 proctex_alpha_map.Create();
143 state.proctex_alpha_map.texture_buffer = proctex_alpha_map.handle;
144 state.Apply();
145 proctex_alpha_map_buffer.Create();
146 glBindBuffer(GL_TEXTURE_BUFFER, proctex_alpha_map_buffer.handle);
147 glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat) * 2 * 128, nullptr, GL_DYNAMIC_DRAW);
148 glActiveTexture(TextureUnits::ProcTexAlphaMap.Enum());
149 glTexBuffer(GL_TEXTURE_BUFFER, GL_RG32F, proctex_alpha_map_buffer.handle);
150
151 // Setup the LUT for proctex
152 proctex_lut.Create();
153 state.proctex_lut.texture_buffer = proctex_lut.handle;
154 state.Apply();
155 proctex_lut_buffer.Create();
156 glBindBuffer(GL_TEXTURE_BUFFER, proctex_lut_buffer.handle);
157 glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat) * 4 * 256, nullptr, GL_DYNAMIC_DRAW);
158 glActiveTexture(TextureUnits::ProcTexLUT.Enum());
159 glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA32F, proctex_lut_buffer.handle);
160
161 // Setup the difference LUT for proctex
162 proctex_diff_lut.Create();
163 state.proctex_diff_lut.texture_buffer = proctex_diff_lut.handle;
164 state.Apply();
165 proctex_diff_lut_buffer.Create();
166 glBindBuffer(GL_TEXTURE_BUFFER, proctex_diff_lut_buffer.handle);
167 glBufferData(GL_TEXTURE_BUFFER, sizeof(GLfloat) * 4 * 256, nullptr, GL_DYNAMIC_DRAW);
168 glActiveTexture(TextureUnits::ProcTexDiffLUT.Enum());
169 glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA32F, proctex_diff_lut_buffer.handle);
170
171 // Sync fixed function OpenGL state
172 SyncClipEnabled();
173 SyncClipCoef();
174 SyncCullMode();
175 SyncBlendEnabled();
176 SyncBlendFuncs();
177 SyncBlendColor();
178 SyncLogicOp();
179 SyncStencilTest();
180 SyncDepthTest();
181 SyncColorWriteMask();
182 SyncStencilWriteMask();
183 SyncDepthWriteMask();
184}
185
186RasterizerOpenGL::~RasterizerOpenGL() {}
187
188/**
189 * This is a helper function to resolve an issue when interpolating opposite quaternions. See below
190 * for a detailed description of this issue (yuriks):
191 *
192 * For any rotation, there are two quaternions, Q and -Q, that represent the same rotation. If you
193 * interpolate two quaternions that are opposite, instead of going from one rotation to another
194 * using the shortest path, you'll go around the longest path. You can test if two quaternions are
195 * opposite by checking if Dot(Q1, Q2) < 0. In that case, you can flip either of them, therefore
196 * making Dot(Q1, -Q2) positive.
197 *
198 * This solution corrects this issue per-vertex before passing the quaternions to OpenGL. This is
199 * correct for most cases but can still rotate around the long way sometimes. An implementation
200 * which did `lerp(lerp(Q1, Q2), Q3)` (with proper weighting), applying the dot product check
201 * between each step would work for those cases at the cost of being more complex to implement.
202 *
203 * Fortunately however, the 3DS hardware happens to also use this exact same logic to work around
204 * these issues, making this basic implementation actually more accurate to the hardware.
205 */
206static bool AreQuaternionsOpposite(Math::Vec4<Pica::float24> qa, Math::Vec4<Pica::float24> qb) {
207 Math::Vec4f a{qa.x.ToFloat32(), qa.y.ToFloat32(), qa.z.ToFloat32(), qa.w.ToFloat32()};
208 Math::Vec4f b{qb.x.ToFloat32(), qb.y.ToFloat32(), qb.z.ToFloat32(), qb.w.ToFloat32()};
209
210 return (Math::Dot(a, b) < 0.f);
211}
212
213void RasterizerOpenGL::AddTriangle(const Pica::Shader::OutputVertex& v0,
214 const Pica::Shader::OutputVertex& v1,
215 const Pica::Shader::OutputVertex& v2) {
216 vertex_batch.emplace_back(v0, false);
217 vertex_batch.emplace_back(v1, AreQuaternionsOpposite(v0.quat, v1.quat));
218 vertex_batch.emplace_back(v2, AreQuaternionsOpposite(v0.quat, v2.quat));
219}
220
221void RasterizerOpenGL::DrawTriangles() {
222 if (vertex_batch.empty())
223 return;
224
225 MICROPROFILE_SCOPE(OpenGL_Drawing);
226 const auto& regs = Pica::g_state.regs;
227
228 // Sync and bind the framebuffer surfaces
229 CachedSurface* color_surface;
230 CachedSurface* depth_surface;
231 MathUtil::Rectangle<int> rect;
232 std::tie(color_surface, depth_surface, rect) =
233 res_cache.GetFramebufferSurfaces(regs.framebuffer.framebuffer);
234
235 state.draw.draw_framebuffer = framebuffer.handle;
236 state.Apply();
237
238 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
239 color_surface != nullptr ? color_surface->texture.handle : 0, 0);
240 if (depth_surface != nullptr) {
241 if (regs.framebuffer.framebuffer.depth_format ==
242 Pica::FramebufferRegs::DepthFormat::D24S8) {
243 // attach both depth and stencil
244 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
245 depth_surface->texture.handle, 0);
246 } else {
247 // attach depth
248 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
249 depth_surface->texture.handle, 0);
250 // clear stencil attachment
251 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
252 }
253 } else {
254 // clear both depth and stencil attachment
255 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
256 0);
257 }
258
259 // Sync the viewport
260 // These registers hold half-width and half-height, so must be multiplied by 2
261 GLsizei viewport_width =
262 (GLsizei)Pica::float24::FromRaw(regs.rasterizer.viewport_size_x).ToFloat32() * 2;
263 GLsizei viewport_height =
264 (GLsizei)Pica::float24::FromRaw(regs.rasterizer.viewport_size_y).ToFloat32() * 2;
265
266 glViewport(
267 (GLint)(rect.left + regs.rasterizer.viewport_corner.x * color_surface->res_scale_width),
268 (GLint)(rect.bottom + regs.rasterizer.viewport_corner.y * color_surface->res_scale_height),
269 (GLsizei)(viewport_width * color_surface->res_scale_width),
270 (GLsizei)(viewport_height * color_surface->res_scale_height));
271
272 if (uniform_block_data.data.framebuffer_scale[0] != color_surface->res_scale_width ||
273 uniform_block_data.data.framebuffer_scale[1] != color_surface->res_scale_height) {
274
275 uniform_block_data.data.framebuffer_scale[0] = color_surface->res_scale_width;
276 uniform_block_data.data.framebuffer_scale[1] = color_surface->res_scale_height;
277 uniform_block_data.dirty = true;
278 }
279
280 // Scissor checks are window-, not viewport-relative, which means that if the cached texture
281 // sub-rect changes, the scissor bounds also need to be updated.
282 GLint scissor_x1 = static_cast<GLint>(
283 rect.left + regs.rasterizer.scissor_test.x1 * color_surface->res_scale_width);
284 GLint scissor_y1 = static_cast<GLint>(
285 rect.bottom + regs.rasterizer.scissor_test.y1 * color_surface->res_scale_height);
286 // x2, y2 have +1 added to cover the entire pixel area, otherwise you might get cracks when
287 // scaling or doing multisampling.
288 GLint scissor_x2 = static_cast<GLint>(
289 rect.left + (regs.rasterizer.scissor_test.x2 + 1) * color_surface->res_scale_width);
290 GLint scissor_y2 = static_cast<GLint>(
291 rect.bottom + (regs.rasterizer.scissor_test.y2 + 1) * color_surface->res_scale_height);
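    // For example, at 1x scale a scissor with x1 = 0 and x2 = 9 covers pixels 0 through 9
    // inclusive, so the computed bounds become the half-open range [0, 10) thanks to the +1 above.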
292
293 if (uniform_block_data.data.scissor_x1 != scissor_x1 ||
294 uniform_block_data.data.scissor_x2 != scissor_x2 ||
295 uniform_block_data.data.scissor_y1 != scissor_y1 ||
296 uniform_block_data.data.scissor_y2 != scissor_y2) {
297
298 uniform_block_data.data.scissor_x1 = scissor_x1;
299 uniform_block_data.data.scissor_x2 = scissor_x2;
300 uniform_block_data.data.scissor_y1 = scissor_y1;
301 uniform_block_data.data.scissor_y2 = scissor_y2;
302 uniform_block_data.dirty = true;
303 }
304
305 // Sync and bind the texture surfaces
306 const auto pica_textures = regs.texturing.GetTextures();
307 for (unsigned texture_index = 0; texture_index < pica_textures.size(); ++texture_index) {
308 const auto& texture = pica_textures[texture_index];
309
310 if (texture.enabled) {
311 texture_samplers[texture_index].SyncWithConfig(texture.config);
312 CachedSurface* surface = res_cache.GetTextureSurface(texture);
313 if (surface != nullptr) {
314 state.texture_units[texture_index].texture_2d = surface->texture.handle;
315 } else {
316 // Can occur when texture addr is null or its memory is unmapped/invalid
317 state.texture_units[texture_index].texture_2d = 0;
318 }
319 } else {
320 state.texture_units[texture_index].texture_2d = 0;
321 }
322 }
323
324 // Sync and bind the shader
325 if (shader_dirty) {
326 SetShader();
327 shader_dirty = false;
328 }
329
330 // Sync the lighting luts
331 for (unsigned index = 0; index < uniform_block_data.lut_dirty.size(); index++) {
332 if (uniform_block_data.lut_dirty[index]) {
333 SyncLightingLUT(index);
334 uniform_block_data.lut_dirty[index] = false;
335 }
336 }
337
338 // Sync the fog lut
339 if (uniform_block_data.fog_lut_dirty) {
340 SyncFogLUT();
341 uniform_block_data.fog_lut_dirty = false;
342 }
343
344 // Sync the proctex noise lut
345 if (uniform_block_data.proctex_noise_lut_dirty) {
346 SyncProcTexNoiseLUT();
347 uniform_block_data.proctex_noise_lut_dirty = false;
348 }
349
350 // Sync the proctex color map
351 if (uniform_block_data.proctex_color_map_dirty) {
352 SyncProcTexColorMap();
353 uniform_block_data.proctex_color_map_dirty = false;
354 }
355
356 // Sync the proctex alpha map
357 if (uniform_block_data.proctex_alpha_map_dirty) {
358 SyncProcTexAlphaMap();
359 uniform_block_data.proctex_alpha_map_dirty = false;
360 }
361
362 // Sync the proctex lut
363 if (uniform_block_data.proctex_lut_dirty) {
364 SyncProcTexLUT();
365 uniform_block_data.proctex_lut_dirty = false;
366 }
367
368 // Sync the proctex difference lut
369 if (uniform_block_data.proctex_diff_lut_dirty) {
370 SyncProcTexDiffLUT();
371 uniform_block_data.proctex_diff_lut_dirty = false;
372 }
373
374 // Sync the uniform data
375 if (uniform_block_data.dirty) {
376 glBufferData(GL_UNIFORM_BUFFER, sizeof(UniformData), &uniform_block_data.data,
377 GL_STATIC_DRAW);
378 uniform_block_data.dirty = false;
379 }
380
381 state.Apply();
382
383 // Draw the vertex batch
384 glBufferData(GL_ARRAY_BUFFER, vertex_batch.size() * sizeof(HardwareVertex), vertex_batch.data(),
385 GL_STREAM_DRAW);
386 glDrawArrays(GL_TRIANGLES, 0, (GLsizei)vertex_batch.size());
387
388 // Mark framebuffer surfaces as dirty
389 // TODO: Restrict invalidation area to the viewport
390 if (color_surface != nullptr) {
391 color_surface->dirty = true;
392 res_cache.FlushRegion(color_surface->addr, color_surface->size, color_surface, true);
393 }
394 if (depth_surface != nullptr) {
395 depth_surface->dirty = true;
396 res_cache.FlushRegion(depth_surface->addr, depth_surface->size, depth_surface, true);
397 }
398
399 vertex_batch.clear();
400
401 // Unbind textures for potential future use as framebuffer attachments
402 for (unsigned texture_index = 0; texture_index < pica_textures.size(); ++texture_index) {
403 state.texture_units[texture_index].texture_2d = 0;
404 }
405 state.Apply();
406}
407
408void RasterizerOpenGL::NotifyPicaRegisterChanged(u32 id) {
409 const auto& regs = Pica::g_state.regs;
410
411 switch (id) {
412 // Culling
413 case PICA_REG_INDEX(rasterizer.cull_mode):
414 SyncCullMode();
415 break;
416
417 // Clipping plane
418 case PICA_REG_INDEX(rasterizer.clip_enable):
419 SyncClipEnabled();
420 break;
421
422 case PICA_REG_INDEX_WORKAROUND(rasterizer.clip_coef[0], 0x48):
423 case PICA_REG_INDEX_WORKAROUND(rasterizer.clip_coef[1], 0x49):
424 case PICA_REG_INDEX_WORKAROUND(rasterizer.clip_coef[2], 0x4a):
425 case PICA_REG_INDEX_WORKAROUND(rasterizer.clip_coef[3], 0x4b):
426 SyncClipCoef();
427 break;
428
429 // Depth modifiers
430 case PICA_REG_INDEX(rasterizer.viewport_depth_range):
431 SyncDepthScale();
432 break;
433 case PICA_REG_INDEX(rasterizer.viewport_depth_near_plane):
434 SyncDepthOffset();
435 break;
436
437 // Depth buffering
438 case PICA_REG_INDEX(rasterizer.depthmap_enable):
439 shader_dirty = true;
440 break;
441
442 // Blending
443 case PICA_REG_INDEX(framebuffer.output_merger.alphablend_enable):
444 SyncBlendEnabled();
445 break;
446 case PICA_REG_INDEX(framebuffer.output_merger.alpha_blending):
447 SyncBlendFuncs();
448 break;
449 case PICA_REG_INDEX(framebuffer.output_merger.blend_const):
450 SyncBlendColor();
451 break;
452
453 // Fog state
454 case PICA_REG_INDEX(texturing.fog_color):
455 SyncFogColor();
456 break;
457 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[0], 0xe8):
458 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[1], 0xe9):
459 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[2], 0xea):
460 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[3], 0xeb):
461 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[4], 0xec):
462 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[5], 0xed):
463 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[6], 0xee):
464 case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[7], 0xef):
465 uniform_block_data.fog_lut_dirty = true;
466 break;
467
468 // ProcTex state
469 case PICA_REG_INDEX(texturing.proctex):
470 case PICA_REG_INDEX(texturing.proctex_lut):
471 case PICA_REG_INDEX(texturing.proctex_lut_offset):
472 shader_dirty = true;
473 break;
474
475 case PICA_REG_INDEX(texturing.proctex_noise_u):
476 case PICA_REG_INDEX(texturing.proctex_noise_v):
477 case PICA_REG_INDEX(texturing.proctex_noise_frequency):
478 SyncProcTexNoise();
479 break;
480
481 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[0], 0xb0):
482 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[1], 0xb1):
483 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[2], 0xb2):
484 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[3], 0xb3):
485 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[4], 0xb4):
486 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[5], 0xb5):
487 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[6], 0xb6):
488 case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[7], 0xb7):
489 using Pica::TexturingRegs;
490 switch (regs.texturing.proctex_lut_config.ref_table.Value()) {
491 case TexturingRegs::ProcTexLutTable::Noise:
492 uniform_block_data.proctex_noise_lut_dirty = true;
493 break;
494 case TexturingRegs::ProcTexLutTable::ColorMap:
495 uniform_block_data.proctex_color_map_dirty = true;
496 break;
497 case TexturingRegs::ProcTexLutTable::AlphaMap:
498 uniform_block_data.proctex_alpha_map_dirty = true;
499 break;
500 case TexturingRegs::ProcTexLutTable::Color:
501 uniform_block_data.proctex_lut_dirty = true;
502 break;
503 case TexturingRegs::ProcTexLutTable::ColorDiff:
504 uniform_block_data.proctex_diff_lut_dirty = true;
505 break;
506 }
507 break;
508
509 // Alpha test
510 case PICA_REG_INDEX(framebuffer.output_merger.alpha_test):
511 SyncAlphaTest();
512 shader_dirty = true;
513 break;
514
515 // Sync GL stencil test + stencil write mask
516 // (Pica stencil test function register also contains a stencil write mask)
517 case PICA_REG_INDEX(framebuffer.output_merger.stencil_test.raw_func):
518 SyncStencilTest();
519 SyncStencilWriteMask();
520 break;
521 case PICA_REG_INDEX(framebuffer.output_merger.stencil_test.raw_op):
522 case PICA_REG_INDEX(framebuffer.framebuffer.depth_format):
523 SyncStencilTest();
524 break;
525
526 // Sync GL depth test + depth and color write mask
527 // (Pica depth test function register also contains a depth and color write mask)
528 case PICA_REG_INDEX(framebuffer.output_merger.depth_test_enable):
529 SyncDepthTest();
530 SyncDepthWriteMask();
531 SyncColorWriteMask();
532 break;
533
534 // Sync GL depth and stencil write mask
535 // (This is a dedicated combined depth / stencil write-enable register)
536 case PICA_REG_INDEX(framebuffer.framebuffer.allow_depth_stencil_write):
537 SyncDepthWriteMask();
538 SyncStencilWriteMask();
539 break;
540
541 // Sync GL color write mask
542 // (This is a dedicated color write-enable register)
543 case PICA_REG_INDEX(framebuffer.framebuffer.allow_color_write):
544 SyncColorWriteMask();
545 break;
546
547 // Scissor test
548 case PICA_REG_INDEX(rasterizer.scissor_test.mode):
549 shader_dirty = true;
550 break;
551
552 // Logic op
553 case PICA_REG_INDEX(framebuffer.output_merger.logic_op):
554 SyncLogicOp();
555 break;
556
557 case PICA_REG_INDEX(texturing.main_config):
558 shader_dirty = true;
559 break;
560
561 // Texture 0 type
562 case PICA_REG_INDEX(texturing.texture0.type):
563 shader_dirty = true;
564 break;
565
566 // TEV stages
567 // (This also syncs fog_mode and fog_flip which are part of tev_combiner_buffer_input)
568 case PICA_REG_INDEX(texturing.tev_stage0.color_source1):
569 case PICA_REG_INDEX(texturing.tev_stage0.color_modifier1):
570 case PICA_REG_INDEX(texturing.tev_stage0.color_op):
571 case PICA_REG_INDEX(texturing.tev_stage0.color_scale):
572 case PICA_REG_INDEX(texturing.tev_stage1.color_source1):
573 case PICA_REG_INDEX(texturing.tev_stage1.color_modifier1):
574 case PICA_REG_INDEX(texturing.tev_stage1.color_op):
575 case PICA_REG_INDEX(texturing.tev_stage1.color_scale):
576 case PICA_REG_INDEX(texturing.tev_stage2.color_source1):
577 case PICA_REG_INDEX(texturing.tev_stage2.color_modifier1):
578 case PICA_REG_INDEX(texturing.tev_stage2.color_op):
579 case PICA_REG_INDEX(texturing.tev_stage2.color_scale):
580 case PICA_REG_INDEX(texturing.tev_stage3.color_source1):
581 case PICA_REG_INDEX(texturing.tev_stage3.color_modifier1):
582 case PICA_REG_INDEX(texturing.tev_stage3.color_op):
583 case PICA_REG_INDEX(texturing.tev_stage3.color_scale):
584 case PICA_REG_INDEX(texturing.tev_stage4.color_source1):
585 case PICA_REG_INDEX(texturing.tev_stage4.color_modifier1):
586 case PICA_REG_INDEX(texturing.tev_stage4.color_op):
587 case PICA_REG_INDEX(texturing.tev_stage4.color_scale):
588 case PICA_REG_INDEX(texturing.tev_stage5.color_source1):
589 case PICA_REG_INDEX(texturing.tev_stage5.color_modifier1):
590 case PICA_REG_INDEX(texturing.tev_stage5.color_op):
591 case PICA_REG_INDEX(texturing.tev_stage5.color_scale):
592 case PICA_REG_INDEX(texturing.tev_combiner_buffer_input):
593 shader_dirty = true;
594 break;
595 case PICA_REG_INDEX(texturing.tev_stage0.const_r):
596 SyncTevConstColor(0, regs.texturing.tev_stage0);
597 break;
598 case PICA_REG_INDEX(texturing.tev_stage1.const_r):
599 SyncTevConstColor(1, regs.texturing.tev_stage1);
600 break;
601 case PICA_REG_INDEX(texturing.tev_stage2.const_r):
602 SyncTevConstColor(2, regs.texturing.tev_stage2);
603 break;
604 case PICA_REG_INDEX(texturing.tev_stage3.const_r):
605 SyncTevConstColor(3, regs.texturing.tev_stage3);
606 break;
607 case PICA_REG_INDEX(texturing.tev_stage4.const_r):
608 SyncTevConstColor(4, regs.texturing.tev_stage4);
609 break;
610 case PICA_REG_INDEX(texturing.tev_stage5.const_r):
611 SyncTevConstColor(5, regs.texturing.tev_stage5);
612 break;
613
614 // TEV combiner buffer color
615 case PICA_REG_INDEX(texturing.tev_combiner_buffer_color):
616 SyncCombinerColor();
617 break;
618
619 // Fragment lighting switches
620 case PICA_REG_INDEX(lighting.disable):
621 case PICA_REG_INDEX(lighting.max_light_index):
622 case PICA_REG_INDEX(lighting.config0):
623 case PICA_REG_INDEX(lighting.config1):
624 case PICA_REG_INDEX(lighting.abs_lut_input):
625 case PICA_REG_INDEX(lighting.lut_input):
626 case PICA_REG_INDEX(lighting.lut_scale):
627 case PICA_REG_INDEX(lighting.light_enable):
628 break;
629
630 // Fragment lighting specular 0 color
631 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].specular_0, 0x140 + 0 * 0x10):
632 SyncLightSpecular0(0);
633 break;
634 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].specular_0, 0x140 + 1 * 0x10):
635 SyncLightSpecular0(1);
636 break;
637 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].specular_0, 0x140 + 2 * 0x10):
638 SyncLightSpecular0(2);
639 break;
640 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].specular_0, 0x140 + 3 * 0x10):
641 SyncLightSpecular0(3);
642 break;
643 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].specular_0, 0x140 + 4 * 0x10):
644 SyncLightSpecular0(4);
645 break;
646 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].specular_0, 0x140 + 5 * 0x10):
647 SyncLightSpecular0(5);
648 break;
649 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].specular_0, 0x140 + 6 * 0x10):
650 SyncLightSpecular0(6);
651 break;
652 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].specular_0, 0x140 + 7 * 0x10):
653 SyncLightSpecular0(7);
654 break;
655
656 // Fragment lighting specular 1 color
657 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].specular_1, 0x141 + 0 * 0x10):
658 SyncLightSpecular1(0);
659 break;
660 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].specular_1, 0x141 + 1 * 0x10):
661 SyncLightSpecular1(1);
662 break;
663 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].specular_1, 0x141 + 2 * 0x10):
664 SyncLightSpecular1(2);
665 break;
666 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].specular_1, 0x141 + 3 * 0x10):
667 SyncLightSpecular1(3);
668 break;
669 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].specular_1, 0x141 + 4 * 0x10):
670 SyncLightSpecular1(4);
671 break;
672 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].specular_1, 0x141 + 5 * 0x10):
673 SyncLightSpecular1(5);
674 break;
675 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].specular_1, 0x141 + 6 * 0x10):
676 SyncLightSpecular1(6);
677 break;
678 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].specular_1, 0x141 + 7 * 0x10):
679 SyncLightSpecular1(7);
680 break;
681
682 // Fragment lighting diffuse color
683 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].diffuse, 0x142 + 0 * 0x10):
684 SyncLightDiffuse(0);
685 break;
686 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].diffuse, 0x142 + 1 * 0x10):
687 SyncLightDiffuse(1);
688 break;
689 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].diffuse, 0x142 + 2 * 0x10):
690 SyncLightDiffuse(2);
691 break;
692 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].diffuse, 0x142 + 3 * 0x10):
693 SyncLightDiffuse(3);
694 break;
695 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].diffuse, 0x142 + 4 * 0x10):
696 SyncLightDiffuse(4);
697 break;
698 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].diffuse, 0x142 + 5 * 0x10):
699 SyncLightDiffuse(5);
700 break;
701 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].diffuse, 0x142 + 6 * 0x10):
702 SyncLightDiffuse(6);
703 break;
704 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].diffuse, 0x142 + 7 * 0x10):
705 SyncLightDiffuse(7);
706 break;
707
708 // Fragment lighting ambient color
709 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].ambient, 0x143 + 0 * 0x10):
710 SyncLightAmbient(0);
711 break;
712 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].ambient, 0x143 + 1 * 0x10):
713 SyncLightAmbient(1);
714 break;
715 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].ambient, 0x143 + 2 * 0x10):
716 SyncLightAmbient(2);
717 break;
718 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].ambient, 0x143 + 3 * 0x10):
719 SyncLightAmbient(3);
720 break;
721 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].ambient, 0x143 + 4 * 0x10):
722 SyncLightAmbient(4);
723 break;
724 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].ambient, 0x143 + 5 * 0x10):
725 SyncLightAmbient(5);
726 break;
727 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].ambient, 0x143 + 6 * 0x10):
728 SyncLightAmbient(6);
729 break;
730 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].ambient, 0x143 + 7 * 0x10):
731 SyncLightAmbient(7);
732 break;
733
734 // Fragment lighting position
735 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].x, 0x144 + 0 * 0x10):
736 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].z, 0x145 + 0 * 0x10):
737 SyncLightPosition(0);
738 break;
739 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].x, 0x144 + 1 * 0x10):
740 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].z, 0x145 + 1 * 0x10):
741 SyncLightPosition(1);
742 break;
743 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].x, 0x144 + 2 * 0x10):
744 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].z, 0x145 + 2 * 0x10):
745 SyncLightPosition(2);
746 break;
747 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].x, 0x144 + 3 * 0x10):
748 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].z, 0x145 + 3 * 0x10):
749 SyncLightPosition(3);
750 break;
751 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].x, 0x144 + 4 * 0x10):
752 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].z, 0x145 + 4 * 0x10):
753 SyncLightPosition(4);
754 break;
755 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].x, 0x144 + 5 * 0x10):
756 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].z, 0x145 + 5 * 0x10):
757 SyncLightPosition(5);
758 break;
759 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].x, 0x144 + 6 * 0x10):
760 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].z, 0x145 + 6 * 0x10):
761 SyncLightPosition(6);
762 break;
763 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].x, 0x144 + 7 * 0x10):
764 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].z, 0x145 + 7 * 0x10):
765 SyncLightPosition(7);
766 break;
767
768 // Fragment spot lighting direction
769 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].spot_x, 0x146 + 0 * 0x10):
770 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].spot_z, 0x147 + 0 * 0x10):
771 SyncLightSpotDirection(0);
772 break;
773 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].spot_x, 0x146 + 1 * 0x10):
774 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].spot_z, 0x147 + 1 * 0x10):
775 SyncLightSpotDirection(1);
776 break;
777 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].spot_x, 0x146 + 2 * 0x10):
778 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].spot_z, 0x147 + 2 * 0x10):
779 SyncLightSpotDirection(2);
780 break;
781 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].spot_x, 0x146 + 3 * 0x10):
782 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].spot_z, 0x147 + 3 * 0x10):
783 SyncLightSpotDirection(3);
784 break;
785 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].spot_x, 0x146 + 4 * 0x10):
786 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].spot_z, 0x147 + 4 * 0x10):
787 SyncLightSpotDirection(4);
788 break;
789 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].spot_x, 0x146 + 5 * 0x10):
790 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].spot_z, 0x147 + 5 * 0x10):
791 SyncLightSpotDirection(5);
792 break;
793 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].spot_x, 0x146 + 6 * 0x10):
794 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].spot_z, 0x147 + 6 * 0x10):
795 SyncLightSpotDirection(6);
796 break;
797 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].spot_x, 0x146 + 7 * 0x10):
798 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].spot_z, 0x147 + 7 * 0x10):
799 SyncLightSpotDirection(7);
800 break;
801
802 // Fragment lighting light source config
803 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].config, 0x149 + 0 * 0x10):
804 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].config, 0x149 + 1 * 0x10):
805 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].config, 0x149 + 2 * 0x10):
806 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].config, 0x149 + 3 * 0x10):
807 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].config, 0x149 + 4 * 0x10):
808 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].config, 0x149 + 5 * 0x10):
809 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].config, 0x149 + 6 * 0x10):
810 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].config, 0x149 + 7 * 0x10):
811 shader_dirty = true;
812 break;
813
814 // Fragment lighting distance attenuation bias
815 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].dist_atten_bias, 0x014A + 0 * 0x10):
816 SyncLightDistanceAttenuationBias(0);
817 break;
818 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].dist_atten_bias, 0x014A + 1 * 0x10):
819 SyncLightDistanceAttenuationBias(1);
820 break;
821 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].dist_atten_bias, 0x014A + 2 * 0x10):
822 SyncLightDistanceAttenuationBias(2);
823 break;
824 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].dist_atten_bias, 0x014A + 3 * 0x10):
825 SyncLightDistanceAttenuationBias(3);
826 break;
827 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].dist_atten_bias, 0x014A + 4 * 0x10):
828 SyncLightDistanceAttenuationBias(4);
829 break;
830 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].dist_atten_bias, 0x014A + 5 * 0x10):
831 SyncLightDistanceAttenuationBias(5);
832 break;
833 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].dist_atten_bias, 0x014A + 6 * 0x10):
834 SyncLightDistanceAttenuationBias(6);
835 break;
836 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].dist_atten_bias, 0x014A + 7 * 0x10):
837 SyncLightDistanceAttenuationBias(7);
838 break;
839
840 // Fragment lighting distance attenuation scale
841 case PICA_REG_INDEX_WORKAROUND(lighting.light[0].dist_atten_scale, 0x014B + 0 * 0x10):
842 SyncLightDistanceAttenuationScale(0);
843 break;
844 case PICA_REG_INDEX_WORKAROUND(lighting.light[1].dist_atten_scale, 0x014B + 1 * 0x10):
845 SyncLightDistanceAttenuationScale(1);
846 break;
847 case PICA_REG_INDEX_WORKAROUND(lighting.light[2].dist_atten_scale, 0x014B + 2 * 0x10):
848 SyncLightDistanceAttenuationScale(2);
849 break;
850 case PICA_REG_INDEX_WORKAROUND(lighting.light[3].dist_atten_scale, 0x014B + 3 * 0x10):
851 SyncLightDistanceAttenuationScale(3);
852 break;
853 case PICA_REG_INDEX_WORKAROUND(lighting.light[4].dist_atten_scale, 0x014B + 4 * 0x10):
854 SyncLightDistanceAttenuationScale(4);
855 break;
856 case PICA_REG_INDEX_WORKAROUND(lighting.light[5].dist_atten_scale, 0x014B + 5 * 0x10):
857 SyncLightDistanceAttenuationScale(5);
858 break;
859 case PICA_REG_INDEX_WORKAROUND(lighting.light[6].dist_atten_scale, 0x014B + 6 * 0x10):
860 SyncLightDistanceAttenuationScale(6);
861 break;
862 case PICA_REG_INDEX_WORKAROUND(lighting.light[7].dist_atten_scale, 0x014B + 7 * 0x10):
863 SyncLightDistanceAttenuationScale(7);
864 break;
865
866 // Fragment lighting global ambient color (emission + ambient * ambient)
867 case PICA_REG_INDEX_WORKAROUND(lighting.global_ambient, 0x1c0):
868 SyncGlobalAmbient();
869 break;
870
871 // Fragment lighting lookup tables
872 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[0], 0x1c8):
873 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[1], 0x1c9):
874 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[2], 0x1ca):
875 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[3], 0x1cb):
876 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[4], 0x1cc):
877 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[5], 0x1cd):
878 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[6], 0x1ce):
879 case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[7], 0x1cf): {
880 auto& lut_config = regs.lighting.lut_config;
881 uniform_block_data.lut_dirty[lut_config.type] = true;
882 break;
883 }
884 }
885}
886
887void RasterizerOpenGL::FlushAll() {
888 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
889 res_cache.FlushAll();
890}
891
892void RasterizerOpenGL::FlushRegion(PAddr addr, u64 size) {
893 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
894 res_cache.FlushRegion(addr, size, nullptr, false);
895}
896
897void RasterizerOpenGL::FlushAndInvalidateRegion(PAddr addr, u64 size) {
898 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
899 res_cache.FlushRegion(addr, size, nullptr, true);
900}
901
902bool RasterizerOpenGL::AccelerateDisplayTransfer(const GPU::Regs::DisplayTransferConfig& config) {
903 MICROPROFILE_SCOPE(OpenGL_Blits);
904
905 CachedSurface src_params;
906 src_params.addr = config.GetPhysicalInputAddress();
907 // It's important to use the correct source input width to properly skip over parts of the input
908 // image which will be cropped from the output but still affect the stride of the input image.
909 src_params.width = config.input_width;
910 // Using the output's height is fine because we don't read or skip over the remaining part of
911 // the image, and it allows for smaller texture cache lookup rectangles.
912 src_params.height = config.output_height;
913 src_params.is_tiled = !config.input_linear;
914 src_params.pixel_format = CachedSurface::PixelFormatFromGPUPixelFormat(config.input_format);
915
916 CachedSurface dst_params;
917 dst_params.addr = config.GetPhysicalOutputAddress();
918 dst_params.width =
919 config.scaling != config.NoScale ? config.output_width / 2 : config.output_width.Value();
920 dst_params.height =
921 config.scaling == config.ScaleXY ? config.output_height / 2 : config.output_height.Value();
922 dst_params.is_tiled = config.input_linear != config.dont_swizzle;
923 dst_params.pixel_format = CachedSurface::PixelFormatFromGPUPixelFormat(config.output_format);
924
925 MathUtil::Rectangle<int> src_rect;
926 CachedSurface* src_surface = res_cache.GetSurfaceRect(src_params, false, true, src_rect);
927
928 if (src_surface == nullptr) {
929 return false;
930 }
931
932 // Adjust the source rectangle to take into account parts of the input lines being cropped
933 if (config.input_width > config.output_width) {
934 src_rect.right -= static_cast<int>((config.input_width - config.output_width) *
935 src_surface->res_scale_width);
936 }
937
938 // Require destination surface to have same resolution scale as source to preserve scaling
939 dst_params.res_scale_width = src_surface->res_scale_width;
940 dst_params.res_scale_height = src_surface->res_scale_height;
941
942 MathUtil::Rectangle<int> dst_rect;
943 CachedSurface* dst_surface = res_cache.GetSurfaceRect(dst_params, true, false, dst_rect);
944
945 if (dst_surface == nullptr) {
946 return false;
947 }
948
949 // Don't accelerate if the src and dst surfaces are the same
950 if (src_surface == dst_surface) {
951 return false;
952 }
953
954 if (config.flip_vertically) {
955 std::swap(dst_rect.top, dst_rect.bottom);
956 }
957
958 if (!res_cache.TryBlitSurfaces(src_surface, src_rect, dst_surface, dst_rect)) {
959 return false;
960 }
961
962 u32 dst_size = dst_params.width * dst_params.height *
963 CachedSurface::GetFormatBpp(dst_params.pixel_format) / 8;
964 dst_surface->dirty = true;
965 res_cache.FlushRegion(config.GetPhysicalOutputAddress(), dst_size, dst_surface, true);
966 return true;
967}
968
969bool RasterizerOpenGL::AccelerateTextureCopy(const GPU::Regs::DisplayTransferConfig& config) {
970 // TODO(tfarley): Try to hardware accelerate this
971 return false;
972}
973
974bool RasterizerOpenGL::AccelerateFill(const GPU::Regs::MemoryFillConfig& config) {
975 MICROPROFILE_SCOPE(OpenGL_Blits);
976 using PixelFormat = CachedSurface::PixelFormat;
977 using SurfaceType = CachedSurface::SurfaceType;
978
979 CachedSurface* dst_surface = res_cache.TryGetFillSurface(config);
980
981 if (dst_surface == nullptr) {
982 return false;
983 }
984
985 OpenGLState cur_state = OpenGLState::GetCurState();
986
987 SurfaceType dst_type = CachedSurface::GetFormatType(dst_surface->pixel_format);
988
989 GLuint old_fb = cur_state.draw.draw_framebuffer;
990 cur_state.draw.draw_framebuffer = framebuffer.handle;
991 // TODO: When scissor test is implemented, need to disable scissor test in cur_state here so
992 // Clear call isn't affected
993 cur_state.Apply();
994
995 if (dst_type == SurfaceType::Color || dst_type == SurfaceType::Texture) {
996 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
997 dst_surface->texture.handle, 0);
998 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
999 0);
1000
1001 GLfloat color_values[4] = {0.0f, 0.0f, 0.0f, 0.0f};
1002
1003 // TODO: Handle additional pixel format and fill value size combinations to accelerate more
1004 // cases
1005 // For instance, checking if fill value's bytes/bits repeat to allow filling
1006 // I8/A8/I4/A4/...
1007 // Currently only handles formats that are multiples of the fill value size
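        // One possible form of the repeat check mentioned above (a sketch of the TODO, not
        // something the current code does): a 16-bit fill value could also fill an 8-bit format
        // when both of its bytes are equal, e.g.
        //     const bool repeats_as_bytes = (value_16bit >> 8) == (value_16bit & 0xFF);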
1008
1009 if (config.fill_24bit) {
1010 switch (dst_surface->pixel_format) {
1011 case PixelFormat::RGB8:
1012 color_values[0] = config.value_24bit_r / 255.0f;
1013 color_values[1] = config.value_24bit_g / 255.0f;
1014 color_values[2] = config.value_24bit_b / 255.0f;
1015 break;
1016 default:
1017 return false;
1018 }
1019 } else if (config.fill_32bit) {
1020 u32 value = config.value_32bit;
1021
1022 switch (dst_surface->pixel_format) {
1023 case PixelFormat::RGBA8:
1024 color_values[0] = (value >> 24) / 255.0f;
1025 color_values[1] = ((value >> 16) & 0xFF) / 255.0f;
1026 color_values[2] = ((value >> 8) & 0xFF) / 255.0f;
1027 color_values[3] = (value & 0xFF) / 255.0f;
1028 break;
1029 default:
1030 return false;
1031 }
1032 } else {
1033 u16 value_16bit = config.value_16bit.Value();
1034 Math::Vec4<u8> color;
1035
1036 switch (dst_surface->pixel_format) {
1037 case PixelFormat::RGBA8:
1038 color_values[0] = (value_16bit >> 8) / 255.0f;
1039 color_values[1] = (value_16bit & 0xFF) / 255.0f;
1040 color_values[2] = color_values[0];
1041 color_values[3] = color_values[1];
1042 break;
1043 case PixelFormat::RGB5A1:
1044 color = Color::DecodeRGB5A1((const u8*)&value_16bit);
1045 color_values[0] = color[0] / 31.0f;
1046 color_values[1] = color[1] / 31.0f;
1047 color_values[2] = color[2] / 31.0f;
1048 color_values[3] = color[3];
1049 break;
1050 case PixelFormat::RGB565:
1051 color = Color::DecodeRGB565((const u8*)&value_16bit);
1052 color_values[0] = color[0] / 31.0f;
1053 color_values[1] = color[1] / 63.0f;
1054 color_values[2] = color[2] / 31.0f;
1055 break;
1056 case PixelFormat::RGBA4:
1057 color = Color::DecodeRGBA4((const u8*)&value_16bit);
1058 color_values[0] = color[0] / 15.0f;
1059 color_values[1] = color[1] / 15.0f;
1060 color_values[2] = color[2] / 15.0f;
1061 color_values[3] = color[3] / 15.0f;
1062 break;
1063 case PixelFormat::IA8:
1064 case PixelFormat::RG8:
1065 color_values[0] = (value_16bit >> 8) / 255.0f;
1066 color_values[1] = (value_16bit & 0xFF) / 255.0f;
1067 break;
1068 default:
1069 return false;
1070 }
1071 }
1072
1073 cur_state.color_mask.red_enabled = GL_TRUE;
1074 cur_state.color_mask.green_enabled = GL_TRUE;
1075 cur_state.color_mask.blue_enabled = GL_TRUE;
1076 cur_state.color_mask.alpha_enabled = GL_TRUE;
1077 cur_state.Apply();
1078 glClearBufferfv(GL_COLOR, 0, color_values);
1079 } else if (dst_type == SurfaceType::Depth) {
1080 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
1081 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
1082 dst_surface->texture.handle, 0);
1083 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
1084
1085 GLfloat value_float;
1086 if (dst_surface->pixel_format == CachedSurface::PixelFormat::D16) {
1087 value_float = config.value_32bit / 65535.0f; // 2^16 - 1
1088 } else if (dst_surface->pixel_format == CachedSurface::PixelFormat::D24) {
1089 value_float = config.value_32bit / 16777215.0f; // 2^24 - 1
1090 }
1091
1092 cur_state.depth.write_mask = GL_TRUE;
1093 cur_state.Apply();
1094 glClearBufferfv(GL_DEPTH, 0, &value_float);
1095 } else if (dst_type == SurfaceType::DepthStencil) {
1096 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
1097 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
1098 dst_surface->texture.handle, 0);
1099
1100 GLfloat value_float = (config.value_32bit & 0xFFFFFF) / 16777215.0f; // 2^24 - 1
1101 GLint value_int = (config.value_32bit >> 24);
1102
1103 cur_state.depth.write_mask = GL_TRUE;
1104 cur_state.stencil.write_mask = 0xFF;
1105 cur_state.Apply();
1106 glClearBufferfi(GL_DEPTH_STENCIL, 0, value_float, value_int);
1107 }
1108
1109 cur_state.draw.draw_framebuffer = old_fb;
1110 // TODO: Return scissor test to previous value when scissor test is implemented
1111 cur_state.Apply();
1112
1113 dst_surface->dirty = true;
1114 res_cache.FlushRegion(dst_surface->addr, dst_surface->size, dst_surface, true);
1115 return true;
1116}
1117
1118bool RasterizerOpenGL::AccelerateDisplay(const GPU::Regs::FramebufferConfig& config,
1119 PAddr framebuffer_addr, u32 pixel_stride,
1120 ScreenInfo& screen_info) {
1121 if (framebuffer_addr == 0) {
1122 return false;
1123 }
1124 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
1125
1126 CachedSurface src_params;
1127 src_params.addr = framebuffer_addr;
1128 src_params.width = config.width;
1129 src_params.height = config.height;
1130 src_params.pixel_stride = pixel_stride;
1131 src_params.is_tiled = false;
1132 src_params.pixel_format = CachedSurface::PixelFormatFromGPUPixelFormat(config.color_format);
1133
1134 MathUtil::Rectangle<int> src_rect;
1135 CachedSurface* src_surface = res_cache.GetSurfaceRect(src_params, false, true, src_rect);
1136
1137 if (src_surface == nullptr) {
1138 return false;
1139 }
1140
1141 u32 scaled_width = src_surface->GetScaledWidth();
1142 u32 scaled_height = src_surface->GetScaledHeight();
1143
1144 screen_info.display_texcoords = MathUtil::Rectangle<float>(
1145 (float)src_rect.top / (float)scaled_height, (float)src_rect.left / (float)scaled_width,
1146 (float)src_rect.bottom / (float)scaled_height, (float)src_rect.right / (float)scaled_width);
1147
1148 screen_info.display_texture = src_surface->texture.handle;
1149
1150 return true;
1151}
1152
1153void RasterizerOpenGL::SamplerInfo::Create() {
1154 sampler.Create();
1155 mag_filter = min_filter = TextureConfig::Linear;
1156 wrap_s = wrap_t = TextureConfig::Repeat;
1157 border_color = 0;
1158
1159 glSamplerParameteri(sampler.handle, GL_TEXTURE_MIN_FILTER,
1160 GL_LINEAR); // default is GL_LINEAR_MIPMAP_LINEAR
1161 // Other attributes have correct defaults
1162}
1163
1164void RasterizerOpenGL::SamplerInfo::SyncWithConfig(
1165 const Pica::TexturingRegs::TextureConfig& config) {
1166
1167 GLuint s = sampler.handle;
1168
1169 if (mag_filter != config.mag_filter) {
1170 mag_filter = config.mag_filter;
1171 glSamplerParameteri(s, GL_TEXTURE_MAG_FILTER, PicaToGL::TextureFilterMode(mag_filter));
1172 }
1173 if (min_filter != config.min_filter) {
1174 min_filter = config.min_filter;
1175 glSamplerParameteri(s, GL_TEXTURE_MIN_FILTER, PicaToGL::TextureFilterMode(min_filter));
1176 }
1177
1178 if (wrap_s != config.wrap_s) {
1179 wrap_s = config.wrap_s;
1180 glSamplerParameteri(s, GL_TEXTURE_WRAP_S, PicaToGL::WrapMode(wrap_s));
1181 }
1182 if (wrap_t != config.wrap_t) {
1183 wrap_t = config.wrap_t;
1184 glSamplerParameteri(s, GL_TEXTURE_WRAP_T, PicaToGL::WrapMode(wrap_t));
1185 }
1186
1187 if (wrap_s == TextureConfig::ClampToBorder || wrap_t == TextureConfig::ClampToBorder) {
1188 if (border_color != config.border_color.raw) {
1189 border_color = config.border_color.raw;
1190 auto gl_color = PicaToGL::ColorRGBA8(border_color);
1191 glSamplerParameterfv(s, GL_TEXTURE_BORDER_COLOR, gl_color.data());
1192 }
1193 }
1194}
1195
1196void RasterizerOpenGL::SetShader() {
1197 auto config = GLShader::PicaShaderConfig::BuildFromRegs(Pica::g_state.regs);
1198 std::unique_ptr<PicaShader> shader = std::make_unique<PicaShader>();
1199
1200 // Find (or generate) the GLSL shader for the current TEV state
1201 auto cached_shader = shader_cache.find(config);
1202 if (cached_shader != shader_cache.end()) {
1203 current_shader = cached_shader->second.get();
1204
1205 state.draw.shader_program = current_shader->shader.handle;
1206 state.Apply();
1207 } else {
1208 LOG_DEBUG(Render_OpenGL, "Creating new shader");
1209
1210 shader->shader.Create(GLShader::GenerateVertexShader().c_str(),
1211 GLShader::GenerateFragmentShader(config).c_str());
1212
1213 state.draw.shader_program = shader->shader.handle;
1214 state.Apply();
1215
1216 // Set the texture samplers to correspond to different texture units
1217 GLint uniform_tex = glGetUniformLocation(shader->shader.handle, "tex[0]");
1218 if (uniform_tex != -1) {
1219 glUniform1i(uniform_tex, TextureUnits::PicaTexture(0).id);
1220 }
1221 uniform_tex = glGetUniformLocation(shader->shader.handle, "tex[1]");
1222 if (uniform_tex != -1) {
1223 glUniform1i(uniform_tex, TextureUnits::PicaTexture(1).id);
1224 }
1225 uniform_tex = glGetUniformLocation(shader->shader.handle, "tex[2]");
1226 if (uniform_tex != -1) {
1227 glUniform1i(uniform_tex, TextureUnits::PicaTexture(2).id);
1228 }
1229
1230 // Set the texture samplers to correspond to different lookup table texture units
1231 GLint uniform_lut = glGetUniformLocation(shader->shader.handle, "lighting_lut");
1232 if (uniform_lut != -1) {
1233 glUniform1i(uniform_lut, TextureUnits::LightingLUT.id);
1234 }
1235
1236 GLint uniform_fog_lut = glGetUniformLocation(shader->shader.handle, "fog_lut");
1237 if (uniform_fog_lut != -1) {
1238 glUniform1i(uniform_fog_lut, TextureUnits::FogLUT.id);
1239 }
1240
1241 GLint uniform_proctex_noise_lut =
1242 glGetUniformLocation(shader->shader.handle, "proctex_noise_lut");
1243 if (uniform_proctex_noise_lut != -1) {
1244 glUniform1i(uniform_proctex_noise_lut, TextureUnits::ProcTexNoiseLUT.id);
1245 }
1246
1247 GLint uniform_proctex_color_map =
1248 glGetUniformLocation(shader->shader.handle, "proctex_color_map");
1249 if (uniform_proctex_color_map != -1) {
1250 glUniform1i(uniform_proctex_color_map, TextureUnits::ProcTexColorMap.id);
1251 }
1252
1253 GLint uniform_proctex_alpha_map =
1254 glGetUniformLocation(shader->shader.handle, "proctex_alpha_map");
1255 if (uniform_proctex_alpha_map != -1) {
1256 glUniform1i(uniform_proctex_alpha_map, TextureUnits::ProcTexAlphaMap.id);
1257 }
1258
1259 GLint uniform_proctex_lut = glGetUniformLocation(shader->shader.handle, "proctex_lut");
1260 if (uniform_proctex_lut != -1) {
1261 glUniform1i(uniform_proctex_lut, TextureUnits::ProcTexLUT.id);
1262 }
1263
1264 GLint uniform_proctex_diff_lut =
1265 glGetUniformLocation(shader->shader.handle, "proctex_diff_lut");
1266 if (uniform_proctex_diff_lut != -1) {
1267 glUniform1i(uniform_proctex_diff_lut, TextureUnits::ProcTexDiffLUT.id);
1268 }
1269
1270 current_shader = shader_cache.emplace(config, std::move(shader)).first->second.get();
1271
1272 GLuint block_index = glGetUniformBlockIndex(current_shader->shader.handle, "shader_data");
1273 if (block_index != GL_INVALID_INDEX) {
1274 GLint block_size;
1275 glGetActiveUniformBlockiv(current_shader->shader.handle, block_index,
1276 GL_UNIFORM_BLOCK_DATA_SIZE, &block_size);
1277 ASSERT_MSG(block_size == sizeof(UniformData),
1278 "Uniform block size did not match! Got %d, expected %zu",
1279 static_cast<int>(block_size), sizeof(UniformData));
1280 glUniformBlockBinding(current_shader->shader.handle, block_index, 0);
1281
1282 // Update uniforms
1283 SyncDepthScale();
1284 SyncDepthOffset();
1285 SyncAlphaTest();
1286 SyncCombinerColor();
1287 auto& tev_stages = Pica::g_state.regs.texturing.GetTevStages();
1288 for (int index = 0; index < tev_stages.size(); ++index)
1289 SyncTevConstColor(index, tev_stages[index]);
1290
1291 SyncGlobalAmbient();
1292 for (int light_index = 0; light_index < 8; light_index++) {
1293 SyncLightSpecular0(light_index);
1294 SyncLightSpecular1(light_index);
1295 SyncLightDiffuse(light_index);
1296 SyncLightAmbient(light_index);
1297 SyncLightPosition(light_index);
1298 SyncLightDistanceAttenuationBias(light_index);
1299 SyncLightDistanceAttenuationScale(light_index);
1300 }
1301
1302 SyncFogColor();
1303 SyncProcTexNoise();
1304 }
1305 }
1306}
1307
1308void RasterizerOpenGL::SyncClipEnabled() {
1309 state.clip_distance[1] = Pica::g_state.regs.rasterizer.clip_enable != 0;
1310}
1311
1312void RasterizerOpenGL::SyncClipCoef() {
1313 const auto raw_clip_coef = Pica::g_state.regs.rasterizer.GetClipCoef();
1314 const GLvec4 new_clip_coef = {raw_clip_coef.x.ToFloat32(), raw_clip_coef.y.ToFloat32(),
1315 raw_clip_coef.z.ToFloat32(), raw_clip_coef.w.ToFloat32()};
1316 if (new_clip_coef != uniform_block_data.data.clip_coef) {
1317 uniform_block_data.data.clip_coef = new_clip_coef;
1318 uniform_block_data.dirty = true;
1319 }
1320}
1321
1322void RasterizerOpenGL::SyncCullMode() {
1323 const auto& regs = Pica::g_state.regs;
1324
1325 switch (regs.rasterizer.cull_mode) {
1326 case Pica::RasterizerRegs::CullMode::KeepAll:
1327 state.cull.enabled = false;
1328 break;
1329
1330 case Pica::RasterizerRegs::CullMode::KeepClockWise:
1331 state.cull.enabled = true;
1332 state.cull.front_face = GL_CW;
1333 break;
1334
1335 case Pica::RasterizerRegs::CullMode::KeepCounterClockWise:
1336 state.cull.enabled = true;
1337 state.cull.front_face = GL_CCW;
1338 break;
1339
1340 default:
1341 LOG_CRITICAL(Render_OpenGL, "Unknown cull mode %d", regs.rasterizer.cull_mode.Value());
1342 UNIMPLEMENTED();
1343 break;
1344 }
1345}
1346
1347void RasterizerOpenGL::SyncDepthScale() {
1348 float depth_scale =
1349 Pica::float24::FromRaw(Pica::g_state.regs.rasterizer.viewport_depth_range).ToFloat32();
1350 if (depth_scale != uniform_block_data.data.depth_scale) {
1351 uniform_block_data.data.depth_scale = depth_scale;
1352 uniform_block_data.dirty = true;
1353 }
1354}
1355
1356void RasterizerOpenGL::SyncDepthOffset() {
1357 float depth_offset =
1358 Pica::float24::FromRaw(Pica::g_state.regs.rasterizer.viewport_depth_near_plane).ToFloat32();
1359 if (depth_offset != uniform_block_data.data.depth_offset) {
1360 uniform_block_data.data.depth_offset = depth_offset;
1361 uniform_block_data.dirty = true;
1362 }
1363}
1364
1365void RasterizerOpenGL::SyncBlendEnabled() {
1366 state.blend.enabled = (Pica::g_state.regs.framebuffer.output_merger.alphablend_enable == 1);
1367}
1368
1369void RasterizerOpenGL::SyncBlendFuncs() {
1370 const auto& regs = Pica::g_state.regs;
1371 state.blend.rgb_equation =
1372 PicaToGL::BlendEquation(regs.framebuffer.output_merger.alpha_blending.blend_equation_rgb);
1373 state.blend.a_equation =
1374 PicaToGL::BlendEquation(regs.framebuffer.output_merger.alpha_blending.blend_equation_a);
1375 state.blend.src_rgb_func =
1376 PicaToGL::BlendFunc(regs.framebuffer.output_merger.alpha_blending.factor_source_rgb);
1377 state.blend.dst_rgb_func =
1378 PicaToGL::BlendFunc(regs.framebuffer.output_merger.alpha_blending.factor_dest_rgb);
1379 state.blend.src_a_func =
1380 PicaToGL::BlendFunc(regs.framebuffer.output_merger.alpha_blending.factor_source_a);
1381 state.blend.dst_a_func =
1382 PicaToGL::BlendFunc(regs.framebuffer.output_merger.alpha_blending.factor_dest_a);
1383}
1384
1385void RasterizerOpenGL::SyncBlendColor() {
1386 auto blend_color =
1387 PicaToGL::ColorRGBA8(Pica::g_state.regs.framebuffer.output_merger.blend_const.raw);
1388 state.blend.color.red = blend_color[0];
1389 state.blend.color.green = blend_color[1];
1390 state.blend.color.blue = blend_color[2];
1391 state.blend.color.alpha = blend_color[3];
1392}
1393
1394void RasterizerOpenGL::SyncFogColor() {
1395 const auto& regs = Pica::g_state.regs;
1396 uniform_block_data.data.fog_color = {
1397 regs.texturing.fog_color.r.Value() / 255.0f, regs.texturing.fog_color.g.Value() / 255.0f,
1398 regs.texturing.fog_color.b.Value() / 255.0f,
1399 };
1400 uniform_block_data.dirty = true;
1401}
1402
1403void RasterizerOpenGL::SyncFogLUT() {
1404 std::array<GLvec2, 128> new_data;
1405
1406 std::transform(Pica::g_state.fog.lut.begin(), Pica::g_state.fog.lut.end(), new_data.begin(),
1407 [](const auto& entry) {
1408 return GLvec2{entry.ToFloat(), entry.DiffToFloat()};
1409 });
1410
1411 if (new_data != fog_lut_data) {
1412 fog_lut_data = new_data;
1413 glBindBuffer(GL_TEXTURE_BUFFER, fog_lut_buffer.handle);
1414 glBufferSubData(GL_TEXTURE_BUFFER, 0, new_data.size() * sizeof(GLvec2), new_data.data());
1415 }
1416}
1417
1418void RasterizerOpenGL::SyncProcTexNoise() {
1419 const auto& regs = Pica::g_state.regs.texturing;
1420 uniform_block_data.data.proctex_noise_f = {
1421 Pica::float16::FromRaw(regs.proctex_noise_frequency.u).ToFloat32(),
1422 Pica::float16::FromRaw(regs.proctex_noise_frequency.v).ToFloat32(),
1423 };
1424 uniform_block_data.data.proctex_noise_a = {
1425 regs.proctex_noise_u.amplitude / 4095.0f, regs.proctex_noise_v.amplitude / 4095.0f,
1426 };
1427 uniform_block_data.data.proctex_noise_p = {
1428 Pica::float16::FromRaw(regs.proctex_noise_u.phase).ToFloat32(),
1429 Pica::float16::FromRaw(regs.proctex_noise_v.phase).ToFloat32(),
1430 };
1431
1432 uniform_block_data.dirty = true;
1433}
1434
1435// helper function for SyncProcTexNoiseLUT/ColorMap/AlphaMap
1436static void SyncProcTexValueLUT(const std::array<Pica::State::ProcTex::ValueEntry, 128>& lut,
1437 std::array<GLvec2, 128>& lut_data, GLuint buffer) {
1438 std::array<GLvec2, 128> new_data;
1439 std::transform(lut.begin(), lut.end(), new_data.begin(), [](const auto& entry) {
1440 return GLvec2{entry.ToFloat(), entry.DiffToFloat()};
1441 });
1442
1443 if (new_data != lut_data) {
1444 lut_data = new_data;
1445 glBindBuffer(GL_TEXTURE_BUFFER, buffer);
1446 glBufferSubData(GL_TEXTURE_BUFFER, 0, new_data.size() * sizeof(GLvec2), new_data.data());
1447 }
1448}
1449
1450void RasterizerOpenGL::SyncProcTexNoiseLUT() {
1451 SyncProcTexValueLUT(Pica::g_state.proctex.noise_table, proctex_noise_lut_data,
1452 proctex_noise_lut_buffer.handle);
1453}
1454
1455void RasterizerOpenGL::SyncProcTexColorMap() {
1456 SyncProcTexValueLUT(Pica::g_state.proctex.color_map_table, proctex_color_map_data,
1457 proctex_color_map_buffer.handle);
1458}
1459
1460void RasterizerOpenGL::SyncProcTexAlphaMap() {
1461 SyncProcTexValueLUT(Pica::g_state.proctex.alpha_map_table, proctex_alpha_map_data,
1462 proctex_alpha_map_buffer.handle);
1463}
1464
1465void RasterizerOpenGL::SyncProcTexLUT() {
1466 std::array<GLvec4, 256> new_data;
1467
1468 std::transform(Pica::g_state.proctex.color_table.begin(),
1469 Pica::g_state.proctex.color_table.end(), new_data.begin(),
1470 [](const auto& entry) {
1471 auto rgba = entry.ToVector() / 255.0f;
1472 return GLvec4{rgba.r(), rgba.g(), rgba.b(), rgba.a()};
1473 });
1474
1475 if (new_data != proctex_lut_data) {
1476 proctex_lut_data = new_data;
1477 glBindBuffer(GL_TEXTURE_BUFFER, proctex_lut_buffer.handle);
1478 glBufferSubData(GL_TEXTURE_BUFFER, 0, new_data.size() * sizeof(GLvec4), new_data.data());
1479 }
1480}
1481
1482void RasterizerOpenGL::SyncProcTexDiffLUT() {
1483 std::array<GLvec4, 256> new_data;
1484
1485 std::transform(Pica::g_state.proctex.color_diff_table.begin(),
1486 Pica::g_state.proctex.color_diff_table.end(), new_data.begin(),
1487 [](const auto& entry) {
1488 auto rgba = entry.ToVector() / 255.0f;
1489 return GLvec4{rgba.r(), rgba.g(), rgba.b(), rgba.a()};
1490 });
1491
1492 if (new_data != proctex_diff_lut_data) {
1493 proctex_diff_lut_data = new_data;
1494 glBindBuffer(GL_TEXTURE_BUFFER, proctex_diff_lut_buffer.handle);
1495 glBufferSubData(GL_TEXTURE_BUFFER, 0, new_data.size() * sizeof(GLvec4), new_data.data());
1496 }
1497}
1498
1499void RasterizerOpenGL::SyncAlphaTest() {
1500 const auto& regs = Pica::g_state.regs;
1501 if (regs.framebuffer.output_merger.alpha_test.ref != uniform_block_data.data.alphatest_ref) {
1502 uniform_block_data.data.alphatest_ref = regs.framebuffer.output_merger.alpha_test.ref;
1503 uniform_block_data.dirty = true;
1504 }
1505}
1506
1507void RasterizerOpenGL::SyncLogicOp() {
1508 state.logic_op = PicaToGL::LogicOp(Pica::g_state.regs.framebuffer.output_merger.logic_op);
1509}
1510
1511void RasterizerOpenGL::SyncColorWriteMask() {
1512 const auto& regs = Pica::g_state.regs;
1513
1514 auto IsColorWriteEnabled = [&](u32 value) {
1515 return (regs.framebuffer.framebuffer.allow_color_write != 0 && value != 0) ? GL_TRUE
1516 : GL_FALSE;
1517 };
1518
1519 state.color_mask.red_enabled = IsColorWriteEnabled(regs.framebuffer.output_merger.red_enable);
1520 state.color_mask.green_enabled =
1521 IsColorWriteEnabled(regs.framebuffer.output_merger.green_enable);
1522 state.color_mask.blue_enabled = IsColorWriteEnabled(regs.framebuffer.output_merger.blue_enable);
1523 state.color_mask.alpha_enabled =
1524 IsColorWriteEnabled(regs.framebuffer.output_merger.alpha_enable);
1525}
1526
1527void RasterizerOpenGL::SyncStencilWriteMask() {
1528 const auto& regs = Pica::g_state.regs;
1529 state.stencil.write_mask =
1530 (regs.framebuffer.framebuffer.allow_depth_stencil_write != 0)
1531 ? static_cast<GLuint>(regs.framebuffer.output_merger.stencil_test.write_mask)
1532 : 0;
1533}
1534
1535void RasterizerOpenGL::SyncDepthWriteMask() {
1536 const auto& regs = Pica::g_state.regs;
1537 state.depth.write_mask = (regs.framebuffer.framebuffer.allow_depth_stencil_write != 0 &&
1538 regs.framebuffer.output_merger.depth_write_enable)
1539 ? GL_TRUE
1540 : GL_FALSE;
1541}
1542
1543void RasterizerOpenGL::SyncStencilTest() {
1544 const auto& regs = Pica::g_state.regs;
1545 state.stencil.test_enabled =
1546 regs.framebuffer.output_merger.stencil_test.enable &&
1547 regs.framebuffer.framebuffer.depth_format == Pica::FramebufferRegs::DepthFormat::D24S8;
1548 state.stencil.test_func =
1549 PicaToGL::CompareFunc(regs.framebuffer.output_merger.stencil_test.func);
1550 state.stencil.test_ref = regs.framebuffer.output_merger.stencil_test.reference_value;
1551 state.stencil.test_mask = regs.framebuffer.output_merger.stencil_test.input_mask;
1552 state.stencil.action_stencil_fail =
1553 PicaToGL::StencilOp(regs.framebuffer.output_merger.stencil_test.action_stencil_fail);
1554 state.stencil.action_depth_fail =
1555 PicaToGL::StencilOp(regs.framebuffer.output_merger.stencil_test.action_depth_fail);
1556 state.stencil.action_depth_pass =
1557 PicaToGL::StencilOp(regs.framebuffer.output_merger.stencil_test.action_depth_pass);
1558}
1559
1560void RasterizerOpenGL::SyncDepthTest() {
1561 const auto& regs = Pica::g_state.regs;
1562 state.depth.test_enabled = regs.framebuffer.output_merger.depth_test_enable == 1 ||
1563 regs.framebuffer.output_merger.depth_write_enable == 1;
1564 state.depth.test_func =
1565 regs.framebuffer.output_merger.depth_test_enable == 1
1566 ? PicaToGL::CompareFunc(regs.framebuffer.output_merger.depth_test_func)
1567 : GL_ALWAYS;
1568}
1569
1570void RasterizerOpenGL::SyncCombinerColor() {
1571 auto combiner_color =
1572 PicaToGL::ColorRGBA8(Pica::g_state.regs.texturing.tev_combiner_buffer_color.raw);
1573 if (combiner_color != uniform_block_data.data.tev_combiner_buffer_color) {
1574 uniform_block_data.data.tev_combiner_buffer_color = combiner_color;
1575 uniform_block_data.dirty = true;
1576 }
1577}
1578
1579void RasterizerOpenGL::SyncTevConstColor(int stage_index,
1580 const Pica::TexturingRegs::TevStageConfig& tev_stage) {
1581 auto const_color = PicaToGL::ColorRGBA8(tev_stage.const_color);
1582 if (const_color != uniform_block_data.data.const_color[stage_index]) {
1583 uniform_block_data.data.const_color[stage_index] = const_color;
1584 uniform_block_data.dirty = true;
1585 }
1586}
1587
1588void RasterizerOpenGL::SyncGlobalAmbient() {
1589 auto color = PicaToGL::LightColor(Pica::g_state.regs.lighting.global_ambient);
1590 if (color != uniform_block_data.data.lighting_global_ambient) {
1591 uniform_block_data.data.lighting_global_ambient = color;
1592 uniform_block_data.dirty = true;
1593 }
1594}
1595
1596void RasterizerOpenGL::SyncLightingLUT(unsigned lut_index) {
1597 std::array<GLvec2, 256> new_data;
1598 const auto& source_lut = Pica::g_state.lighting.luts[lut_index];
1599 std::transform(source_lut.begin(), source_lut.end(), new_data.begin(), [](const auto& entry) {
1600 return GLvec2{entry.ToFloat(), entry.DiffToFloat()};
1601 });
1602
1603 if (new_data != lighting_lut_data[lut_index]) {
1604 lighting_lut_data[lut_index] = new_data;
1605 glBindBuffer(GL_TEXTURE_BUFFER, lighting_lut_buffer.handle);
1606 glBufferSubData(GL_TEXTURE_BUFFER, lut_index * new_data.size() * sizeof(GLvec2),
1607 new_data.size() * sizeof(GLvec2), new_data.data());
1608 }
1609}
1610
1611void RasterizerOpenGL::SyncLightSpecular0(int light_index) {
1612 auto color = PicaToGL::LightColor(Pica::g_state.regs.lighting.light[light_index].specular_0);
1613 if (color != uniform_block_data.data.light_src[light_index].specular_0) {
1614 uniform_block_data.data.light_src[light_index].specular_0 = color;
1615 uniform_block_data.dirty = true;
1616 }
1617}
1618
1619void RasterizerOpenGL::SyncLightSpecular1(int light_index) {
1620 auto color = PicaToGL::LightColor(Pica::g_state.regs.lighting.light[light_index].specular_1);
1621 if (color != uniform_block_data.data.light_src[light_index].specular_1) {
1622 uniform_block_data.data.light_src[light_index].specular_1 = color;
1623 uniform_block_data.dirty = true;
1624 }
1625}
1626
1627void RasterizerOpenGL::SyncLightDiffuse(int light_index) {
1628 auto color = PicaToGL::LightColor(Pica::g_state.regs.lighting.light[light_index].diffuse);
1629 if (color != uniform_block_data.data.light_src[light_index].diffuse) {
1630 uniform_block_data.data.light_src[light_index].diffuse = color;
1631 uniform_block_data.dirty = true;
1632 }
1633}
1634
1635void RasterizerOpenGL::SyncLightAmbient(int light_index) {
1636 auto color = PicaToGL::LightColor(Pica::g_state.regs.lighting.light[light_index].ambient);
1637 if (color != uniform_block_data.data.light_src[light_index].ambient) {
1638 uniform_block_data.data.light_src[light_index].ambient = color;
1639 uniform_block_data.dirty = true;
1640 }
1641}
1642
1643void RasterizerOpenGL::SyncLightPosition(int light_index) {
1644 GLvec3 position = {
1645 Pica::float16::FromRaw(Pica::g_state.regs.lighting.light[light_index].x).ToFloat32(),
1646 Pica::float16::FromRaw(Pica::g_state.regs.lighting.light[light_index].y).ToFloat32(),
1647 Pica::float16::FromRaw(Pica::g_state.regs.lighting.light[light_index].z).ToFloat32()};
1648
1649 if (position != uniform_block_data.data.light_src[light_index].position) {
1650 uniform_block_data.data.light_src[light_index].position = position;
1651 uniform_block_data.dirty = true;
1652 }
1653}
1654
1655void RasterizerOpenGL::SyncLightSpotDirection(int light_index) {
1656 const auto& light = Pica::g_state.regs.lighting.light[light_index];
1657 GLvec3 spot_direction = {light.spot_x / 2047.0f, light.spot_y / 2047.0f,
1658 light.spot_z / 2047.0f};
1659
1660 if (spot_direction != uniform_block_data.data.light_src[light_index].spot_direction) {
1661 uniform_block_data.data.light_src[light_index].spot_direction = spot_direction;
1662 uniform_block_data.dirty = true;
1663 }
1664}
1665
1666void RasterizerOpenGL::SyncLightDistanceAttenuationBias(int light_index) {
1667 GLfloat dist_atten_bias =
1668 Pica::float20::FromRaw(Pica::g_state.regs.lighting.light[light_index].dist_atten_bias)
1669 .ToFloat32();
1670
1671 if (dist_atten_bias != uniform_block_data.data.light_src[light_index].dist_atten_bias) {
1672 uniform_block_data.data.light_src[light_index].dist_atten_bias = dist_atten_bias;
1673 uniform_block_data.dirty = true;
1674 }
1675}
1676
1677void RasterizerOpenGL::SyncLightDistanceAttenuationScale(int light_index) {
1678 GLfloat dist_atten_scale =
1679 Pica::float20::FromRaw(Pica::g_state.regs.lighting.light[light_index].dist_atten_scale)
1680 .ToFloat32();
1681
1682 if (dist_atten_scale != uniform_block_data.data.light_src[light_index].dist_atten_scale) {
1683 uniform_block_data.data.light_src[light_index].dist_atten_scale = dist_atten_scale;
1684 uniform_block_data.dirty = true;
1685 }
1686}
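Every Sync* routine above follows the same compare-and-mark pattern: convert the PICA register into its GL-side representation, store it in the CPU copy of the uniform block only if it changed, and set uniform_block_data.dirty so the UBO is re-uploaded once at the next draw rather than on every register write. A condensed sketch of that pattern (the helper name is hypothetical, not part of the original file):

    // Hypothetical helper capturing the shared shape of the Sync* functions.
    template <typename T>
    static void SetUniformIfChanged(T& cached, const T& fresh, bool& dirty) {
        if (cached != fresh) {
            cached = fresh;
            dirty = true; // the actual buffer upload is deferred until draw time
        }
    }

    // e.g. SyncLightDiffuse(light_index) could then be written as:
    // SetUniformIfChanged(uniform_block_data.data.light_src[light_index].diffuse,
    //                     PicaToGL::LightColor(Pica::g_state.regs.lighting.light[light_index].diffuse),
    //                     uniform_block_data.dirty);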
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
deleted file mode 100644
index d02c157e8..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ /dev/null
@@ -1,316 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include <cstring>
10#include <memory>
11#include <unordered_map>
12#include <vector>
13#include <glad/glad.h>
14#include "common/bit_field.h"
15#include "common/common_types.h"
16#include "common/hash.h"
17#include "common/vector_math.h"
18#include "core/hw/gpu.h"
19#include "video_core/pica_state.h"
20#include "video_core/pica_types.h"
21#include "video_core/rasterizer_interface.h"
22#include "video_core/regs_framebuffer.h"
23#include "video_core/regs_lighting.h"
24#include "video_core/regs_rasterizer.h"
25#include "video_core/regs_texturing.h"
26#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
27#include "video_core/renderer_opengl/gl_resource_manager.h"
28#include "video_core/renderer_opengl/gl_shader_gen.h"
29#include "video_core/renderer_opengl/gl_state.h"
30#include "video_core/renderer_opengl/pica_to_gl.h"
31#include "video_core/shader/shader.h"
32
33struct ScreenInfo;
34
35class RasterizerOpenGL : public VideoCore::RasterizerInterface {
36public:
37 RasterizerOpenGL();
38 ~RasterizerOpenGL() override;
39
40 void AddTriangle(const Pica::Shader::OutputVertex& v0, const Pica::Shader::OutputVertex& v1,
41 const Pica::Shader::OutputVertex& v2) override;
42 void DrawTriangles() override;
43 void NotifyPicaRegisterChanged(u32 id) override;
44 void FlushAll() override;
45 void FlushRegion(PAddr addr, u64 size) override;
46 void FlushAndInvalidateRegion(PAddr addr, u64 size) override;
47 bool AccelerateDisplayTransfer(const GPU::Regs::DisplayTransferConfig& config) override;
48 bool AccelerateTextureCopy(const GPU::Regs::DisplayTransferConfig& config) override;
49 bool AccelerateFill(const GPU::Regs::MemoryFillConfig& config) override;
50 bool AccelerateDisplay(const GPU::Regs::FramebufferConfig& config, PAddr framebuffer_addr,
51 u32 pixel_stride, ScreenInfo& screen_info) override;
52
53 /// OpenGL shader generated for a given Pica register state
54 struct PicaShader {
55 /// OpenGL shader resource
56 OGLShader shader;
57 };
58
59private:
60 struct SamplerInfo {
61 using TextureConfig = Pica::TexturingRegs::TextureConfig;
62
63 OGLSampler sampler;
64
65 /// Creates the sampler object, initializing its state so that it's in sync with the
66 /// SamplerInfo struct.
67 void Create();
68 /// Syncs the sampler object with the config, updating any necessary state.
69 void SyncWithConfig(const TextureConfig& config);
70
71 private:
72 TextureConfig::TextureFilter mag_filter;
73 TextureConfig::TextureFilter min_filter;
74 TextureConfig::WrapMode wrap_s;
75 TextureConfig::WrapMode wrap_t;
76 u32 border_color;
77 };
78
79    /// Structure that hardware-rendered vertices are composed of
80 struct HardwareVertex {
81 HardwareVertex(const Pica::Shader::OutputVertex& v, bool flip_quaternion) {
82 position[0] = v.pos.x.ToFloat32();
83 position[1] = v.pos.y.ToFloat32();
84 position[2] = v.pos.z.ToFloat32();
85 position[3] = v.pos.w.ToFloat32();
86 color[0] = v.color.x.ToFloat32();
87 color[1] = v.color.y.ToFloat32();
88 color[2] = v.color.z.ToFloat32();
89 color[3] = v.color.w.ToFloat32();
90 tex_coord0[0] = v.tc0.x.ToFloat32();
91 tex_coord0[1] = v.tc0.y.ToFloat32();
92 tex_coord1[0] = v.tc1.x.ToFloat32();
93 tex_coord1[1] = v.tc1.y.ToFloat32();
94 tex_coord2[0] = v.tc2.x.ToFloat32();
95 tex_coord2[1] = v.tc2.y.ToFloat32();
96 tex_coord0_w = v.tc0_w.ToFloat32();
97 normquat[0] = v.quat.x.ToFloat32();
98 normquat[1] = v.quat.y.ToFloat32();
99 normquat[2] = v.quat.z.ToFloat32();
100 normquat[3] = v.quat.w.ToFloat32();
101 view[0] = v.view.x.ToFloat32();
102 view[1] = v.view.y.ToFloat32();
103 view[2] = v.view.z.ToFloat32();
104
105 if (flip_quaternion) {
106 for (float& x : normquat) {
107 x = -x;
108 }
109 }
110 }
111
112 GLfloat position[4];
113 GLfloat color[4];
114 GLfloat tex_coord0[2];
115 GLfloat tex_coord1[2];
116 GLfloat tex_coord2[2];
117 GLfloat tex_coord0_w;
118 GLfloat normquat[4];
119 GLfloat view[3];
120 };
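HardwareVertex doubles as the vertex buffer layout, so each field maps onto one vertex attribute. A hedged sketch of how a single attribute could be bound against this struct (the attribute index 0 is illustrative; the real bindings are configured in the rasterizer's constructor):

    // Illustrative binding of the position field to a vertex attribute.
    glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer.handle);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(HardwareVertex),
                          reinterpret_cast<GLvoid*>(offsetof(HardwareVertex, position)));
    glEnableVertexAttribArray(0);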
121
122 struct LightSrc {
123 alignas(16) GLvec3 specular_0;
124 alignas(16) GLvec3 specular_1;
125 alignas(16) GLvec3 diffuse;
126 alignas(16) GLvec3 ambient;
127 alignas(16) GLvec3 position;
128 alignas(16) GLvec3 spot_direction; // negated
129 GLfloat dist_atten_bias;
130 GLfloat dist_atten_scale;
131 };
132
133    /// Uniform structure for the Uniform Buffer Object; all vectors must be 16-byte aligned
134    // NOTE: Always keep a vec4 at the end. The GL spec is not clear whether the alignment at
135 // the end of a uniform block is included in UNIFORM_BLOCK_DATA_SIZE or not.
136 // Not following that rule will cause problems on some AMD drivers.
137 struct UniformData {
138 alignas(8) GLvec2 framebuffer_scale;
139 GLint alphatest_ref;
140 GLfloat depth_scale;
141 GLfloat depth_offset;
142 GLint scissor_x1;
143 GLint scissor_y1;
144 GLint scissor_x2;
145 GLint scissor_y2;
146 alignas(16) GLvec3 fog_color;
147 alignas(8) GLvec2 proctex_noise_f;
148 alignas(8) GLvec2 proctex_noise_a;
149 alignas(8) GLvec2 proctex_noise_p;
150 alignas(16) GLvec3 lighting_global_ambient;
151 LightSrc light_src[8];
152 alignas(16) GLvec4 const_color[6]; // A vec4 color for each of the six tev stages
153 alignas(16) GLvec4 tev_combiner_buffer_color;
154 alignas(16) GLvec4 clip_coef;
155 };
156
157 static_assert(
158 sizeof(UniformData) == 0x470,
159 "The size of the UniformData structure has changed, update the structure in the shader");
160 static_assert(sizeof(UniformData) < 16384,
161 "UniformData structure must be less than 16kb as per the OpenGL spec");
162
163 /// Syncs the clip enabled status to match the PICA register
164 void SyncClipEnabled();
165
166 /// Syncs the clip coefficients to match the PICA register
167 void SyncClipCoef();
168
169 /// Sets the OpenGL shader in accordance with the current PICA register state
170 void SetShader();
171
172 /// Syncs the cull mode to match the PICA register
173 void SyncCullMode();
174
175 /// Syncs the depth scale to match the PICA register
176 void SyncDepthScale();
177
178 /// Syncs the depth offset to match the PICA register
179 void SyncDepthOffset();
180
181 /// Syncs the blend enabled status to match the PICA register
182 void SyncBlendEnabled();
183
184 /// Syncs the blend functions to match the PICA register
185 void SyncBlendFuncs();
186
187 /// Syncs the blend color to match the PICA register
188 void SyncBlendColor();
189
190 /// Syncs the fog states to match the PICA register
191 void SyncFogColor();
192 void SyncFogLUT();
193
194 /// Sync the procedural texture noise configuration to match the PICA register
195 void SyncProcTexNoise();
196
197 /// Sync the procedural texture lookup tables
198 void SyncProcTexNoiseLUT();
199 void SyncProcTexColorMap();
200 void SyncProcTexAlphaMap();
201 void SyncProcTexLUT();
202 void SyncProcTexDiffLUT();
203
204 /// Syncs the alpha test states to match the PICA register
205 void SyncAlphaTest();
206
207 /// Syncs the logic op states to match the PICA register
208 void SyncLogicOp();
209
210 /// Syncs the color write mask to match the PICA register state
211 void SyncColorWriteMask();
212
213 /// Syncs the stencil write mask to match the PICA register state
214 void SyncStencilWriteMask();
215
216 /// Syncs the depth write mask to match the PICA register state
217 void SyncDepthWriteMask();
218
219 /// Syncs the stencil test states to match the PICA register
220 void SyncStencilTest();
221
222 /// Syncs the depth test states to match the PICA register
223 void SyncDepthTest();
224
225 /// Syncs the TEV combiner color buffer to match the PICA register
226 void SyncCombinerColor();
227
228 /// Syncs the TEV constant color to match the PICA register
229 void SyncTevConstColor(int tev_index, const Pica::TexturingRegs::TevStageConfig& tev_stage);
230
231 /// Syncs the lighting global ambient color to match the PICA register
232 void SyncGlobalAmbient();
233
234 /// Syncs the lighting lookup tables
235 void SyncLightingLUT(unsigned index);
236
237 /// Syncs the specified light's specular 0 color to match the PICA register
238 void SyncLightSpecular0(int light_index);
239
240 /// Syncs the specified light's specular 1 color to match the PICA register
241 void SyncLightSpecular1(int light_index);
242
243 /// Syncs the specified light's diffuse color to match the PICA register
244 void SyncLightDiffuse(int light_index);
245
246 /// Syncs the specified light's ambient color to match the PICA register
247 void SyncLightAmbient(int light_index);
248
249 /// Syncs the specified light's position to match the PICA register
250 void SyncLightPosition(int light_index);
251
252    /// Syncs the specified light's spot direction to match the PICA register
253 void SyncLightSpotDirection(int light_index);
254
255 /// Syncs the specified light's distance attenuation bias to match the PICA register
256 void SyncLightDistanceAttenuationBias(int light_index);
257
258 /// Syncs the specified light's distance attenuation scale to match the PICA register
259 void SyncLightDistanceAttenuationScale(int light_index);
260
261 OpenGLState state;
262
263 RasterizerCacheOpenGL res_cache;
264
265 std::vector<HardwareVertex> vertex_batch;
266
267 std::unordered_map<GLShader::PicaShaderConfig, std::unique_ptr<PicaShader>> shader_cache;
268 const PicaShader* current_shader = nullptr;
269 bool shader_dirty;
270
271 struct {
272 UniformData data;
273 std::array<bool, Pica::LightingRegs::NumLightingSampler> lut_dirty;
274 bool fog_lut_dirty;
275 bool proctex_noise_lut_dirty;
276 bool proctex_color_map_dirty;
277 bool proctex_alpha_map_dirty;
278 bool proctex_lut_dirty;
279 bool proctex_diff_lut_dirty;
280 bool dirty;
281 } uniform_block_data = {};
282
283 std::array<SamplerInfo, 3> texture_samplers;
284 OGLVertexArray vertex_array;
285 OGLBuffer vertex_buffer;
286 OGLBuffer uniform_buffer;
287 OGLFramebuffer framebuffer;
288
289 OGLBuffer lighting_lut_buffer;
290 OGLTexture lighting_lut;
291 std::array<std::array<GLvec2, 256>, Pica::LightingRegs::NumLightingSampler> lighting_lut_data{};
292
293 OGLBuffer fog_lut_buffer;
294 OGLTexture fog_lut;
295 std::array<GLvec2, 128> fog_lut_data{};
296
297 OGLBuffer proctex_noise_lut_buffer;
298 OGLTexture proctex_noise_lut;
299 std::array<GLvec2, 128> proctex_noise_lut_data{};
300
301 OGLBuffer proctex_color_map_buffer;
302 OGLTexture proctex_color_map;
303 std::array<GLvec2, 128> proctex_color_map_data{};
304
305 OGLBuffer proctex_alpha_map_buffer;
306 OGLTexture proctex_alpha_map;
307 std::array<GLvec2, 128> proctex_alpha_map_data{};
308
309 OGLBuffer proctex_lut_buffer;
310 OGLTexture proctex_lut;
311 std::array<GLvec4, 256> proctex_lut_data{};
312
313 OGLBuffer proctex_diff_lut_buffer;
314 OGLTexture proctex_diff_lut;
315 std::array<GLvec4, 256> proctex_diff_lut_data{};
316};
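The members above describe the shader caching scheme: shader_cache maps a PicaShaderConfig (built from the current register state by GLShader::PicaShaderConfig::BuildFromRegs, shown later in this diff) to a compiled program, and current_shader tracks what is bound. A hedged sketch of the lookup SetShader() is expected to perform; the compilation step is elided because it depends on gl_shader_gen/gl_shader_util helpers not shown here, and the OpenGLState field name is assumed:

    // Sketch only: resolve the cached shader for the current Pica register state.
    void RasterizerOpenGL::SetShader() {
        auto config = GLShader::PicaShaderConfig::BuildFromRegs(Pica::g_state.regs);

        auto it = shader_cache.find(config);
        if (it == shader_cache.end()) {
            auto shader = std::make_unique<PicaShader>();
            // ... generate GLSL from `config` and compile/link it into shader->shader ...
            it = shader_cache.emplace(config, std::move(shader)).first;
        }

        if (current_shader != it->second.get()) {
            current_shader = it->second.get();
            state.draw.shader_program = current_shader->shader.handle; // assumed field name
        }
    }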
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
deleted file mode 100644
index f37894e7a..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ /dev/null
@@ -1,799 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <atomic>
7#include <cstring>
8#include <iterator>
9#include <unordered_set>
10#include <utility>
11#include <vector>
12#include <glad/glad.h>
13#include "common/bit_field.h"
14#include "common/logging/log.h"
15#include "common/math_util.h"
16#include "common/microprofile.h"
17#include "common/vector_math.h"
18#include "core/frontend/emu_window.h"
19#include "core/memory.h"
20#include "core/settings.h"
21#include "video_core/pica_state.h"
22#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
23#include "video_core/renderer_opengl/gl_state.h"
24#include "video_core/texture/texture_decode.h"
25#include "video_core/utils.h"
26#include "video_core/video_core.h"
27
28struct FormatTuple {
29 GLint internal_format;
30 GLenum format;
31 GLenum type;
32};
33
34static const std::array<FormatTuple, 5> fb_format_tuples = {{
35 {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8}, // RGBA8
36 {GL_RGB8, GL_BGR, GL_UNSIGNED_BYTE}, // RGB8
37 {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1}, // RGB5A1
38 {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, // RGB565
39 {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4}, // RGBA4
40}};
41
42static const std::array<FormatTuple, 4> depth_format_tuples = {{
43 {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // D16
44 {},
45 {GL_DEPTH_COMPONENT24, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT}, // D24
46 {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // D24S8
47}};
48
49RasterizerCacheOpenGL::RasterizerCacheOpenGL() {
50 transfer_framebuffers[0].Create();
51 transfer_framebuffers[1].Create();
52}
53
54RasterizerCacheOpenGL::~RasterizerCacheOpenGL() {
55 FlushAll();
56}
57
58static void MortonCopyPixels(CachedSurface::PixelFormat pixel_format, u32 width, u32 height,
59 u32 bytes_per_pixel, u32 gl_bytes_per_pixel, u8* morton_data,
60 u8* gl_data, bool morton_to_gl) {
61 using PixelFormat = CachedSurface::PixelFormat;
62
63 u8* data_ptrs[2];
64 u32 depth_stencil_shifts[2] = {24, 8};
65
66 if (morton_to_gl) {
67 std::swap(depth_stencil_shifts[0], depth_stencil_shifts[1]);
68 }
69
70 if (pixel_format == PixelFormat::D24S8) {
71 for (unsigned y = 0; y < height; ++y) {
72 for (unsigned x = 0; x < width; ++x) {
73 const u32 coarse_y = y & ~7;
74 u32 morton_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
75 coarse_y * width * bytes_per_pixel;
76 u32 gl_pixel_index = (x + (height - 1 - y) * width) * gl_bytes_per_pixel;
77
78 data_ptrs[morton_to_gl] = morton_data + morton_offset;
79 data_ptrs[!morton_to_gl] = &gl_data[gl_pixel_index];
80
81                // Swap depth and stencil value ordering, since the 3DS layout does not match OpenGL's
82 u32 depth_stencil;
83 memcpy(&depth_stencil, data_ptrs[1], sizeof(u32));
84 depth_stencil = (depth_stencil << depth_stencil_shifts[0]) |
85 (depth_stencil >> depth_stencil_shifts[1]);
86
87 memcpy(data_ptrs[0], &depth_stencil, sizeof(u32));
88 }
89 }
90 } else {
91 for (unsigned y = 0; y < height; ++y) {
92 for (unsigned x = 0; x < width; ++x) {
93 const u32 coarse_y = y & ~7;
94 u32 morton_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
95 coarse_y * width * bytes_per_pixel;
96 u32 gl_pixel_index = (x + (height - 1 - y) * width) * gl_bytes_per_pixel;
97
98 data_ptrs[morton_to_gl] = morton_data + morton_offset;
99 data_ptrs[!morton_to_gl] = &gl_data[gl_pixel_index];
100
101 memcpy(data_ptrs[0], data_ptrs[1], bytes_per_pixel);
102 }
103 }
104 }
105}
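MortonCopyPixels leaves the tiled-address math to VideoCore::GetMortonOffset: 3DS surfaces are stored as 8x8 tiles whose interior follows a Z-order (Morton) curve, and the caller adds the coarse row term separately. For illustration only, a sketch of what such an offset computation looks like (the real helper lives in video_core/utils.h and may differ in detail):

    // Z-order interleave of the low 3 bits of x and y, i.e. the pixel's index inside an 8x8 tile.
    static u32 MortonInterleave8x8(u32 x, u32 y) {
        u32 i = 0;
        for (u32 bit = 0; bit < 3; ++bit) {
            i |= ((x >> bit) & 1u) << (2 * bit);     // x bits occupy the even positions
            i |= ((y >> bit) & 1u) << (2 * bit + 1); // y bits occupy the odd positions
        }
        return i;
    }

    // Byte offset of (x, y) within its row of tiles; note that the caller above adds
    // coarse_y * width * bytes_per_pixel on top of this.
    static u32 GetMortonOffsetSketch(u32 x, u32 y, u32 bytes_per_pixel) {
        const u32 coarse_x = x & ~7u;                  // first column of this 8x8 tile
        const u32 i = MortonInterleave8x8(x & 7u, y & 7u);
        return (i + coarse_x * 8u) * bytes_per_pixel;  // each tile spans 8 rows
    }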
106
107void RasterizerCacheOpenGL::BlitTextures(GLuint src_tex, GLuint dst_tex,
108 CachedSurface::SurfaceType type,
109 const MathUtil::Rectangle<int>& src_rect,
110 const MathUtil::Rectangle<int>& dst_rect) {
111 using SurfaceType = CachedSurface::SurfaceType;
112
113 OpenGLState cur_state = OpenGLState::GetCurState();
114
115    // Make sure textures aren't bound to texture units, since we're about to bind them to
116    // framebuffer components
117 OpenGLState::ResetTexture(src_tex);
118 OpenGLState::ResetTexture(dst_tex);
119
120 // Keep track of previous framebuffer bindings
121 GLuint old_fbs[2] = {cur_state.draw.read_framebuffer, cur_state.draw.draw_framebuffer};
122 cur_state.draw.read_framebuffer = transfer_framebuffers[0].handle;
123 cur_state.draw.draw_framebuffer = transfer_framebuffers[1].handle;
124 cur_state.Apply();
125
126 u32 buffers = 0;
127
128 if (type == SurfaceType::Color || type == SurfaceType::Texture) {
129 glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, src_tex,
130 0);
131 glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
132 0);
133
134 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, dst_tex,
135 0);
136 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
137 0);
138
139 buffers = GL_COLOR_BUFFER_BIT;
140 } else if (type == SurfaceType::Depth) {
141 glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
142 glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, src_tex, 0);
143 glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
144
145 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
146 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, dst_tex, 0);
147 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
148
149 buffers = GL_DEPTH_BUFFER_BIT;
150 } else if (type == SurfaceType::DepthStencil) {
151 glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
152 glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
153 src_tex, 0);
154
155 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
156 glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
157 dst_tex, 0);
158
159 buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
160 }
161
162 glBlitFramebuffer(src_rect.left, src_rect.top, src_rect.right, src_rect.bottom, dst_rect.left,
163 dst_rect.top, dst_rect.right, dst_rect.bottom, buffers,
164 buffers == GL_COLOR_BUFFER_BIT ? GL_LINEAR : GL_NEAREST);
165
166 // Restore previous framebuffer bindings
167 cur_state.draw.read_framebuffer = old_fbs[0];
168 cur_state.draw.draw_framebuffer = old_fbs[1];
169 cur_state.Apply();
170}
171
172bool RasterizerCacheOpenGL::TryBlitSurfaces(CachedSurface* src_surface,
173 const MathUtil::Rectangle<int>& src_rect,
174 CachedSurface* dst_surface,
175 const MathUtil::Rectangle<int>& dst_rect) {
176
177 if (!CachedSurface::CheckFormatsBlittable(src_surface->pixel_format,
178 dst_surface->pixel_format)) {
179 return false;
180 }
181
182 BlitTextures(src_surface->texture.handle, dst_surface->texture.handle,
183 CachedSurface::GetFormatType(src_surface->pixel_format), src_rect, dst_rect);
184 return true;
185}
186
187static void AllocateSurfaceTexture(GLuint texture, CachedSurface::PixelFormat pixel_format,
188 u32 width, u32 height) {
189 // Allocate an uninitialized texture of appropriate size and format for the surface
190 using SurfaceType = CachedSurface::SurfaceType;
191
192 OpenGLState cur_state = OpenGLState::GetCurState();
193
194 // Keep track of previous texture bindings
195 GLuint old_tex = cur_state.texture_units[0].texture_2d;
196 cur_state.texture_units[0].texture_2d = texture;
197 cur_state.Apply();
198 glActiveTexture(GL_TEXTURE0);
199
200 SurfaceType type = CachedSurface::GetFormatType(pixel_format);
201
202 FormatTuple tuple;
203 if (type == SurfaceType::Color) {
204 ASSERT((size_t)pixel_format < fb_format_tuples.size());
205 tuple = fb_format_tuples[(unsigned int)pixel_format];
206 } else if (type == SurfaceType::Depth || type == SurfaceType::DepthStencil) {
207 size_t tuple_idx = (size_t)pixel_format - 14;
208 ASSERT(tuple_idx < depth_format_tuples.size());
209 tuple = depth_format_tuples[tuple_idx];
210 } else {
211 tuple = {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE};
212 }
213
214 glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, width, height, 0, tuple.format,
215 tuple.type, nullptr);
216
217 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
218 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
219 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
220 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
221
222 // Restore previous texture bindings
223 cur_state.texture_units[0].texture_2d = old_tex;
224 cur_state.Apply();
225}
226
227MICROPROFILE_DEFINE(OpenGL_SurfaceUpload, "OpenGL", "Surface Upload", MP_RGB(128, 64, 192));
228CachedSurface* RasterizerCacheOpenGL::GetSurface(const CachedSurface& params, bool match_res_scale,
229 bool load_if_create) {
230 using PixelFormat = CachedSurface::PixelFormat;
231 using SurfaceType = CachedSurface::SurfaceType;
232
233 if (params.addr == 0) {
234 return nullptr;
235 }
236
237 u32 params_size =
238 params.width * params.height * CachedSurface::GetFormatBpp(params.pixel_format) / 8;
239
240 // Check for an exact match in existing surfaces
241 CachedSurface* best_exact_surface = nullptr;
242 float exact_surface_goodness = -1.f;
243
244 auto surface_interval =
245 boost::icl::interval<PAddr>::right_open(params.addr, params.addr + params_size);
246 auto range = surface_cache.equal_range(surface_interval);
247 for (auto it = range.first; it != range.second; ++it) {
248 for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
249 CachedSurface* surface = it2->get();
250
251 // Check if the request matches the surface exactly
252 if (params.addr == surface->addr && params.width == surface->width &&
253 params.height == surface->height && params.pixel_format == surface->pixel_format) {
254 // Make sure optional param-matching criteria are fulfilled
255 bool tiling_match = (params.is_tiled == surface->is_tiled);
256 bool res_scale_match = (params.res_scale_width == surface->res_scale_width &&
257 params.res_scale_height == surface->res_scale_height);
258 if (!match_res_scale || res_scale_match) {
259 // Prioritize same-tiling and highest resolution surfaces
260 float match_goodness =
261 (float)tiling_match + surface->res_scale_width * surface->res_scale_height;
262 if (match_goodness > exact_surface_goodness || surface->dirty) {
263 exact_surface_goodness = match_goodness;
264 best_exact_surface = surface;
265 }
266 }
267 }
268 }
269 }
270
271 // Return the best exact surface if found
272 if (best_exact_surface != nullptr) {
273 return best_exact_surface;
274 }
275
276 // No matching surfaces found, so create a new one
277 u8* texture_src_data = Memory::GetPhysicalPointer(params.addr);
278 if (texture_src_data == nullptr) {
279 return nullptr;
280 }
281
282 MICROPROFILE_SCOPE(OpenGL_SurfaceUpload);
283
284 // Stride only applies to linear images.
285 ASSERT(params.pixel_stride == 0 || !params.is_tiled);
286
287 std::shared_ptr<CachedSurface> new_surface = std::make_shared<CachedSurface>();
288
289 new_surface->addr = params.addr;
290 new_surface->size = params_size;
291
292 new_surface->texture.Create();
293 new_surface->width = params.width;
294 new_surface->height = params.height;
295 new_surface->pixel_stride = params.pixel_stride;
296 new_surface->res_scale_width = params.res_scale_width;
297 new_surface->res_scale_height = params.res_scale_height;
298
299 new_surface->is_tiled = params.is_tiled;
300 new_surface->pixel_format = params.pixel_format;
301 new_surface->dirty = false;
302
303 if (!load_if_create) {
304 // Don't load any data; just allocate the surface's texture
305 AllocateSurfaceTexture(new_surface->texture.handle, new_surface->pixel_format,
306 new_surface->GetScaledWidth(), new_surface->GetScaledHeight());
307 } else {
308 // TODO: Consider attempting subrect match in existing surfaces and direct blit here instead
309 // of memory upload below if that's a common scenario in some game
310
311 Memory::RasterizerFlushRegion(params.addr, params_size);
312
313 // Load data from memory to the new surface
314 OpenGLState cur_state = OpenGLState::GetCurState();
315
316 GLuint old_tex = cur_state.texture_units[0].texture_2d;
317 cur_state.texture_units[0].texture_2d = new_surface->texture.handle;
318 cur_state.Apply();
319 glActiveTexture(GL_TEXTURE0);
320
321 if (!new_surface->is_tiled) {
322 // TODO: Ensure this will always be a color format, not a depth or other format
323 ASSERT((size_t)new_surface->pixel_format < fb_format_tuples.size());
324 const FormatTuple& tuple = fb_format_tuples[(unsigned int)params.pixel_format];
325
326 glPixelStorei(GL_UNPACK_ROW_LENGTH, (GLint)new_surface->pixel_stride);
327 glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, params.width, params.height, 0,
328 tuple.format, tuple.type, texture_src_data);
329 glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
330 } else {
331 SurfaceType type = CachedSurface::GetFormatType(new_surface->pixel_format);
332 if (type != SurfaceType::Depth && type != SurfaceType::DepthStencil) {
333 FormatTuple tuple;
334 if ((size_t)params.pixel_format < fb_format_tuples.size()) {
335 tuple = fb_format_tuples[(unsigned int)params.pixel_format];
336 } else {
337 // Texture
338 tuple = {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE};
339 }
340
341 std::vector<Math::Vec4<u8>> tex_buffer(params.width * params.height);
342
343 Pica::Texture::TextureInfo tex_info;
344 tex_info.width = params.width;
345 tex_info.height = params.height;
346 tex_info.format = (Pica::TexturingRegs::TextureFormat)params.pixel_format;
347 tex_info.SetDefaultStride();
348 tex_info.physical_address = params.addr;
349
350 for (unsigned y = 0; y < params.height; ++y) {
351 for (unsigned x = 0; x < params.width; ++x) {
352 tex_buffer[x + params.width * y] = Pica::Texture::LookupTexture(
353 texture_src_data, x, params.height - 1 - y, tex_info);
354 }
355 }
356
357 glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, params.width, params.height,
358 0, GL_RGBA, GL_UNSIGNED_BYTE, tex_buffer.data());
359 } else {
360 // Depth/Stencil formats need special treatment since they aren't sampleable using
361 // LookupTexture and can't use RGBA format
362 size_t tuple_idx = (size_t)params.pixel_format - 14;
363 ASSERT(tuple_idx < depth_format_tuples.size());
364 const FormatTuple& tuple = depth_format_tuples[tuple_idx];
365
366 u32 bytes_per_pixel = CachedSurface::GetFormatBpp(params.pixel_format) / 8;
367
368                // OpenGL needs 4 bpp alignment for D24 since GL_UNSIGNED_INT is used as the type
369 bool use_4bpp = (params.pixel_format == PixelFormat::D24);
370
371 u32 gl_bytes_per_pixel = use_4bpp ? 4 : bytes_per_pixel;
372
373 std::vector<u8> temp_fb_depth_buffer(params.width * params.height *
374 gl_bytes_per_pixel);
375
376 u8* temp_fb_depth_buffer_ptr =
377 use_4bpp ? temp_fb_depth_buffer.data() + 1 : temp_fb_depth_buffer.data();
378
379 MortonCopyPixels(params.pixel_format, params.width, params.height, bytes_per_pixel,
380 gl_bytes_per_pixel, texture_src_data, temp_fb_depth_buffer_ptr,
381 true);
382
383 glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, params.width, params.height,
384 0, tuple.format, tuple.type, temp_fb_depth_buffer.data());
385 }
386 }
387
388 // If not 1x scale, blit 1x texture to a new scaled texture and replace texture in surface
389 if (new_surface->res_scale_width != 1.f || new_surface->res_scale_height != 1.f) {
390 OGLTexture scaled_texture;
391 scaled_texture.Create();
392
393 AllocateSurfaceTexture(scaled_texture.handle, new_surface->pixel_format,
394 new_surface->GetScaledWidth(), new_surface->GetScaledHeight());
395 BlitTextures(new_surface->texture.handle, scaled_texture.handle,
396 CachedSurface::GetFormatType(new_surface->pixel_format),
397 MathUtil::Rectangle<int>(0, 0, new_surface->width, new_surface->height),
398 MathUtil::Rectangle<int>(0, 0, new_surface->GetScaledWidth(),
399 new_surface->GetScaledHeight()));
400
401 new_surface->texture.Release();
402 new_surface->texture.handle = scaled_texture.handle;
403 scaled_texture.handle = 0;
404 cur_state.texture_units[0].texture_2d = new_surface->texture.handle;
405 cur_state.Apply();
406 }
407
408 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
409 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
410 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
411 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
412
413 cur_state.texture_units[0].texture_2d = old_tex;
414 cur_state.Apply();
415 }
416
417 Memory::RasterizerMarkRegionCached(new_surface->addr, new_surface->size, 1);
418 surface_cache.add(std::make_pair(boost::icl::interval<PAddr>::right_open(
419 new_surface->addr, new_surface->addr + new_surface->size),
420 std::set<std::shared_ptr<CachedSurface>>({new_surface})));
421 return new_surface.get();
422}
423
424CachedSurface* RasterizerCacheOpenGL::GetSurfaceRect(const CachedSurface& params,
425 bool match_res_scale, bool load_if_create,
426 MathUtil::Rectangle<int>& out_rect) {
427 if (params.addr == 0) {
428 return nullptr;
429 }
430
431 u32 total_pixels = params.width * params.height;
432 u32 params_size = total_pixels * CachedSurface::GetFormatBpp(params.pixel_format) / 8;
433
434 // Attempt to find encompassing surfaces
435 CachedSurface* best_subrect_surface = nullptr;
436 float subrect_surface_goodness = -1.f;
437
438 auto surface_interval =
439 boost::icl::interval<PAddr>::right_open(params.addr, params.addr + params_size);
440 auto cache_upper_bound = surface_cache.upper_bound(surface_interval);
441 for (auto it = surface_cache.lower_bound(surface_interval); it != cache_upper_bound; ++it) {
442 for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
443 CachedSurface* surface = it2->get();
444
445 // Check if the request is contained in the surface
446 if (params.addr >= surface->addr &&
447 params.addr + params_size - 1 <= surface->addr + surface->size - 1 &&
448 params.pixel_format == surface->pixel_format) {
449 // Make sure optional param-matching criteria are fulfilled
450 bool tiling_match = (params.is_tiled == surface->is_tiled);
451 bool res_scale_match = (params.res_scale_width == surface->res_scale_width &&
452 params.res_scale_height == surface->res_scale_height);
453 if (!match_res_scale || res_scale_match) {
454 // Prioritize same-tiling and highest resolution surfaces
455 float match_goodness =
456 (float)tiling_match + surface->res_scale_width * surface->res_scale_height;
457 if (match_goodness > subrect_surface_goodness || surface->dirty) {
458 subrect_surface_goodness = match_goodness;
459 best_subrect_surface = surface;
460 }
461 }
462 }
463 }
464 }
465
466 // Return the best subrect surface if found
467 if (best_subrect_surface != nullptr) {
468 unsigned int bytes_per_pixel =
469 (CachedSurface::GetFormatBpp(best_subrect_surface->pixel_format) / 8);
470
471 int x0, y0;
472
473 if (!params.is_tiled) {
474 u32 begin_pixel_index = (params.addr - best_subrect_surface->addr) / bytes_per_pixel;
475 x0 = begin_pixel_index % best_subrect_surface->width;
476 y0 = begin_pixel_index / best_subrect_surface->width;
477
478 out_rect = MathUtil::Rectangle<int>(x0, y0, x0 + params.width, y0 + params.height);
479 } else {
480 u32 bytes_per_tile = 8 * 8 * bytes_per_pixel;
481 u32 tiles_per_row = best_subrect_surface->width / 8;
482
483 u32 begin_tile_index = (params.addr - best_subrect_surface->addr) / bytes_per_tile;
484 x0 = begin_tile_index % tiles_per_row * 8;
485 y0 = begin_tile_index / tiles_per_row * 8;
486
487 // Tiled surfaces are flipped vertically in the rasterizer vs. 3DS memory.
488 out_rect =
489 MathUtil::Rectangle<int>(x0, best_subrect_surface->height - y0, x0 + params.width,
490 best_subrect_surface->height - (y0 + params.height));
491 }
492
493 out_rect.left = (int)(out_rect.left * best_subrect_surface->res_scale_width);
494 out_rect.right = (int)(out_rect.right * best_subrect_surface->res_scale_width);
495 out_rect.top = (int)(out_rect.top * best_subrect_surface->res_scale_height);
496 out_rect.bottom = (int)(out_rect.bottom * best_subrect_surface->res_scale_height);
497
498 return best_subrect_surface;
499 }
500
501 // No subrect found - create and return a new surface
502 if (!params.is_tiled) {
503 out_rect = MathUtil::Rectangle<int>(0, 0, (int)(params.width * params.res_scale_width),
504 (int)(params.height * params.res_scale_height));
505 } else {
506 out_rect = MathUtil::Rectangle<int>(0, (int)(params.height * params.res_scale_height),
507 (int)(params.width * params.res_scale_width), 0);
508 }
509
510 return GetSurface(params, match_res_scale, load_if_create);
511}
512
513CachedSurface* RasterizerCacheOpenGL::GetTextureSurface(
514 const Pica::TexturingRegs::FullTextureConfig& config) {
515
516 Pica::Texture::TextureInfo info =
517 Pica::Texture::TextureInfo::FromPicaRegister(config.config, config.format);
518
519 CachedSurface params;
520 params.addr = info.physical_address;
521 params.width = info.width;
522 params.height = info.height;
523 params.is_tiled = true;
524 params.pixel_format = CachedSurface::PixelFormatFromTextureFormat(info.format);
525 return GetSurface(params, false, true);
526}
527
528std::tuple<CachedSurface*, CachedSurface*, MathUtil::Rectangle<int>>
529RasterizerCacheOpenGL::GetFramebufferSurfaces(
530 const Pica::FramebufferRegs::FramebufferConfig& config) {
531
532 const auto& regs = Pica::g_state.regs;
533
534    // Make sure that framebuffers don't overlap if both color and depth are being used
535 u32 fb_area = config.GetWidth() * config.GetHeight();
536 bool framebuffers_overlap =
537 config.GetColorBufferPhysicalAddress() != 0 &&
538 config.GetDepthBufferPhysicalAddress() != 0 &&
539 MathUtil::IntervalsIntersect(
540 config.GetColorBufferPhysicalAddress(),
541 fb_area * GPU::Regs::BytesPerPixel(GPU::Regs::PixelFormat(config.color_format.Value())),
542 config.GetDepthBufferPhysicalAddress(),
543 fb_area * Pica::FramebufferRegs::BytesPerDepthPixel(config.depth_format));
544 bool using_color_fb = config.GetColorBufferPhysicalAddress() != 0;
545 bool depth_write_enable = regs.framebuffer.output_merger.depth_write_enable &&
546 regs.framebuffer.framebuffer.allow_depth_stencil_write;
547 bool using_depth_fb = config.GetDepthBufferPhysicalAddress() != 0 &&
548 (regs.framebuffer.output_merger.depth_test_enable || depth_write_enable ||
549 !framebuffers_overlap);
550
551 if (framebuffers_overlap && using_color_fb && using_depth_fb) {
552 LOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
553 "overlapping framebuffers not supported!");
554 using_depth_fb = false;
555 }
556
557    // Get color and depth surfaces
558 CachedSurface color_params;
559 CachedSurface depth_params;
560 color_params.width = depth_params.width = config.GetWidth();
561 color_params.height = depth_params.height = config.GetHeight();
562 color_params.is_tiled = depth_params.is_tiled = true;
563
564    // Set the internal resolution, assuming the same scaling factor for top and bottom screens
565 float resolution_scale_factor = Settings::values.resolution_factor;
566 if (resolution_scale_factor == 0.0f) {
567 // Auto - scale resolution to the window size
568 resolution_scale_factor = VideoCore::g_emu_window->GetFramebufferLayout().GetScalingRatio();
569 }
570 // Scale the resolution by the specified factor
571 color_params.res_scale_width = resolution_scale_factor;
572 depth_params.res_scale_width = resolution_scale_factor;
573 color_params.res_scale_height = resolution_scale_factor;
574 depth_params.res_scale_height = resolution_scale_factor;
575
576 color_params.addr = config.GetColorBufferPhysicalAddress();
577 color_params.pixel_format = CachedSurface::PixelFormatFromColorFormat(config.color_format);
578
579 depth_params.addr = config.GetDepthBufferPhysicalAddress();
580 depth_params.pixel_format = CachedSurface::PixelFormatFromDepthFormat(config.depth_format);
581
582 MathUtil::Rectangle<int> color_rect;
583 CachedSurface* color_surface =
584 using_color_fb ? GetSurfaceRect(color_params, true, true, color_rect) : nullptr;
585
586 MathUtil::Rectangle<int> depth_rect;
587 CachedSurface* depth_surface =
588 using_depth_fb ? GetSurfaceRect(depth_params, true, true, depth_rect) : nullptr;
589
590 // Sanity check to make sure found surfaces aren't the same
591 if (using_depth_fb && using_color_fb && color_surface == depth_surface) {
592 LOG_CRITICAL(
593 Render_OpenGL,
594 "Color and depth framebuffer surfaces overlap; overlapping surfaces not supported!");
595 using_depth_fb = false;
596 depth_surface = nullptr;
597 }
598
599 MathUtil::Rectangle<int> rect;
600
601 if (color_surface != nullptr && depth_surface != nullptr &&
602 (depth_rect.left != color_rect.left || depth_rect.top != color_rect.top)) {
603 // Can't specify separate color and depth viewport offsets in OpenGL, so re-zero both if
604 // they don't match
605 if (color_rect.left != 0 || color_rect.top != 0) {
606 color_surface = GetSurface(color_params, true, true);
607 }
608
609 if (depth_rect.left != 0 || depth_rect.top != 0) {
610 depth_surface = GetSurface(depth_params, true, true);
611 }
612
613 if (!color_surface->is_tiled) {
614 rect = MathUtil::Rectangle<int>(
615 0, 0, (int)(color_params.width * color_params.res_scale_width),
616 (int)(color_params.height * color_params.res_scale_height));
617 } else {
618 rect = MathUtil::Rectangle<int>(
619 0, (int)(color_params.height * color_params.res_scale_height),
620 (int)(color_params.width * color_params.res_scale_width), 0);
621 }
622 } else if (color_surface != nullptr) {
623 rect = color_rect;
624 } else if (depth_surface != nullptr) {
625 rect = depth_rect;
626 } else {
627 rect = MathUtil::Rectangle<int>(0, 0, 0, 0);
628 }
629
630 return std::make_tuple(color_surface, depth_surface, rect);
631}
632
633CachedSurface* RasterizerCacheOpenGL::TryGetFillSurface(const GPU::Regs::MemoryFillConfig& config) {
634 auto surface_interval =
635 boost::icl::interval<PAddr>::right_open(config.GetStartAddress(), config.GetEndAddress());
636 auto range = surface_cache.equal_range(surface_interval);
637 for (auto it = range.first; it != range.second; ++it) {
638 for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
639 int bits_per_value = 0;
640 if (config.fill_24bit) {
641 bits_per_value = 24;
642 } else if (config.fill_32bit) {
643 bits_per_value = 32;
644 } else {
645 bits_per_value = 16;
646 }
647
648 CachedSurface* surface = it2->get();
649
650 if (surface->addr == config.GetStartAddress() &&
651 CachedSurface::GetFormatBpp(surface->pixel_format) == bits_per_value &&
652 (surface->width * surface->height *
653 CachedSurface::GetFormatBpp(surface->pixel_format) / 8) ==
654 (config.GetEndAddress() - config.GetStartAddress())) {
655 return surface;
656 }
657 }
658 }
659
660 return nullptr;
661}
662
663MICROPROFILE_DEFINE(OpenGL_SurfaceDownload, "OpenGL", "Surface Download", MP_RGB(128, 192, 64));
664void RasterizerCacheOpenGL::FlushSurface(CachedSurface* surface) {
665 using PixelFormat = CachedSurface::PixelFormat;
666 using SurfaceType = CachedSurface::SurfaceType;
667
668 if (!surface->dirty) {
669 return;
670 }
671
672 MICROPROFILE_SCOPE(OpenGL_SurfaceDownload);
673
674 u8* dst_buffer = Memory::GetPhysicalPointer(surface->addr);
675 if (dst_buffer == nullptr) {
676 return;
677 }
678
679 OpenGLState cur_state = OpenGLState::GetCurState();
680 GLuint old_tex = cur_state.texture_units[0].texture_2d;
681
682 OGLTexture unscaled_tex;
683 GLuint texture_to_flush = surface->texture.handle;
684
685 // If not 1x scale, blit scaled texture to a new 1x texture and use that to flush
686 if (surface->res_scale_width != 1.f || surface->res_scale_height != 1.f) {
687 unscaled_tex.Create();
688
689 AllocateSurfaceTexture(unscaled_tex.handle, surface->pixel_format, surface->width,
690 surface->height);
691 BlitTextures(
692 surface->texture.handle, unscaled_tex.handle,
693 CachedSurface::GetFormatType(surface->pixel_format),
694 MathUtil::Rectangle<int>(0, 0, surface->GetScaledWidth(), surface->GetScaledHeight()),
695 MathUtil::Rectangle<int>(0, 0, surface->width, surface->height));
696
697 texture_to_flush = unscaled_tex.handle;
698 }
699
700 cur_state.texture_units[0].texture_2d = texture_to_flush;
701 cur_state.Apply();
702 glActiveTexture(GL_TEXTURE0);
703
704 if (!surface->is_tiled) {
705 // TODO: Ensure this will always be a color format, not a depth or other format
706 ASSERT((size_t)surface->pixel_format < fb_format_tuples.size());
707 const FormatTuple& tuple = fb_format_tuples[(unsigned int)surface->pixel_format];
708
709 glPixelStorei(GL_PACK_ROW_LENGTH, (GLint)surface->pixel_stride);
710 glGetTexImage(GL_TEXTURE_2D, 0, tuple.format, tuple.type, dst_buffer);
711 glPixelStorei(GL_PACK_ROW_LENGTH, 0);
712 } else {
713 SurfaceType type = CachedSurface::GetFormatType(surface->pixel_format);
714 if (type != SurfaceType::Depth && type != SurfaceType::DepthStencil) {
715 ASSERT((size_t)surface->pixel_format < fb_format_tuples.size());
716 const FormatTuple& tuple = fb_format_tuples[(unsigned int)surface->pixel_format];
717
718 u32 bytes_per_pixel = CachedSurface::GetFormatBpp(surface->pixel_format) / 8;
719
720 std::vector<u8> temp_gl_buffer(surface->width * surface->height * bytes_per_pixel);
721
722 glGetTexImage(GL_TEXTURE_2D, 0, tuple.format, tuple.type, temp_gl_buffer.data());
723
724 // Directly copy pixels. Internal OpenGL color formats are consistent so no conversion
725 // is necessary.
726 MortonCopyPixels(surface->pixel_format, surface->width, surface->height,
727 bytes_per_pixel, bytes_per_pixel, dst_buffer, temp_gl_buffer.data(),
728 false);
729 } else {
730 // Depth/Stencil formats need special treatment since they aren't sampleable using
731 // LookupTexture and can't use RGBA format
732 size_t tuple_idx = (size_t)surface->pixel_format - 14;
733 ASSERT(tuple_idx < depth_format_tuples.size());
734 const FormatTuple& tuple = depth_format_tuples[tuple_idx];
735
736 u32 bytes_per_pixel = CachedSurface::GetFormatBpp(surface->pixel_format) / 8;
737
738            // OpenGL needs 4 bpp alignment for D24 since GL_UNSIGNED_INT is used as the type
739 bool use_4bpp = (surface->pixel_format == PixelFormat::D24);
740
741 u32 gl_bytes_per_pixel = use_4bpp ? 4 : bytes_per_pixel;
742
743 std::vector<u8> temp_gl_buffer(surface->width * surface->height * gl_bytes_per_pixel);
744
745 glGetTexImage(GL_TEXTURE_2D, 0, tuple.format, tuple.type, temp_gl_buffer.data());
746
747 u8* temp_gl_buffer_ptr = use_4bpp ? temp_gl_buffer.data() + 1 : temp_gl_buffer.data();
748
749 MortonCopyPixels(surface->pixel_format, surface->width, surface->height,
750 bytes_per_pixel, gl_bytes_per_pixel, dst_buffer, temp_gl_buffer_ptr,
751 false);
752 }
753 }
754
755 surface->dirty = false;
756
757 cur_state.texture_units[0].texture_2d = old_tex;
758 cur_state.Apply();
759}
760
761void RasterizerCacheOpenGL::FlushRegion(PAddr addr, u32 size, const CachedSurface* skip_surface,
762 bool invalidate) {
763 if (size == 0) {
764 return;
765 }
766
767 // Gather up unique surfaces that touch the region
768 std::unordered_set<std::shared_ptr<CachedSurface>> touching_surfaces;
769
770 auto surface_interval = boost::icl::interval<PAddr>::right_open(addr, addr + size);
771 auto cache_upper_bound = surface_cache.upper_bound(surface_interval);
772 for (auto it = surface_cache.lower_bound(surface_interval); it != cache_upper_bound; ++it) {
773 std::copy_if(it->second.begin(), it->second.end(),
774 std::inserter(touching_surfaces, touching_surfaces.end()),
775 [skip_surface](std::shared_ptr<CachedSurface> surface) {
776 return (surface.get() != skip_surface);
777 });
778 }
779
780 // Flush and invalidate surfaces
781 for (auto surface : touching_surfaces) {
782 FlushSurface(surface.get());
783 if (invalidate) {
784 Memory::RasterizerMarkRegionCached(surface->addr, surface->size, -1);
785 surface_cache.subtract(
786 std::make_pair(boost::icl::interval<PAddr>::right_open(
787 surface->addr, surface->addr + surface->size),
788 std::set<std::shared_ptr<CachedSurface>>({surface})));
789 }
790 }
791}
792
793void RasterizerCacheOpenGL::FlushAll() {
794 for (auto& surfaces : surface_cache) {
795 for (auto& surface : surfaces.second) {
796 FlushSurface(surface.get());
797 }
798 }
799}
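The cache above is a boost::icl::interval_map keyed by physical address: add() unions a one-element surface set into every overlapping interval, subtract() removes it again, and equal_range()/lower_bound() answer which cached surfaces touch a given address range, which is exactly what FlushRegion iterates over. A small standalone sketch of those semantics, with the surface type reduced to a shared_ptr<int> for brevity:

    #include <iostream>
    #include <memory>
    #include <set>
    #include <utility>
    #include <boost/icl/interval_map.hpp>

    int main() {
        using Surface = std::shared_ptr<int>;
        using Cache = boost::icl::interval_map<unsigned, std::set<Surface>>;
        Cache cache;

        auto surface = std::make_shared<int>(0);
        auto interval = boost::icl::interval<unsigned>::right_open(0x100, 0x200);

        // Register a surface covering [0x100, 0x200), mirroring GetSurface().
        cache.add(std::make_pair(interval, std::set<Surface>{surface}));

        // Any overlapping query returns the segments that touch it, as in FlushRegion().
        auto query = boost::icl::interval<unsigned>::right_open(0x180, 0x300);
        auto range = cache.equal_range(query);
        for (auto it = range.first; it != range.second; ++it)
            std::cout << it->second.size() << " surface(s) touch this segment\n";

        // Invalidate by subtracting the same (interval, set) pair.
        cache.subtract(std::make_pair(interval, std::set<Surface>{surface}));
        return 0;
    }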
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
deleted file mode 100644
index aea20c693..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ /dev/null
@@ -1,239 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <memory>
9#include <set>
10#include <tuple>
11#ifdef __GNUC__
12#pragma GCC diagnostic push
13#pragma GCC diagnostic ignored "-Wunused-local-typedef"
14#endif
15#include <boost/icl/interval_map.hpp>
16#ifdef __GNUC__
17#pragma GCC diagnostic pop
18#endif
19#include <glad/glad.h>
20#include "common/assert.h"
21#include "common/common_funcs.h"
22#include "common/common_types.h"
23#include "core/hw/gpu.h"
24#include "video_core/regs_framebuffer.h"
25#include "video_core/regs_texturing.h"
26#include "video_core/renderer_opengl/gl_resource_manager.h"
27
28namespace MathUtil {
29template <class T>
30struct Rectangle;
31}
32
33struct CachedSurface;
34
35using SurfaceCache = boost::icl::interval_map<PAddr, std::set<std::shared_ptr<CachedSurface>>>;
36
37struct CachedSurface {
38 enum class PixelFormat {
39 // First 5 formats are shared between textures and color buffers
40 RGBA8 = 0,
41 RGB8 = 1,
42 RGB5A1 = 2,
43 RGB565 = 3,
44 RGBA4 = 4,
45
46 // Texture-only formats
47 IA8 = 5,
48 RG8 = 6,
49 I8 = 7,
50 A8 = 8,
51 IA4 = 9,
52 I4 = 10,
53 A4 = 11,
54 ETC1 = 12,
55 ETC1A4 = 13,
56
57 // Depth buffer-only formats
58 D16 = 14,
59 // gap
60 D24 = 16,
61 D24S8 = 17,
62
63 Invalid = 255,
64 };
65
66 enum class SurfaceType {
67 Color = 0,
68 Texture = 1,
69 Depth = 2,
70 DepthStencil = 3,
71 Invalid = 4,
72 };
73
74 static unsigned int GetFormatBpp(CachedSurface::PixelFormat format) {
75 static const std::array<unsigned int, 18> bpp_table = {
76 32, // RGBA8
77 24, // RGB8
78 16, // RGB5A1
79 16, // RGB565
80 16, // RGBA4
81 16, // IA8
82 16, // RG8
83 8, // I8
84 8, // A8
85 8, // IA4
86 4, // I4
87 4, // A4
88 4, // ETC1
89 8, // ETC1A4
90 16, // D16
91 0,
92 24, // D24
93 32, // D24S8
94 };
95
96 ASSERT((unsigned int)format < ARRAY_SIZE(bpp_table));
97 return bpp_table[(unsigned int)format];
98 }
99
100 static PixelFormat PixelFormatFromTextureFormat(Pica::TexturingRegs::TextureFormat format) {
101 return ((unsigned int)format < 14) ? (PixelFormat)format : PixelFormat::Invalid;
102 }
103
104 static PixelFormat PixelFormatFromColorFormat(Pica::FramebufferRegs::ColorFormat format) {
105 return ((unsigned int)format < 5) ? (PixelFormat)format : PixelFormat::Invalid;
106 }
107
108 static PixelFormat PixelFormatFromDepthFormat(Pica::FramebufferRegs::DepthFormat format) {
109 return ((unsigned int)format < 4) ? (PixelFormat)((unsigned int)format + 14)
110 : PixelFormat::Invalid;
111 }
112
113 static PixelFormat PixelFormatFromGPUPixelFormat(GPU::Regs::PixelFormat format) {
114 switch (format) {
115 // RGB565 and RGB5A1 are switched in PixelFormat compared to ColorFormat
116 case GPU::Regs::PixelFormat::RGB565:
117 return PixelFormat::RGB565;
118 case GPU::Regs::PixelFormat::RGB5A1:
119 return PixelFormat::RGB5A1;
120 default:
121 return ((unsigned int)format < 5) ? (PixelFormat)format : PixelFormat::Invalid;
122 }
123 }
124
125 static bool CheckFormatsBlittable(PixelFormat pixel_format_a, PixelFormat pixel_format_b) {
126 SurfaceType a_type = GetFormatType(pixel_format_a);
127 SurfaceType b_type = GetFormatType(pixel_format_b);
128
129 if ((a_type == SurfaceType::Color || a_type == SurfaceType::Texture) &&
130 (b_type == SurfaceType::Color || b_type == SurfaceType::Texture)) {
131 return true;
132 }
133
134 if (a_type == SurfaceType::Depth && b_type == SurfaceType::Depth) {
135 return true;
136 }
137
138 if (a_type == SurfaceType::DepthStencil && b_type == SurfaceType::DepthStencil) {
139 return true;
140 }
141
142 return false;
143 }
144
145 static SurfaceType GetFormatType(PixelFormat pixel_format) {
146 if ((unsigned int)pixel_format < 5) {
147 return SurfaceType::Color;
148 }
149
150 if ((unsigned int)pixel_format < 14) {
151 return SurfaceType::Texture;
152 }
153
154 if (pixel_format == PixelFormat::D16 || pixel_format == PixelFormat::D24) {
155 return SurfaceType::Depth;
156 }
157
158 if (pixel_format == PixelFormat::D24S8) {
159 return SurfaceType::DepthStencil;
160 }
161
162 return SurfaceType::Invalid;
163 }
164
165 u32 GetScaledWidth() const {
166 return (u32)(width * res_scale_width);
167 }
168
169 u32 GetScaledHeight() const {
170 return (u32)(height * res_scale_height);
171 }
172
173 PAddr addr;
174 u32 size;
175
176 PAddr min_valid;
177 PAddr max_valid;
178
179 OGLTexture texture;
180 u32 width;
181 u32 height;
182 /// Stride between lines, in pixels. Only valid for images in linear format.
183 u32 pixel_stride = 0;
184 float res_scale_width = 1.f;
185 float res_scale_height = 1.f;
186
187 bool is_tiled;
188 PixelFormat pixel_format;
189 bool dirty;
190};
191
192class RasterizerCacheOpenGL : NonCopyable {
193public:
194 RasterizerCacheOpenGL();
195 ~RasterizerCacheOpenGL();
196
197 /// Blits one texture to another
198 void BlitTextures(GLuint src_tex, GLuint dst_tex, CachedSurface::SurfaceType type,
199 const MathUtil::Rectangle<int>& src_rect,
200 const MathUtil::Rectangle<int>& dst_rect);
201
202 /// Attempt to blit one surface's texture to another
203 bool TryBlitSurfaces(CachedSurface* src_surface, const MathUtil::Rectangle<int>& src_rect,
204 CachedSurface* dst_surface, const MathUtil::Rectangle<int>& dst_rect);
205
206 /// Loads a texture from 3DS memory to OpenGL and caches it (if not already cached)
207 CachedSurface* GetSurface(const CachedSurface& params, bool match_res_scale,
208 bool load_if_create);
209
210 /// Attempt to find a subrect (resolution scaled) of a surface, otherwise loads a texture from
211 /// 3DS memory to OpenGL and caches it (if not already cached)
212 CachedSurface* GetSurfaceRect(const CachedSurface& params, bool match_res_scale,
213 bool load_if_create, MathUtil::Rectangle<int>& out_rect);
214
215 /// Gets a surface based on the texture configuration
216 CachedSurface* GetTextureSurface(const Pica::TexturingRegs::FullTextureConfig& config);
217
218 /// Gets the color and depth surfaces and rect (resolution scaled) based on the framebuffer
219 /// configuration
220 std::tuple<CachedSurface*, CachedSurface*, MathUtil::Rectangle<int>> GetFramebufferSurfaces(
221 const Pica::FramebufferRegs::FramebufferConfig& config);
222
223 /// Attempt to get a surface that exactly matches the fill region and format
224 CachedSurface* TryGetFillSurface(const GPU::Regs::MemoryFillConfig& config);
225
226 /// Write the surface back to memory
227 void FlushSurface(CachedSurface* surface);
228
229 /// Write any cached resources overlapping the region back to memory (if dirty) and optionally
230 /// invalidate them in the cache
231 void FlushRegion(PAddr addr, u32 size, const CachedSurface* skip_surface, bool invalidate);
232
233 /// Flush all cached resources tracked by this cache manager
234 void FlushAll();
235
236private:
237 SurfaceCache surface_cache;
238 OGLFramebuffer transfer_framebuffers[2];
239};
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp
deleted file mode 100644
index 9fe183944..000000000
--- a/src/video_core/renderer_opengl/gl_shader_gen.cpp
+++ /dev/null
@@ -1,1231 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <array>
6#include <cstddef>
7#include <cstring>
8#include "common/assert.h"
9#include "common/bit_field.h"
10#include "common/logging/log.h"
11#include "core/core.h"
12#include "video_core/regs_framebuffer.h"
13#include "video_core/regs_lighting.h"
14#include "video_core/regs_rasterizer.h"
15#include "video_core/regs_texturing.h"
16#include "video_core/renderer_opengl/gl_rasterizer.h"
17#include "video_core/renderer_opengl/gl_shader_gen.h"
18#include "video_core/renderer_opengl/gl_shader_util.h"
19
20using Pica::FramebufferRegs;
21using Pica::LightingRegs;
22using Pica::RasterizerRegs;
23using Pica::TexturingRegs;
24using TevStageConfig = TexturingRegs::TevStageConfig;
25
26namespace GLShader {
27
28static const std::string UniformBlockDef = R"(
29#define NUM_TEV_STAGES 6
30#define NUM_LIGHTS 8
31
32struct LightSrc {
33 vec3 specular_0;
34 vec3 specular_1;
35 vec3 diffuse;
36 vec3 ambient;
37 vec3 position;
38 vec3 spot_direction;
39 float dist_atten_bias;
40 float dist_atten_scale;
41};
42
43layout (std140) uniform shader_data {
44 vec2 framebuffer_scale;
45 int alphatest_ref;
46 float depth_scale;
47 float depth_offset;
48 int scissor_x1;
49 int scissor_y1;
50 int scissor_x2;
51 int scissor_y2;
52 vec3 fog_color;
53 vec2 proctex_noise_f;
54 vec2 proctex_noise_a;
55 vec2 proctex_noise_p;
56 vec3 lighting_global_ambient;
57 LightSrc light_src[NUM_LIGHTS];
58 vec4 const_color[NUM_TEV_STAGES];
59 vec4 tev_combiner_buffer_color;
60 vec4 clip_coef;
61};
62)";
63
64PicaShaderConfig PicaShaderConfig::BuildFromRegs(const Pica::Regs& regs) {
65 PicaShaderConfig res;
66
67 auto& state = res.state;
68 std::memset(&state, 0, sizeof(PicaShaderConfig::State));
69
70 state.scissor_test_mode = regs.rasterizer.scissor_test.mode;
71
72 state.depthmap_enable = regs.rasterizer.depthmap_enable;
73
74 state.alpha_test_func = regs.framebuffer.output_merger.alpha_test.enable
75 ? regs.framebuffer.output_merger.alpha_test.func.Value()
76 : Pica::FramebufferRegs::CompareFunc::Always;
77
78 state.texture0_type = regs.texturing.texture0.type;
79
80 state.texture2_use_coord1 = regs.texturing.main_config.texture2_use_coord1 != 0;
81
82 // Copy relevant tev stages fields.
83    // We don't sync const_color here because of its high variance; it is handled as a
84    // shader uniform instead.
85 const auto& tev_stages = regs.texturing.GetTevStages();
86 DEBUG_ASSERT(state.tev_stages.size() == tev_stages.size());
87 for (size_t i = 0; i < tev_stages.size(); i++) {
88 const auto& tev_stage = tev_stages[i];
89 state.tev_stages[i].sources_raw = tev_stage.sources_raw;
90 state.tev_stages[i].modifiers_raw = tev_stage.modifiers_raw;
91 state.tev_stages[i].ops_raw = tev_stage.ops_raw;
92 state.tev_stages[i].scales_raw = tev_stage.scales_raw;
93 }
94
95 state.fog_mode = regs.texturing.fog_mode;
96 state.fog_flip = regs.texturing.fog_flip != 0;
97
98 state.combiner_buffer_input = regs.texturing.tev_combiner_buffer_input.update_mask_rgb.Value() |
99 regs.texturing.tev_combiner_buffer_input.update_mask_a.Value()
100 << 4;
101
102 // Fragment lighting
103
104 state.lighting.enable = !regs.lighting.disable;
105 state.lighting.src_num = regs.lighting.max_light_index + 1;
106
107 for (unsigned light_index = 0; light_index < state.lighting.src_num; ++light_index) {
108 unsigned num = regs.lighting.light_enable.GetNum(light_index);
109 const auto& light = regs.lighting.light[num];
110 state.lighting.light[light_index].num = num;
111 state.lighting.light[light_index].directional = light.config.directional != 0;
112 state.lighting.light[light_index].two_sided_diffuse = light.config.two_sided_diffuse != 0;
113 state.lighting.light[light_index].geometric_factor_0 = light.config.geometric_factor_0 != 0;
114 state.lighting.light[light_index].geometric_factor_1 = light.config.geometric_factor_1 != 0;
115 state.lighting.light[light_index].dist_atten_enable =
116 !regs.lighting.IsDistAttenDisabled(num);
117 state.lighting.light[light_index].spot_atten_enable =
118 !regs.lighting.IsSpotAttenDisabled(num);
119 }
120
121 state.lighting.lut_d0.enable = regs.lighting.config1.disable_lut_d0 == 0;
122 state.lighting.lut_d0.abs_input = regs.lighting.abs_lut_input.disable_d0 == 0;
123 state.lighting.lut_d0.type = regs.lighting.lut_input.d0.Value();
124 state.lighting.lut_d0.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.d0);
125
126 state.lighting.lut_d1.enable = regs.lighting.config1.disable_lut_d1 == 0;
127 state.lighting.lut_d1.abs_input = regs.lighting.abs_lut_input.disable_d1 == 0;
128 state.lighting.lut_d1.type = regs.lighting.lut_input.d1.Value();
129 state.lighting.lut_d1.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.d1);
130
131    // This is a dummy field: there is no corresponding disable register, so the spotlight LUT is always treated as enabled
132 state.lighting.lut_sp.enable = true;
133 state.lighting.lut_sp.abs_input = regs.lighting.abs_lut_input.disable_sp == 0;
134 state.lighting.lut_sp.type = regs.lighting.lut_input.sp.Value();
135 state.lighting.lut_sp.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.sp);
136
137 state.lighting.lut_fr.enable = regs.lighting.config1.disable_lut_fr == 0;
138 state.lighting.lut_fr.abs_input = regs.lighting.abs_lut_input.disable_fr == 0;
139 state.lighting.lut_fr.type = regs.lighting.lut_input.fr.Value();
140 state.lighting.lut_fr.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.fr);
141
142 state.lighting.lut_rr.enable = regs.lighting.config1.disable_lut_rr == 0;
143 state.lighting.lut_rr.abs_input = regs.lighting.abs_lut_input.disable_rr == 0;
144 state.lighting.lut_rr.type = regs.lighting.lut_input.rr.Value();
145 state.lighting.lut_rr.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.rr);
146
147 state.lighting.lut_rg.enable = regs.lighting.config1.disable_lut_rg == 0;
148 state.lighting.lut_rg.abs_input = regs.lighting.abs_lut_input.disable_rg == 0;
149 state.lighting.lut_rg.type = regs.lighting.lut_input.rg.Value();
150 state.lighting.lut_rg.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.rg);
151
152 state.lighting.lut_rb.enable = regs.lighting.config1.disable_lut_rb == 0;
153 state.lighting.lut_rb.abs_input = regs.lighting.abs_lut_input.disable_rb == 0;
154 state.lighting.lut_rb.type = regs.lighting.lut_input.rb.Value();
155 state.lighting.lut_rb.scale = regs.lighting.lut_scale.GetScale(regs.lighting.lut_scale.rb);
156
157 state.lighting.config = regs.lighting.config0.config;
158 state.lighting.fresnel_selector = regs.lighting.config0.fresnel_selector;
159 state.lighting.bump_mode = regs.lighting.config0.bump_mode;
160 state.lighting.bump_selector = regs.lighting.config0.bump_selector;
161 state.lighting.bump_renorm = regs.lighting.config0.disable_bump_renorm == 0;
162 state.lighting.clamp_highlights = regs.lighting.config0.clamp_highlights != 0;
163
164 state.proctex.enable = regs.texturing.main_config.texture3_enable;
165 if (state.proctex.enable) {
166 state.proctex.coord = regs.texturing.main_config.texture3_coordinates;
167 state.proctex.u_clamp = regs.texturing.proctex.u_clamp;
168 state.proctex.v_clamp = regs.texturing.proctex.v_clamp;
169 state.proctex.color_combiner = regs.texturing.proctex.color_combiner;
170 state.proctex.alpha_combiner = regs.texturing.proctex.alpha_combiner;
171 state.proctex.separate_alpha = regs.texturing.proctex.separate_alpha;
172 state.proctex.noise_enable = regs.texturing.proctex.noise_enable;
173 state.proctex.u_shift = regs.texturing.proctex.u_shift;
174 state.proctex.v_shift = regs.texturing.proctex.v_shift;
175 state.proctex.lut_width = regs.texturing.proctex_lut.width;
176 state.proctex.lut_offset = regs.texturing.proctex_lut_offset;
177 state.proctex.lut_filter = regs.texturing.proctex_lut.filter;
178 }
179
180 return res;
181}
182
183/// Detects if a TEV stage is configured to be skipped (to avoid generating unnecessary code)
184static bool IsPassThroughTevStage(const TevStageConfig& stage) {
185 return (stage.color_op == TevStageConfig::Operation::Replace &&
186 stage.alpha_op == TevStageConfig::Operation::Replace &&
187 stage.color_source1 == TevStageConfig::Source::Previous &&
188 stage.alpha_source1 == TevStageConfig::Source::Previous &&
189 stage.color_modifier1 == TevStageConfig::ColorModifier::SourceColor &&
190 stage.alpha_modifier1 == TevStageConfig::AlphaModifier::SourceAlpha &&
191 stage.GetColorMultiplier() == 1 && stage.GetAlphaMultiplier() == 1);
192}
193
194static std::string SampleTexture(const PicaShaderConfig& config, unsigned texture_unit) {
195 const auto& state = config.state;
196 switch (texture_unit) {
197 case 0:
198 // Only unit 0 respects the texturing type
199 switch (state.texture0_type) {
200 case TexturingRegs::TextureConfig::Texture2D:
201 return "texture(tex[0], texcoord[0])";
202 case TexturingRegs::TextureConfig::Projection2D:
203 return "textureProj(tex[0], vec3(texcoord[0], texcoord0_w))";
204 default:
205 LOG_CRITICAL(HW_GPU, "Unhandled texture type %x",
206 static_cast<int>(state.texture0_type));
207 UNIMPLEMENTED();
208 return "texture(tex[0], texcoord[0])";
209 }
210 case 1:
211 return "texture(tex[1], texcoord[1])";
212 case 2:
213 if (state.texture2_use_coord1)
214 return "texture(tex[2], texcoord[1])";
215 else
216 return "texture(tex[2], texcoord[2])";
217 case 3:
218 if (state.proctex.enable) {
219 return "ProcTex()";
220 } else {
221 LOG_ERROR(Render_OpenGL, "Using Texture3 without enabling it");
222 return "vec4(0.0)";
223 }
224 default:
225 UNREACHABLE();
226 return "";
227 }
228}
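// Illustrative note (not part of the original file): for texture unit 2 with texture2_use_coord1
// set, SampleTexture() above returns the string "texture(tex[2], texcoord[1])", which
// AppendSource() below splices into the TEV stage expressions.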
229
230/// Writes the specified TEV stage source component(s)
231static void AppendSource(std::string& out, const PicaShaderConfig& config,
232 TevStageConfig::Source source, const std::string& index_name) {
233 const auto& state = config.state;
234 using Source = TevStageConfig::Source;
235 switch (source) {
236 case Source::PrimaryColor:
237 out += "primary_color";
238 break;
239 case Source::PrimaryFragmentColor:
240 out += "primary_fragment_color";
241 break;
242 case Source::SecondaryFragmentColor:
243 out += "secondary_fragment_color";
244 break;
245 case Source::Texture0:
246 out += SampleTexture(config, 0);
247 break;
248 case Source::Texture1:
249 out += SampleTexture(config, 1);
250 break;
251 case Source::Texture2:
252 out += SampleTexture(config, 2);
253 break;
254 case Source::Texture3:
255 out += SampleTexture(config, 3);
256 break;
257 case Source::PreviousBuffer:
258 out += "combiner_buffer";
259 break;
260 case Source::Constant:
261 ((out += "const_color[") += index_name) += ']';
262 break;
263 case Source::Previous:
264 out += "last_tex_env_out";
265 break;
266 default:
267 out += "vec4(0.0)";
268 LOG_CRITICAL(Render_OpenGL, "Unknown source op %u", source);
269 break;
270 }
271}
272
273/// Writes the color components to use for the specified TEV stage color modifier
274static void AppendColorModifier(std::string& out, const PicaShaderConfig& config,
275 TevStageConfig::ColorModifier modifier,
276 TevStageConfig::Source source, const std::string& index_name) {
277 using ColorModifier = TevStageConfig::ColorModifier;
278 switch (modifier) {
279 case ColorModifier::SourceColor:
280 AppendSource(out, config, source, index_name);
281 out += ".rgb";
282 break;
283 case ColorModifier::OneMinusSourceColor:
284 out += "vec3(1.0) - ";
285 AppendSource(out, config, source, index_name);
286 out += ".rgb";
287 break;
288 case ColorModifier::SourceAlpha:
289 AppendSource(out, config, source, index_name);
290 out += ".aaa";
291 break;
292 case ColorModifier::OneMinusSourceAlpha:
293 out += "vec3(1.0) - ";
294 AppendSource(out, config, source, index_name);
295 out += ".aaa";
296 break;
297 case ColorModifier::SourceRed:
298 AppendSource(out, config, source, index_name);
299 out += ".rrr";
300 break;
301 case ColorModifier::OneMinusSourceRed:
302 out += "vec3(1.0) - ";
303 AppendSource(out, config, source, index_name);
304 out += ".rrr";
305 break;
306 case ColorModifier::SourceGreen:
307 AppendSource(out, config, source, index_name);
308 out += ".ggg";
309 break;
310 case ColorModifier::OneMinusSourceGreen:
311 out += "vec3(1.0) - ";
312 AppendSource(out, config, source, index_name);
313 out += ".ggg";
314 break;
315 case ColorModifier::SourceBlue:
316 AppendSource(out, config, source, index_name);
317 out += ".bbb";
318 break;
319 case ColorModifier::OneMinusSourceBlue:
320 out += "vec3(1.0) - ";
321 AppendSource(out, config, source, index_name);
322 out += ".bbb";
323 break;
324 default:
325 out += "vec3(0.0)";
326 LOG_CRITICAL(Render_OpenGL, "Unknown color modifier op %u", modifier);
327 break;
328 }
329}
330
331/// Writes the alpha component to use for the specified TEV stage alpha modifier
332static void AppendAlphaModifier(std::string& out, const PicaShaderConfig& config,
333 TevStageConfig::AlphaModifier modifier,
334 TevStageConfig::Source source, const std::string& index_name) {
335 using AlphaModifier = TevStageConfig::AlphaModifier;
336 switch (modifier) {
337 case AlphaModifier::SourceAlpha:
338 AppendSource(out, config, source, index_name);
339 out += ".a";
340 break;
341 case AlphaModifier::OneMinusSourceAlpha:
342 out += "1.0 - ";
343 AppendSource(out, config, source, index_name);
344 out += ".a";
345 break;
346 case AlphaModifier::SourceRed:
347 AppendSource(out, config, source, index_name);
348 out += ".r";
349 break;
350 case AlphaModifier::OneMinusSourceRed:
351 out += "1.0 - ";
352 AppendSource(out, config, source, index_name);
353 out += ".r";
354 break;
355 case AlphaModifier::SourceGreen:
356 AppendSource(out, config, source, index_name);
357 out += ".g";
358 break;
359 case AlphaModifier::OneMinusSourceGreen:
360 out += "1.0 - ";
361 AppendSource(out, config, source, index_name);
362 out += ".g";
363 break;
364 case AlphaModifier::SourceBlue:
365 AppendSource(out, config, source, index_name);
366 out += ".b";
367 break;
368 case AlphaModifier::OneMinusSourceBlue:
369 out += "1.0 - ";
370 AppendSource(out, config, source, index_name);
371 out += ".b";
372 break;
373 default:
374 out += "0.0";
375 LOG_CRITICAL(Render_OpenGL, "Unknown alpha modifier op %u", modifier);
376 break;
377 }
378}
379
380/// Writes the combiner function for the color components for the specified TEV stage operation
381static void AppendColorCombiner(std::string& out, TevStageConfig::Operation operation,
382 const std::string& variable_name) {
383 out += "clamp(";
384 using Operation = TevStageConfig::Operation;
385 switch (operation) {
386 case Operation::Replace:
387 out += variable_name + "[0]";
388 break;
389 case Operation::Modulate:
390 out += variable_name + "[0] * " + variable_name + "[1]";
391 break;
392 case Operation::Add:
393 out += variable_name + "[0] + " + variable_name + "[1]";
394 break;
395 case Operation::AddSigned:
396 out += variable_name + "[0] + " + variable_name + "[1] - vec3(0.5)";
397 break;
398 case Operation::Lerp:
399 out += variable_name + "[0] * " + variable_name + "[2] + " + variable_name +
400 "[1] * (vec3(1.0) - " + variable_name + "[2])";
401 break;
402 case Operation::Subtract:
403 out += variable_name + "[0] - " + variable_name + "[1]";
404 break;
405 case Operation::MultiplyThenAdd:
406 out += variable_name + "[0] * " + variable_name + "[1] + " + variable_name + "[2]";
407 break;
408 case Operation::AddThenMultiply:
409 out += "min(" + variable_name + "[0] + " + variable_name + "[1], vec3(1.0)) * " +
410 variable_name + "[2]";
411 break;
412 case Operation::Dot3_RGB:
413 case Operation::Dot3_RGBA:
414 out += "vec3(dot(" + variable_name + "[0] - vec3(0.5), " + variable_name +
415 "[1] - vec3(0.5)) * 4.0)";
416 break;
417 default:
418 out += "vec3(0.0)";
419 LOG_CRITICAL(Render_OpenGL, "Unknown color combiner operation: %u", operation);
420 break;
421 }
422 out += ", vec3(0.0), vec3(1.0))"; // Clamp result to 0.0, 1.0
423}
424
425/// Writes the combiner function for the alpha component for the specified TEV stage operation
426static void AppendAlphaCombiner(std::string& out, TevStageConfig::Operation operation,
427 const std::string& variable_name) {
428 out += "clamp(";
429 using Operation = TevStageConfig::Operation;
430 switch (operation) {
431 case Operation::Replace:
432 out += variable_name + "[0]";
433 break;
434 case Operation::Modulate:
435 out += variable_name + "[0] * " + variable_name + "[1]";
436 break;
437 case Operation::Add:
438 out += variable_name + "[0] + " + variable_name + "[1]";
439 break;
440 case Operation::AddSigned:
441 out += variable_name + "[0] + " + variable_name + "[1] - 0.5";
442 break;
443 case Operation::Lerp:
444 out += variable_name + "[0] * " + variable_name + "[2] + " + variable_name +
445 "[1] * (1.0 - " + variable_name + "[2])";
446 break;
447 case Operation::Subtract:
448 out += variable_name + "[0] - " + variable_name + "[1]";
449 break;
450 case Operation::MultiplyThenAdd:
451 out += variable_name + "[0] * " + variable_name + "[1] + " + variable_name + "[2]";
452 break;
453 case Operation::AddThenMultiply:
454 out += "min(" + variable_name + "[0] + " + variable_name + "[1], 1.0) * " + variable_name +
455 "[2]";
456 break;
457 default:
458 out += "0.0";
459 LOG_CRITICAL(Render_OpenGL, "Unknown alpha combiner operation: %u", operation);
460 break;
461 }
462 out += ", 0.0, 1.0)";
463}
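// Illustrative only (not part of the original file): what the two combiner helpers above emit
// for a Modulate operation with index 0. The helper below merely exercises them and documents
// the resulting strings.
static void CombinerOutputExample() {
    std::string color_example;
    AppendColorCombiner(color_example, TevStageConfig::Operation::Modulate, "color_results_0");
    // color_example == "clamp(color_results_0[0] * color_results_0[1], vec3(0.0), vec3(1.0))"

    std::string alpha_example;
    AppendAlphaCombiner(alpha_example, TevStageConfig::Operation::Modulate, "alpha_results_0");
    // alpha_example == "clamp(alpha_results_0[0] * alpha_results_0[1], 0.0, 1.0)"
}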
464
465/// Writes the if-statement condition used to discard the fragment during alpha testing; it is the negation of the pass condition, which is why Never emits "true" and Always emits "false"
466static void AppendAlphaTestCondition(std::string& out, FramebufferRegs::CompareFunc func) {
467 using CompareFunc = FramebufferRegs::CompareFunc;
468 switch (func) {
469 case CompareFunc::Never:
470 out += "true";
471 break;
472 case CompareFunc::Always:
473 out += "false";
474 break;
475 case CompareFunc::Equal:
476 case CompareFunc::NotEqual:
477 case CompareFunc::LessThan:
478 case CompareFunc::LessThanOrEqual:
479 case CompareFunc::GreaterThan:
480 case CompareFunc::GreaterThanOrEqual: {
481 static const char* op[] = {"!=", "==", ">=", ">", "<=", "<"};
482 unsigned index = (unsigned)func - (unsigned)CompareFunc::Equal;
483 out += "int(last_tex_env_out.a * 255.0) " + std::string(op[index]) + " alphatest_ref";
484 break;
485 }
486
487 default:
488 out += "false";
489 LOG_CRITICAL(Render_OpenGL, "Unknown alpha test condition %u", func);
490 break;
491 }
492}
493
494/// Writes the code to emulate the specified TEV stage
495static void WriteTevStage(std::string& out, const PicaShaderConfig& config, unsigned index) {
496 const auto stage =
497 static_cast<const TexturingRegs::TevStageConfig>(config.state.tev_stages[index]);
498 if (!IsPassThroughTevStage(stage)) {
499 std::string index_name = std::to_string(index);
500
501 out += "vec3 color_results_" + index_name + "[3] = vec3[3](";
502 AppendColorModifier(out, config, stage.color_modifier1, stage.color_source1, index_name);
503 out += ", ";
504 AppendColorModifier(out, config, stage.color_modifier2, stage.color_source2, index_name);
505 out += ", ";
506 AppendColorModifier(out, config, stage.color_modifier3, stage.color_source3, index_name);
507 out += ");\n";
508
509 out += "vec3 color_output_" + index_name + " = ";
510 AppendColorCombiner(out, stage.color_op, "color_results_" + index_name);
511 out += ";\n";
512
513 if (stage.color_op == TevStageConfig::Operation::Dot3_RGBA) {
514            // The result of the Dot3_RGBA operation is also written to the alpha component
515 out += "float alpha_output_" + index_name + " = color_output_" + index_name + "[0];\n";
516 } else {
517 out += "float alpha_results_" + index_name + "[3] = float[3](";
518 AppendAlphaModifier(out, config, stage.alpha_modifier1, stage.alpha_source1,
519 index_name);
520 out += ", ";
521 AppendAlphaModifier(out, config, stage.alpha_modifier2, stage.alpha_source2,
522 index_name);
523 out += ", ";
524 AppendAlphaModifier(out, config, stage.alpha_modifier3, stage.alpha_source3,
525 index_name);
526 out += ");\n";
527
528 out += "float alpha_output_" + index_name + " = ";
529 AppendAlphaCombiner(out, stage.alpha_op, "alpha_results_" + index_name);
530 out += ";\n";
531 }
532
533 out += "last_tex_env_out = vec4("
534 "clamp(color_output_" +
535 index_name + " * " + std::to_string(stage.GetColorMultiplier()) +
536 ".0, vec3(0.0), vec3(1.0)),"
537 "clamp(alpha_output_" +
538 index_name + " * " + std::to_string(stage.GetAlphaMultiplier()) +
539 ".0, 0.0, 1.0));\n";
540 }
541
542 out += "combiner_buffer = next_combiner_buffer;\n";
543
544 if (config.TevStageUpdatesCombinerBufferColor(index))
545 out += "next_combiner_buffer.rgb = last_tex_env_out.rgb;\n";
546
547 if (config.TevStageUpdatesCombinerBufferAlpha(index))
548 out += "next_combiner_buffer.a = last_tex_env_out.a;\n";
549}
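// For reference (illustrative, not part of the original file): assuming stage 0 modulates
// Texture0 with the primary vertex color, uses the constant color as the unused third source,
// has both multipliers set to 1, and does not update the combiner buffer, WriteTevStage() emits:
//   vec3 color_results_0[3] = vec3[3](texture(tex[0], texcoord[0]).rgb, primary_color.rgb, const_color[0].rgb);
//   vec3 color_output_0 = clamp(color_results_0[0] * color_results_0[1], vec3(0.0), vec3(1.0));
//   float alpha_results_0[3] = float[3](texture(tex[0], texcoord[0]).a, primary_color.a, const_color[0].a);
//   float alpha_output_0 = clamp(alpha_results_0[0] * alpha_results_0[1], 0.0, 1.0);
//   last_tex_env_out = vec4(clamp(color_output_0 * 1.0, vec3(0.0), vec3(1.0)),clamp(alpha_output_0 * 1.0, 0.0, 1.0));
//   combiner_buffer = next_combiner_buffer;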
550
551/// Writes the code to emulate fragment lighting
552static void WriteLighting(std::string& out, const PicaShaderConfig& config) {
553 const auto& lighting = config.state.lighting;
554
555 // Define lighting globals
556 out += "vec4 diffuse_sum = vec4(0.0, 0.0, 0.0, 1.0);\n"
557 "vec4 specular_sum = vec4(0.0, 0.0, 0.0, 1.0);\n"
558 "vec3 light_vector = vec3(0.0);\n"
559 "vec3 refl_value = vec3(0.0);\n"
560 "vec3 spot_dir = vec3(0.0);\n"
561 "vec3 half_vector = vec3(0.0);\n"
562 "float geo_factor = 1.0;\n";
563
564 // Compute fragment normals and tangents
565 auto Perturbation = [&]() {
566 return "2.0 * (" + SampleTexture(config, lighting.bump_selector) + ").rgb - 1.0";
567 };
568 if (lighting.bump_mode == LightingRegs::LightingBumpMode::NormalMap) {
569 // Bump mapping is enabled using a normal map
570 out += "vec3 surface_normal = " + Perturbation() + ";\n";
571
572        // Recompute the Z-component of the perturbation if 'renorm' is enabled; this provides
573        // a higher-precision result
574 if (lighting.bump_renorm) {
575 std::string val =
576 "(1.0 - (surface_normal.x*surface_normal.x + surface_normal.y*surface_normal.y))";
577 out += "surface_normal.z = sqrt(max(" + val + ", 0.0));\n";
578 }
579
580 // The tangent vector is not perturbed by the normal map and is just a unit vector.
581 out += "vec3 surface_tangent = vec3(1.0, 0.0, 0.0);\n";
582 } else if (lighting.bump_mode == LightingRegs::LightingBumpMode::TangentMap) {
583 // Bump mapping is enabled using a tangent map
584 out += "vec3 surface_tangent = " + Perturbation() + ";\n";
585        // Mathematically, recomputing the Z-component of the tangent vector does not affect the
586        // relevant computation below, which is also confirmed on 3DS, so we don't bother
587        // recomputing it here even if 'renorm' is enabled.
588
589 // The normal vector is not perturbed by the tangent map and is just a unit vector.
590 out += "vec3 surface_normal = vec3(0.0, 0.0, 1.0);\n";
591 } else {
592 // No bump mapping - surface local normal and tangent are just unit vectors
593 out += "vec3 surface_normal = vec3(0.0, 0.0, 1.0);\n";
594 out += "vec3 surface_tangent = vec3(1.0, 0.0, 0.0);\n";
595 }
596
597 // Rotate the surface-local normal by the interpolated normal quaternion to convert it to
598 // eyespace.
599 out += "vec4 normalized_normquat = normalize(normquat);\n";
600 out += "vec3 normal = quaternion_rotate(normalized_normquat, surface_normal);\n";
601 out += "vec3 tangent = quaternion_rotate(normalized_normquat, surface_tangent);\n";
602
603 // Samples the specified lookup table for specular lighting
604 auto GetLutValue = [&lighting](LightingRegs::LightingSampler sampler, unsigned light_num,
605 LightingRegs::LightingLutInput input, bool abs) {
606 std::string index;
607 switch (input) {
608 case LightingRegs::LightingLutInput::NH:
609 index = "dot(normal, normalize(half_vector))";
610 break;
611
612 case LightingRegs::LightingLutInput::VH:
613 index = std::string("dot(normalize(view), normalize(half_vector))");
614 break;
615
616 case LightingRegs::LightingLutInput::NV:
617 index = std::string("dot(normal, normalize(view))");
618 break;
619
620 case LightingRegs::LightingLutInput::LN:
621 index = std::string("dot(light_vector, normal)");
622 break;
623
624 case LightingRegs::LightingLutInput::SP:
625 index = std::string("dot(light_vector, spot_dir)");
626 break;
627
628 case LightingRegs::LightingLutInput::CP:
629 // CP input is only available with configuration 7
630 if (lighting.config == LightingRegs::LightingConfig::Config7) {
631                // Note: even when a normal map modifies the normal vector (so it is no longer
632                // the normal of the tangent plane), the half-angle vector is still projected
633                // using the modified normal vector.
634 std::string half_angle_proj =
635 "normalize(half_vector) - normal * dot(normal, normalize(half_vector))";
636                // Note: the half-angle vector projection is confirmed to not be normalized before
637                // the dot product, so the result is in fact not cos(phi) as the name suggests.
638 index = "dot(" + half_angle_proj + ", tangent)";
639 } else {
640 index = "0.0";
641 }
642 break;
643
644 default:
645 LOG_CRITICAL(HW_GPU, "Unknown lighting LUT input %d\n", (int)input);
646 UNIMPLEMENTED();
647 index = "0.0";
648 break;
649 }
650
651 std::string sampler_string = std::to_string(static_cast<unsigned>(sampler));
652
653 if (abs) {
654 // LUT index is in the range of (0.0, 1.0)
655 index = lighting.light[light_num].two_sided_diffuse ? "abs(" + index + ")"
656 : "max(" + index + ", 0.0)";
657 return "LookupLightingLUTUnsigned(" + sampler_string + ", " + index + ")";
658 } else {
659 // LUT index is in the range of (-1.0, 1.0)
660 return "LookupLightingLUTSigned(" + sampler_string + ", " + index + ")";
661 }
662
663 };
664
665 // Write the code to emulate each enabled light
666 for (unsigned light_index = 0; light_index < lighting.src_num; ++light_index) {
667 const auto& light_config = lighting.light[light_index];
668 std::string light_src = "light_src[" + std::to_string(light_config.num) + "]";
669
670 // Compute light vector (directional or positional)
671 if (light_config.directional)
672 out += "light_vector = normalize(" + light_src + ".position);\n";
673 else
674 out += "light_vector = normalize(" + light_src + ".position + view);\n";
675
676 out += "spot_dir = " + light_src + ".spot_direction;\n";
677 out += "half_vector = normalize(view) + light_vector;\n";
678
679        // Compute the dot product of light_vector and normal, adjusted depending on whether
680        // lighting is one-sided or two-sided
681 std::string dot_product = light_config.two_sided_diffuse
682 ? "abs(dot(light_vector, normal))"
683 : "max(dot(light_vector, normal), 0.0)";
684
685 // If enabled, compute spot light attenuation value
686 std::string spot_atten = "1.0";
687 if (light_config.spot_atten_enable &&
688 LightingRegs::IsLightingSamplerSupported(
689 lighting.config, LightingRegs::LightingSampler::SpotlightAttenuation)) {
690 std::string value =
691 GetLutValue(LightingRegs::SpotlightAttenuationSampler(light_config.num),
692 light_config.num, lighting.lut_sp.type, lighting.lut_sp.abs_input);
693 spot_atten = "(" + std::to_string(lighting.lut_sp.scale) + " * " + value + ")";
694 }
695
696 // If enabled, compute distance attenuation value
697 std::string dist_atten = "1.0";
698 if (light_config.dist_atten_enable) {
699 std::string index = "clamp(" + light_src + ".dist_atten_scale * length(-view - " +
700 light_src + ".position) + " + light_src +
701 ".dist_atten_bias, 0.0, 1.0)";
702 auto sampler = LightingRegs::DistanceAttenuationSampler(light_config.num);
703 dist_atten = "LookupLightingLUTUnsigned(" +
704 std::to_string(static_cast<unsigned>(sampler)) + "," + index + ")";
705 }
706
707        // If enabled, zero out the specular contribution when the diffuse lighting result is negative
708 std::string clamp_highlights =
709 lighting.clamp_highlights ? "(dot(light_vector, normal) <= 0.0 ? 0.0 : 1.0)" : "1.0";
710
711 if (light_config.geometric_factor_0 || light_config.geometric_factor_1) {
712 out += "geo_factor = dot(half_vector, half_vector);\n"
713 "geo_factor = geo_factor == 0.0 ? 0.0 : min(" +
714 dot_product + " / geo_factor, 1.0);\n";
715 }
716
717 // Specular 0 component
718 std::string d0_lut_value = "1.0";
719 if (lighting.lut_d0.enable &&
720 LightingRegs::IsLightingSamplerSupported(
721 lighting.config, LightingRegs::LightingSampler::Distribution0)) {
722 // Lookup specular "distribution 0" LUT value
723 std::string value =
724 GetLutValue(LightingRegs::LightingSampler::Distribution0, light_config.num,
725 lighting.lut_d0.type, lighting.lut_d0.abs_input);
726 d0_lut_value = "(" + std::to_string(lighting.lut_d0.scale) + " * " + value + ")";
727 }
728 std::string specular_0 = "(" + d0_lut_value + " * " + light_src + ".specular_0)";
729 if (light_config.geometric_factor_0) {
730 specular_0 = "(" + specular_0 + " * geo_factor)";
731 }
732
733        // If enabled, look up the ReflectRed value; otherwise 1.0 is used
734 if (lighting.lut_rr.enable &&
735 LightingRegs::IsLightingSamplerSupported(lighting.config,
736 LightingRegs::LightingSampler::ReflectRed)) {
737 std::string value =
738 GetLutValue(LightingRegs::LightingSampler::ReflectRed, light_config.num,
739 lighting.lut_rr.type, lighting.lut_rr.abs_input);
740 value = "(" + std::to_string(lighting.lut_rr.scale) + " * " + value + ")";
741 out += "refl_value.r = " + value + ";\n";
742 } else {
743 out += "refl_value.r = 1.0;\n";
744 }
745
746        // If enabled, look up the ReflectGreen value; otherwise the ReflectRed value is used
747 if (lighting.lut_rg.enable &&
748 LightingRegs::IsLightingSamplerSupported(lighting.config,
749 LightingRegs::LightingSampler::ReflectGreen)) {
750 std::string value =
751 GetLutValue(LightingRegs::LightingSampler::ReflectGreen, light_config.num,
752 lighting.lut_rg.type, lighting.lut_rg.abs_input);
753 value = "(" + std::to_string(lighting.lut_rg.scale) + " * " + value + ")";
754 out += "refl_value.g = " + value + ";\n";
755 } else {
756 out += "refl_value.g = refl_value.r;\n";
757 }
758
759        // If enabled, look up the ReflectBlue value; otherwise the ReflectRed value is used
760 if (lighting.lut_rb.enable &&
761 LightingRegs::IsLightingSamplerSupported(lighting.config,
762 LightingRegs::LightingSampler::ReflectBlue)) {
763 std::string value =
764 GetLutValue(LightingRegs::LightingSampler::ReflectBlue, light_config.num,
765 lighting.lut_rb.type, lighting.lut_rb.abs_input);
766 value = "(" + std::to_string(lighting.lut_rb.scale) + " * " + value + ")";
767 out += "refl_value.b = " + value + ";\n";
768 } else {
769 out += "refl_value.b = refl_value.r;\n";
770 }
771
772 // Specular 1 component
773 std::string d1_lut_value = "1.0";
774 if (lighting.lut_d1.enable &&
775 LightingRegs::IsLightingSamplerSupported(
776 lighting.config, LightingRegs::LightingSampler::Distribution1)) {
777 // Lookup specular "distribution 1" LUT value
778 std::string value =
779 GetLutValue(LightingRegs::LightingSampler::Distribution1, light_config.num,
780 lighting.lut_d1.type, lighting.lut_d1.abs_input);
781 d1_lut_value = "(" + std::to_string(lighting.lut_d1.scale) + " * " + value + ")";
782 }
783 std::string specular_1 =
784 "(" + d1_lut_value + " * refl_value * " + light_src + ".specular_1)";
785 if (light_config.geometric_factor_1) {
786 specular_1 = "(" + specular_1 + " * geo_factor)";
787 }
788
789 // Fresnel
790        // Note: only the last enabled light slot applies the Fresnel factor
791 if (light_index == lighting.src_num - 1 && lighting.lut_fr.enable &&
792 LightingRegs::IsLightingSamplerSupported(lighting.config,
793 LightingRegs::LightingSampler::Fresnel)) {
794 // Lookup fresnel LUT value
795 std::string value =
796 GetLutValue(LightingRegs::LightingSampler::Fresnel, light_config.num,
797 lighting.lut_fr.type, lighting.lut_fr.abs_input);
798 value = "(" + std::to_string(lighting.lut_fr.scale) + " * " + value + ")";
799
800 // Enabled for diffuse lighting alpha component
801 if (lighting.fresnel_selector == LightingRegs::LightingFresnelSelector::PrimaryAlpha ||
802 lighting.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
803 out += "diffuse_sum.a = " + value + ";\n";
804 }
805
806 // Enabled for the specular lighting alpha component
807 if (lighting.fresnel_selector ==
808 LightingRegs::LightingFresnelSelector::SecondaryAlpha ||
809 lighting.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
810 out += "specular_sum.a = " + value + ";\n";
811 }
812 }
813
814        // Accumulate the primary fragment color (diffuse lighting) contribution
815 out += "diffuse_sum.rgb += ((" + light_src + ".diffuse * " + dot_product + ") + " +
816 light_src + ".ambient) * " + dist_atten + " * " + spot_atten + ";\n";
817
818        // Accumulate the secondary fragment color (specular lighting) contribution
819 out += "specular_sum.rgb += (" + specular_0 + " + " + specular_1 + ") * " +
820 clamp_highlights + " * " + dist_atten + " * " + spot_atten + ";\n";
821 }
822
823 // Sum final lighting result
824 out += "diffuse_sum.rgb += lighting_global_ambient;\n";
825 out += "primary_fragment_color = clamp(diffuse_sum, vec4(0.0), vec4(1.0));\n";
826 out += "secondary_fragment_color = clamp(specular_sum, vec4(0.0), vec4(1.0));\n";
827}
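// In summary (illustrative notation, not part of the original file): for each enabled light the
// loop above accumulates
//   diffuse_sum.rgb  += (diffuse * <N.L> + ambient) * dist_atten * spot_atten
//   specular_sum.rgb += (specular_0 + specular_1) * clamp_highlights * dist_atten * spot_atten
// where <N.L> is dot(light_vector, normal), either clamped to zero or taken as an absolute value
// depending on two_sided_diffuse; the global ambient term and the final [0, 1] clamp are applied
// once after the loop.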
828
829using ProcTexClamp = TexturingRegs::ProcTexClamp;
830using ProcTexShift = TexturingRegs::ProcTexShift;
831using ProcTexCombiner = TexturingRegs::ProcTexCombiner;
832using ProcTexFilter = TexturingRegs::ProcTexFilter;
833
834void AppendProcTexShiftOffset(std::string& out, const std::string& v, ProcTexShift mode,
835 ProcTexClamp clamp_mode) {
836 std::string offset = (clamp_mode == ProcTexClamp::MirroredRepeat) ? "1.0" : "0.5";
837 switch (mode) {
838 case ProcTexShift::None:
839 out += "0";
840 break;
841 case ProcTexShift::Odd:
842 out += offset + " * ((int(" + v + ") / 2) % 2)";
843 break;
844 case ProcTexShift::Even:
845 out += offset + " * (((int(" + v + ") + 1) / 2) % 2)";
846 break;
847 default:
848 LOG_CRITICAL(HW_GPU, "Unknown shift mode %u", static_cast<u32>(mode));
849 out += "0";
850 break;
851 }
852}
853
854void AppendProcTexClamp(std::string& out, const std::string& var, ProcTexClamp mode) {
855 switch (mode) {
856 case ProcTexClamp::ToZero:
857 out += var + " = " + var + " > 1.0 ? 0 : " + var + ";\n";
858 break;
859 case ProcTexClamp::ToEdge:
860 out += var + " = " + "min(" + var + ", 1.0);\n";
861 break;
862 case ProcTexClamp::SymmetricalRepeat:
863 out += var + " = " + "fract(" + var + ");\n";
864 break;
865 case ProcTexClamp::MirroredRepeat: {
866 out +=
867 var + " = int(" + var + ") % 2 == 0 ? fract(" + var + ") : 1.0 - fract(" + var + ");\n";
868 break;
869 }
870 case ProcTexClamp::Pulse:
871 out += var + " = " + var + " > 0.5 ? 1.0 : 0.0;\n";
872 break;
873 default:
874 LOG_CRITICAL(HW_GPU, "Unknown clamp mode %u", static_cast<u32>(mode));
875 out += var + " = " + "min(" + var + ", 1.0);\n";
876 break;
877 }
878}
879
880void AppendProcTexCombineAndMap(std::string& out, ProcTexCombiner combiner,
881 const std::string& map_lut) {
882 std::string combined;
883 switch (combiner) {
884 case ProcTexCombiner::U:
885 combined = "u";
886 break;
887 case ProcTexCombiner::U2:
888 combined = "(u * u)";
889 break;
890 case TexturingRegs::ProcTexCombiner::V:
891 combined = "v";
892 break;
893 case TexturingRegs::ProcTexCombiner::V2:
894 combined = "(v * v)";
895 break;
896 case TexturingRegs::ProcTexCombiner::Add:
897 combined = "((u + v) * 0.5)";
898 break;
899 case TexturingRegs::ProcTexCombiner::Add2:
900 combined = "((u * u + v * v) * 0.5)";
901 break;
902 case TexturingRegs::ProcTexCombiner::SqrtAdd2:
903 combined = "min(sqrt(u * u + v * v), 1.0)";
904 break;
905 case TexturingRegs::ProcTexCombiner::Min:
906 combined = "min(u, v)";
907 break;
908 case TexturingRegs::ProcTexCombiner::Max:
909 combined = "max(u, v)";
910 break;
911 case TexturingRegs::ProcTexCombiner::RMax:
912 combined = "min(((u + v) * 0.5 + sqrt(u * u + v * v)) * 0.5, 1.0)";
913 break;
914 default:
915 LOG_CRITICAL(HW_GPU, "Unknown combiner %u", static_cast<u32>(combiner));
916 combined = "0.0";
917 break;
918 }
919 out += "ProcTexLookupLUT(" + map_lut + ", " + combined + ")";
920}
921
922void AppendProcTexSampler(std::string& out, const PicaShaderConfig& config) {
923    // LUT sampling utility
924 // For NoiseLUT/ColorMap/AlphaMap, coord=0.0 is lut[0], coord=127.0/128.0 is lut[127] and
925 // coord=1.0 is lut[127]+lut_diff[127]. For other indices, the result is interpolated using
926 // value entries and difference entries.
927 out += R"(
928float ProcTexLookupLUT(samplerBuffer lut, float coord) {
929 coord *= 128;
930 float index_i = clamp(floor(coord), 0.0, 127.0);
931 float index_f = coord - index_i; // fract() cannot be used here because 128.0 needs to be
932 // extracted as index_i = 127.0 and index_f = 1.0
933 vec2 entry = texelFetch(lut, int(index_i)).rg;
934 return clamp(entry.r + entry.g * index_f, 0.0, 1.0);
935}
936 )";
937
938 // Noise utility
939 if (config.state.proctex.noise_enable) {
940 // See swrasterizer/proctex.cpp for more information about these functions
941 out += R"(
942int ProcTexNoiseRand1D(int v) {
943 const int table[] = int[](0,4,10,8,4,9,7,12,5,15,13,14,11,15,2,11);
944 return ((v % 9 + 2) * 3 & 0xF) ^ table[(v / 9) & 0xF];
945}
946
947float ProcTexNoiseRand2D(vec2 point) {
948 const int table[] = int[](10,2,15,8,0,7,4,5,5,13,2,6,13,9,3,14);
949 int u2 = ProcTexNoiseRand1D(int(point.x));
950 int v2 = ProcTexNoiseRand1D(int(point.y));
951 v2 += ((u2 & 3) == 1) ? 4 : 0;
952 v2 ^= (u2 & 1) * 6;
953 v2 += 10 + u2;
954 v2 &= 0xF;
955 v2 ^= table[u2];
956    return -1.0 + float(v2) * 2.0 / 15.0;
957}
958
959float ProcTexNoiseCoef(vec2 x) {
960 vec2 grid = 9.0 * proctex_noise_f * abs(x + proctex_noise_p);
961 vec2 point = floor(grid);
962 vec2 frac = grid - point;
963
964 float g0 = ProcTexNoiseRand2D(point) * (frac.x + frac.y);
965 float g1 = ProcTexNoiseRand2D(point + vec2(1.0, 0.0)) * (frac.x + frac.y - 1.0);
966 float g2 = ProcTexNoiseRand2D(point + vec2(0.0, 1.0)) * (frac.x + frac.y - 1.0);
967 float g3 = ProcTexNoiseRand2D(point + vec2(1.0, 1.0)) * (frac.x + frac.y - 2.0);
968
969 float x_noise = ProcTexLookupLUT(proctex_noise_lut, frac.x);
970 float y_noise = ProcTexLookupLUT(proctex_noise_lut, frac.y);
971 float x0 = mix(g0, g1, x_noise);
972 float x1 = mix(g2, g3, x_noise);
973 return mix(x0, x1, y_noise);
974}
975 )";
976 }
977
978 out += "vec4 ProcTex() {\n";
979 out += "vec2 uv = abs(texcoord[" + std::to_string(config.state.proctex.coord) + "]);\n";
980
981 // Get shift offset before noise generation
982 out += "float u_shift = ";
983 AppendProcTexShiftOffset(out, "uv.y", config.state.proctex.u_shift,
984 config.state.proctex.u_clamp);
985 out += ";\n";
986 out += "float v_shift = ";
987 AppendProcTexShiftOffset(out, "uv.x", config.state.proctex.v_shift,
988 config.state.proctex.v_clamp);
989 out += ";\n";
990
991 // Generate noise
992 if (config.state.proctex.noise_enable) {
993 out += "uv += proctex_noise_a * ProcTexNoiseCoef(uv);\n";
994 out += "uv = abs(uv);\n";
995 }
996
997 // Shift
998 out += "float u = uv.x + u_shift;\n";
999 out += "float v = uv.y + v_shift;\n";
1000
1001 // Clamp
1002 AppendProcTexClamp(out, "u", config.state.proctex.u_clamp);
1003 AppendProcTexClamp(out, "v", config.state.proctex.v_clamp);
1004
1005 // Combine and map
1006 out += "float lut_coord = ";
1007 AppendProcTexCombineAndMap(out, config.state.proctex.color_combiner, "proctex_color_map");
1008 out += ";\n";
1009
1010 // Look up color
1011 // For the color lut, coord=0.0 is lut[offset] and coord=1.0 is lut[offset+width-1]
1012 out += "lut_coord *= " + std::to_string(config.state.proctex.lut_width - 1) + ";\n";
1013 // TODO(wwylele): implement mipmap
1014 switch (config.state.proctex.lut_filter) {
1015 case ProcTexFilter::Linear:
1016 case ProcTexFilter::LinearMipmapLinear:
1017 case ProcTexFilter::LinearMipmapNearest:
1018 out += "int lut_index_i = int(lut_coord) + " +
1019 std::to_string(config.state.proctex.lut_offset) + ";\n";
1020 out += "float lut_index_f = fract(lut_coord);\n";
1021 out += "vec4 final_color = texelFetch(proctex_lut, lut_index_i) + lut_index_f * "
1022 "texelFetch(proctex_diff_lut, lut_index_i);\n";
1023 break;
1024 case ProcTexFilter::Nearest:
1025 case ProcTexFilter::NearestMipmapLinear:
1026 case ProcTexFilter::NearestMipmapNearest:
1027 out += "lut_coord += " + std::to_string(config.state.proctex.lut_offset) + ";\n";
1028 out += "vec4 final_color = texelFetch(proctex_lut, int(round(lut_coord)));\n";
1029 break;
1030 }
1031
1032 if (config.state.proctex.separate_alpha) {
1033        // Note: in separate alpha mode, the alpha channel skips the color LUT lookup stage and
1034        // uses the output of CombineAndMap directly instead.
1035 out += "float final_alpha = ";
1036 AppendProcTexCombineAndMap(out, config.state.proctex.alpha_combiner, "proctex_alpha_map");
1037 out += ";\n";
1038 out += "return vec4(final_color.xyz, final_alpha);\n}\n";
1039 } else {
1040 out += "return final_color;\n}\n";
1041 }
1042}
1043
1044std::string GenerateFragmentShader(const PicaShaderConfig& config) {
1045 const auto& state = config.state;
1046
1047 std::string out = R"(
1048#version 330 core
1049
1050in vec4 primary_color;
1051in vec2 texcoord[3];
1052in float texcoord0_w;
1053in vec4 normquat;
1054in vec3 view;
1055
1056in vec4 gl_FragCoord;
1057
1058out vec4 color;
1059
1060uniform sampler2D tex[3];
1061uniform samplerBuffer lighting_lut;
1062uniform samplerBuffer fog_lut;
1063uniform samplerBuffer proctex_noise_lut;
1064uniform samplerBuffer proctex_color_map;
1065uniform samplerBuffer proctex_alpha_map;
1066uniform samplerBuffer proctex_lut;
1067uniform samplerBuffer proctex_diff_lut;
1068)";
1069
1070 out += UniformBlockDef;
1071
1072 out += R"(
1073// Rotate the vector v by the quaternion q
1074vec3 quaternion_rotate(vec4 q, vec3 v) {
1075 return v + 2.0 * cross(q.xyz, cross(q.xyz, v) + q.w * v);
1076}
1077
1078float LookupLightingLUT(int lut_index, int index, float delta) {
1079 vec2 entry = texelFetch(lighting_lut, lut_index * 256 + index).rg;
1080 return entry.r + entry.g * delta;
1081}
1082
1083float LookupLightingLUTUnsigned(int lut_index, float pos) {
1084 int index = clamp(int(pos * 256.0), 0, 255);
1085 float delta = pos * 256.0 - index;
1086 return LookupLightingLUT(lut_index, index, delta);
1087}
1088
1089float LookupLightingLUTSigned(int lut_index, float pos) {
1090 int index = clamp(int(pos * 128.0), -128, 127);
1091 float delta = pos * 128.0 - index;
1092 if (index < 0) index += 256;
1093 return LookupLightingLUT(lut_index, index, delta);
1094}
1095
1096)";
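    // Worked example for the signed lookup above (illustrative): pos = -0.25 gives
    // index = int(-0.25 * 128.0) = -32 and delta = 0.0; the negative index then wraps to
    // 256 - 32 = 224, i.e. the signed half of each 256-entry LUT occupies entries 128-255.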
1097
1098 if (config.state.proctex.enable)
1099 AppendProcTexSampler(out, config);
1100
1101 out += R"(
1102void main() {
1103vec4 primary_fragment_color = vec4(0.0);
1104vec4 secondary_fragment_color = vec4(0.0);
1105)";
1106
1107 // Do not do any sort of processing if it's obvious we're not going to pass the alpha test
1108 if (state.alpha_test_func == FramebufferRegs::CompareFunc::Never) {
1109 out += "discard; }";
1110 return out;
1111 }
1112
1113 // Append the scissor test
1114 if (state.scissor_test_mode != RasterizerRegs::ScissorMode::Disabled) {
1115 out += "if (";
1116 // Negate the condition if we have to keep only the pixels outside the scissor box
1117 if (state.scissor_test_mode == RasterizerRegs::ScissorMode::Include)
1118 out += "!";
1119 out += "(gl_FragCoord.x >= scissor_x1 && "
1120 "gl_FragCoord.y >= scissor_y1 && "
1121 "gl_FragCoord.x < scissor_x2 && "
1122 "gl_FragCoord.y < scissor_y2)) discard;\n";
1123 }
1124
1125    // After the perspective divide, OpenGL transforms z_over_w from [-1, 1] to [near, far]. Here
1126    // we use the default near = 0 and far = 1, undo that transformation to recover the original
1127    // z_over_w, and then apply our own transformation according to the PICA specification.
1128 out += "float z_over_w = 2.0 * gl_FragCoord.z - 1.0;\n";
1129 out += "float depth = z_over_w * depth_scale + depth_offset;\n";
1130 if (state.depthmap_enable == RasterizerRegs::DepthBuffering::WBuffering) {
1131 out += "depth /= gl_FragCoord.w;\n";
1132 }
1133
1134 if (state.lighting.enable)
1135 WriteLighting(out, config);
1136
1137 out += "vec4 combiner_buffer = vec4(0.0);\n";
1138 out += "vec4 next_combiner_buffer = tev_combiner_buffer_color;\n";
1139 out += "vec4 last_tex_env_out = vec4(0.0);\n";
1140
1141 for (size_t index = 0; index < state.tev_stages.size(); ++index)
1142 WriteTevStage(out, config, (unsigned)index);
1143
1144 if (state.alpha_test_func != FramebufferRegs::CompareFunc::Always) {
1145 out += "if (";
1146 AppendAlphaTestCondition(out, state.alpha_test_func);
1147 out += ") discard;\n";
1148 }
1149
1150 // Append fog combiner
1151 if (state.fog_mode == TexturingRegs::FogMode::Fog) {
1152 // Get index into fog LUT
1153 if (state.fog_flip) {
1154 out += "float fog_index = (1.0 - depth) * 128.0;\n";
1155 } else {
1156 out += "float fog_index = depth * 128.0;\n";
1157 }
1158
1159 // Generate clamped fog factor from LUT for given fog index
1160 out += "float fog_i = clamp(floor(fog_index), 0.0, 127.0);\n";
1161 out += "float fog_f = fog_index - fog_i;\n";
1162 out += "vec2 fog_lut_entry = texelFetch(fog_lut, int(fog_i)).rg;\n";
1163 out += "float fog_factor = fog_lut_entry.r + fog_lut_entry.g * fog_f;\n";
1164 out += "fog_factor = clamp(fog_factor, 0.0, 1.0);\n";
1165
1166 // Blend the fog
1167 out += "last_tex_env_out.rgb = mix(fog_color.rgb, last_tex_env_out.rgb, fog_factor);\n";
1168 } else if (state.fog_mode == TexturingRegs::FogMode::Gas) {
1169 Core::Telemetry().AddField(Telemetry::FieldType::Session, "VideoCore_Pica_UseGasMode",
1170 true);
1171 LOG_CRITICAL(Render_OpenGL, "Unimplemented gas mode");
1172 UNIMPLEMENTED();
1173 }
1174
1175 out += "gl_FragDepth = depth;\n";
1176 out += "color = last_tex_env_out;\n";
1177
1178 out += "}";
1179
1180 return out;
1181}
1182
1183std::string GenerateVertexShader() {
1184 std::string out = "#version 330 core\n";
1185
1186 out += "layout(location = " + std::to_string((int)ATTRIBUTE_POSITION) +
1187 ") in vec4 vert_position;\n";
1188 out += "layout(location = " + std::to_string((int)ATTRIBUTE_COLOR) + ") in vec4 vert_color;\n";
1189 out += "layout(location = " + std::to_string((int)ATTRIBUTE_TEXCOORD0) +
1190 ") in vec2 vert_texcoord0;\n";
1191 out += "layout(location = " + std::to_string((int)ATTRIBUTE_TEXCOORD1) +
1192 ") in vec2 vert_texcoord1;\n";
1193 out += "layout(location = " + std::to_string((int)ATTRIBUTE_TEXCOORD2) +
1194 ") in vec2 vert_texcoord2;\n";
1195 out += "layout(location = " + std::to_string((int)ATTRIBUTE_TEXCOORD0_W) +
1196 ") in float vert_texcoord0_w;\n";
1197 out += "layout(location = " + std::to_string((int)ATTRIBUTE_NORMQUAT) +
1198 ") in vec4 vert_normquat;\n";
1199 out += "layout(location = " + std::to_string((int)ATTRIBUTE_VIEW) + ") in vec3 vert_view;\n";
1200
1201 out += R"(
1202out vec4 primary_color;
1203out vec2 texcoord[3];
1204out float texcoord0_w;
1205out vec4 normquat;
1206out vec3 view;
1207
1208)";
1209
1210 out += UniformBlockDef;
1211
1212 out += R"(
1213
1214void main() {
1215 primary_color = vert_color;
1216 texcoord[0] = vert_texcoord0;
1217 texcoord[1] = vert_texcoord1;
1218 texcoord[2] = vert_texcoord2;
1219 texcoord0_w = vert_texcoord0_w;
1220 normquat = vert_normquat;
1221 view = vert_view;
1222 gl_Position = vert_position;
1223 gl_ClipDistance[0] = -vert_position.z; // fixed PICA clipping plane z <= 0
1224 gl_ClipDistance[1] = dot(clip_coef, vert_position);
1225}
1226)";
1227
1228 return out;
1229}
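// Illustrative only (not part of the original file): a minimal sketch of compiling the two
// generated sources with raw OpenGL calls, assuming <glad/glad.h> and <string> are available.
// The real renderer wraps this in its own shader/program resource classes.
static GLuint CompileShaderPairExample(const std::string& vs_source, const std::string& fs_source) {
    const auto compile = [](GLenum type, const std::string& source) {
        GLuint shader = glCreateShader(type);
        const char* src = source.c_str();
        glShaderSource(shader, 1, &src, nullptr);
        glCompileShader(shader);
        return shader;
    };
    GLuint vs = compile(GL_VERTEX_SHADER, vs_source);
    GLuint fs = compile(GL_FRAGMENT_SHADER, fs_source);
    GLuint program = glCreateProgram();
    glAttachShader(program, vs);
    glAttachShader(program, fs);
    glLinkProgram(program);
    glDeleteShader(vs);
    glDeleteShader(fs);
    return program;
}
// Usage sketch: CompileShaderPairExample(GenerateVertexShader(), GenerateFragmentShader(config));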
1230
1231} // namespace GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h
deleted file mode 100644
index 2302ae453..000000000
--- a/src/video_core/renderer_opengl/gl_shader_gen.h
+++ /dev/null
@@ -1,162 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstring>
9#include <functional>
10#include <string>
11#include <type_traits>
12#include "video_core/regs.h"
13
14namespace GLShader {
15
16enum Attributes {
17 ATTRIBUTE_POSITION,
18 ATTRIBUTE_COLOR,
19 ATTRIBUTE_TEXCOORD0,
20 ATTRIBUTE_TEXCOORD1,
21 ATTRIBUTE_TEXCOORD2,
22 ATTRIBUTE_TEXCOORD0_W,
23 ATTRIBUTE_NORMQUAT,
24 ATTRIBUTE_VIEW,
25};
26
27/**
28 * This struct contains all state used to generate the GLSL shader program that emulates the current
29 * Pica register configuration. This struct is used as a cache key for generated GLSL shader
30 * programs. The functions in gl_shader_gen.cpp should retrieve state from this struct only, not by
31 * directly accessing Pica registers. This should reduce the risk of bugs in shader generation where
32 * Pica state is not being captured in the shader cache key, thereby resulting in (what should be)
33 * two separate shaders sharing the same key.
34 *
35 * We use a union because "implicitly-defined copy/move constructor for a union X copies the object
36 * representation of X." and "implicitly-defined copy assignment operator for a union X copies the
37 * object representation (3.9) of X." = Bytewise copy instead of memberwise copy. This is important
38 * because the padding bytes are included in the hash and comparison between objects.
39 */
40union PicaShaderConfig {
41
42 /// Construct a PicaShaderConfig with the given Pica register configuration.
43 static PicaShaderConfig BuildFromRegs(const Pica::Regs& regs);
44
45 bool TevStageUpdatesCombinerBufferColor(unsigned stage_index) const {
46 return (stage_index < 4) && (state.combiner_buffer_input & (1 << stage_index));
47 }
48
49 bool TevStageUpdatesCombinerBufferAlpha(unsigned stage_index) const {
50 return (stage_index < 4) && ((state.combiner_buffer_input >> 4) & (1 << stage_index));
51 }
52
53 bool operator==(const PicaShaderConfig& o) const {
54 return std::memcmp(&state, &o.state, sizeof(PicaShaderConfig::State)) == 0;
55 };
56
57 // NOTE: MSVC15 (Update 2) doesn't think `delete`'d constructors and operators are TC.
58 // This makes BitField not TC when used in a union or struct so we have to resort
59 // to this ugly hack.
60 // Once that bug is fixed we can use Pica::Regs::TevStageConfig here.
61 // Doesn't include const_color because we don't sync it, see comment in BuildFromRegs()
62 struct TevStageConfigRaw {
63 u32 sources_raw;
64 u32 modifiers_raw;
65 u32 ops_raw;
66 u32 scales_raw;
67 explicit operator Pica::TexturingRegs::TevStageConfig() const noexcept {
68 Pica::TexturingRegs::TevStageConfig stage;
69 stage.sources_raw = sources_raw;
70 stage.modifiers_raw = modifiers_raw;
71 stage.ops_raw = ops_raw;
72 stage.const_color = 0;
73 stage.scales_raw = scales_raw;
74 return stage;
75 }
76 };
77
78 struct State {
79 Pica::FramebufferRegs::CompareFunc alpha_test_func;
80 Pica::RasterizerRegs::ScissorMode scissor_test_mode;
81 Pica::TexturingRegs::TextureConfig::TextureType texture0_type;
82 bool texture2_use_coord1;
83 std::array<TevStageConfigRaw, 6> tev_stages;
84 u8 combiner_buffer_input;
85
86 Pica::RasterizerRegs::DepthBuffering depthmap_enable;
87 Pica::TexturingRegs::FogMode fog_mode;
88 bool fog_flip;
89
90 struct {
91 struct {
92 unsigned num;
93 bool directional;
94 bool two_sided_diffuse;
95 bool dist_atten_enable;
96 bool spot_atten_enable;
97 bool geometric_factor_0;
98 bool geometric_factor_1;
99 } light[8];
100
101 bool enable;
102 unsigned src_num;
103 Pica::LightingRegs::LightingBumpMode bump_mode;
104 unsigned bump_selector;
105 bool bump_renorm;
106 bool clamp_highlights;
107
108 Pica::LightingRegs::LightingConfig config;
109 Pica::LightingRegs::LightingFresnelSelector fresnel_selector;
110
111 struct {
112 bool enable;
113 bool abs_input;
114 Pica::LightingRegs::LightingLutInput type;
115 float scale;
116 } lut_d0, lut_d1, lut_sp, lut_fr, lut_rr, lut_rg, lut_rb;
117 } lighting;
118
119 struct {
120 bool enable;
121 u32 coord;
122 Pica::TexturingRegs::ProcTexClamp u_clamp, v_clamp;
123 Pica::TexturingRegs::ProcTexCombiner color_combiner, alpha_combiner;
124 bool separate_alpha;
125 bool noise_enable;
126 Pica::TexturingRegs::ProcTexShift u_shift, v_shift;
127 u32 lut_width;
128 u32 lut_offset;
129 Pica::TexturingRegs::ProcTexFilter lut_filter;
130 } proctex;
131
132 } state;
133};
134#if (__GNUC__ >= 5) || defined(__clang__) || defined(_MSC_VER)
135static_assert(std::is_trivially_copyable<PicaShaderConfig::State>::value,
136 "PicaShaderConfig::State must be trivially copyable");
137#endif
138
139/**
140 * Generates the GLSL vertex shader program source code for the current Pica state
141 * @returns String of the shader source code
142 */
143std::string GenerateVertexShader();
144
145/**
146 * Generates the GLSL fragment shader program source code for the current Pica state
147 * @param config ShaderCacheKey object generated for the current Pica state, used for the shader
148 * configuration (NOTE: Use state in this struct only, not the Pica registers!)
149 * @returns String of the shader source code
150 */
151std::string GenerateFragmentShader(const PicaShaderConfig& config);
152
153} // namespace GLShader
154
155namespace std {
156template <>
157struct hash<GLShader::PicaShaderConfig> {
158 size_t operator()(const GLShader::PicaShaderConfig& k) const {
159 return Common::ComputeHash64(&k.state, sizeof(GLShader::PicaShaderConfig::State));
160 }
161};
162} // namespace std
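// Illustrative only (not part of the original header): operator== and the std::hash
// specialization above make PicaShaderConfig usable directly as an unordered_map key. The
// cache and helper below are a hypothetical sketch and assume <unordered_map> is included.
static std::unordered_map<GLShader::PicaShaderConfig, std::string> fragment_source_cache;

inline const std::string& GetFragmentSource(const Pica::Regs& regs) {
    const auto config = GLShader::PicaShaderConfig::BuildFromRegs(regs);
    auto it = fragment_source_cache.find(config);
    if (it == fragment_source_cache.end())
        it = fragment_source_cache.emplace(config, GLShader::GenerateFragmentShader(config)).first;
    return it->second;
}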
diff --git a/src/video_core/renderer_opengl/pica_to_gl.h b/src/video_core/renderer_opengl/pica_to_gl.h
deleted file mode 100644
index c7fa1f873..000000000
--- a/src/video_core/renderer_opengl/pica_to_gl.h
+++ /dev/null
@@ -1,235 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include <glad/glad.h>
10#include "common/assert.h"
11#include "common/bit_field.h"
12#include "common/common_funcs.h"
13#include "common/common_types.h"
14#include "common/logging/log.h"
15#include "core/core.h"
16#include "video_core/regs_framebuffer.h"
17#include "video_core/regs_lighting.h"
18#include "video_core/regs_texturing.h"
19
20using GLvec2 = std::array<GLfloat, 2>;
21using GLvec3 = std::array<GLfloat, 3>;
22using GLvec4 = std::array<GLfloat, 4>;
23
24namespace PicaToGL {
25
26inline GLenum TextureFilterMode(Pica::TexturingRegs::TextureConfig::TextureFilter mode) {
27 static const GLenum filter_mode_table[] = {
28 GL_NEAREST, // TextureFilter::Nearest
29 GL_LINEAR, // TextureFilter::Linear
30 };
31
32 // Range check table for input
33 if (static_cast<size_t>(mode) >= ARRAY_SIZE(filter_mode_table)) {
34 LOG_CRITICAL(Render_OpenGL, "Unknown texture filtering mode %d", mode);
35 UNREACHABLE();
36
37 return GL_LINEAR;
38 }
39
40 GLenum gl_mode = filter_mode_table[mode];
41
42 // Check for dummy values indicating an unknown mode
43 if (gl_mode == 0) {
44 LOG_CRITICAL(Render_OpenGL, "Unknown texture filtering mode %d", mode);
45 UNIMPLEMENTED();
46
47 return GL_LINEAR;
48 }
49
50 return gl_mode;
51}
52
53inline GLenum WrapMode(Pica::TexturingRegs::TextureConfig::WrapMode mode) {
54 static const GLenum wrap_mode_table[] = {
55 GL_CLAMP_TO_EDGE, // WrapMode::ClampToEdge
56 GL_CLAMP_TO_BORDER, // WrapMode::ClampToBorder
57 GL_REPEAT, // WrapMode::Repeat
58 GL_MIRRORED_REPEAT, // WrapMode::MirroredRepeat
59 // TODO(wwylele): ClampToEdge2 and ClampToBorder2 are not properly implemented here. See the
60 // comments in enum WrapMode.
61 GL_CLAMP_TO_EDGE, // WrapMode::ClampToEdge2
62 GL_CLAMP_TO_BORDER, // WrapMode::ClampToBorder2
63 GL_REPEAT, // WrapMode::Repeat2
64 GL_REPEAT, // WrapMode::Repeat3
65 };
66
67 // Range check table for input
68 if (static_cast<size_t>(mode) >= ARRAY_SIZE(wrap_mode_table)) {
69 LOG_CRITICAL(Render_OpenGL, "Unknown texture wrap mode %d", mode);
70 UNREACHABLE();
71
72 return GL_CLAMP_TO_EDGE;
73 }
74
75 if (static_cast<u32>(mode) > 3) {
76 Core::Telemetry().AddField(Telemetry::FieldType::Session,
77 "VideoCore_Pica_UnsupportedTextureWrapMode",
78 static_cast<u32>(mode));
79 LOG_WARNING(Render_OpenGL, "Using texture wrap mode %u", static_cast<u32>(mode));
80 }
81
82 GLenum gl_mode = wrap_mode_table[mode];
83
84 // Check for dummy values indicating an unknown mode
85 if (gl_mode == 0) {
86 LOG_CRITICAL(Render_OpenGL, "Unknown texture wrap mode %d", mode);
87 UNIMPLEMENTED();
88
89 return GL_CLAMP_TO_EDGE;
90 }
91
92 return gl_mode;
93}
94
95inline GLenum BlendEquation(Pica::FramebufferRegs::BlendEquation equation) {
96 static const GLenum blend_equation_table[] = {
97 GL_FUNC_ADD, // BlendEquation::Add
98 GL_FUNC_SUBTRACT, // BlendEquation::Subtract
99 GL_FUNC_REVERSE_SUBTRACT, // BlendEquation::ReverseSubtract
100 GL_MIN, // BlendEquation::Min
101 GL_MAX, // BlendEquation::Max
102 };
103
104 // Range check table for input
105 if (static_cast<size_t>(equation) >= ARRAY_SIZE(blend_equation_table)) {
106 LOG_CRITICAL(Render_OpenGL, "Unknown blend equation %d", equation);
107 UNREACHABLE();
108
109 return GL_FUNC_ADD;
110 }
111
112 return blend_equation_table[(unsigned)equation];
113}
114
115inline GLenum BlendFunc(Pica::FramebufferRegs::BlendFactor factor) {
116 static const GLenum blend_func_table[] = {
117 GL_ZERO, // BlendFactor::Zero
118 GL_ONE, // BlendFactor::One
119 GL_SRC_COLOR, // BlendFactor::SourceColor
120 GL_ONE_MINUS_SRC_COLOR, // BlendFactor::OneMinusSourceColor
121 GL_DST_COLOR, // BlendFactor::DestColor
122 GL_ONE_MINUS_DST_COLOR, // BlendFactor::OneMinusDestColor
123 GL_SRC_ALPHA, // BlendFactor::SourceAlpha
124 GL_ONE_MINUS_SRC_ALPHA, // BlendFactor::OneMinusSourceAlpha
125 GL_DST_ALPHA, // BlendFactor::DestAlpha
126 GL_ONE_MINUS_DST_ALPHA, // BlendFactor::OneMinusDestAlpha
127 GL_CONSTANT_COLOR, // BlendFactor::ConstantColor
128 GL_ONE_MINUS_CONSTANT_COLOR, // BlendFactor::OneMinusConstantColor
129 GL_CONSTANT_ALPHA, // BlendFactor::ConstantAlpha
130 GL_ONE_MINUS_CONSTANT_ALPHA, // BlendFactor::OneMinusConstantAlpha
131 GL_SRC_ALPHA_SATURATE, // BlendFactor::SourceAlphaSaturate
132 };
133
134 // Range check table for input
135 if (static_cast<size_t>(factor) >= ARRAY_SIZE(blend_func_table)) {
136 LOG_CRITICAL(Render_OpenGL, "Unknown blend factor %d", factor);
137 UNREACHABLE();
138
139 return GL_ONE;
140 }
141
142 return blend_func_table[(unsigned)factor];
143}
144
145inline GLenum LogicOp(Pica::FramebufferRegs::LogicOp op) {
146 static const GLenum logic_op_table[] = {
147 GL_CLEAR, // Clear
148 GL_AND, // And
149 GL_AND_REVERSE, // AndReverse
150 GL_COPY, // Copy
151 GL_SET, // Set
152 GL_COPY_INVERTED, // CopyInverted
153 GL_NOOP, // NoOp
154 GL_INVERT, // Invert
155 GL_NAND, // Nand
156 GL_OR, // Or
157 GL_NOR, // Nor
158 GL_XOR, // Xor
159 GL_EQUIV, // Equiv
160 GL_AND_INVERTED, // AndInverted
161 GL_OR_REVERSE, // OrReverse
162 GL_OR_INVERTED, // OrInverted
163 };
164
165 // Range check table for input
166 if (static_cast<size_t>(op) >= ARRAY_SIZE(logic_op_table)) {
167 LOG_CRITICAL(Render_OpenGL, "Unknown logic op %d", op);
168 UNREACHABLE();
169
170 return GL_COPY;
171 }
172
173 return logic_op_table[(unsigned)op];
174}
175
176inline GLenum CompareFunc(Pica::FramebufferRegs::CompareFunc func) {
177 static const GLenum compare_func_table[] = {
178 GL_NEVER, // CompareFunc::Never
179 GL_ALWAYS, // CompareFunc::Always
180 GL_EQUAL, // CompareFunc::Equal
181 GL_NOTEQUAL, // CompareFunc::NotEqual
182 GL_LESS, // CompareFunc::LessThan
183 GL_LEQUAL, // CompareFunc::LessThanOrEqual
184 GL_GREATER, // CompareFunc::GreaterThan
185 GL_GEQUAL, // CompareFunc::GreaterThanOrEqual
186 };
187
188 // Range check table for input
189 if (static_cast<size_t>(func) >= ARRAY_SIZE(compare_func_table)) {
190 LOG_CRITICAL(Render_OpenGL, "Unknown compare function %d", func);
191 UNREACHABLE();
192
193 return GL_ALWAYS;
194 }
195
196 return compare_func_table[(unsigned)func];
197}
198
199inline GLenum StencilOp(Pica::FramebufferRegs::StencilAction action) {
200 static const GLenum stencil_op_table[] = {
201 GL_KEEP, // StencilAction::Keep
202 GL_ZERO, // StencilAction::Zero
203 GL_REPLACE, // StencilAction::Replace
204 GL_INCR, // StencilAction::Increment
205 GL_DECR, // StencilAction::Decrement
206 GL_INVERT, // StencilAction::Invert
207 GL_INCR_WRAP, // StencilAction::IncrementWrap
208 GL_DECR_WRAP, // StencilAction::DecrementWrap
209 };
210
211 // Range check table for input
212 if (static_cast<size_t>(action) >= ARRAY_SIZE(stencil_op_table)) {
213 LOG_CRITICAL(Render_OpenGL, "Unknown stencil op %d", action);
214 UNREACHABLE();
215
216 return GL_KEEP;
217 }
218
219 return stencil_op_table[(unsigned)action];
220}
221
222inline GLvec4 ColorRGBA8(const u32 color) {
223 return {{
224 (color >> 0 & 0xFF) / 255.0f, (color >> 8 & 0xFF) / 255.0f, (color >> 16 & 0xFF) / 255.0f,
225 (color >> 24 & 0xFF) / 255.0f,
226 }};
227}
228
229inline std::array<GLfloat, 3> LightColor(const Pica::LightingRegs::LightColor& color) {
230 return {{
231 color.r / 255.0f, color.g / 255.0f, color.b / 255.0f,
232 }};
233}
234
235} // namespace
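// Illustrative only (not part of the original header): using the translation helpers above to
// set GL blend state for a representative Pica configuration. The specific Add / SourceAlpha /
// OneMinusSourceAlpha choice is an arbitrary example, not taken from register state.
inline void ApplyExampleBlendState() {
    glBlendEquation(PicaToGL::BlendEquation(Pica::FramebufferRegs::BlendEquation::Add));
    glBlendFunc(PicaToGL::BlendFunc(Pica::FramebufferRegs::BlendFactor::SourceAlpha),
                PicaToGL::BlendFunc(Pica::FramebufferRegs::BlendFactor::OneMinusSourceAlpha));
}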
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index e59eb7d76..410b0e959 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -13,14 +13,11 @@
13#include "core/core.h" 13#include "core/core.h"
14#include "core/core_timing.h" 14#include "core/core_timing.h"
15#include "core/frontend/emu_window.h" 15#include "core/frontend/emu_window.h"
16#include "core/hw/gpu.h"
17#include "core/hw/hw.h" 16#include "core/hw/hw.h"
18#include "core/hw/lcd.h" 17#include "core/hw/lcd.h"
19#include "core/memory.h" 18#include "core/memory.h"
20#include "core/settings.h" 19#include "core/settings.h"
21#include "core/tracer/recorder.h" 20#include "core/tracer/recorder.h"
22#include "video_core/debug_utils/debug_utils.h"
23#include "video_core/rasterizer_interface.h"
24#include "video_core/renderer_opengl/renderer_opengl.h" 21#include "video_core/renderer_opengl/renderer_opengl.h"
25#include "video_core/video_core.h" 22#include "video_core/video_core.h"
26 23
@@ -128,10 +125,6 @@ void RendererOpenGL::SwapBuffers(const FramebufferInfo& framebuffer_info) {
128 125
129 prev_state.Apply(); 126 prev_state.Apply();
130 RefreshRasterizerSetting(); 127 RefreshRasterizerSetting();
131
132 if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
133 Pica::g_debug_context->recorder->FrameFinished();
134 }
135} 128}
136 129
137static inline u32 MortonInterleave128(u32 x, u32 y) { 130static inline u32 MortonInterleave128(u32 x, u32 y) {
diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h
index 9d2bb8423..dc21d7a38 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.h
+++ b/src/video_core/renderer_opengl/renderer_opengl.h
@@ -8,7 +8,6 @@
8#include <glad/glad.h> 8#include <glad/glad.h>
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/math_util.h" 10#include "common/math_util.h"
11#include "core/hw/gpu.h"
12#include "video_core/renderer_base.h" 11#include "video_core/renderer_base.h"
13#include "video_core/renderer_opengl/gl_resource_manager.h" 12#include "video_core/renderer_opengl/gl_resource_manager.h"
14#include "video_core/renderer_opengl/gl_state.h" 13#include "video_core/renderer_opengl/gl_state.h"
diff --git a/src/video_core/shader/debug_data.h b/src/video_core/shader/debug_data.h
deleted file mode 100644
index 9e82122e1..000000000
--- a/src/video_core/shader/debug_data.h
+++ /dev/null
@@ -1,186 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <vector>
8#include "common/common_types.h"
9#include "common/vector_math.h"
10#include "video_core/pica_types.h"
11
12namespace Pica {
13namespace Shader {
14
15/// Helper structure used to keep track of data useful for inspection of shader emulation
16template <bool full_debugging>
17struct DebugData;
18
19template <>
20struct DebugData<false> {
21    // TODO: Hide these behind an interface and move them to DebugData<true>
22 u32 max_offset = 0; ///< maximum program counter ever reached
23 u32 max_opdesc_id = 0; ///< maximum swizzle pattern index ever used
24};
25
26template <>
27struct DebugData<true> {
28 /// Records store the input and output operands of a particular instruction.
29 struct Record {
30 enum Type {
31 // Floating point arithmetic operands
32 SRC1 = 0x1,
33 SRC2 = 0x2,
34 SRC3 = 0x4,
35
36 // Initial and final output operand value
37 DEST_IN = 0x8,
38 DEST_OUT = 0x10,
39
40 // Current and next instruction offset (in words)
41 CUR_INSTR = 0x20,
42 NEXT_INSTR = 0x40,
43
44 // Output address register value
45 ADDR_REG_OUT = 0x80,
46
47 // Result of a comparison instruction
48 CMP_RESULT = 0x100,
49
50 // Input values for conditional flow control instructions
51 COND_BOOL_IN = 0x200,
52 COND_CMP_IN = 0x400,
53
54 // Input values for a loop
55 LOOP_INT_IN = 0x800,
56 };
57
58 Math::Vec4<float24> src1;
59 Math::Vec4<float24> src2;
60 Math::Vec4<float24> src3;
61
62 Math::Vec4<float24> dest_in;
63 Math::Vec4<float24> dest_out;
64
65 s32 address_registers[2];
66 bool conditional_code[2];
67 bool cond_bool;
68 bool cond_cmp[2];
69 Math::Vec4<u8> loop_int;
70
71 u32 instruction_offset;
72 u32 next_instruction;
73
74 /// set of enabled fields (as a combination of Type flags)
75 unsigned mask = 0;
76 };
77
78 u32 max_offset = 0; ///< maximum program counter ever reached
79 u32 max_opdesc_id = 0; ///< maximum swizzle pattern index ever used
80
81 /// List of records for each executed shader instruction
82 std::vector<DebugData<true>::Record> records;
83};
84
85/// Type alias for better readability
86using DebugDataRecord = DebugData<true>::Record;
87
88/// Helper function to set a DebugData<true>::Record field based on the template enum parameter.
89template <DebugDataRecord::Type type, typename ValueType>
90inline void SetField(DebugDataRecord& record, ValueType value);
91
92template <>
93inline void SetField<DebugDataRecord::SRC1>(DebugDataRecord& record, float24* value) {
94 record.src1.x = value[0];
95 record.src1.y = value[1];
96 record.src1.z = value[2];
97 record.src1.w = value[3];
98}
99
100template <>
101inline void SetField<DebugDataRecord::SRC2>(DebugDataRecord& record, float24* value) {
102 record.src2.x = value[0];
103 record.src2.y = value[1];
104 record.src2.z = value[2];
105 record.src2.w = value[3];
106}
107
108template <>
109inline void SetField<DebugDataRecord::SRC3>(DebugDataRecord& record, float24* value) {
110 record.src3.x = value[0];
111 record.src3.y = value[1];
112 record.src3.z = value[2];
113 record.src3.w = value[3];
114}
115
116template <>
117inline void SetField<DebugDataRecord::DEST_IN>(DebugDataRecord& record, float24* value) {
118 record.dest_in.x = value[0];
119 record.dest_in.y = value[1];
120 record.dest_in.z = value[2];
121 record.dest_in.w = value[3];
122}
123
124template <>
125inline void SetField<DebugDataRecord::DEST_OUT>(DebugDataRecord& record, float24* value) {
126 record.dest_out.x = value[0];
127 record.dest_out.y = value[1];
128 record.dest_out.z = value[2];
129 record.dest_out.w = value[3];
130}
131
132template <>
133inline void SetField<DebugDataRecord::ADDR_REG_OUT>(DebugDataRecord& record, s32* value) {
134 record.address_registers[0] = value[0];
135 record.address_registers[1] = value[1];
136}
137
138template <>
139inline void SetField<DebugDataRecord::CMP_RESULT>(DebugDataRecord& record, bool* value) {
140 record.conditional_code[0] = value[0];
141 record.conditional_code[1] = value[1];
142}
143
144template <>
145inline void SetField<DebugDataRecord::COND_BOOL_IN>(DebugDataRecord& record, bool value) {
146 record.cond_bool = value;
147}
148
149template <>
150inline void SetField<DebugDataRecord::COND_CMP_IN>(DebugDataRecord& record, bool* value) {
151 record.cond_cmp[0] = value[0];
152 record.cond_cmp[1] = value[1];
153}
154
155template <>
156inline void SetField<DebugDataRecord::LOOP_INT_IN>(DebugDataRecord& record, Math::Vec4<u8> value) {
157 record.loop_int = value;
158}
159
160template <>
161inline void SetField<DebugDataRecord::CUR_INSTR>(DebugDataRecord& record, u32 value) {
162 record.instruction_offset = value;
163}
164
165template <>
166inline void SetField<DebugDataRecord::NEXT_INSTR>(DebugDataRecord& record, u32 value) {
167 record.next_instruction = value;
168}
169
170/// Helper function to set debug information on the current shader iteration.
171template <DebugDataRecord::Type type, typename ValueType>
172inline void Record(DebugData<false>& debug_data, u32 offset, ValueType value) {
173 // Debugging disabled => nothing to do
174}
175
176template <DebugDataRecord::Type type, typename ValueType>
177inline void Record(DebugData<true>& debug_data, u32 offset, ValueType value) {
178 if (offset >= debug_data.records.size())
179 debug_data.records.resize(offset + 1);
180
181 SetField<type, ValueType>(debug_data.records[offset], value);
182 debug_data.records[offset].mask |= type;
183}
184
185} // namespace Shader
186} // namespace Pica
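A rough sketch of the compile-time debug-recording pattern removed above: the overload taking the <false> specialization is an empty no-op, so builds without full debugging record nothing and pay no cost, while the <true> specialization accumulates per-instruction data. Type and function names below are simplified placeholders, not the original DebugData API.

#include <cstdio>
#include <vector>

template <bool full_debugging>
struct TraceData;

template <>
struct TraceData<false> {};

template <>
struct TraceData<true> {
    std::vector<unsigned> visited_offsets;
};

inline void RecordOffset(TraceData<false>&, unsigned) {
    // Debugging disabled => nothing to do
}

inline void RecordOffset(TraceData<true>& data, unsigned offset) {
    data.visited_offsets.push_back(offset);
}

int main() {
    TraceData<true> trace;
    RecordOffset(trace, 42);
    std::printf("%zu offset(s) recorded\n", trace.visited_offsets.size());
}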
diff --git a/src/video_core/shader/shader.cpp b/src/video_core/shader/shader.cpp
deleted file mode 100644
index 2857d2829..000000000
--- a/src/video_core/shader/shader.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <cmath>
6#include <cstring>
7#include "common/bit_set.h"
8#include "common/logging/log.h"
9#include "common/microprofile.h"
10#include "video_core/pica_state.h"
11#include "video_core/regs_rasterizer.h"
12#include "video_core/regs_shader.h"
13#include "video_core/shader/shader.h"
14#include "video_core/shader/shader_interpreter.h"
15#ifdef ARCHITECTURE_x86_64
16#include "video_core/shader/shader_jit_x64.h"
17#endif // ARCHITECTURE_x86_64
18#include "video_core/video_core.h"
19
20namespace Pica {
21
22namespace Shader {
23
24OutputVertex OutputVertex::FromAttributeBuffer(const RasterizerRegs& regs,
25 const AttributeBuffer& input) {
26 // Setup output data
27 union {
28 OutputVertex ret{};
29 std::array<float24, 24> vertex_slots;
30 };
31 static_assert(sizeof(vertex_slots) == sizeof(ret), "Struct and array have different sizes.");
32
33 unsigned int num_attributes = regs.vs_output_total;
34 ASSERT(num_attributes <= 7);
35 for (unsigned int i = 0; i < num_attributes; ++i) {
36 const auto& output_register_map = regs.vs_output_attributes[i];
37
38 RasterizerRegs::VSOutputAttributes::Semantic semantics[4] = {
39 output_register_map.map_x, output_register_map.map_y, output_register_map.map_z,
40 output_register_map.map_w};
41
42 for (unsigned comp = 0; comp < 4; ++comp) {
43 RasterizerRegs::VSOutputAttributes::Semantic semantic = semantics[comp];
44 if (semantic < vertex_slots.size()) {
45 vertex_slots[semantic] = input.attr[i][comp];
46 } else if (semantic != RasterizerRegs::VSOutputAttributes::INVALID) {
47 LOG_ERROR(HW_GPU, "Invalid/unknown semantic id: %u", (unsigned int)semantic);
48 }
49 }
50 }
51
52    // The hardware takes the absolute value of each vertex color component and saturates it
53    // like this, *before* doing interpolation
54 for (unsigned i = 0; i < 4; ++i) {
55 float c = std::fabs(ret.color[i].ToFloat32());
56 ret.color[i] = float24::FromFloat32(c < 1.0f ? c : 1.0f);
57 }
58
59 LOG_TRACE(HW_GPU, "Output vertex: pos(%.2f, %.2f, %.2f, %.2f), quat(%.2f, %.2f, %.2f, %.2f), "
60 "col(%.2f, %.2f, %.2f, %.2f), tc0(%.2f, %.2f), view(%.2f, %.2f, %.2f)",
61 ret.pos.x.ToFloat32(), ret.pos.y.ToFloat32(), ret.pos.z.ToFloat32(),
62 ret.pos.w.ToFloat32(), ret.quat.x.ToFloat32(), ret.quat.y.ToFloat32(),
63 ret.quat.z.ToFloat32(), ret.quat.w.ToFloat32(), ret.color.x.ToFloat32(),
64 ret.color.y.ToFloat32(), ret.color.z.ToFloat32(), ret.color.w.ToFloat32(),
65 ret.tc0.u().ToFloat32(), ret.tc0.v().ToFloat32(), ret.view.x.ToFloat32(),
66 ret.view.y.ToFloat32(), ret.view.z.ToFloat32());
67
68 return ret;
69}
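// (Editorial sketch, not part of the original file.) The loop above scatters each
// attribute component into a flat array of 24 output slots according to its
// per-component semantic index. The slot numbers and the sentinel value below are
// placeholders chosen for illustration only.
#include <array>
#include <cstdio>

int main() {
    constexpr unsigned kInvalidSemantic = 0x1f; // assumed stand-in for the INVALID sentinel
    std::array<float, 24> vertex_slots{};
    const float attr[4] = {1.0f, 2.0f, 3.0f, 4.0f};            // one input attribute
    const unsigned semantics[4] = {0, 1, 2, kInvalidSemantic}; // target slot per component

    for (unsigned comp = 0; comp < 4; ++comp) {
        if (semantics[comp] < vertex_slots.size()) {
            vertex_slots[semantics[comp]] = attr[comp];
        } else if (semantics[comp] != kInvalidSemantic) {
            std::printf("Invalid/unknown semantic id: %u\n", semantics[comp]);
        }
    }
    std::printf("slots 0-2: %.1f %.1f %.1f\n", vertex_slots[0], vertex_slots[1], vertex_slots[2]);
}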
70
71void UnitState::LoadInput(const ShaderRegs& config, const AttributeBuffer& input) {
72 const unsigned max_attribute = config.max_input_attribute_index;
73
74 for (unsigned attr = 0; attr <= max_attribute; ++attr) {
75 unsigned reg = config.GetRegisterForAttribute(attr);
76 registers.input[reg] = input.attr[attr];
77 }
78}
79
80void UnitState::WriteOutput(const ShaderRegs& config, AttributeBuffer& output) {
81 unsigned int output_i = 0;
82 for (unsigned int reg : Common::BitSet<u32>(config.output_mask)) {
83 output.attr[output_i++] = registers.output[reg];
84 }
85}
86
87UnitState::UnitState(GSEmitter* emitter) : emitter_ptr(emitter) {}
88
89GSEmitter::GSEmitter() {
90 handlers = new Handlers;
91}
92
93GSEmitter::~GSEmitter() {
94 delete handlers;
95}
96
97void GSEmitter::Emit(Math::Vec4<float24> (&vertex)[16]) {
98 ASSERT(vertex_id < 3);
99 std::copy(std::begin(vertex), std::end(vertex), buffer[vertex_id].begin());
100 if (prim_emit) {
101 if (winding)
102 handlers->winding_setter();
103 for (size_t i = 0; i < buffer.size(); ++i) {
104 AttributeBuffer output;
105 unsigned int output_i = 0;
106 for (unsigned int reg : Common::BitSet<u32>(output_mask)) {
107 output.attr[output_i++] = buffer[i][reg];
108 }
109 handlers->vertex_handler(output);
110 }
111 }
112}
113
114GSUnitState::GSUnitState() : UnitState(&emitter) {}
115
116void GSUnitState::SetVertexHandler(VertexHandler vertex_handler, WindingSetter winding_setter) {
117 emitter.handlers->vertex_handler = std::move(vertex_handler);
118 emitter.handlers->winding_setter = std::move(winding_setter);
119}
120
121void GSUnitState::ConfigOutput(const ShaderRegs& config) {
122 emitter.output_mask = config.output_mask;
123}
124
125MICROPROFILE_DEFINE(GPU_Shader, "GPU", "Shader", MP_RGB(50, 50, 240));
126
127#ifdef ARCHITECTURE_x86_64
128static std::unique_ptr<JitX64Engine> jit_engine;
129#endif // ARCHITECTURE_x86_64
130static InterpreterEngine interpreter_engine;
131
132ShaderEngine* GetEngine() {
133#ifdef ARCHITECTURE_x86_64
134 // TODO(yuriks): Re-initialize on each change rather than being persistent
135 if (VideoCore::g_shader_jit_enabled) {
136 if (jit_engine == nullptr) {
137 jit_engine = std::make_unique<JitX64Engine>();
138 }
139 return jit_engine.get();
140 }
141#endif // ARCHITECTURE_x86_64
142
143 return &interpreter_engine;
144}
145
146void Shutdown() {
147#ifdef ARCHITECTURE_x86_64
148 jit_engine = nullptr;
149#endif // ARCHITECTURE_x86_64
150}
151
152} // namespace Shader
153
154} // namespace Pica
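A condensed sketch of the backend selection performed by GetEngine() above: build the JIT engine lazily the first time it is requested and keep it around, otherwise hand back the always-available interpreter. The class names here are placeholders, not the Citra types, and the real code additionally guards the JIT behind an ARCHITECTURE_x86_64 build flag.

#include <cstdio>
#include <memory>

struct Engine {
    virtual ~Engine() = default;
    virtual const char* Name() const = 0;
};
struct InterpreterBackend final : Engine {
    const char* Name() const override { return "interpreter"; }
};
struct JitBackend final : Engine {
    const char* Name() const override { return "jit"; }
};

Engine* SelectEngine(bool jit_enabled) {
    static std::unique_ptr<JitBackend> jit; // persistent, built on first use
    static InterpreterBackend interpreter;  // fallback that is always available
    if (jit_enabled) {
        if (!jit)
            jit = std::make_unique<JitBackend>();
        return jit.get();
    }
    return &interpreter;
}

int main() {
    std::printf("%s\n", SelectEngine(true)->Name());  // jit
    std::printf("%s\n", SelectEngine(false)->Name()); // interpreter
}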
diff --git a/src/video_core/shader/shader.h b/src/video_core/shader/shader.h
deleted file mode 100644
index a3789da01..000000000
--- a/src/video_core/shader/shader.h
+++ /dev/null
@@ -1,233 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include <functional>
10#include <type_traits>
11#include <nihstro/shader_bytecode.h>
12#include "common/assert.h"
13#include "common/common_funcs.h"
14#include "common/common_types.h"
15#include "common/vector_math.h"
16#include "video_core/pica_types.h"
17#include "video_core/regs_rasterizer.h"
18#include "video_core/regs_shader.h"
19
20using nihstro::RegisterType;
21using nihstro::SourceRegister;
22using nihstro::DestRegister;
23
24namespace Pica {
25
26namespace Shader {
27
28constexpr unsigned MAX_PROGRAM_CODE_LENGTH = 4096;
29constexpr unsigned MAX_SWIZZLE_DATA_LENGTH = 4096;
30
31struct AttributeBuffer {
32 alignas(16) Math::Vec4<float24> attr[16];
33};
34
35/// Handler type for receiving vertex outputs from vertex shader or geometry shader
36using VertexHandler = std::function<void(const AttributeBuffer&)>;
37
38/// Handler type for signaling to invert the vertex order of the next triangle
39using WindingSetter = std::function<void()>;
40
41struct OutputVertex {
42 Math::Vec4<float24> pos;
43 Math::Vec4<float24> quat;
44 Math::Vec4<float24> color;
45 Math::Vec2<float24> tc0;
46 Math::Vec2<float24> tc1;
47 float24 tc0_w;
48 INSERT_PADDING_WORDS(1);
49 Math::Vec3<float24> view;
50 INSERT_PADDING_WORDS(1);
51 Math::Vec2<float24> tc2;
52
53 static OutputVertex FromAttributeBuffer(const RasterizerRegs& regs,
54 const AttributeBuffer& output);
55};
56#define ASSERT_POS(var, pos) \
57 static_assert(offsetof(OutputVertex, var) == pos * sizeof(float24), "Semantic at wrong " \
58 "offset.")
59ASSERT_POS(pos, RasterizerRegs::VSOutputAttributes::POSITION_X);
60ASSERT_POS(quat, RasterizerRegs::VSOutputAttributes::QUATERNION_X);
61ASSERT_POS(color, RasterizerRegs::VSOutputAttributes::COLOR_R);
62ASSERT_POS(tc0, RasterizerRegs::VSOutputAttributes::TEXCOORD0_U);
63ASSERT_POS(tc1, RasterizerRegs::VSOutputAttributes::TEXCOORD1_U);
64ASSERT_POS(tc0_w, RasterizerRegs::VSOutputAttributes::TEXCOORD0_W);
65ASSERT_POS(view, RasterizerRegs::VSOutputAttributes::VIEW_X);
66ASSERT_POS(tc2, RasterizerRegs::VSOutputAttributes::TEXCOORD2_U);
67#undef ASSERT_POS
68static_assert(std::is_pod<OutputVertex>::value, "Structure is not POD");
69static_assert(sizeof(OutputVertex) == 24 * sizeof(float), "OutputVertex has invalid size");
70
71/**
72 * This structure contains state information for primitive emitting in geometry shader.
73 */
74struct GSEmitter {
75 std::array<std::array<Math::Vec4<float24>, 16>, 3> buffer;
76 u8 vertex_id;
77 bool prim_emit;
78 bool winding;
79 u32 output_mask;
80
81    // Function objects are hidden behind a raw pointer to keep the structure a standard-layout
82    // type, so the JIT can use offsetof to access the other members.
83 struct Handlers {
84 VertexHandler vertex_handler;
85 WindingSetter winding_setter;
86 } * handlers;
87
88 GSEmitter();
89 ~GSEmitter();
90 void Emit(Math::Vec4<float24> (&vertex)[16]);
91};
92static_assert(std::is_standard_layout<GSEmitter>::value, "GSEmitter is not standard layout type");
93
94/**
95 * This structure contains the state information that needs to be unique for a shader unit. The 3DS
96 * has four shader units that process shaders in parallel. At the present, Citra only implements a
97 * single shader unit that processes all shaders serially. Putting the state information in a struct
98 * here will make it easier for us to parallelize the shader processing later.
99 */
100struct UnitState {
101 explicit UnitState(GSEmitter* emitter = nullptr);
102 struct Registers {
103 // The registers are accessed by the shader JIT using SSE instructions, and are therefore
104 // required to be 16-byte aligned.
105 alignas(16) Math::Vec4<float24> input[16];
106 alignas(16) Math::Vec4<float24> temporary[16];
107 alignas(16) Math::Vec4<float24> output[16];
108 } registers;
109 static_assert(std::is_pod<Registers>::value, "Structure is not POD");
110
111 bool conditional_code[2];
112
113 // Two Address registers and one loop counter
114 // TODO: How many bits do these actually have?
115 s32 address_registers[3];
116
117 GSEmitter* emitter_ptr;
118
119 static size_t InputOffset(const SourceRegister& reg) {
120 switch (reg.GetRegisterType()) {
121 case RegisterType::Input:
122 return offsetof(UnitState, registers.input) +
123 reg.GetIndex() * sizeof(Math::Vec4<float24>);
124
125 case RegisterType::Temporary:
126 return offsetof(UnitState, registers.temporary) +
127 reg.GetIndex() * sizeof(Math::Vec4<float24>);
128
129 default:
130 UNREACHABLE();
131 return 0;
132 }
133 }
134
135 static size_t OutputOffset(const DestRegister& reg) {
136 switch (reg.GetRegisterType()) {
137 case RegisterType::Output:
138 return offsetof(UnitState, registers.output) +
139 reg.GetIndex() * sizeof(Math::Vec4<float24>);
140
141 case RegisterType::Temporary:
142 return offsetof(UnitState, registers.temporary) +
143 reg.GetIndex() * sizeof(Math::Vec4<float24>);
144
145 default:
146 UNREACHABLE();
147 return 0;
148 }
149 }
150
151 /**
152 * Loads the unit state with an input vertex.
153 *
154 * @param config Shader configuration registers corresponding to the unit.
155 * @param input Attribute buffer to load into the input registers.
156 */
157 void LoadInput(const ShaderRegs& config, const AttributeBuffer& input);
158
159 void WriteOutput(const ShaderRegs& config, AttributeBuffer& output);
160};
161
162/**
163 * This is an extended shader unit state that represents the special unit that can run both vertex
164 * shader and geometry shader. It contains an additional primitive emitter and utilities for
165 * geometry shader.
166 */
167struct GSUnitState : public UnitState {
168 GSUnitState();
169 void SetVertexHandler(VertexHandler vertex_handler, WindingSetter winding_setter);
170 void ConfigOutput(const ShaderRegs& config);
171
172 GSEmitter emitter;
173};
174
175struct ShaderSetup {
176 struct {
177 // The float uniforms are accessed by the shader JIT using SSE instructions, and are
178 // therefore required to be 16-byte aligned.
179 alignas(16) Math::Vec4<float24> f[96];
180
181 std::array<bool, 16> b;
182 std::array<Math::Vec4<u8>, 4> i;
183 } uniforms;
184
185 static size_t GetFloatUniformOffset(unsigned index) {
186 return offsetof(ShaderSetup, uniforms.f) + index * sizeof(Math::Vec4<float24>);
187 }
188
189 static size_t GetBoolUniformOffset(unsigned index) {
190 return offsetof(ShaderSetup, uniforms.b) + index * sizeof(bool);
191 }
192
193 static size_t GetIntUniformOffset(unsigned index) {
194 return offsetof(ShaderSetup, uniforms.i) + index * sizeof(Math::Vec4<u8>);
195 }
196
197 std::array<u32, MAX_PROGRAM_CODE_LENGTH> program_code;
198 std::array<u32, MAX_SWIZZLE_DATA_LENGTH> swizzle_data;
199
200 /// Data private to ShaderEngines
201 struct EngineData {
202 unsigned int entry_point;
203 /// Used by the JIT, points to a compiled shader object.
204 const void* cached_shader = nullptr;
205 } engine_data;
206};
207
208class ShaderEngine {
209public:
210 virtual ~ShaderEngine() = default;
211
212 /**
213 * Performs any shader unit setup that only needs to happen once per shader (as opposed to once
214 * per vertex, which would happen within the `Run` function).
215 */
216 virtual void SetupBatch(ShaderSetup& setup, unsigned int entry_point) = 0;
217
218 /**
219 * Runs the currently setup shader.
220 *
221     * @param setup Shader engine state, must be set up with SetupBatch on each shader change.
222     * @param state Shader unit state, must be set up with input data before each shader invocation.
223 */
224 virtual void Run(const ShaderSetup& setup, UnitState& state) const = 0;
225};
226
227// TODO(yuriks): Remove and make it non-global state somewhere
228ShaderEngine* GetEngine();
229void Shutdown();
230
231} // namespace Shader
232
233} // namespace Pica
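A small sketch of the offsetof-based addressing that InputOffset/OutputOffset implement above: because the state struct is standard-layout, a register can be described as a byte offset from a single base pointer, which is exactly what the JIT-generated code needs. The struct below is a simplified stand-in for UnitState.

#include <cstddef>
#include <cstdio>

struct Vec4 {
    float x, y, z, w;
};

struct State {
    alignas(16) Vec4 input[16];
    alignas(16) Vec4 temporary[16];
    alignas(16) Vec4 output[16];
};

constexpr std::size_t InputOffset(unsigned index) {
    return offsetof(State, input) + index * sizeof(Vec4);
}

int main() {
    State state{};
    // Address input register 3 through a raw byte offset, as emitted code would.
    auto* reg = reinterpret_cast<Vec4*>(reinterpret_cast<char*>(&state) + InputOffset(3));
    reg->x = 1.0f;
    std::printf("input[3].x = %.1f\n", state.input[3].x);
}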
diff --git a/src/video_core/shader/shader_interpreter.cpp b/src/video_core/shader/shader_interpreter.cpp
deleted file mode 100644
index 9d4da4904..000000000
--- a/src/video_core/shader/shader_interpreter.cpp
+++ /dev/null
@@ -1,701 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <array>
7#include <cmath>
8#include <numeric>
9#include <boost/container/static_vector.hpp>
10#include <boost/range/algorithm/fill.hpp>
11#include <nihstro/shader_bytecode.h>
12#include "common/assert.h"
13#include "common/common_types.h"
14#include "common/logging/log.h"
15#include "common/microprofile.h"
16#include "common/vector_math.h"
17#include "video_core/pica_state.h"
18#include "video_core/pica_types.h"
19#include "video_core/shader/shader.h"
20#include "video_core/shader/shader_interpreter.h"
21
22using nihstro::OpCode;
23using nihstro::Instruction;
24using nihstro::RegisterType;
25using nihstro::SourceRegister;
26using nihstro::SwizzlePattern;
27
28namespace Pica {
29
30namespace Shader {
31
32struct CallStackElement {
33    u32 final_address;  // Address at which we jump to return_address
34    u32 return_address; // Where to jump when leaving scope
35    u8 repeat_counter;  // How many more times to repeat before this call stack element is removed
36 u8 loop_increment; // Which value to add to the loop counter after an iteration
37 // TODO: Should this be a signed value? Does it even matter?
38 u32 loop_address; // The address where we'll return to after each loop iteration
39};
40
41template <bool Debug>
42static void RunInterpreter(const ShaderSetup& setup, UnitState& state, DebugData<Debug>& debug_data,
43 unsigned offset) {
44 // TODO: Is there a maximal size for this?
45 boost::container::static_vector<CallStackElement, 16> call_stack;
46 u32 program_counter = offset;
47
48 state.conditional_code[0] = false;
49 state.conditional_code[1] = false;
50
51 auto call = [&program_counter, &call_stack](u32 offset, u32 num_instructions, u32 return_offset,
52 u8 repeat_count, u8 loop_increment) {
53 // -1 to make sure when incrementing the PC we end up at the correct offset
54 program_counter = offset - 1;
55 ASSERT(call_stack.size() < call_stack.capacity());
56 call_stack.push_back(
57 {offset + num_instructions, return_offset, repeat_count, loop_increment, offset});
58 };
59
60 auto evaluate_condition = [&state](Instruction::FlowControlType flow_control) {
61 using Op = Instruction::FlowControlType::Op;
62
63 bool result_x = flow_control.refx.Value() == state.conditional_code[0];
64 bool result_y = flow_control.refy.Value() == state.conditional_code[1];
65
66 switch (flow_control.op) {
67 case Op::Or:
68 return result_x || result_y;
69 case Op::And:
70 return result_x && result_y;
71 case Op::JustX:
72 return result_x;
73 case Op::JustY:
74 return result_y;
75 default:
76 UNREACHABLE();
77 return false;
78 }
79 };
80
81 const auto& uniforms = setup.uniforms;
82 const auto& swizzle_data = setup.swizzle_data;
83 const auto& program_code = setup.program_code;
84
85 // Placeholder for invalid inputs
86 static float24 dummy_vec4_float24[4];
87
88 unsigned iteration = 0;
89 bool exit_loop = false;
90 while (!exit_loop) {
91 if (!call_stack.empty()) {
92 auto& top = call_stack.back();
93 if (program_counter == top.final_address) {
94 state.address_registers[2] += top.loop_increment;
95
96 if (top.repeat_counter-- == 0) {
97 program_counter = top.return_address;
98 call_stack.pop_back();
99 } else {
100 program_counter = top.loop_address;
101 }
102
103 // TODO: Is "trying again" accurate to hardware?
104 continue;
105 }
106 }
107
108 const Instruction instr = {program_code[program_counter]};
109 const SwizzlePattern swizzle = {swizzle_data[instr.common.operand_desc_id]};
110
111 Record<DebugDataRecord::CUR_INSTR>(debug_data, iteration, program_counter);
112 if (iteration > 0)
113 Record<DebugDataRecord::NEXT_INSTR>(debug_data, iteration - 1, program_counter);
114
115 debug_data.max_offset = std::max<u32>(debug_data.max_offset, 1 + program_counter);
116
117 auto LookupSourceRegister = [&](const SourceRegister& source_reg) -> const float24* {
118 switch (source_reg.GetRegisterType()) {
119 case RegisterType::Input:
120 return &state.registers.input[source_reg.GetIndex()].x;
121
122 case RegisterType::Temporary:
123 return &state.registers.temporary[source_reg.GetIndex()].x;
124
125 case RegisterType::FloatUniform:
126 return &uniforms.f[source_reg.GetIndex()].x;
127
128 default:
129 return dummy_vec4_float24;
130 }
131 };
132
133 switch (instr.opcode.Value().GetInfo().type) {
134 case OpCode::Type::Arithmetic: {
135 const bool is_inverted =
136 (0 != (instr.opcode.Value().GetInfo().subtype & OpCode::Info::SrcInversed));
137
138 const int address_offset =
139 (instr.common.address_register_index == 0)
140 ? 0
141 : state.address_registers[instr.common.address_register_index - 1];
142
143 const float24* src1_ = LookupSourceRegister(instr.common.GetSrc1(is_inverted) +
144 (is_inverted ? 0 : address_offset));
145 const float24* src2_ = LookupSourceRegister(instr.common.GetSrc2(is_inverted) +
146 (is_inverted ? address_offset : 0));
147
148 const bool negate_src1 = ((bool)swizzle.negate_src1 != false);
149 const bool negate_src2 = ((bool)swizzle.negate_src2 != false);
150
151 float24 src1[4] = {
152 src1_[(int)swizzle.src1_selector_0.Value()],
153 src1_[(int)swizzle.src1_selector_1.Value()],
154 src1_[(int)swizzle.src1_selector_2.Value()],
155 src1_[(int)swizzle.src1_selector_3.Value()],
156 };
157 if (negate_src1) {
158 src1[0] = -src1[0];
159 src1[1] = -src1[1];
160 src1[2] = -src1[2];
161 src1[3] = -src1[3];
162 }
163 float24 src2[4] = {
164 src2_[(int)swizzle.src2_selector_0.Value()],
165 src2_[(int)swizzle.src2_selector_1.Value()],
166 src2_[(int)swizzle.src2_selector_2.Value()],
167 src2_[(int)swizzle.src2_selector_3.Value()],
168 };
169 if (negate_src2) {
170 src2[0] = -src2[0];
171 src2[1] = -src2[1];
172 src2[2] = -src2[2];
173 src2[3] = -src2[3];
174 }
175
176 float24* dest =
177 (instr.common.dest.Value() < 0x10)
178 ? &state.registers.output[instr.common.dest.Value().GetIndex()][0]
179 : (instr.common.dest.Value() < 0x20)
180 ? &state.registers.temporary[instr.common.dest.Value().GetIndex()][0]
181 : dummy_vec4_float24;
182
183 debug_data.max_opdesc_id =
184 std::max<u32>(debug_data.max_opdesc_id, 1 + instr.common.operand_desc_id);
185
186 switch (instr.opcode.Value().EffectiveOpCode()) {
187 case OpCode::Id::ADD: {
188 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
189 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
190 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
191 for (int i = 0; i < 4; ++i) {
192 if (!swizzle.DestComponentEnabled(i))
193 continue;
194
195 dest[i] = src1[i] + src2[i];
196 }
197 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
198 break;
199 }
200
201 case OpCode::Id::MUL: {
202 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
203 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
204 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
205 for (int i = 0; i < 4; ++i) {
206 if (!swizzle.DestComponentEnabled(i))
207 continue;
208
209 dest[i] = src1[i] * src2[i];
210 }
211 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
212 break;
213 }
214
215 case OpCode::Id::FLR:
216 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
217 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
218 for (int i = 0; i < 4; ++i) {
219 if (!swizzle.DestComponentEnabled(i))
220 continue;
221
222 dest[i] = float24::FromFloat32(std::floor(src1[i].ToFloat32()));
223 }
224 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
225 break;
226
227 case OpCode::Id::MAX:
228 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
229 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
230 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
231 for (int i = 0; i < 4; ++i) {
232 if (!swizzle.DestComponentEnabled(i))
233 continue;
234
235 // NOTE: Exact form required to match NaN semantics to hardware:
236 // max(0, NaN) -> NaN
237 // max(NaN, 0) -> 0
238 dest[i] = (src1[i] > src2[i]) ? src1[i] : src2[i];
239 }
240 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
241 break;
242
243 case OpCode::Id::MIN:
244 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
245 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
246 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
247 for (int i = 0; i < 4; ++i) {
248 if (!swizzle.DestComponentEnabled(i))
249 continue;
250
251 // NOTE: Exact form required to match NaN semantics to hardware:
252 // min(0, NaN) -> NaN
253 // min(NaN, 0) -> 0
254 dest[i] = (src1[i] < src2[i]) ? src1[i] : src2[i];
255 }
256 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
257 break;
258
259 case OpCode::Id::DP3:
260 case OpCode::Id::DP4:
261 case OpCode::Id::DPH:
262 case OpCode::Id::DPHI: {
263 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
264 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
265 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
266
267 OpCode::Id opcode = instr.opcode.Value().EffectiveOpCode();
268 if (opcode == OpCode::Id::DPH || opcode == OpCode::Id::DPHI)
269 src1[3] = float24::FromFloat32(1.0f);
270
271 int num_components = (opcode == OpCode::Id::DP3) ? 3 : 4;
272 float24 dot = std::inner_product(src1, src1 + num_components, src2,
273 float24::FromFloat32(0.f));
274
275 for (int i = 0; i < 4; ++i) {
276 if (!swizzle.DestComponentEnabled(i))
277 continue;
278
279 dest[i] = dot;
280 }
281 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
282 break;
283 }
284
285 // Reciprocal
286 case OpCode::Id::RCP: {
287 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
288 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
289 float24 rcp_res = float24::FromFloat32(1.0f / src1[0].ToFloat32());
290 for (int i = 0; i < 4; ++i) {
291 if (!swizzle.DestComponentEnabled(i))
292 continue;
293
294 dest[i] = rcp_res;
295 }
296 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
297 break;
298 }
299
300 // Reciprocal Square Root
301 case OpCode::Id::RSQ: {
302 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
303 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
304 float24 rsq_res = float24::FromFloat32(1.0f / std::sqrt(src1[0].ToFloat32()));
305 for (int i = 0; i < 4; ++i) {
306 if (!swizzle.DestComponentEnabled(i))
307 continue;
308
309 dest[i] = rsq_res;
310 }
311 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
312 break;
313 }
314
315 case OpCode::Id::MOVA: {
316 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
317 for (int i = 0; i < 2; ++i) {
318 if (!swizzle.DestComponentEnabled(i))
319 continue;
320
321 // TODO: Figure out how the rounding is done on hardware
322 state.address_registers[i] = static_cast<s32>(src1[i].ToFloat32());
323 }
324 Record<DebugDataRecord::ADDR_REG_OUT>(debug_data, iteration,
325 state.address_registers);
326 break;
327 }
328
329 case OpCode::Id::MOV: {
330 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
331 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
332 for (int i = 0; i < 4; ++i) {
333 if (!swizzle.DestComponentEnabled(i))
334 continue;
335
336 dest[i] = src1[i];
337 }
338 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
339 break;
340 }
341
342 case OpCode::Id::SGE:
343 case OpCode::Id::SGEI:
344 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
345 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
346 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
347 for (int i = 0; i < 4; ++i) {
348 if (!swizzle.DestComponentEnabled(i))
349 continue;
350
351 dest[i] = (src1[i] >= src2[i]) ? float24::FromFloat32(1.0f)
352 : float24::FromFloat32(0.0f);
353 }
354 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
355 break;
356
357 case OpCode::Id::SLT:
358 case OpCode::Id::SLTI:
359 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
360 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
361 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
362 for (int i = 0; i < 4; ++i) {
363 if (!swizzle.DestComponentEnabled(i))
364 continue;
365
366 dest[i] = (src1[i] < src2[i]) ? float24::FromFloat32(1.0f)
367 : float24::FromFloat32(0.0f);
368 }
369 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
370 break;
371
372 case OpCode::Id::CMP:
373 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
374 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
375 for (int i = 0; i < 2; ++i) {
376 // TODO: Can you restrict to one compare via dest masking?
377
378 auto compare_op = instr.common.compare_op;
379 auto op = (i == 0) ? compare_op.x.Value() : compare_op.y.Value();
380
381 switch (op) {
382 case Instruction::Common::CompareOpType::Equal:
383 state.conditional_code[i] = (src1[i] == src2[i]);
384 break;
385
386 case Instruction::Common::CompareOpType::NotEqual:
387 state.conditional_code[i] = (src1[i] != src2[i]);
388 break;
389
390 case Instruction::Common::CompareOpType::LessThan:
391 state.conditional_code[i] = (src1[i] < src2[i]);
392 break;
393
394 case Instruction::Common::CompareOpType::LessEqual:
395 state.conditional_code[i] = (src1[i] <= src2[i]);
396 break;
397
398 case Instruction::Common::CompareOpType::GreaterThan:
399 state.conditional_code[i] = (src1[i] > src2[i]);
400 break;
401
402 case Instruction::Common::CompareOpType::GreaterEqual:
403 state.conditional_code[i] = (src1[i] >= src2[i]);
404 break;
405
406 default:
407 LOG_ERROR(HW_GPU, "Unknown compare mode %x", static_cast<int>(op));
408 break;
409 }
410 }
411 Record<DebugDataRecord::CMP_RESULT>(debug_data, iteration, state.conditional_code);
412 break;
413
414 case OpCode::Id::EX2: {
415 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
416 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
417
418            // EX2 takes the exp2 of the first component only and writes it to all dest components
419 float24 ex2_res = float24::FromFloat32(std::exp2(src1[0].ToFloat32()));
420 for (int i = 0; i < 4; ++i) {
421 if (!swizzle.DestComponentEnabled(i))
422 continue;
423
424 dest[i] = ex2_res;
425 }
426
427 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
428 break;
429 }
430
431 case OpCode::Id::LG2: {
432 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
433 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
434
435            // LG2 takes the log2 of the first component only and writes it to all dest components
436 float24 lg2_res = float24::FromFloat32(std::log2(src1[0].ToFloat32()));
437 for (int i = 0; i < 4; ++i) {
438 if (!swizzle.DestComponentEnabled(i))
439 continue;
440
441 dest[i] = lg2_res;
442 }
443
444 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
445 break;
446 }
447
448 default:
449 LOG_ERROR(HW_GPU, "Unhandled arithmetic instruction: 0x%02x (%s): 0x%08x",
450 (int)instr.opcode.Value().EffectiveOpCode(),
451 instr.opcode.Value().GetInfo().name, instr.hex);
452 DEBUG_ASSERT(false);
453 break;
454 }
455
456 break;
457 }
458
459 case OpCode::Type::MultiplyAdd: {
460 if ((instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD) ||
461 (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI)) {
462 const SwizzlePattern& swizzle = *reinterpret_cast<const SwizzlePattern*>(
463 &swizzle_data[instr.mad.operand_desc_id]);
464
465 bool is_inverted = (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI);
466
467 const int address_offset =
468 (instr.mad.address_register_index == 0)
469 ? 0
470 : state.address_registers[instr.mad.address_register_index - 1];
471
472 const float24* src1_ = LookupSourceRegister(instr.mad.GetSrc1(is_inverted));
473 const float24* src2_ = LookupSourceRegister(instr.mad.GetSrc2(is_inverted) +
474 (!is_inverted * address_offset));
475 const float24* src3_ = LookupSourceRegister(instr.mad.GetSrc3(is_inverted) +
476 (is_inverted * address_offset));
477
478 const bool negate_src1 = ((bool)swizzle.negate_src1 != false);
479 const bool negate_src2 = ((bool)swizzle.negate_src2 != false);
480 const bool negate_src3 = ((bool)swizzle.negate_src3 != false);
481
482 float24 src1[4] = {
483 src1_[(int)swizzle.src1_selector_0.Value()],
484 src1_[(int)swizzle.src1_selector_1.Value()],
485 src1_[(int)swizzle.src1_selector_2.Value()],
486 src1_[(int)swizzle.src1_selector_3.Value()],
487 };
488 if (negate_src1) {
489 src1[0] = -src1[0];
490 src1[1] = -src1[1];
491 src1[2] = -src1[2];
492 src1[3] = -src1[3];
493 }
494 float24 src2[4] = {
495 src2_[(int)swizzle.src2_selector_0.Value()],
496 src2_[(int)swizzle.src2_selector_1.Value()],
497 src2_[(int)swizzle.src2_selector_2.Value()],
498 src2_[(int)swizzle.src2_selector_3.Value()],
499 };
500 if (negate_src2) {
501 src2[0] = -src2[0];
502 src2[1] = -src2[1];
503 src2[2] = -src2[2];
504 src2[3] = -src2[3];
505 }
506 float24 src3[4] = {
507 src3_[(int)swizzle.src3_selector_0.Value()],
508 src3_[(int)swizzle.src3_selector_1.Value()],
509 src3_[(int)swizzle.src3_selector_2.Value()],
510 src3_[(int)swizzle.src3_selector_3.Value()],
511 };
512 if (negate_src3) {
513 src3[0] = -src3[0];
514 src3[1] = -src3[1];
515 src3[2] = -src3[2];
516 src3[3] = -src3[3];
517 }
518
519 float24* dest =
520 (instr.mad.dest.Value() < 0x10)
521 ? &state.registers.output[instr.mad.dest.Value().GetIndex()][0]
522 : (instr.mad.dest.Value() < 0x20)
523 ? &state.registers.temporary[instr.mad.dest.Value().GetIndex()][0]
524 : dummy_vec4_float24;
525
526 Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
527 Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
528 Record<DebugDataRecord::SRC3>(debug_data, iteration, src3);
529 Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
530 for (int i = 0; i < 4; ++i) {
531 if (!swizzle.DestComponentEnabled(i))
532 continue;
533
534 dest[i] = src1[i] * src2[i] + src3[i];
535 }
536 Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
537 } else {
538 LOG_ERROR(HW_GPU, "Unhandled multiply-add instruction: 0x%02x (%s): 0x%08x",
539 (int)instr.opcode.Value().EffectiveOpCode(),
540 instr.opcode.Value().GetInfo().name, instr.hex);
541 }
542 break;
543 }
544
545 default: {
546 // Handle each instruction on its own
547 switch (instr.opcode.Value()) {
548 case OpCode::Id::END:
549 exit_loop = true;
550 break;
551
552 case OpCode::Id::JMPC:
553 Record<DebugDataRecord::COND_CMP_IN>(debug_data, iteration, state.conditional_code);
554 if (evaluate_condition(instr.flow_control)) {
555 program_counter = instr.flow_control.dest_offset - 1;
556 }
557 break;
558
559 case OpCode::Id::JMPU:
560 Record<DebugDataRecord::COND_BOOL_IN>(
561 debug_data, iteration, uniforms.b[instr.flow_control.bool_uniform_id]);
562
563 if (uniforms.b[instr.flow_control.bool_uniform_id] ==
564 !(instr.flow_control.num_instructions & 1)) {
565 program_counter = instr.flow_control.dest_offset - 1;
566 }
567 break;
568
569 case OpCode::Id::CALL:
570 call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
571 program_counter + 1, 0, 0);
572 break;
573
574 case OpCode::Id::CALLU:
575 Record<DebugDataRecord::COND_BOOL_IN>(
576 debug_data, iteration, uniforms.b[instr.flow_control.bool_uniform_id]);
577 if (uniforms.b[instr.flow_control.bool_uniform_id]) {
578 call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
579 program_counter + 1, 0, 0);
580 }
581 break;
582
583 case OpCode::Id::CALLC:
584 Record<DebugDataRecord::COND_CMP_IN>(debug_data, iteration, state.conditional_code);
585 if (evaluate_condition(instr.flow_control)) {
586 call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
587 program_counter + 1, 0, 0);
588 }
589 break;
590
591 case OpCode::Id::NOP:
592 break;
593
594 case OpCode::Id::IFU:
595 Record<DebugDataRecord::COND_BOOL_IN>(
596 debug_data, iteration, uniforms.b[instr.flow_control.bool_uniform_id]);
597 if (uniforms.b[instr.flow_control.bool_uniform_id]) {
598 call(program_counter + 1, instr.flow_control.dest_offset - program_counter - 1,
599 instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
600 0);
601 } else {
602 call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
603 instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
604 0);
605 }
606
607 break;
608
609 case OpCode::Id::IFC: {
610 // TODO: Do we need to consider swizzlers here?
611
612 Record<DebugDataRecord::COND_CMP_IN>(debug_data, iteration, state.conditional_code);
613 if (evaluate_condition(instr.flow_control)) {
614 call(program_counter + 1, instr.flow_control.dest_offset - program_counter - 1,
615 instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
616 0);
617 } else {
618 call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
619 instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
620 0);
621 }
622
623 break;
624 }
625
626 case OpCode::Id::LOOP: {
627 Math::Vec4<u8> loop_param(uniforms.i[instr.flow_control.int_uniform_id].x,
628 uniforms.i[instr.flow_control.int_uniform_id].y,
629 uniforms.i[instr.flow_control.int_uniform_id].z,
630 uniforms.i[instr.flow_control.int_uniform_id].w);
631 state.address_registers[2] = loop_param.y;
632
633 Record<DebugDataRecord::LOOP_INT_IN>(debug_data, iteration, loop_param);
634 call(program_counter + 1, instr.flow_control.dest_offset - program_counter,
635 instr.flow_control.dest_offset + 1, loop_param.x, loop_param.z);
636 break;
637 }
638
639 case OpCode::Id::EMIT: {
640 GSEmitter* emitter = state.emitter_ptr;
641 ASSERT_MSG(emitter, "Execute EMIT on VS");
642 emitter->Emit(state.registers.output);
643 break;
644 }
645
646 case OpCode::Id::SETEMIT: {
647 GSEmitter* emitter = state.emitter_ptr;
648 ASSERT_MSG(emitter, "Execute SETEMIT on VS");
649 emitter->vertex_id = instr.setemit.vertex_id;
650 emitter->prim_emit = instr.setemit.prim_emit != 0;
651 emitter->winding = instr.setemit.winding != 0;
652 break;
653 }
654
655 default:
656 LOG_ERROR(HW_GPU, "Unhandled instruction: 0x%02x (%s): 0x%08x",
657 (int)instr.opcode.Value().EffectiveOpCode(),
658 instr.opcode.Value().GetInfo().name, instr.hex);
659 break;
660 }
661
662 break;
663 }
664 }
665
666 ++program_counter;
667 ++iteration;
668 }
669}
670
671void InterpreterEngine::SetupBatch(ShaderSetup& setup, unsigned int entry_point) {
672 ASSERT(entry_point < MAX_PROGRAM_CODE_LENGTH);
673 setup.engine_data.entry_point = entry_point;
674}
675
676MICROPROFILE_DECLARE(GPU_Shader);
677
678void InterpreterEngine::Run(const ShaderSetup& setup, UnitState& state) const {
679
680 MICROPROFILE_SCOPE(GPU_Shader);
681
682 DebugData<false> dummy_debug_data;
683 RunInterpreter(setup, state, dummy_debug_data, setup.engine_data.entry_point);
684}
685
686DebugData<true> InterpreterEngine::ProduceDebugInfo(const ShaderSetup& setup,
687 const AttributeBuffer& input,
688 const ShaderRegs& config) const {
689 UnitState state;
690 DebugData<true> debug_data;
691
692 // Setup input register table
693 boost::fill(state.registers.input, Math::Vec4<float24>::AssignToAll(float24::Zero()));
694 state.LoadInput(config, input);
695 RunInterpreter(setup, state, debug_data, setup.engine_data.entry_point);
696 return debug_data;
697}
698
699} // namespace
700
701} // namespace
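A condensed sketch of the condition evaluation used by the interpreter's flow-control handling above: the two condition-code bits are compared against the instruction's reference values and combined according to the Or/And/JustX/JustY operation. Enum and parameter names are simplified stand-ins for the nihstro types.

#include <cstdio>

enum class FlowOp { Or, And, JustX, JustY };

bool EvaluateCondition(FlowOp op, bool refx, bool refy, const bool cond[2]) {
    const bool result_x = refx == cond[0];
    const bool result_y = refy == cond[1];
    switch (op) {
    case FlowOp::Or:
        return result_x || result_y;
    case FlowOp::And:
        return result_x && result_y;
    case FlowOp::JustX:
        return result_x;
    case FlowOp::JustY:
        return result_y;
    }
    return false;
}

int main() {
    const bool cond[2] = {true, false};
    std::printf("%d\n", EvaluateCondition(FlowOp::And, true, false, cond)); // prints 1
}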
diff --git a/src/video_core/shader/shader_interpreter.h b/src/video_core/shader/shader_interpreter.h
deleted file mode 100644
index 50fd7c69d..000000000
--- a/src/video_core/shader/shader_interpreter.h
+++ /dev/null
@@ -1,32 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "video_core/shader/debug_data.h"
8#include "video_core/shader/shader.h"
9
10namespace Pica {
11
12namespace Shader {
13
14class InterpreterEngine final : public ShaderEngine {
15public:
16 void SetupBatch(ShaderSetup& setup, unsigned int entry_point) override;
17 void Run(const ShaderSetup& setup, UnitState& state) const override;
18
19 /**
20 * Produce debug information based on the given shader and input vertex
21 * @param setup Shader engine state
22 * @param input Input vertex into the shader
23 * @param config Configuration object for the shader pipeline
24     * @return Debug information for this shader with regard to the given vertex
25 */
26 DebugData<true> ProduceDebugInfo(const ShaderSetup& setup, const AttributeBuffer& input,
27 const ShaderRegs& config) const;
28};
29
30} // namespace
31
32} // namespace
diff --git a/src/video_core/shader/shader_jit_x64.cpp b/src/video_core/shader/shader_jit_x64.cpp
deleted file mode 100644
index 73c21871c..000000000
--- a/src/video_core/shader/shader_jit_x64.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/hash.h"
6#include "common/microprofile.h"
7#include "video_core/shader/shader.h"
8#include "video_core/shader/shader_jit_x64.h"
9#include "video_core/shader/shader_jit_x64_compiler.h"
10
11namespace Pica {
12namespace Shader {
13
14JitX64Engine::JitX64Engine() = default;
15JitX64Engine::~JitX64Engine() = default;
16
17void JitX64Engine::SetupBatch(ShaderSetup& setup, unsigned int entry_point) {
18 ASSERT(entry_point < MAX_PROGRAM_CODE_LENGTH);
19 setup.engine_data.entry_point = entry_point;
20
21 u64 code_hash = Common::ComputeHash64(&setup.program_code, sizeof(setup.program_code));
22 u64 swizzle_hash = Common::ComputeHash64(&setup.swizzle_data, sizeof(setup.swizzle_data));
23
24 u64 cache_key = code_hash ^ swizzle_hash;
25 auto iter = cache.find(cache_key);
26 if (iter != cache.end()) {
27 setup.engine_data.cached_shader = iter->second.get();
28 } else {
29 auto shader = std::make_unique<JitShader>();
30 shader->Compile(&setup.program_code, &setup.swizzle_data);
31 setup.engine_data.cached_shader = shader.get();
32 cache.emplace_hint(iter, cache_key, std::move(shader));
33 }
34}
35
36MICROPROFILE_DECLARE(GPU_Shader);
37
38void JitX64Engine::Run(const ShaderSetup& setup, UnitState& state) const {
39 ASSERT(setup.engine_data.cached_shader != nullptr);
40
41 MICROPROFILE_SCOPE(GPU_Shader);
42
43 const JitShader* shader = static_cast<const JitShader*>(setup.engine_data.cached_shader);
44 shader->Run(setup, state, setup.engine_data.entry_point);
45}
46
47} // namespace Shader
48} // namespace Pica
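A minimal sketch of the cache lookup in JitX64Engine::SetupBatch above: compiled shaders are keyed by a combined hash of the program code and swizzle data, so a program that was already compiled is reused instead of recompiled. std::hash stands in for Common::ComputeHash64, and the Compiled type is a placeholder for JitShader.

#include <cstdint>
#include <cstdio>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

struct Compiled {
    explicit Compiled(std::string src) : source(std::move(src)) {}
    std::string source;
};

class ShaderCache {
public:
    const Compiled* GetOrCompile(const std::string& program, const std::string& swizzles) {
        const std::uint64_t key =
            std::hash<std::string>{}(program) ^ std::hash<std::string>{}(swizzles);
        auto iter = cache.find(key);
        if (iter == cache.end()) {
            iter = cache.emplace(key, std::make_unique<Compiled>(program)).first;
            std::printf("compiled new shader\n");
        }
        return iter->second.get();
    }

private:
    std::unordered_map<std::uint64_t, std::unique_ptr<Compiled>> cache;
};

int main() {
    ShaderCache cache;
    cache.GetOrCompile("mov o0, v0", "xyzw"); // compiles
    cache.GetOrCompile("mov o0, v0", "xyzw"); // cache hit, nothing recompiled
}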
diff --git a/src/video_core/shader/shader_jit_x64.h b/src/video_core/shader/shader_jit_x64.h
deleted file mode 100644
index 078b2cba5..000000000
--- a/src/video_core/shader/shader_jit_x64.h
+++ /dev/null
@@ -1,30 +0,0 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <unordered_map>
9#include "common/common_types.h"
10#include "video_core/shader/shader.h"
11
12namespace Pica {
13namespace Shader {
14
15class JitShader;
16
17class JitX64Engine final : public ShaderEngine {
18public:
19 JitX64Engine();
20 ~JitX64Engine() override;
21
22 void SetupBatch(ShaderSetup& setup, unsigned int entry_point) override;
23 void Run(const ShaderSetup& setup, UnitState& state) const override;
24
25private:
26 std::unordered_map<u64, std::unique_ptr<JitShader>> cache;
27};
28
29} // namespace Shader
30} // namespace Pica
diff --git a/src/video_core/shader/shader_jit_x64_compiler.cpp b/src/video_core/shader/shader_jit_x64_compiler.cpp
deleted file mode 100644
index 1b31623bd..000000000
--- a/src/video_core/shader/shader_jit_x64_compiler.cpp
+++ /dev/null
@@ -1,942 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cmath>
7#include <cstdint>
8#include <nihstro/shader_bytecode.h>
9#include <smmintrin.h>
10#include <xmmintrin.h>
11#include "common/assert.h"
12#include "common/logging/log.h"
13#include "common/vector_math.h"
14#include "common/x64/cpu_detect.h"
15#include "common/x64/xbyak_abi.h"
16#include "common/x64/xbyak_util.h"
17#include "video_core/pica_state.h"
18#include "video_core/pica_types.h"
19#include "video_core/shader/shader.h"
20#include "video_core/shader/shader_jit_x64_compiler.h"
21
22using namespace Common::X64;
23using namespace Xbyak::util;
24using Xbyak::Label;
25using Xbyak::Reg32;
26using Xbyak::Reg64;
27using Xbyak::Xmm;
28
29namespace Pica {
30
31namespace Shader {
32
33typedef void (JitShader::*JitFunction)(Instruction instr);
34
35const JitFunction instr_table[64] = {
36 &JitShader::Compile_ADD, // add
37 &JitShader::Compile_DP3, // dp3
38 &JitShader::Compile_DP4, // dp4
39 &JitShader::Compile_DPH, // dph
40 nullptr, // unknown
41 &JitShader::Compile_EX2, // ex2
42 &JitShader::Compile_LG2, // lg2
43 nullptr, // unknown
44 &JitShader::Compile_MUL, // mul
45 &JitShader::Compile_SGE, // sge
46 &JitShader::Compile_SLT, // slt
47 &JitShader::Compile_FLR, // flr
48 &JitShader::Compile_MAX, // max
49 &JitShader::Compile_MIN, // min
50 &JitShader::Compile_RCP, // rcp
51 &JitShader::Compile_RSQ, // rsq
52 nullptr, // unknown
53 nullptr, // unknown
54 &JitShader::Compile_MOVA, // mova
55 &JitShader::Compile_MOV, // mov
56 nullptr, // unknown
57 nullptr, // unknown
58 nullptr, // unknown
59 nullptr, // unknown
60 &JitShader::Compile_DPH, // dphi
61 nullptr, // unknown
62 &JitShader::Compile_SGE, // sgei
63 &JitShader::Compile_SLT, // slti
64 nullptr, // unknown
65 nullptr, // unknown
66 nullptr, // unknown
67 nullptr, // unknown
68 nullptr, // unknown
69 &JitShader::Compile_NOP, // nop
70 &JitShader::Compile_END, // end
71 nullptr, // break
72 &JitShader::Compile_CALL, // call
73 &JitShader::Compile_CALLC, // callc
74 &JitShader::Compile_CALLU, // callu
75 &JitShader::Compile_IF, // ifu
76 &JitShader::Compile_IF, // ifc
77 &JitShader::Compile_LOOP, // loop
78 &JitShader::Compile_EMIT, // emit
79 &JitShader::Compile_SETE, // sete
80 &JitShader::Compile_JMP, // jmpc
81 &JitShader::Compile_JMP, // jmpu
82 &JitShader::Compile_CMP, // cmp
83 &JitShader::Compile_CMP, // cmp
84 &JitShader::Compile_MAD, // madi
85 &JitShader::Compile_MAD, // madi
86 &JitShader::Compile_MAD, // madi
87 &JitShader::Compile_MAD, // madi
88 &JitShader::Compile_MAD, // madi
89 &JitShader::Compile_MAD, // madi
90 &JitShader::Compile_MAD, // madi
91 &JitShader::Compile_MAD, // madi
92 &JitShader::Compile_MAD, // mad
93 &JitShader::Compile_MAD, // mad
94 &JitShader::Compile_MAD, // mad
95 &JitShader::Compile_MAD, // mad
96 &JitShader::Compile_MAD, // mad
97 &JitShader::Compile_MAD, // mad
98 &JitShader::Compile_MAD, // mad
99 &JitShader::Compile_MAD, // mad
100};
101
102// The following is used to alias some commonly used registers. Generally, RAX-RDX and XMM0-XMM3 can
103// be used as scratch registers within a compiler function. The other registers have designated
104// purposes, as documented below:
105
106/// Pointer to the uniform memory
107static const Reg64 SETUP = r9;
108/// The two 32-bit VS address offset registers set by the MOVA instruction
109static const Reg64 ADDROFFS_REG_0 = r10;
110static const Reg64 ADDROFFS_REG_1 = r11;
111/// VS loop count register (Multiplied by 16)
112static const Reg32 LOOPCOUNT_REG = r12d;
113/// Current VS loop iteration number (we could probably use LOOPCOUNT_REG, but this is quicker)
114static const Reg32 LOOPCOUNT = esi;
115/// Number to increment LOOPCOUNT_REG by on each loop iteration (Multiplied by 16)
116static const Reg32 LOOPINC = edi;
117/// Result of the previous CMP instruction for the X-component comparison
118static const Reg64 COND0 = r13;
119/// Result of the previous CMP instruction for the Y-component comparison
120static const Reg64 COND1 = r14;
121/// Pointer to the UnitState instance for the current VS unit
122static const Reg64 STATE = r15;
123/// SIMD scratch register
124static const Xmm SCRATCH = xmm0;
125/// Loaded with the first swizzled source register, otherwise can be used as a scratch register
126static const Xmm SRC1 = xmm1;
127/// Loaded with the second swizzled source register, otherwise can be used as a scratch register
128static const Xmm SRC2 = xmm2;
129/// Loaded with the third swizzled source register, otherwise can be used as a scratch register
130static const Xmm SRC3 = xmm3;
131/// Additional scratch register
132static const Xmm SCRATCH2 = xmm4;
133/// Constant vector of [1.0f, 1.0f, 1.0f, 1.0f], used to efficiently set a vector to one
134static const Xmm ONE = xmm14;
135/// Constant vector of [-0.f, -0.f, -0.f, -0.f], used to efficiently negate a vector with XOR
136static const Xmm NEGBIT = xmm15;
137
138// State registers that must not be modified by external function calls
139// Scratch registers, e.g., SRC1 and SCRATCH, have to be saved on the side if needed
140static const BitSet32 persistent_regs = BuildRegSet({
141 // Pointers to register blocks
142 SETUP, STATE,
143 // Cached registers
144 ADDROFFS_REG_0, ADDROFFS_REG_1, LOOPCOUNT_REG, COND0, COND1,
145 // Constants
146 ONE, NEGBIT,
147 // Loop variables
148 LOOPCOUNT, LOOPINC,
149});
150
151/// Raw constant for the source register selector that indicates no swizzling is performed
152static const u8 NO_SRC_REG_SWIZZLE = 0x1b;
153/// Raw constant for the destination register enable mask that indicates all components are enabled
154static const u8 NO_DEST_REG_MASK = 0xf;
155
156static void LogCritical(const char* msg) {
157 LOG_CRITICAL(HW_GPU, "%s", msg);
158}
159
160void JitShader::Compile_Assert(bool condition, const char* msg) {
161 if (!condition) {
162 mov(ABI_PARAM1, reinterpret_cast<size_t>(msg));
163 CallFarFunction(*this, LogCritical);
164 }
165}
166
167/**
168 * Loads and swizzles a source register into the specified XMM register.
169 * @param instr VS instruction, used for determining how to load the source register
170 * @param src_num Number indicating which source register to load (1 = src1, 2 = src2, 3 = src3)
171 * @param src_reg SourceRegister object corresponding to the source register to load
172 * @param dest Destination XMM register to store the loaded, swizzled source register
173 */
174void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
175 Xmm dest) {
176 Reg64 src_ptr;
177 size_t src_offset;
178
179 if (src_reg.GetRegisterType() == RegisterType::FloatUniform) {
180 src_ptr = SETUP;
181 src_offset = ShaderSetup::GetFloatUniformOffset(src_reg.GetIndex());
182 } else {
183 src_ptr = STATE;
184 src_offset = UnitState::InputOffset(src_reg);
185 }
186
187 int src_offset_disp = (int)src_offset;
188 ASSERT_MSG(src_offset == src_offset_disp, "Source register offset too large for int type");
189
190 unsigned operand_desc_id;
191
192 const bool is_inverted =
193 (0 != (instr.opcode.Value().GetInfo().subtype & OpCode::Info::SrcInversed));
194
195 unsigned address_register_index;
196 unsigned offset_src;
197
198 if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD ||
199 instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
200 operand_desc_id = instr.mad.operand_desc_id;
201 offset_src = is_inverted ? 3 : 2;
202 address_register_index = instr.mad.address_register_index;
203 } else {
204 operand_desc_id = instr.common.operand_desc_id;
205 offset_src = is_inverted ? 2 : 1;
206 address_register_index = instr.common.address_register_index;
207 }
208
209 if (src_num == offset_src && address_register_index != 0) {
210 switch (address_register_index) {
211 case 1: // address offset 1
212 movaps(dest, xword[src_ptr + ADDROFFS_REG_0 + src_offset_disp]);
213 break;
214 case 2: // address offset 2
215 movaps(dest, xword[src_ptr + ADDROFFS_REG_1 + src_offset_disp]);
216 break;
217 case 3: // address offset 3
218 movaps(dest, xword[src_ptr + LOOPCOUNT_REG.cvt64() + src_offset_disp]);
219 break;
220 default:
221 UNREACHABLE();
222 break;
223 }
224 } else {
225 // Load the source
226 movaps(dest, xword[src_ptr + src_offset_disp]);
227 }
228
229 SwizzlePattern swiz = {(*swizzle_data)[operand_desc_id]};
230
231 // Generate instructions for source register swizzling as needed
232 u8 sel = swiz.GetRawSelector(src_num);
233 if (sel != NO_SRC_REG_SWIZZLE) {
234 // Selector component order needs to be reversed for the SHUFPS instruction
235 sel = ((sel & 0xc0) >> 6) | ((sel & 3) << 6) | ((sel & 0xc) << 2) | ((sel & 0x30) >> 2);
236
237 // Shuffle inputs for swizzle
238 shufps(dest, dest, sel);
239 }
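        // [Editor's note, not part of the original file] Worked example of the field
        // reversal above: applied to the "no swizzle" selector 0x1b (2-bit fields
        // 00 01 10 11, stored high to low) it would yield 0xe4 == _MM_SHUFFLE(3, 2, 1, 0),
        // the identity selector for SHUFPS -- which is why the shuffle can be skipped
        // entirely for that selector.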
240
241 // If the source register should be negated, flip the negative bit using XOR
242 const bool negate[] = {swiz.negate_src1, swiz.negate_src2, swiz.negate_src3};
243 if (negate[src_num - 1]) {
244 xorps(dest, NEGBIT);
245 }
246}
247
248void JitShader::Compile_DestEnable(Instruction instr, Xmm src) {
249 DestRegister dest;
250 unsigned operand_desc_id;
251 if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD ||
252 instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
253 operand_desc_id = instr.mad.operand_desc_id;
254 dest = instr.mad.dest.Value();
255 } else {
256 operand_desc_id = instr.common.operand_desc_id;
257 dest = instr.common.dest.Value();
258 }
259
260 SwizzlePattern swiz = {(*swizzle_data)[operand_desc_id]};
261
262 size_t dest_offset_disp = UnitState::OutputOffset(dest);
263
264 // If all components are enabled, write the result to the destination register
265 if (swiz.dest_mask == NO_DEST_REG_MASK) {
266 // Store dest back to memory
267 movaps(xword[STATE + dest_offset_disp], src);
268
269 } else {
270 // Not all components are enabled, so mask the result when storing to the destination
271 // register...
272 movaps(SCRATCH, xword[STATE + dest_offset_disp]);
273
274 if (Common::GetCPUCaps().sse4_1) {
275 u8 mask = ((swiz.dest_mask & 1) << 3) | ((swiz.dest_mask & 8) >> 3) |
276 ((swiz.dest_mask & 2) << 1) | ((swiz.dest_mask & 4) >> 1);
277 blendps(SCRATCH, src, mask);
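            // [Editor's note, not part of the original file] The PICA mask stores the
            // X component in its most significant bit while BLENDPS expects X in the
            // least significant bit, so the 4-bit mask is bit-reversed above; e.g. a
            // dest_mask of 0b1000 (write X only) becomes a BLENDPS mask of 0b0001.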
278 } else {
279 movaps(SCRATCH2, src);
280 unpckhps(SCRATCH2, SCRATCH); // Unpack X/Y components of source and destination
281 unpcklps(SCRATCH, src); // Unpack Z/W components of source and destination
282
283 // Compute selector to selectively copy source components to destination for SHUFPS
284 // instruction
285 u8 sel = ((swiz.DestComponentEnabled(0) ? 1 : 0) << 0) |
286 ((swiz.DestComponentEnabled(1) ? 3 : 2) << 2) |
287 ((swiz.DestComponentEnabled(2) ? 0 : 1) << 4) |
288 ((swiz.DestComponentEnabled(3) ? 2 : 3) << 6);
289 shufps(SCRATCH, SCRATCH2, sel);
290 }
291
292 // Store dest back to memory
293 movaps(xword[STATE + dest_offset_disp], SCRATCH);
294 }
295}
296
297void JitShader::Compile_SanitizedMul(Xmm src1, Xmm src2, Xmm scratch) {
298 // 0 * inf and inf * 0 in the PICA should return 0 instead of NaN. This can be implemented by
299 // checking for NaNs before and after the multiplication. If the multiplication result is NaN
300 // where neither source was, this NaN was generated by a 0 * inf multiplication, and so the
301 // result should be transformed to 0 to match PICA fp rules.
302
303 // Set scratch to mask of (src1 != NaN and src2 != NaN)
304 movaps(scratch, src1);
305 cmpordps(scratch, src2);
306
307 mulps(src1, src2);
308
309 // Set src2 to mask of (result == NaN)
310 movaps(src2, src1);
311 cmpunordps(src2, src2);
312
313 // Clear components where scratch matches src2, i.e. where the result is NaN even though
314 // neither source was NaN (the 0 * inf case)
314 xorps(scratch, src2);
315 andps(src1, scratch);
316}
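
// [Editor's note -- illustrative sketch, not part of the original file] A scalar
// reference for the semantics the SSE sequence above implements, assuming <cmath>
// is available; the function name is hypothetical.
static float SanitizedMulScalar(float a, float b) {
    const float product = a * b;
    // IEEE-754 gives NaN for 0 * inf; the PICA returns 0 instead. A NaN result from
    // two non-NaN inputs can only come from 0 * inf, so force it to 0.
    if (!std::isnan(a) && !std::isnan(b) && std::isnan(product))
        return 0.0f;
    return product;
}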
317
318void JitShader::Compile_EvaluateCondition(Instruction instr) {
319 // Note: NXOR is used below to check for equality
320 switch (instr.flow_control.op) {
321 case Instruction::FlowControlType::Or:
322 mov(eax, COND0);
323 mov(ebx, COND1);
324 xor_(eax, (instr.flow_control.refx.Value() ^ 1));
325 xor_(ebx, (instr.flow_control.refy.Value() ^ 1));
326 or_(eax, ebx);
327 break;
328
329 case Instruction::FlowControlType::And:
330 mov(eax, COND0);
331 mov(ebx, COND1);
332 xor_(eax, (instr.flow_control.refx.Value() ^ 1));
333 xor_(ebx, (instr.flow_control.refy.Value() ^ 1));
334 and_(eax, ebx);
335 break;
336
337 case Instruction::FlowControlType::JustX:
338 mov(eax, COND0);
339 xor_(eax, (instr.flow_control.refx.Value() ^ 1));
340 break;
341
342 case Instruction::FlowControlType::JustY:
343 mov(eax, COND1);
344 xor_(eax, (instr.flow_control.refy.Value() ^ 1));
345 break;
346 }
347}
348
349void JitShader::Compile_UniformCondition(Instruction instr) {
350 size_t offset = ShaderSetup::GetBoolUniformOffset(instr.flow_control.bool_uniform_id);
351 cmp(byte[SETUP + offset], 0);
352}
353
354BitSet32 JitShader::PersistentCallerSavedRegs() {
355 return persistent_regs & ABI_ALL_CALLER_SAVED;
356}
357
358void JitShader::Compile_ADD(Instruction instr) {
359 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
360 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
361 addps(SRC1, SRC2);
362 Compile_DestEnable(instr, SRC1);
363}
364
365void JitShader::Compile_DP3(Instruction instr) {
366 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
367 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
368
369 Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
370
371 movaps(SRC2, SRC1);
372 shufps(SRC2, SRC2, _MM_SHUFFLE(1, 1, 1, 1));
373
374 movaps(SRC3, SRC1);
375 shufps(SRC3, SRC3, _MM_SHUFFLE(2, 2, 2, 2));
376
377 shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0));
378 addps(SRC1, SRC2);
379 addps(SRC1, SRC3);
380
381 Compile_DestEnable(instr, SRC1);
382}
383
384void JitShader::Compile_DP4(Instruction instr) {
385 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
386 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
387
388 Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
389
390 movaps(SRC2, SRC1);
391 shufps(SRC1, SRC1, _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
392 addps(SRC1, SRC2);
393
394 movaps(SRC2, SRC1);
395 shufps(SRC1, SRC1, _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
396 addps(SRC1, SRC2);
397
398 Compile_DestEnable(instr, SRC1);
399}
400
401void JitShader::Compile_DPH(Instruction instr) {
402 if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::DPHI) {
403 Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
404 Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
405 } else {
406 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
407 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
408 }
409
410 if (Common::GetCPUCaps().sse4_1) {
411 // Set 4th component to 1.0
412 blendps(SRC1, ONE, 0b1000);
413 } else {
414 // Set 4th component to 1.0
415 movaps(SCRATCH, SRC1);
416 unpckhps(SCRATCH, ONE); // XYZW, 1111 -> Z1__
417 unpcklpd(SRC1, SCRATCH); // XYZW, Z1__ -> XYZ1
418 }
419
420 Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
421
422 movaps(SRC2, SRC1);
423 shufps(SRC1, SRC1, _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
424 addps(SRC1, SRC2);
425
426 movaps(SRC2, SRC1);
427 shufps(SRC1, SRC1, _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
428 addps(SRC1, SRC2);
429
430 Compile_DestEnable(instr, SRC1);
431}
432
433void JitShader::Compile_EX2(Instruction instr) {
434 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
435 movss(xmm0, SRC1); // ABI_PARAM1
436
437 ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
438 CallFarFunction(*this, exp2f);
439 ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
440
441 shufps(xmm0, xmm0, _MM_SHUFFLE(0, 0, 0, 0)); // ABI_RETURN
442 movaps(SRC1, xmm0);
443 Compile_DestEnable(instr, SRC1);
444}
445
446void JitShader::Compile_LG2(Instruction instr) {
447 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
448 movss(xmm0, SRC1); // ABI_PARAM1
449
450 ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
451 CallFarFunction(*this, log2f);
452 ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
453
454 shufps(xmm0, xmm0, _MM_SHUFFLE(0, 0, 0, 0)); // ABI_RETURN
455 movaps(SRC1, xmm0);
456 Compile_DestEnable(instr, SRC1);
457}
458
459void JitShader::Compile_MUL(Instruction instr) {
460 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
461 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
462 Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
463 Compile_DestEnable(instr, SRC1);
464}
465
466void JitShader::Compile_SGE(Instruction instr) {
467 if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::SGEI) {
468 Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
469 Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
470 } else {
471 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
472 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
473 }
474
475 cmpleps(SRC2, SRC1);
476 andps(SRC2, ONE);
477
478 Compile_DestEnable(instr, SRC2);
479}
480
481void JitShader::Compile_SLT(Instruction instr) {
482 if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::SLTI) {
483 Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
484 Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
485 } else {
486 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
487 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
488 }
489
490 cmpltps(SRC1, SRC2);
491 andps(SRC1, ONE);
492
493 Compile_DestEnable(instr, SRC1);
494}
495
496void JitShader::Compile_FLR(Instruction instr) {
497 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
498
499 if (Common::GetCPUCaps().sse4_1) {
500 roundps(SRC1, SRC1, _MM_FROUND_FLOOR);
501 } else {
502 cvttps2dq(SRC1, SRC1);
503 cvtdq2ps(SRC1, SRC1);
504 }
505
506 Compile_DestEnable(instr, SRC1);
507}
508
509void JitShader::Compile_MAX(Instruction instr) {
510 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
511 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
512 // SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
513 maxps(SRC1, SRC2);
514 Compile_DestEnable(instr, SRC1);
515}
516
517void JitShader::Compile_MIN(Instruction instr) {
518 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
519 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
520 // SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
521 minps(SRC1, SRC2);
522 Compile_DestEnable(instr, SRC1);
523}
524
525void JitShader::Compile_MOVA(Instruction instr) {
526 SwizzlePattern swiz = {(*swizzle_data)[instr.common.operand_desc_id]};
527
528 if (!swiz.DestComponentEnabled(0) && !swiz.DestComponentEnabled(1)) {
529 return; // NoOp
530 }
531
532 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
533
534 // Convert floats to integers using truncation (only care about X and Y components)
535 cvttps2dq(SRC1, SRC1);
536
537 // Get result
538 movq(rax, SRC1);
539
540 // Handle destination enable
541 if (swiz.DestComponentEnabled(0) && swiz.DestComponentEnabled(1)) {
542 // Move and sign-extend low 32 bits
543 movsxd(ADDROFFS_REG_0, eax);
544
545 // Move and sign-extend high 32 bits
546 shr(rax, 32);
547 movsxd(ADDROFFS_REG_1, eax);
548
549 // Multiply by 16 to be used as an offset later
550 shl(ADDROFFS_REG_0, 4);
551 shl(ADDROFFS_REG_1, 4);
552 } else {
553 if (swiz.DestComponentEnabled(0)) {
554 // Move and sign-extend low 32 bits
555 movsxd(ADDROFFS_REG_0, eax);
556
557 // Multiply by 16 to be used as an offset later
558 shl(ADDROFFS_REG_0, 4);
559 } else if (swiz.DestComponentEnabled(1)) {
560 // Move and sign-extend high 32 bits
561 shr(rax, 32);
562 movsxd(ADDROFFS_REG_1, eax);
563
564 // Multiply by 16 to be used as an offset later
565 shl(ADDROFFS_REG_1, 4);
566 }
567 }
568}
569
570void JitShader::Compile_MOV(Instruction instr) {
571 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
572 Compile_DestEnable(instr, SRC1);
573}
574
575void JitShader::Compile_RCP(Instruction instr) {
576 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
577
578 // TODO(bunnei): RCPSS is a pretty rough approximation, this might cause problems if Pica
579 // performs this operation more accurately. This should be checked on hardware.
580 rcpss(SRC1, SRC1);
581 shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0)); // XYZW -> XXXX
582
583 Compile_DestEnable(instr, SRC1);
584}
585
586void JitShader::Compile_RSQ(Instruction instr) {
587 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
588
589 // TODO(bunnei): RSQRTSS is a pretty rough approximation, this might cause problems if Pica
590 // performs this operation more accurately. This should be checked on hardware.
591 rsqrtss(SRC1, SRC1);
592 shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0)); // XYZW -> XXXX
593
594 Compile_DestEnable(instr, SRC1);
595}
596
597void JitShader::Compile_NOP(Instruction instr) {}
598
599void JitShader::Compile_END(Instruction instr) {
600 ABI_PopRegistersAndAdjustStack(*this, ABI_ALL_CALLEE_SAVED, 8, 16);
601 ret();
602}
603
604void JitShader::Compile_CALL(Instruction instr) {
605 // Push offset of the return
606 push(qword, (instr.flow_control.dest_offset + instr.flow_control.num_instructions));
607
608 // Call the subroutine
609 call(instruction_labels[instr.flow_control.dest_offset]);
610
611 // Skip over the return offset that's on the stack
612 add(rsp, 8);
613}
614
615void JitShader::Compile_CALLC(Instruction instr) {
616 Compile_EvaluateCondition(instr);
617 Label b;
618 jz(b);
619 Compile_CALL(instr);
620 L(b);
621}
622
623void JitShader::Compile_CALLU(Instruction instr) {
624 Compile_UniformCondition(instr);
625 Label b;
626 jz(b);
627 Compile_CALL(instr);
628 L(b);
629}
630
631void JitShader::Compile_CMP(Instruction instr) {
632 using Op = Instruction::Common::CompareOpType::Op;
633 Op op_x = instr.common.compare_op.x;
634 Op op_y = instr.common.compare_op.y;
635
636 Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
637 Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
638
639 // SSE doesn't have greater-than (GT) or greater-equal (GE) comparison operators. You need to
640 // emulate them by swapping the lhs and rhs and using LT and LE. NLT and NLE can't be used here
641 // because they don't match when used with NaNs.
642 static const u8 cmp[] = {CMP_EQ, CMP_NEQ, CMP_LT, CMP_LE, CMP_LT, CMP_LE};
643
644 bool invert_op_x = (op_x == Op::GreaterThan || op_x == Op::GreaterEqual);
645 Xmm lhs_x = invert_op_x ? SRC2 : SRC1;
646 Xmm rhs_x = invert_op_x ? SRC1 : SRC2;
647
648 if (op_x == op_y) {
649 // Compare X-component and Y-component together
650 cmpps(lhs_x, rhs_x, cmp[op_x]);
651 movq(COND0, lhs_x);
652
653 mov(COND1, COND0);
654 } else {
655 bool invert_op_y = (op_y == Op::GreaterThan || op_y == Op::GreaterEqual);
656 Xmm lhs_y = invert_op_y ? SRC2 : SRC1;
657 Xmm rhs_y = invert_op_y ? SRC1 : SRC2;
658
659 // Compare X-component
660 movaps(SCRATCH, lhs_x);
661 cmpss(SCRATCH, rhs_x, cmp[op_x]);
662
663 // Compare Y-component
664 cmpps(lhs_y, rhs_y, cmp[op_y]);
665
666 movq(COND0, SCRATCH);
667 movq(COND1, lhs_y);
668 }
669
670 shr(COND0.cvt32(), 31); // ignores upper 32 bits in source
671 shr(COND1, 63);
672}
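
// [Editor's note, not part of the original file] CMPPS/CMPSS write 0xFFFFFFFF per
// passing component and 0 otherwise. MOVQ copies the X result into bits 0-31 and
// the Y result into bits 32-63 of the condition register, so the shifts above reduce
// each to a single 0/1 flag: SHR by 31 on the 32-bit view keeps the sign bit of the
// X result, and SHR by 63 keeps the sign bit of the Y result.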
673
674void JitShader::Compile_MAD(Instruction instr) {
675 Compile_SwizzleSrc(instr, 1, instr.mad.src1, SRC1);
676
677 if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
678 Compile_SwizzleSrc(instr, 2, instr.mad.src2i, SRC2);
679 Compile_SwizzleSrc(instr, 3, instr.mad.src3i, SRC3);
680 } else {
681 Compile_SwizzleSrc(instr, 2, instr.mad.src2, SRC2);
682 Compile_SwizzleSrc(instr, 3, instr.mad.src3, SRC3);
683 }
684
685 Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
686 addps(SRC1, SRC3);
687
688 Compile_DestEnable(instr, SRC1);
689}
690
691void JitShader::Compile_IF(Instruction instr) {
692 Compile_Assert(instr.flow_control.dest_offset >= program_counter,
693 "Backwards if-statements not supported");
694 Label l_else, l_endif;
695
696 // Evaluate the "IF" condition
697 if (instr.opcode.Value() == OpCode::Id::IFU) {
698 Compile_UniformCondition(instr);
699 } else if (instr.opcode.Value() == OpCode::Id::IFC) {
700 Compile_EvaluateCondition(instr);
701 }
702 jz(l_else, T_NEAR);
703
704 // Compile the code that corresponds to the condition evaluating as true
705 Compile_Block(instr.flow_control.dest_offset);
706
707 // If there isn't an "ELSE" condition, we are done here
708 if (instr.flow_control.num_instructions == 0) {
709 L(l_else);
710 return;
711 }
712
713 jmp(l_endif, T_NEAR);
714
715 L(l_else);
716 // This code corresponds to the "ELSE" condition
717 // Compile the code that corresponds to the condition evaluating as false
718 Compile_Block(instr.flow_control.dest_offset + instr.flow_control.num_instructions);
719
720 L(l_endif);
721}
722
723void JitShader::Compile_LOOP(Instruction instr) {
724 Compile_Assert(instr.flow_control.dest_offset >= program_counter,
725 "Backwards loops not supported");
726 Compile_Assert(!looping, "Nested loops not supported");
727
728 looping = true;
729
730 // This decodes the fields from the integer uniform at index instr.flow_control.int_uniform_id.
731 // The Y (LOOPCOUNT_REG) and Z (LOOPINC) components are kept multiplied by 16 (left shifted by
732 // 4 bits) to be used as an offset into the 16-byte vector registers later
733 size_t offset = ShaderSetup::GetIntUniformOffset(instr.flow_control.int_uniform_id);
734 mov(LOOPCOUNT, dword[SETUP + offset]);
735 mov(LOOPCOUNT_REG, LOOPCOUNT);
736 shr(LOOPCOUNT_REG, 4);
737 and_(LOOPCOUNT_REG, 0xFF0); // Y-component is the start
738 mov(LOOPINC, LOOPCOUNT);
739 shr(LOOPINC, 12);
740 and_(LOOPINC, 0xFF0); // Z-component is the incrementer
741 movzx(LOOPCOUNT, LOOPCOUNT.cvt8()); // X-component is iteration count
742 add(LOOPCOUNT, 1); // Iteration count is X-component + 1
743
744 Label l_loop_start;
745 L(l_loop_start);
746
747 Compile_Block(instr.flow_control.dest_offset + 1);
748
749 add(LOOPCOUNT_REG, LOOPINC); // Increment LOOPCOUNT_REG by Z-component
750 sub(LOOPCOUNT, 1); // Decrement the loop counter by 1
751 jnz(l_loop_start); // Loop while the counter is not zero
752
753 looping = false;
754}
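
// [Editor's note -- illustrative sketch, not part of the original file] The bit
// twiddling above decodes the packed integer uniform; in plain C++ it corresponds
// roughly to the following (names are hypothetical, u32 is assumed to be the
// project's typedef from common/common_types.h):
struct DecodedLoopUniform {
    u32 iterations;    // X byte + 1: number of loop iterations
    u32 counter_x16;   // Y byte * 16: initial counter, pre-scaled to a register byte offset
    u32 increment_x16; // Z byte * 16: per-iteration increment, pre-scaled the same way
};
static DecodedLoopUniform DecodeLoopUniform(u32 packed) {
    return {(packed & 0xFF) + 1, ((packed >> 8) & 0xFF) * 16, ((packed >> 16) & 0xFF) * 16};
}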
755
756void JitShader::Compile_JMP(Instruction instr) {
757 if (instr.opcode.Value() == OpCode::Id::JMPC)
758 Compile_EvaluateCondition(instr);
759 else if (instr.opcode.Value() == OpCode::Id::JMPU)
760 Compile_UniformCondition(instr);
761 else
762 UNREACHABLE();
763
764 bool inverted_condition =
765 (instr.opcode.Value() == OpCode::Id::JMPU) && (instr.flow_control.num_instructions & 1);
766
767 Label& b = instruction_labels[instr.flow_control.dest_offset];
768 if (inverted_condition) {
769 jz(b, T_NEAR);
770 } else {
771 jnz(b, T_NEAR);
772 }
773}
774
775static void Emit(GSEmitter* emitter, Math::Vec4<float24> (*output)[16]) {
776 emitter->Emit(*output);
777}
778
779void JitShader::Compile_EMIT(Instruction instr) {
780 Label have_emitter, end;
781 mov(rax, qword[STATE + offsetof(UnitState, emitter_ptr)]);
782 test(rax, rax);
783 jnz(have_emitter);
784
785 ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
786 mov(ABI_PARAM1, reinterpret_cast<size_t>("Execute EMIT on VS"));
787 CallFarFunction(*this, LogCritical);
788 ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
789 jmp(end);
790
791 L(have_emitter);
792 ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
793 mov(ABI_PARAM1, rax);
794 mov(ABI_PARAM2, STATE);
795 add(ABI_PARAM2, static_cast<Xbyak::uint32>(offsetof(UnitState, registers.output)));
796 CallFarFunction(*this, Emit);
797 ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
798 L(end);
799}
800
801void JitShader::Compile_SETE(Instruction instr) {
802 Label have_emitter, end;
803 mov(rax, qword[STATE + offsetof(UnitState, emitter_ptr)]);
804 test(rax, rax);
805 jnz(have_emitter);
806
807 ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
808 mov(ABI_PARAM1, reinterpret_cast<size_t>("Execute SETEMIT on VS"));
809 CallFarFunction(*this, LogCritical);
810 ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
811 jmp(end);
812
813 L(have_emitter);
814 mov(byte[rax + offsetof(GSEmitter, vertex_id)], instr.setemit.vertex_id);
815 mov(byte[rax + offsetof(GSEmitter, prim_emit)], instr.setemit.prim_emit);
816 mov(byte[rax + offsetof(GSEmitter, winding)], instr.setemit.winding);
817 L(end);
818}
819
820void JitShader::Compile_Block(unsigned end) {
821 while (program_counter < end) {
822 Compile_NextInstr();
823 }
824}
825
826void JitShader::Compile_Return() {
827 // Peek return offset on the stack and check if we're at that offset
828 mov(rax, qword[rsp + 8]);
829 cmp(eax, (program_counter));
830
831 // If so, jump back to before CALL
832 Label b;
833 jnz(b);
834 ret();
835 L(b);
836}
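
// [Editor's note, not part of the original file] Returns cannot use a plain RET
// because the JIT emits the whole shader linearly rather than as separate host
// functions. Compile_CALL pushes the Pica return offset, and every offset a CALL
// could return to (collected by FindReturnOffsets) gets this check emitted right
// before the instruction at that offset: if the offset on the stack matches the
// current program counter, the host RET above unwinds back into the code emitted
// by Compile_CALL, which then pops the pushed offset. The dummy value written to
// qword[rsp + 8] in Compile() keeps these checks from ever matching while executing
// the main routine.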
837
838void JitShader::Compile_NextInstr() {
839 if (std::binary_search(return_offsets.begin(), return_offsets.end(), program_counter)) {
840 Compile_Return();
841 }
842
843 L(instruction_labels[program_counter]);
844
845 Instruction instr = {(*program_code)[program_counter++]};
846
847 OpCode::Id opcode = instr.opcode.Value();
848 auto instr_func = instr_table[static_cast<unsigned>(opcode)];
849
850 if (instr_func) {
851 // JIT the instruction!
852 ((*this).*instr_func)(instr);
853 } else {
854 // Unhandled instruction
855 LOG_CRITICAL(HW_GPU, "Unhandled instruction: 0x%02x (0x%08x)",
856 instr.opcode.Value().EffectiveOpCode(), instr.hex);
857 }
858}
859
860void JitShader::FindReturnOffsets() {
861 return_offsets.clear();
862
863 for (size_t offset = 0; offset < program_code->size(); ++offset) {
864 Instruction instr = {(*program_code)[offset]};
865
866 switch (instr.opcode.Value()) {
867 case OpCode::Id::CALL:
868 case OpCode::Id::CALLC:
869 case OpCode::Id::CALLU:
870 return_offsets.push_back(instr.flow_control.dest_offset +
871 instr.flow_control.num_instructions);
872 break;
873 default:
874 break;
875 }
876 }
877
878 // Sort for efficient binary search later
879 std::sort(return_offsets.begin(), return_offsets.end());
880}
881
882void JitShader::Compile(const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code_,
883 const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data_) {
884 program_code = program_code_;
885 swizzle_data = swizzle_data_;
886
887 // Reset flow control state
888 program = (CompiledShader*)getCurr();
889 program_counter = 0;
890 looping = false;
891 instruction_labels.fill(Xbyak::Label());
892
893 // Find all `CALL` instructions and identify return locations
894 FindReturnOffsets();
895
896 // The stack pointer is 8 modulo 16 at the entry of a procedure
897 // We reserve 16 bytes and assign a dummy value to the first 8 bytes, to catch any potential
898 // return checks (see Compile_Return) that happen in the shader main routine.
899 ABI_PushRegistersAndAdjustStack(*this, ABI_ALL_CALLEE_SAVED, 8, 16);
900 mov(qword[rsp + 8], 0xFFFFFFFFFFFFFFFFULL);
901
902 mov(SETUP, ABI_PARAM1);
903 mov(STATE, ABI_PARAM2);
904
905 // Zero address/loop registers
906 xor_(ADDROFFS_REG_0.cvt32(), ADDROFFS_REG_0.cvt32());
907 xor_(ADDROFFS_REG_1.cvt32(), ADDROFFS_REG_1.cvt32());
908 xor_(LOOPCOUNT_REG, LOOPCOUNT_REG);
909
910 // Used to set a register to one
911 static const __m128 one = {1.f, 1.f, 1.f, 1.f};
912 mov(rax, reinterpret_cast<size_t>(&one));
913 movaps(ONE, xword[rax]);
914
915 // Used to negate registers
916 static const __m128 neg = {-0.f, -0.f, -0.f, -0.f};
917 mov(rax, reinterpret_cast<size_t>(&neg));
918 movaps(NEGBIT, xword[rax]);
919
920 // Jump to start of the shader program
921 jmp(ABI_PARAM3);
922
923 // Compile entire program
924 Compile_Block(static_cast<unsigned>(program_code->size()));
925
926 // Free memory that's no longer needed
927 program_code = nullptr;
928 swizzle_data = nullptr;
929 return_offsets.clear();
930 return_offsets.shrink_to_fit();
931
932 ready();
933
934 ASSERT_MSG(getSize() <= MAX_SHADER_SIZE, "Compiled a shader that exceeds the allocated size!");
935 LOG_DEBUG(HW_GPU, "Compiled shader size=%lu", getSize());
936}
937
938JitShader::JitShader() : Xbyak::CodeGenerator(MAX_SHADER_SIZE) {}
939
940} // namespace Shader
941
942} // namespace Pica
diff --git a/src/video_core/shader/shader_jit_x64_compiler.h b/src/video_core/shader/shader_jit_x64_compiler.h
deleted file mode 100644
index 4aee56b1d..000000000
--- a/src/video_core/shader/shader_jit_x64_compiler.h
+++ /dev/null
@@ -1,127 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <cstddef>
9#include <utility>
10#include <vector>
11#include <nihstro/shader_bytecode.h>
12#include <xbyak.h>
13#include "common/bit_set.h"
14#include "common/common_types.h"
15#include "video_core/shader/shader.h"
16
17using nihstro::Instruction;
18using nihstro::OpCode;
19using nihstro::SwizzlePattern;
20
21namespace Pica {
22
23namespace Shader {
24
25/// Memory allocated for each compiled shader
26constexpr size_t MAX_SHADER_SIZE = MAX_PROGRAM_CODE_LENGTH * 64;
27
28/**
29 * This class implements the shader JIT compiler. It recompiles a Pica shader program into x86_64
30 * code that can be executed on the host machine directly.
31 */
32class JitShader : public Xbyak::CodeGenerator {
33public:
34 JitShader();
35
36 void Run(const ShaderSetup& setup, UnitState& state, unsigned offset) const {
37 program(&setup, &state, instruction_labels[offset].getAddress());
38 }
39
40 void Compile(const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code,
41 const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data);
42
43 void Compile_ADD(Instruction instr);
44 void Compile_DP3(Instruction instr);
45 void Compile_DP4(Instruction instr);
46 void Compile_DPH(Instruction instr);
47 void Compile_EX2(Instruction instr);
48 void Compile_LG2(Instruction instr);
49 void Compile_MUL(Instruction instr);
50 void Compile_SGE(Instruction instr);
51 void Compile_SLT(Instruction instr);
52 void Compile_FLR(Instruction instr);
53 void Compile_MAX(Instruction instr);
54 void Compile_MIN(Instruction instr);
55 void Compile_RCP(Instruction instr);
56 void Compile_RSQ(Instruction instr);
57 void Compile_MOVA(Instruction instr);
58 void Compile_MOV(Instruction instr);
59 void Compile_NOP(Instruction instr);
60 void Compile_END(Instruction instr);
61 void Compile_CALL(Instruction instr);
62 void Compile_CALLC(Instruction instr);
63 void Compile_CALLU(Instruction instr);
64 void Compile_IF(Instruction instr);
65 void Compile_LOOP(Instruction instr);
66 void Compile_JMP(Instruction instr);
67 void Compile_CMP(Instruction instr);
68 void Compile_MAD(Instruction instr);
69 void Compile_EMIT(Instruction instr);
70 void Compile_SETE(Instruction instr);
71
72private:
73 void Compile_Block(unsigned end);
74 void Compile_NextInstr();
75
76 void Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
77 Xbyak::Xmm dest);
78 void Compile_DestEnable(Instruction instr, Xbyak::Xmm dest);
79
80 /**
81 * Compiles a `MUL src1, src2` operation, properly handling the PICA semantics when multiplying
82 * zero by inf. Clobbers `src2` and `scratch`.
83 */
84 void Compile_SanitizedMul(Xbyak::Xmm src1, Xbyak::Xmm src2, Xbyak::Xmm scratch);
85
86 void Compile_EvaluateCondition(Instruction instr);
87 void Compile_UniformCondition(Instruction instr);
88
89 /**
90 * Emits the code to conditionally return from a subroutine invoked by the `CALL` instruction.
91 */
92 void Compile_Return();
93
94 BitSet32 PersistentCallerSavedRegs();
95
96 /**
97 * Assertion evaluated at compile-time, but only triggered if executed at runtime.
98 * @param condition Condition to be evaluated.
99 * @param msg Message to be logged if the assertion fails.
100 */
101 void Compile_Assert(bool condition, const char* msg);
102
103 /**
104 * Analyzes the entire shader program for `CALL` instructions before emitting any code,
105 * identifying the locations where a return needs to be inserted.
106 */
107 void FindReturnOffsets();
108
109 const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code = nullptr;
110 const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data = nullptr;
111
112 /// Mapping of Pica VS instructions to pointers in the emitted code
113 std::array<Xbyak::Label, MAX_PROGRAM_CODE_LENGTH> instruction_labels;
114
115 /// Offsets in code where a return needs to be inserted
116 std::vector<unsigned> return_offsets;
117
118 unsigned program_counter = 0; ///< Offset of the next instruction to decode
119 bool looping = false; ///< True if compiling a loop, used to check for nested loops
120
121 using CompiledShader = void(const void* setup, void* state, const u8* start_addr);
122 CompiledShader* program = nullptr;
123};
124
125} // Shader
126
127} // Pica
diff --git a/src/video_core/swrasterizer/clipper.cpp b/src/video_core/swrasterizer/clipper.cpp
deleted file mode 100644
index c1ed48398..000000000
--- a/src/video_core/swrasterizer/clipper.cpp
+++ /dev/null
@@ -1,197 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <array>
7#include <cstddef>
8#include <boost/container/static_vector.hpp>
9#include <boost/container/vector.hpp>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12#include "common/logging/log.h"
13#include "common/vector_math.h"
14#include "video_core/pica_state.h"
15#include "video_core/pica_types.h"
16#include "video_core/shader/shader.h"
17#include "video_core/swrasterizer/clipper.h"
18#include "video_core/swrasterizer/rasterizer.h"
19
20using Pica::Rasterizer::Vertex;
21
22namespace Pica {
23
24namespace Clipper {
25
26struct ClippingEdge {
27public:
28 ClippingEdge(Math::Vec4<float24> coeffs, Math::Vec4<float24> bias = Math::Vec4<float24>(
29 float24::FromFloat32(0), float24::FromFloat32(0),
30 float24::FromFloat32(0), float24::FromFloat32(0)))
31 : coeffs(coeffs), bias(bias) {}
32
33 bool IsInside(const Vertex& vertex) const {
34 return Math::Dot(vertex.pos + bias, coeffs) >= float24::FromFloat32(0);
35 }
36
37 bool IsOutSide(const Vertex& vertex) const {
38 return !IsInside(vertex);
39 }
40
41 Vertex GetIntersection(const Vertex& v0, const Vertex& v1) const {
42 float24 dp = Math::Dot(v0.pos + bias, coeffs);
43 float24 dp_prev = Math::Dot(v1.pos + bias, coeffs);
44 float24 factor = dp_prev / (dp_prev - dp);
45
46 return Vertex::Lerp(factor, v0, v1);
47 }
48
49private:
50 float24 pos;
51 Math::Vec4<float24> coeffs;
52 Math::Vec4<float24> bias;
53};
54
55static void InitScreenCoordinates(Vertex& vtx) {
56 struct {
57 float24 halfsize_x;
58 float24 offset_x;
59 float24 halfsize_y;
60 float24 offset_y;
61 float24 zscale;
62 float24 offset_z;
63 } viewport;
64
65 const auto& regs = g_state.regs;
66 viewport.halfsize_x = float24::FromRaw(regs.rasterizer.viewport_size_x);
67 viewport.halfsize_y = float24::FromRaw(regs.rasterizer.viewport_size_y);
68 viewport.offset_x = float24::FromFloat32(static_cast<float>(regs.rasterizer.viewport_corner.x));
69 viewport.offset_y = float24::FromFloat32(static_cast<float>(regs.rasterizer.viewport_corner.y));
70
71 float24 inv_w = float24::FromFloat32(1.f) / vtx.pos.w;
72 vtx.pos.w = inv_w;
73 vtx.quat *= inv_w;
74 vtx.color *= inv_w;
75 vtx.tc0 *= inv_w;
76 vtx.tc1 *= inv_w;
77 vtx.tc0_w *= inv_w;
78 vtx.view *= inv_w;
79 vtx.tc2 *= inv_w;
80
81 vtx.screenpos[0] =
82 (vtx.pos.x * inv_w + float24::FromFloat32(1.0)) * viewport.halfsize_x + viewport.offset_x;
83 vtx.screenpos[1] =
84 (vtx.pos.y * inv_w + float24::FromFloat32(1.0)) * viewport.halfsize_y + viewport.offset_y;
85 vtx.screenpos[2] = vtx.pos.z * inv_w;
86}
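
// [Editor's note, not part of the original file] The mapping above is the standard
// viewport transform applied after the perspective divide:
//   screen_x = (pos.x / w + 1) * halfsize_x + offset_x
// which takes clip-space x in [-w, +w] to [offset_x, offset_x + 2 * halfsize_x];
// y is handled the same way, and z becomes pos.z / w. The other attributes are
// pre-divided by w here so the rasterizer can interpolate them perspective-correctly.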
87
88void ProcessTriangle(const OutputVertex& v0, const OutputVertex& v1, const OutputVertex& v2) {
89 using boost::container::static_vector;
90
91 // Clipping a planar n-gon against a plane will remove at least 1 vertex and introduce 2 at
92 // the new edge (or fewer in degenerate cases). As such, we can say that each clipping plane
93 // introduces at most 1 new vertex to the polygon. Since we start with a triangle and have a
94 // fixed 6 clipping planes, the maximum number of vertices of the clipped polygon is 3 + 6 = 9.
95 static const size_t MAX_VERTICES = 9;
96 static_vector<Vertex, MAX_VERTICES> buffer_a = {v0, v1, v2};
97 static_vector<Vertex, MAX_VERTICES> buffer_b;
98
99 auto FlipQuaternionIfOpposite = [](auto& a, const auto& b) {
100 if (Math::Dot(a, b) < float24::Zero())
101 a = a * float24::FromFloat32(-1.0f);
102 };
103
104 // Flip the quaternions if they are opposite to prevent interpolating them over the wrong
105 // direction.
106 FlipQuaternionIfOpposite(buffer_a[1].quat, buffer_a[0].quat);
107 FlipQuaternionIfOpposite(buffer_a[2].quat, buffer_a[0].quat);
108
109 auto* output_list = &buffer_a;
110 auto* input_list = &buffer_b;
111
112 // NOTE: We clip against a w=epsilon plane to guarantee that the output has a positive w value.
113 // TODO: Not sure if this is a valid approach. Also should probably instead use the smallest
114 // epsilon possible within float24 accuracy.
115 static const float24 EPSILON = float24::FromFloat32(0.00001f);
116 static const float24 f0 = float24::FromFloat32(0.0);
117 static const float24 f1 = float24::FromFloat32(1.0);
118 static const std::array<ClippingEdge, 7> clipping_edges = {{
119 {Math::MakeVec(-f1, f0, f0, f1)}, // x = +w
120 {Math::MakeVec(f1, f0, f0, f1)}, // x = -w
121 {Math::MakeVec(f0, -f1, f0, f1)}, // y = +w
122 {Math::MakeVec(f0, f1, f0, f1)}, // y = -w
123 {Math::MakeVec(f0, f0, -f1, f0)}, // z = 0
124 {Math::MakeVec(f0, f0, f1, f1)}, // z = -w
125 {Math::MakeVec(f0, f0, f0, f1), Math::Vec4<float24>(f0, f0, f0, EPSILON)}, // w = EPSILON
126 }};
127
128 // Simple implementation of the Sutherland-Hodgman clipping algorithm.
129 // TODO: Make this less inefficient (currently lots of useless buffering overhead happens here)
130 auto Clip = [&](const ClippingEdge& edge) {
131 std::swap(input_list, output_list);
132 output_list->clear();
133
134 const Vertex* reference_vertex = &input_list->back();
135
136 for (const auto& vertex : *input_list) {
137 // NOTE: This algorithm changes vertex order in some cases!
138 if (edge.IsInside(vertex)) {
139 if (edge.IsOutSide(*reference_vertex)) {
140 output_list->push_back(edge.GetIntersection(vertex, *reference_vertex));
141 }
142
143 output_list->push_back(vertex);
144 } else if (edge.IsInside(*reference_vertex)) {
145 output_list->push_back(edge.GetIntersection(vertex, *reference_vertex));
146 }
147 reference_vertex = &vertex;
148 }
149 };
150
151 for (auto edge : clipping_edges) {
152 Clip(edge);
153
154 // Need to have at least a full triangle to continue...
155 if (output_list->size() < 3)
156 return;
157 }
158
159 if (g_state.regs.rasterizer.clip_enable) {
160 ClippingEdge custom_edge{g_state.regs.rasterizer.GetClipCoef()};
161 Clip(custom_edge);
162
163 if (output_list->size() < 3)
164 return;
165 }
166
167 InitScreenCoordinates((*output_list)[0]);
168 InitScreenCoordinates((*output_list)[1]);
169
170 for (size_t i = 0; i < output_list->size() - 2; i++) {
171 Vertex& vtx0 = (*output_list)[0];
172 Vertex& vtx1 = (*output_list)[i + 1];
173 Vertex& vtx2 = (*output_list)[i + 2];
174
175 InitScreenCoordinates(vtx2);
176
177 LOG_TRACE(Render_Software,
178 "Triangle %lu/%lu at position (%.3f, %.3f, %.3f, %.3f), "
179 "(%.3f, %.3f, %.3f, %.3f), (%.3f, %.3f, %.3f, %.3f) and "
180 "screen position (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f)",
181 i + 1, output_list->size() - 2, vtx0.pos.x.ToFloat32(), vtx0.pos.y.ToFloat32(),
182 vtx0.pos.z.ToFloat32(), vtx0.pos.w.ToFloat32(), vtx1.pos.x.ToFloat32(),
183 vtx1.pos.y.ToFloat32(), vtx1.pos.z.ToFloat32(), vtx1.pos.w.ToFloat32(),
184 vtx2.pos.x.ToFloat32(), vtx2.pos.y.ToFloat32(), vtx2.pos.z.ToFloat32(),
185 vtx2.pos.w.ToFloat32(), vtx0.screenpos.x.ToFloat32(),
186 vtx0.screenpos.y.ToFloat32(), vtx0.screenpos.z.ToFloat32(),
187 vtx1.screenpos.x.ToFloat32(), vtx1.screenpos.y.ToFloat32(),
188 vtx1.screenpos.z.ToFloat32(), vtx2.screenpos.x.ToFloat32(),
189 vtx2.screenpos.y.ToFloat32(), vtx2.screenpos.z.ToFloat32());
190
191 Rasterizer::ProcessTriangle(vtx0, vtx1, vtx2);
192 }
193}
194
195} // namespace
196
197} // namespace
diff --git a/src/video_core/swrasterizer/clipper.h b/src/video_core/swrasterizer/clipper.h
deleted file mode 100644
index b51af0af9..000000000
--- a/src/video_core/swrasterizer/clipper.h
+++ /dev/null
@@ -1,21 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7namespace Pica {
8
9namespace Shader {
10struct OutputVertex;
11}
12
13namespace Clipper {
14
15using Shader::OutputVertex;
16
17void ProcessTriangle(const OutputVertex& v0, const OutputVertex& v1, const OutputVertex& v2);
18
19} // namespace
20
21} // namespace
diff --git a/src/video_core/swrasterizer/framebuffer.cpp b/src/video_core/swrasterizer/framebuffer.cpp
deleted file mode 100644
index f34eab6cf..000000000
--- a/src/video_core/swrasterizer/framebuffer.cpp
+++ /dev/null
@@ -1,360 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6
7#include "common/assert.h"
8#include "common/color.h"
9#include "common/common_types.h"
10#include "common/logging/log.h"
11#include "common/math_util.h"
12#include "common/vector_math.h"
13#include "core/hw/gpu.h"
14#include "core/memory.h"
15#include "video_core/pica_state.h"
16#include "video_core/regs_framebuffer.h"
17#include "video_core/swrasterizer/framebuffer.h"
18#include "video_core/utils.h"
19
20namespace Pica {
21namespace Rasterizer {
22
23void DrawPixel(int x, int y, const Math::Vec4<u8>& color) {
24 const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
25 const PAddr addr = framebuffer.GetColorBufferPhysicalAddress();
26
27 // Like textures, the render framebuffer is laid out from bottom to top.
28 // NOTE: The framebuffer height register contains the actual FB height minus one.
29 y = framebuffer.height - y;
30
31 const u32 coarse_y = y & ~7;
32 u32 bytes_per_pixel =
33 GPU::Regs::BytesPerPixel(GPU::Regs::PixelFormat(framebuffer.color_format.Value()));
34 u32 dst_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
35 coarse_y * framebuffer.width * bytes_per_pixel;
36 u8* dst_pixel = Memory::GetPhysicalPointer(addr) + dst_offset;
37
38 switch (framebuffer.color_format) {
39 case FramebufferRegs::ColorFormat::RGBA8:
40 Color::EncodeRGBA8(color, dst_pixel);
41 break;
42
43 case FramebufferRegs::ColorFormat::RGB8:
44 Color::EncodeRGB8(color, dst_pixel);
45 break;
46
47 case FramebufferRegs::ColorFormat::RGB5A1:
48 Color::EncodeRGB5A1(color, dst_pixel);
49 break;
50
51 case FramebufferRegs::ColorFormat::RGB565:
52 Color::EncodeRGB565(color, dst_pixel);
53 break;
54
55 case FramebufferRegs::ColorFormat::RGBA4:
56 Color::EncodeRGBA4(color, dst_pixel);
57 break;
58
59 default:
60 LOG_CRITICAL(Render_Software, "Unknown framebuffer color format %x",
61 framebuffer.color_format.Value());
62 UNIMPLEMENTED();
63 }
64}
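
// [Editor's note -- illustrative sketch, not part of the original file] The PICA
// color buffer is stored in 8x8-pixel tiles whose texels are Morton (Z-order)
// interleaved, with tiles laid out linearly across a row of `width` pixels. The
// helpers below are hypothetical stand-ins (u32 assumed from common/common_types.h)
// that combine what VideoCore::GetMortonOffset computes with the `coarse_y * stride`
// term the callers in this file add on top of it.
static u32 MortonInterleave8x8(u32 x, u32 y) {
    // Interleave the low 3 bits of x (even bit positions) and y (odd bit positions).
    u32 result = 0;
    for (u32 bit = 0; bit < 3; ++bit) {
        result |= ((x >> bit) & 1) << (2 * bit);
        result |= ((y >> bit) & 1) << (2 * bit + 1);
    }
    return result;
}
static u32 TiledPixelOffset(u32 x, u32 y, u32 width, u32 bytes_per_pixel) {
    const u32 coarse_x = x & ~7u; // first column of the 8x8 tile containing x
    const u32 coarse_y = y & ~7u; // first row of the 8x8 tile containing y
    const u32 within_tile = MortonInterleave8x8(x & 7, y & 7);
    // Each tile holds 8 * 8 = 64 pixels; coarse_x * 8 skips the full tiles to the left,
    // and coarse_y * width skips the full tile rows above.
    return (within_tile + coarse_x * 8) * bytes_per_pixel +
           coarse_y * width * bytes_per_pixel;
}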
65
66const Math::Vec4<u8> GetPixel(int x, int y) {
67 const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
68 const PAddr addr = framebuffer.GetColorBufferPhysicalAddress();
69
70 y = framebuffer.height - y;
71
72 const u32 coarse_y = y & ~7;
73 u32 bytes_per_pixel =
74 GPU::Regs::BytesPerPixel(GPU::Regs::PixelFormat(framebuffer.color_format.Value()));
75 u32 src_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
76 coarse_y * framebuffer.width * bytes_per_pixel;
77 u8* src_pixel = Memory::GetPhysicalPointer(addr) + src_offset;
78
79 switch (framebuffer.color_format) {
80 case FramebufferRegs::ColorFormat::RGBA8:
81 return Color::DecodeRGBA8(src_pixel);
82
83 case FramebufferRegs::ColorFormat::RGB8:
84 return Color::DecodeRGB8(src_pixel);
85
86 case FramebufferRegs::ColorFormat::RGB5A1:
87 return Color::DecodeRGB5A1(src_pixel);
88
89 case FramebufferRegs::ColorFormat::RGB565:
90 return Color::DecodeRGB565(src_pixel);
91
92 case FramebufferRegs::ColorFormat::RGBA4:
93 return Color::DecodeRGBA4(src_pixel);
94
95 default:
96 LOG_CRITICAL(Render_Software, "Unknown framebuffer color format %x",
97 framebuffer.color_format.Value());
98 UNIMPLEMENTED();
99 }
100
101 return {0, 0, 0, 0};
102}
103
104u32 GetDepth(int x, int y) {
105 const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
106 const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
107 u8* depth_buffer = Memory::GetPhysicalPointer(addr);
108
109 y = framebuffer.height - y;
110
111 const u32 coarse_y = y & ~7;
112 u32 bytes_per_pixel = FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
113 u32 stride = framebuffer.width * bytes_per_pixel;
114
115 u32 src_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
116 u8* src_pixel = depth_buffer + src_offset;
117
118 switch (framebuffer.depth_format) {
119 case FramebufferRegs::DepthFormat::D16:
120 return Color::DecodeD16(src_pixel);
121 case FramebufferRegs::DepthFormat::D24:
122 return Color::DecodeD24(src_pixel);
123 case FramebufferRegs::DepthFormat::D24S8:
124 return Color::DecodeD24S8(src_pixel).x;
125 default:
126 LOG_CRITICAL(HW_GPU, "Unimplemented depth format %u", framebuffer.depth_format);
127 UNIMPLEMENTED();
128 return 0;
129 }
130}
131
132u8 GetStencil(int x, int y) {
133 const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
134 const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
135 u8* depth_buffer = Memory::GetPhysicalPointer(addr);
136
137 y = framebuffer.height - y;
138
139 const u32 coarse_y = y & ~7;
140 u32 bytes_per_pixel = Pica::FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
141 u32 stride = framebuffer.width * bytes_per_pixel;
142
143 u32 src_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
144 u8* src_pixel = depth_buffer + src_offset;
145
146 switch (framebuffer.depth_format) {
147 case FramebufferRegs::DepthFormat::D24S8:
148 return Color::DecodeD24S8(src_pixel).y;
149
150 default:
151 LOG_WARNING(
152 HW_GPU,
153 "GetStencil called for function which doesn't have a stencil component (format %u)",
154 framebuffer.depth_format);
155 return 0;
156 }
157}
158
159void SetDepth(int x, int y, u32 value) {
160 const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
161 const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
162 u8* depth_buffer = Memory::GetPhysicalPointer(addr);
163
164 y = framebuffer.height - y;
165
166 const u32 coarse_y = y & ~7;
167 u32 bytes_per_pixel = FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
168 u32 stride = framebuffer.width * bytes_per_pixel;
169
170 u32 dst_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
171 u8* dst_pixel = depth_buffer + dst_offset;
172
173 switch (framebuffer.depth_format) {
174 case FramebufferRegs::DepthFormat::D16:
175 Color::EncodeD16(value, dst_pixel);
176 break;
177
178 case FramebufferRegs::DepthFormat::D24:
179 Color::EncodeD24(value, dst_pixel);
180 break;
181
182 case FramebufferRegs::DepthFormat::D24S8:
183 Color::EncodeD24X8(value, dst_pixel);
184 break;
185
186 default:
187 LOG_CRITICAL(HW_GPU, "Unimplemented depth format %u", framebuffer.depth_format);
188 UNIMPLEMENTED();
189 break;
190 }
191}
192
193void SetStencil(int x, int y, u8 value) {
194 const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
195 const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
196 u8* depth_buffer = Memory::GetPhysicalPointer(addr);
197
198 y = framebuffer.height - y;
199
200 const u32 coarse_y = y & ~7;
201 u32 bytes_per_pixel = Pica::FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
202 u32 stride = framebuffer.width * bytes_per_pixel;
203
204 u32 dst_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
205 u8* dst_pixel = depth_buffer + dst_offset;
206
207 switch (framebuffer.depth_format) {
208 case Pica::FramebufferRegs::DepthFormat::D16:
209 case Pica::FramebufferRegs::DepthFormat::D24:
210 // Nothing to do
211 break;
212
213 case Pica::FramebufferRegs::DepthFormat::D24S8:
214 Color::EncodeX24S8(value, dst_pixel);
215 break;
216
217 default:
218 LOG_CRITICAL(HW_GPU, "Unimplemented depth format %u", framebuffer.depth_format);
219 UNIMPLEMENTED();
220 break;
221 }
222}
223
224u8 PerformStencilAction(FramebufferRegs::StencilAction action, u8 old_stencil, u8 ref) {
225 switch (action) {
226 case FramebufferRegs::StencilAction::Keep:
227 return old_stencil;
228
229 case FramebufferRegs::StencilAction::Zero:
230 return 0;
231
232 case FramebufferRegs::StencilAction::Replace:
233 return ref;
234
235 case FramebufferRegs::StencilAction::Increment:
236 // Saturated increment
237 return std::min<u8>(old_stencil, 254) + 1;
238
239 case FramebufferRegs::StencilAction::Decrement:
240 // Saturated decrement
241 return std::max<u8>(old_stencil, 1) - 1;
242
243 case FramebufferRegs::StencilAction::Invert:
244 return ~old_stencil;
245
246 case FramebufferRegs::StencilAction::IncrementWrap:
247 return old_stencil + 1;
248
249 case FramebufferRegs::StencilAction::DecrementWrap:
250 return old_stencil - 1;
251
252 default:
253 LOG_CRITICAL(HW_GPU, "Unknown stencil action %x", (int)action);
254 UNIMPLEMENTED();
255 return 0;
256 }
257}
258
259Math::Vec4<u8> EvaluateBlendEquation(const Math::Vec4<u8>& src, const Math::Vec4<u8>& srcfactor,
260 const Math::Vec4<u8>& dest, const Math::Vec4<u8>& destfactor,
261 FramebufferRegs::BlendEquation equation) {
262 Math::Vec4<int> result;
263
264 auto src_result = (src * srcfactor).Cast<int>();
265 auto dst_result = (dest * destfactor).Cast<int>();
266
267 switch (equation) {
268 case FramebufferRegs::BlendEquation::Add:
269 result = (src_result + dst_result) / 255;
270 break;
271
272 case FramebufferRegs::BlendEquation::Subtract:
273 result = (src_result - dst_result) / 255;
274 break;
275
276 case FramebufferRegs::BlendEquation::ReverseSubtract:
277 result = (dst_result - src_result) / 255;
278 break;
279
280 // TODO: How do these two actually work? OpenGL doesn't include the blend factors in the
281 // min/max computations, but is this what the 3DS actually does?
282 case FramebufferRegs::BlendEquation::Min:
283 result.r() = std::min(src.r(), dest.r());
284 result.g() = std::min(src.g(), dest.g());
285 result.b() = std::min(src.b(), dest.b());
286 result.a() = std::min(src.a(), dest.a());
287 break;
288
289 case FramebufferRegs::BlendEquation::Max:
290 result.r() = std::max(src.r(), dest.r());
291 result.g() = std::max(src.g(), dest.g());
292 result.b() = std::max(src.b(), dest.b());
293 result.a() = std::max(src.a(), dest.a());
294 break;
295
296 default:
297 LOG_CRITICAL(HW_GPU, "Unknown RGB blend equation %x", equation);
298 UNIMPLEMENTED();
299 }
300
301 return Math::Vec4<u8>(MathUtil::Clamp(result.r(), 0, 255), MathUtil::Clamp(result.g(), 0, 255),
302 MathUtil::Clamp(result.b(), 0, 255), MathUtil::Clamp(result.a(), 0, 255));
303};
304
305u8 LogicOp(u8 src, u8 dest, FramebufferRegs::LogicOp op) {
306 switch (op) {
307 case FramebufferRegs::LogicOp::Clear:
308 return 0;
309
310 case FramebufferRegs::LogicOp::And:
311 return src & dest;
312
313 case FramebufferRegs::LogicOp::AndReverse:
314 return src & ~dest;
315
316 case FramebufferRegs::LogicOp::Copy:
317 return src;
318
319 case FramebufferRegs::LogicOp::Set:
320 return 255;
321
322 case FramebufferRegs::LogicOp::CopyInverted:
323 return ~src;
324
325 case FramebufferRegs::LogicOp::NoOp:
326 return dest;
327
328 case FramebufferRegs::LogicOp::Invert:
329 return ~dest;
330
331 case FramebufferRegs::LogicOp::Nand:
332 return ~(src & dest);
333
334 case FramebufferRegs::LogicOp::Or:
335 return src | dest;
336
337 case FramebufferRegs::LogicOp::Nor:
338 return ~(src | dest);
339
340 case FramebufferRegs::LogicOp::Xor:
341 return src ^ dest;
342
343 case FramebufferRegs::LogicOp::Equiv:
344 return ~(src ^ dest);
345
346 case FramebufferRegs::LogicOp::AndInverted:
347 return ~src & dest;
348
349 case FramebufferRegs::LogicOp::OrReverse:
350 return src | ~dest;
351
352 case FramebufferRegs::LogicOp::OrInverted:
353 return ~src | dest;
354 }
355
356 UNREACHABLE();
357};
358
359} // namespace Rasterizer
360} // namespace Pica
diff --git a/src/video_core/swrasterizer/framebuffer.h b/src/video_core/swrasterizer/framebuffer.h
deleted file mode 100644
index 4a32a4979..000000000
--- a/src/video_core/swrasterizer/framebuffer.h
+++ /dev/null
@@ -1,29 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8#include "common/vector_math.h"
9#include "video_core/regs_framebuffer.h"
10
11namespace Pica {
12namespace Rasterizer {
13
14void DrawPixel(int x, int y, const Math::Vec4<u8>& color);
15const Math::Vec4<u8> GetPixel(int x, int y);
16u32 GetDepth(int x, int y);
17u8 GetStencil(int x, int y);
18void SetDepth(int x, int y, u32 value);
19void SetStencil(int x, int y, u8 value);
20u8 PerformStencilAction(FramebufferRegs::StencilAction action, u8 old_stencil, u8 ref);
21
22Math::Vec4<u8> EvaluateBlendEquation(const Math::Vec4<u8>& src, const Math::Vec4<u8>& srcfactor,
23 const Math::Vec4<u8>& dest, const Math::Vec4<u8>& destfactor,
24 FramebufferRegs::BlendEquation equation);
25
26u8 LogicOp(u8 src, u8 dest, FramebufferRegs::LogicOp op);
27
28} // namespace Rasterizer
29} // namespace Pica
diff --git a/src/video_core/swrasterizer/lighting.cpp b/src/video_core/swrasterizer/lighting.cpp
deleted file mode 100644
index 5fa748611..000000000
--- a/src/video_core/swrasterizer/lighting.cpp
+++ /dev/null
@@ -1,308 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/math_util.h"
6#include "video_core/swrasterizer/lighting.h"
7
8namespace Pica {
9
10static float LookupLightingLut(const Pica::State::Lighting& lighting, size_t lut_index, u8 index,
11 float delta) {
12 ASSERT_MSG(lut_index < lighting.luts.size(), "Out of range lut");
13 ASSERT_MSG(index < lighting.luts[lut_index].size(), "Out of range index");
14
15 const auto& lut = lighting.luts[lut_index][index];
16
17 float lut_value = lut.ToFloat();
18 float lut_diff = lut.DiffToFloat();
19
20 return lut_value + lut_diff * delta;
21}
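
// [Editor's note -- illustrative sketch, not part of the original file] How the
// callers below derive (index, delta) for this lookup when the input is clamped to
// [0, 1]: the value is scaled to the 256-entry LUT, the integer part selects the
// entry and the fractional part drives the interpolation above. The helper name is
// hypothetical; it assumes <cmath>, <utility> and common/math_util.h.
static std::pair<u8, float> ToLutIndexAndDelta(float value) {
    const float sample_loc = MathUtil::Clamp(value, 0.0f, 1.0f);
    const u8 index =
        static_cast<u8>(MathUtil::Clamp(std::floor(sample_loc * 256.0f), 0.0f, 255.0f));
    const float delta = sample_loc * 256.0f - index;
    return {index, delta};
}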
22
23std::tuple<Math::Vec4<u8>, Math::Vec4<u8>> ComputeFragmentsColors(
24 const Pica::LightingRegs& lighting, const Pica::State::Lighting& lighting_state,
25 const Math::Quaternion<float>& normquat, const Math::Vec3<float>& view,
26 const Math::Vec4<u8> (&texture_color)[4]) {
27
28 Math::Vec3<float> surface_normal;
29 Math::Vec3<float> surface_tangent;
30
31 if (lighting.config0.bump_mode != LightingRegs::LightingBumpMode::None) {
32 Math::Vec3<float> perturbation =
33 texture_color[lighting.config0.bump_selector].xyz().Cast<float>() / 127.5f -
34 Math::MakeVec(1.0f, 1.0f, 1.0f);
35 if (lighting.config0.bump_mode == LightingRegs::LightingBumpMode::NormalMap) {
36 if (!lighting.config0.disable_bump_renorm) {
37 const float z_square = 1 - perturbation.xy().Length2();
38 perturbation.z = std::sqrt(std::max(z_square, 0.0f));
39 }
40 surface_normal = perturbation;
41 surface_tangent = Math::MakeVec(1.0f, 0.0f, 0.0f);
42 } else if (lighting.config0.bump_mode == LightingRegs::LightingBumpMode::TangentMap) {
43 surface_normal = Math::MakeVec(0.0f, 0.0f, 1.0f);
44 surface_tangent = perturbation;
45 } else {
46 LOG_ERROR(HW_GPU, "Unknown bump mode %u", lighting.config0.bump_mode.Value());
47 }
48 } else {
49 surface_normal = Math::MakeVec(0.0f, 0.0f, 1.0f);
50 surface_tangent = Math::MakeVec(1.0f, 0.0f, 0.0f);
51 }
52
53 // Use the normalized quaternion when performing the rotation
54 auto normal = Math::QuaternionRotate(normquat, surface_normal);
55 auto tangent = Math::QuaternionRotate(normquat, surface_tangent);
56
57 Math::Vec4<float> diffuse_sum = {0.0f, 0.0f, 0.0f, 1.0f};
58 Math::Vec4<float> specular_sum = {0.0f, 0.0f, 0.0f, 1.0f};
59
60 for (unsigned light_index = 0; light_index <= lighting.max_light_index; ++light_index) {
61 unsigned num = lighting.light_enable.GetNum(light_index);
62 const auto& light_config = lighting.light[num];
63
64 Math::Vec3<float> refl_value = {};
65 Math::Vec3<float> position = {float16::FromRaw(light_config.x).ToFloat32(),
66 float16::FromRaw(light_config.y).ToFloat32(),
67 float16::FromRaw(light_config.z).ToFloat32()};
68 Math::Vec3<float> light_vector;
69
70 if (light_config.config.directional)
71 light_vector = position;
72 else
73 light_vector = position + view;
74
75 light_vector.Normalize();
76
77 Math::Vec3<float> norm_view = view.Normalized();
78 Math::Vec3<float> half_vector = norm_view + light_vector;
79
80 float dist_atten = 1.0f;
81 if (!lighting.IsDistAttenDisabled(num)) {
82 auto distance = (-view - position).Length();
83 float scale = Pica::float20::FromRaw(light_config.dist_atten_scale).ToFloat32();
84 float bias = Pica::float20::FromRaw(light_config.dist_atten_bias).ToFloat32();
85 size_t lut =
86 static_cast<size_t>(LightingRegs::LightingSampler::DistanceAttenuation) + num;
87
88 float sample_loc = MathUtil::Clamp(scale * distance + bias, 0.0f, 1.0f);
89
90 u8 lutindex =
91 static_cast<u8>(MathUtil::Clamp(std::floor(sample_loc * 256.0f), 0.0f, 255.0f));
92 float delta = sample_loc * 256 - lutindex;
93 dist_atten = LookupLightingLut(lighting_state, lut, lutindex, delta);
94 }
95
96 auto GetLutValue = [&](LightingRegs::LightingLutInput input, bool abs,
97 LightingRegs::LightingScale scale_enum,
98 LightingRegs::LightingSampler sampler) {
99 float result = 0.0f;
100
101 switch (input) {
102 case LightingRegs::LightingLutInput::NH:
103 result = Math::Dot(normal, half_vector.Normalized());
104 break;
105
106 case LightingRegs::LightingLutInput::VH:
107 result = Math::Dot(norm_view, half_vector.Normalized());
108 break;
109
110 case LightingRegs::LightingLutInput::NV:
111 result = Math::Dot(normal, norm_view);
112 break;
113
114 case LightingRegs::LightingLutInput::LN:
115 result = Math::Dot(light_vector, normal);
116 break;
117
118 case LightingRegs::LightingLutInput::SP: {
119 Math::Vec3<s32> spot_dir{light_config.spot_x.Value(), light_config.spot_y.Value(),
120 light_config.spot_z.Value()};
121 result = Math::Dot(light_vector, spot_dir.Cast<float>() / 2047.0f);
122 break;
123 }
124 case LightingRegs::LightingLutInput::CP:
125 if (lighting.config0.config == LightingRegs::LightingConfig::Config7) {
126 const Math::Vec3<float> norm_half_vector = half_vector.Normalized();
127 const Math::Vec3<float> half_vector_proj =
128 norm_half_vector - normal * Math::Dot(normal, norm_half_vector);
129 result = Math::Dot(half_vector_proj, tangent);
130 } else {
131 result = 0.0f;
132 }
133 break;
134 default:
135 LOG_CRITICAL(HW_GPU, "Unknown lighting LUT input %u\n", static_cast<u32>(input));
136 UNIMPLEMENTED();
137 result = 0.0f;
138 }
139
140 u8 index;
141 float delta;
142
143 if (abs) {
144 if (light_config.config.two_sided_diffuse)
145 result = std::abs(result);
146 else
147 result = std::max(result, 0.0f);
148
149 float flr = std::floor(result * 256.0f);
150 index = static_cast<u8>(MathUtil::Clamp(flr, 0.0f, 255.0f));
151 delta = result * 256 - index;
152 } else {
153 float flr = std::floor(result * 128.0f);
154 s8 signed_index = static_cast<s8>(MathUtil::Clamp(flr, -128.0f, 127.0f));
155 delta = result * 128.0f - signed_index;
156 index = static_cast<u8>(signed_index);
157 }
158
159 float scale = lighting.lut_scale.GetScale(scale_enum);
160 return scale *
161 LookupLightingLut(lighting_state, static_cast<size_t>(sampler), index, delta);
162 };
163
164 // If enabled, compute spot light attenuation value
165 float spot_atten = 1.0f;
166 if (!lighting.IsSpotAttenDisabled(num) &&
167 LightingRegs::IsLightingSamplerSupported(
168 lighting.config0.config, LightingRegs::LightingSampler::SpotlightAttenuation)) {
169 auto lut = LightingRegs::SpotlightAttenuationSampler(num);
170 spot_atten = GetLutValue(lighting.lut_input.sp, lighting.abs_lut_input.disable_sp == 0,
171 lighting.lut_scale.sp, lut);
172 }
173
174 // Specular 0 component
175 float d0_lut_value = 1.0f;
176 if (lighting.config1.disable_lut_d0 == 0 &&
177 LightingRegs::IsLightingSamplerSupported(
178 lighting.config0.config, LightingRegs::LightingSampler::Distribution0)) {
179 d0_lut_value =
180 GetLutValue(lighting.lut_input.d0, lighting.abs_lut_input.disable_d0 == 0,
181 lighting.lut_scale.d0, LightingRegs::LightingSampler::Distribution0);
182 }
183
184 Math::Vec3<float> specular_0 = d0_lut_value * light_config.specular_0.ToVec3f();
185
186 // If enabled, look up the ReflectRed value; otherwise 1.0 is used
187 if (lighting.config1.disable_lut_rr == 0 &&
188 LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
189 LightingRegs::LightingSampler::ReflectRed)) {
190 refl_value.x =
191 GetLutValue(lighting.lut_input.rr, lighting.abs_lut_input.disable_rr == 0,
192 lighting.lut_scale.rr, LightingRegs::LightingSampler::ReflectRed);
193 } else {
194 refl_value.x = 1.0f;
195 }
196
197 // If enabled, look up the ReflectGreen value; otherwise the ReflectRed value is used
198 if (lighting.config1.disable_lut_rg == 0 &&
199 LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
200 LightingRegs::LightingSampler::ReflectGreen)) {
201 refl_value.y =
202 GetLutValue(lighting.lut_input.rg, lighting.abs_lut_input.disable_rg == 0,
203 lighting.lut_scale.rg, LightingRegs::LightingSampler::ReflectGreen);
204 } else {
205 refl_value.y = refl_value.x;
206 }
207
208 // If enabled, look up the ReflectBlue value; otherwise the ReflectRed value is used
209 if (lighting.config1.disable_lut_rb == 0 &&
210 LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
211 LightingRegs::LightingSampler::ReflectBlue)) {
212 refl_value.z =
213 GetLutValue(lighting.lut_input.rb, lighting.abs_lut_input.disable_rb == 0,
214 lighting.lut_scale.rb, LightingRegs::LightingSampler::ReflectBlue);
215 } else {
216 refl_value.z = refl_value.x;
217 }
218
219 // Specular 1 component
220 float d1_lut_value = 1.0f;
221 if (lighting.config1.disable_lut_d1 == 0 &&
222 LightingRegs::IsLightingSamplerSupported(
223 lighting.config0.config, LightingRegs::LightingSampler::Distribution1)) {
224 d1_lut_value =
225 GetLutValue(lighting.lut_input.d1, lighting.abs_lut_input.disable_d1 == 0,
226 lighting.lut_scale.d1, LightingRegs::LightingSampler::Distribution1);
227 }
228
229 Math::Vec3<float> specular_1 =
230 d1_lut_value * refl_value * light_config.specular_1.ToVec3f();
231
232 // Fresnel
233 // Note: only the last entry in the light slots applies the Fresnel factor
234 if (light_index == lighting.max_light_index && lighting.config1.disable_lut_fr == 0 &&
235 LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
236 LightingRegs::LightingSampler::Fresnel)) {
237
238 float lut_value =
239 GetLutValue(lighting.lut_input.fr, lighting.abs_lut_input.disable_fr == 0,
240 lighting.lut_scale.fr, LightingRegs::LightingSampler::Fresnel);
241
242 // Enabled for diffuse lighting alpha component
243 if (lighting.config0.fresnel_selector ==
244 LightingRegs::LightingFresnelSelector::PrimaryAlpha ||
245 lighting.config0.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
246 diffuse_sum.a() = lut_value;
247 }
248
249 // Enabled for the specular lighting alpha component
250 if (lighting.config0.fresnel_selector ==
251 LightingRegs::LightingFresnelSelector::SecondaryAlpha ||
252 lighting.config0.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
253 specular_sum.a() = lut_value;
254 }
255 }
256
257 auto dot_product = Math::Dot(light_vector, normal);
258
259 // Calculate clamp highlights before applying the two-sided diffuse configuration to the dot
260 // product.
261 float clamp_highlights = 1.0f;
262 if (lighting.config0.clamp_highlights) {
263 if (dot_product <= 0.0f)
264 clamp_highlights = 0.0f;
265 else
266 clamp_highlights = 1.0f;
267 }
268
269 if (light_config.config.two_sided_diffuse)
270 dot_product = std::abs(dot_product);
271 else
272 dot_product = std::max(dot_product, 0.0f);
273
274 if (light_config.config.geometric_factor_0 || light_config.config.geometric_factor_1) {
275 float geo_factor = half_vector.Length2();
276 geo_factor = geo_factor == 0.0f ? 0.0f : std::min(dot_product / geo_factor, 1.0f);
277 if (light_config.config.geometric_factor_0) {
278 specular_0 *= geo_factor;
279 }
280 if (light_config.config.geometric_factor_1) {
281 specular_1 *= geo_factor;
282 }
283 }
284
285 auto diffuse =
286 light_config.diffuse.ToVec3f() * dot_product + light_config.ambient.ToVec3f();
287 diffuse_sum += Math::MakeVec(diffuse * dist_atten * spot_atten, 0.0f);
288
289 specular_sum += Math::MakeVec(
290 (specular_0 + specular_1) * clamp_highlights * dist_atten * spot_atten, 0.0f);
291 }
292
293 diffuse_sum += Math::MakeVec(lighting.global_ambient.ToVec3f(), 0.0f);
294
295 auto diffuse = Math::MakeVec<float>(MathUtil::Clamp(diffuse_sum.x, 0.0f, 1.0f) * 255,
296 MathUtil::Clamp(diffuse_sum.y, 0.0f, 1.0f) * 255,
297 MathUtil::Clamp(diffuse_sum.z, 0.0f, 1.0f) * 255,
298 MathUtil::Clamp(diffuse_sum.w, 0.0f, 1.0f) * 255)
299 .Cast<u8>();
300 auto specular = Math::MakeVec<float>(MathUtil::Clamp(specular_sum.x, 0.0f, 1.0f) * 255,
301 MathUtil::Clamp(specular_sum.y, 0.0f, 1.0f) * 255,
302 MathUtil::Clamp(specular_sum.z, 0.0f, 1.0f) * 255,
303 MathUtil::Clamp(specular_sum.w, 0.0f, 1.0f) * 255)
304 .Cast<u8>();
305 return std::make_tuple(diffuse, specular);
306}
307
308} // namespace Pica
diff --git a/src/video_core/swrasterizer/lighting.h b/src/video_core/swrasterizer/lighting.h
deleted file mode 100644
index d807a3d94..000000000
--- a/src/video_core/swrasterizer/lighting.h
+++ /dev/null
@@ -1,19 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <tuple>
8#include "common/quaternion.h"
9#include "common/vector_math.h"
10#include "video_core/pica_state.h"
11
12namespace Pica {
13
14std::tuple<Math::Vec4<u8>, Math::Vec4<u8>> ComputeFragmentsColors(
15 const Pica::LightingRegs& lighting, const Pica::State::Lighting& lighting_state,
16 const Math::Quaternion<float>& normquat, const Math::Vec3<float>& view,
17 const Math::Vec4<u8> (&texture_color)[4]);
18
19} // namespace Pica
diff --git a/src/video_core/swrasterizer/proctex.cpp b/src/video_core/swrasterizer/proctex.cpp
deleted file mode 100644
index b69892778..000000000
--- a/src/video_core/swrasterizer/proctex.cpp
+++ /dev/null
@@ -1,223 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <array>
6#include <cmath>
7#include "common/math_util.h"
8#include "video_core/swrasterizer/proctex.h"
9
10namespace Pica {
11namespace Rasterizer {
12
13using ProcTexClamp = TexturingRegs::ProcTexClamp;
14using ProcTexShift = TexturingRegs::ProcTexShift;
15using ProcTexCombiner = TexturingRegs::ProcTexCombiner;
16using ProcTexFilter = TexturingRegs::ProcTexFilter;
17
18static float LookupLUT(const std::array<State::ProcTex::ValueEntry, 128>& lut, float coord) {
19 // For NoiseLUT/ColorMap/AlphaMap, coord=0.0 is lut[0], coord=127.0/128.0 is lut[127] and
20 // coord=1.0 is lut[127]+lut_diff[127]. For other indices, the result is interpolated using
21 // value entries and difference entries.
22 coord *= 128;
23 const int index_int = std::min(static_cast<int>(coord), 127);
24 const float frac = coord - index_int;
25 return lut[index_int].ToFloat() + frac * lut[index_int].DiffToFloat();
26}
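
A minimal, self-contained sketch of the value/difference interpolation LookupLUT performs, using a hypothetical (value, diff) pair in place of State::ProcTex::ValueEntry; the table contents are made up for the example.

#include <algorithm>
#include <array>
#include <cstdio>

struct Entry {
    float value; // lut[i]
    float diff;  // lut[i + 1] - lut[i], stored alongside the value
};

static float SampleLut(const std::array<Entry, 128>& lut, float coord) {
    coord *= 128.0f;
    const int index = std::min(static_cast<int>(coord), 127);
    const float frac = coord - static_cast<float>(index);
    return lut[index].value + frac * lut[index].diff;
}

int main() {
    std::array<Entry, 128> identity{};
    for (int i = 0; i < 128; ++i)
        identity[i] = {i / 128.0f, 1.0f / 128.0f}; // approximates f(x) = x
    std::printf("%.3f\n", SampleLut(identity, 0.625f)); // prints 0.625
}
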
27
28// These functions are used to generate random noise for procedural textures. Their results are
29// verified against real hardware, but it is not known whether the algorithm matches the hardware's.
30static unsigned int NoiseRand1D(unsigned int v) {
31 static constexpr std::array<unsigned int, 16> table{
32 {0, 4, 10, 8, 4, 9, 7, 12, 5, 15, 13, 14, 11, 15, 2, 11}};
33 return ((v % 9 + 2) * 3 & 0xF) ^ table[(v / 9) & 0xF];
34}
35
36static float NoiseRand2D(unsigned int x, unsigned int y) {
37 static constexpr std::array<unsigned int, 16> table{
38 {10, 2, 15, 8, 0, 7, 4, 5, 5, 13, 2, 6, 13, 9, 3, 14}};
39 unsigned int u2 = NoiseRand1D(x);
40 unsigned int v2 = NoiseRand1D(y);
41 v2 += ((u2 & 3) == 1) ? 4 : 0;
42 v2 ^= (u2 & 1) * 6;
43 v2 += 10 + u2;
44 v2 &= 0xF;
45 v2 ^= table[u2];
46 return -1.0f + v2 * 2.0f / 15.0f;
47}
48
49static float NoiseCoef(float u, float v, TexturingRegs regs, State::ProcTex state) {
50 const float freq_u = float16::FromRaw(regs.proctex_noise_frequency.u).ToFloat32();
51 const float freq_v = float16::FromRaw(regs.proctex_noise_frequency.v).ToFloat32();
52 const float phase_u = float16::FromRaw(regs.proctex_noise_u.phase).ToFloat32();
53 const float phase_v = float16::FromRaw(regs.proctex_noise_v.phase).ToFloat32();
54 const float x = 9 * freq_u * std::abs(u + phase_u);
55 const float y = 9 * freq_v * std::abs(v + phase_v);
56 const int x_int = static_cast<int>(x);
57 const int y_int = static_cast<int>(y);
58 const float x_frac = x - x_int;
59 const float y_frac = y - y_int;
60
61 const float g0 = NoiseRand2D(x_int, y_int) * (x_frac + y_frac);
62 const float g1 = NoiseRand2D(x_int + 1, y_int) * (x_frac + y_frac - 1);
63 const float g2 = NoiseRand2D(x_int, y_int + 1) * (x_frac + y_frac - 1);
64 const float g3 = NoiseRand2D(x_int + 1, y_int + 1) * (x_frac + y_frac - 2);
65 const float x_noise = LookupLUT(state.noise_table, x_frac);
66 const float y_noise = LookupLUT(state.noise_table, y_frac);
67 return Math::BilinearInterp(g0, g1, g2, g3, x_noise, y_noise);
68}
69
70static float GetShiftOffset(float v, ProcTexShift mode, ProcTexClamp clamp_mode) {
71 const float offset = (clamp_mode == ProcTexClamp::MirroredRepeat) ? 1 : 0.5f;
72 switch (mode) {
73 case ProcTexShift::None:
74 return 0;
75 case ProcTexShift::Odd:
76 return offset * (((int)v / 2) % 2);
77 case ProcTexShift::Even:
78 return offset * ((((int)v + 1) / 2) % 2);
79 default:
80 LOG_CRITICAL(HW_GPU, "Unknown shift mode %u", static_cast<u32>(mode));
81 return 0;
82 }
83};
84
85static void ClampCoord(float& coord, ProcTexClamp mode) {
86 switch (mode) {
87 case ProcTexClamp::ToZero:
88 if (coord > 1.0f)
89 coord = 0.0f;
90 break;
91 case ProcTexClamp::ToEdge:
92 coord = std::min(coord, 1.0f);
93 break;
94 case ProcTexClamp::SymmetricalRepeat:
95 coord = coord - std::floor(coord);
96 break;
97 case ProcTexClamp::MirroredRepeat: {
98 int integer = static_cast<int>(coord);
99 float frac = coord - integer;
100 coord = (integer % 2) == 0 ? frac : (1.0f - frac);
101 break;
102 }
103 case ProcTexClamp::Pulse:
104 if (coord <= 0.5f)
105 coord = 0.0f;
106 else
107 coord = 1.0f;
108 break;
109 default:
110 LOG_CRITICAL(HW_GPU, "Unknown clamp mode %u", static_cast<u32>(mode));
111 coord = std::min(coord, 1.0f);
112 break;
113 }
114}
115
116float CombineAndMap(float u, float v, ProcTexCombiner combiner,
117 const std::array<State::ProcTex::ValueEntry, 128>& map_table) {
118 float f;
119 switch (combiner) {
120 case ProcTexCombiner::U:
121 f = u;
122 break;
123 case ProcTexCombiner::U2:
124 f = u * u;
125 break;
126 case TexturingRegs::ProcTexCombiner::V:
127 f = v;
128 break;
129 case TexturingRegs::ProcTexCombiner::V2:
130 f = v * v;
131 break;
132 case TexturingRegs::ProcTexCombiner::Add:
133 f = (u + v) * 0.5f;
134 break;
135 case TexturingRegs::ProcTexCombiner::Add2:
136 f = (u * u + v * v) * 0.5f;
137 break;
138 case TexturingRegs::ProcTexCombiner::SqrtAdd2:
139 f = std::min(std::sqrt(u * u + v * v), 1.0f);
140 break;
141 case TexturingRegs::ProcTexCombiner::Min:
142 f = std::min(u, v);
143 break;
144 case TexturingRegs::ProcTexCombiner::Max:
145 f = std::max(u, v);
146 break;
147 case TexturingRegs::ProcTexCombiner::RMax:
148 f = std::min(((u + v) * 0.5f + std::sqrt(u * u + v * v)) * 0.5f, 1.0f);
149 break;
150 default:
151 LOG_CRITICAL(HW_GPU, "Unknown combiner %u", static_cast<u32>(combiner));
152 f = 0.0f;
153 break;
154 }
155 return LookupLUT(map_table, f);
156}
157
158Math::Vec4<u8> ProcTex(float u, float v, TexturingRegs regs, State::ProcTex state) {
159 u = std::abs(u);
160 v = std::abs(v);
161
162 // Get shift offset before noise generation
163 const float u_shift = GetShiftOffset(v, regs.proctex.u_shift, regs.proctex.u_clamp);
164 const float v_shift = GetShiftOffset(u, regs.proctex.v_shift, regs.proctex.v_clamp);
165
166 // Generate noise
167 if (regs.proctex.noise_enable) {
168 float noise = NoiseCoef(u, v, regs, state);
169 u += noise * regs.proctex_noise_u.amplitude / 4095.0f;
170 v += noise * regs.proctex_noise_v.amplitude / 4095.0f;
171 u = std::abs(u);
172 v = std::abs(v);
173 }
174
175 // Shift
176 u += u_shift;
177 v += v_shift;
178
179 // Clamp
180 ClampCoord(u, regs.proctex.u_clamp);
181 ClampCoord(v, regs.proctex.v_clamp);
182
183 // Combine and map
184 const float lut_coord = CombineAndMap(u, v, regs.proctex.color_combiner, state.color_map_table);
185
186 // Look up the color
187 // For the color lut, coord=0.0 is lut[offset] and coord=1.0 is lut[offset+width-1]
188 const u32 offset = regs.proctex_lut_offset;
189 const u32 width = regs.proctex_lut.width;
190 const float index = offset + (lut_coord * (width - 1));
191 Math::Vec4<u8> final_color;
192 // TODO(wwylele): implement mipmap
193 switch (regs.proctex_lut.filter) {
194 case ProcTexFilter::Linear:
195 case ProcTexFilter::LinearMipmapLinear:
196 case ProcTexFilter::LinearMipmapNearest: {
197 const int index_int = static_cast<int>(index);
198 const float frac = index - index_int;
199 const auto color_value = state.color_table[index_int].ToVector().Cast<float>();
200 const auto color_diff = state.color_diff_table[index_int].ToVector().Cast<float>();
201 final_color = (color_value + frac * color_diff).Cast<u8>();
202 break;
203 }
204 case ProcTexFilter::Nearest:
205 case ProcTexFilter::NearestMipmapLinear:
206 case ProcTexFilter::NearestMipmapNearest:
207 final_color = state.color_table[static_cast<int>(std::round(index))].ToVector();
208 break;
209 }
210
211 if (regs.proctex.separate_alpha) {
212 // Note: in separate alpha mode, the alpha channel skips the color LUT look up stage. It
213 // uses the output of CombineAndMap directly instead.
214 const float final_alpha =
215 CombineAndMap(u, v, regs.proctex.alpha_combiner, state.alpha_map_table);
216 return Math::MakeVec<u8>(final_color.rgb(), static_cast<u8>(final_alpha * 255));
217 } else {
218 return final_color;
219 }
220}
221
222} // namespace Rasterizer
223} // namespace Pica
diff --git a/src/video_core/swrasterizer/proctex.h b/src/video_core/swrasterizer/proctex.h
deleted file mode 100644
index 036e4620e..000000000
--- a/src/video_core/swrasterizer/proctex.h
+++ /dev/null
@@ -1,16 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/common_types.h"
6#include "common/vector_math.h"
7#include "video_core/pica_state.h"
8
9namespace Pica {
10namespace Rasterizer {
11
12/// Generates procedural texture color for the given coordinates
13Math::Vec4<u8> ProcTex(float u, float v, TexturingRegs regs, State::ProcTex state);
14
15} // namespace Rasterizer
16} // namespace Pica
diff --git a/src/video_core/swrasterizer/rasterizer.cpp b/src/video_core/swrasterizer/rasterizer.cpp
deleted file mode 100644
index 862135614..000000000
--- a/src/video_core/swrasterizer/rasterizer.cpp
+++ /dev/null
@@ -1,853 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <array>
7#include <cmath>
8#include <tuple>
9#include "common/assert.h"
10#include "common/bit_field.h"
11#include "common/color.h"
12#include "common/common_types.h"
13#include "common/logging/log.h"
14#include "common/math_util.h"
15#include "common/microprofile.h"
16#include "common/quaternion.h"
17#include "common/vector_math.h"
18#include "core/hw/gpu.h"
19#include "core/memory.h"
20#include "video_core/debug_utils/debug_utils.h"
21#include "video_core/pica_state.h"
22#include "video_core/pica_types.h"
23#include "video_core/regs_framebuffer.h"
24#include "video_core/regs_rasterizer.h"
25#include "video_core/regs_texturing.h"
26#include "video_core/shader/shader.h"
27#include "video_core/swrasterizer/framebuffer.h"
28#include "video_core/swrasterizer/lighting.h"
29#include "video_core/swrasterizer/proctex.h"
30#include "video_core/swrasterizer/rasterizer.h"
31#include "video_core/swrasterizer/texturing.h"
32#include "video_core/texture/texture_decode.h"
33#include "video_core/utils.h"
34
35namespace Pica {
36namespace Rasterizer {
37
38// NOTE: Assuming that rasterizer coordinates are 12.4 fixed-point values
39struct Fix12P4 {
40 Fix12P4() {}
41 Fix12P4(u16 val) : val(val) {}
42
43 static u16 FracMask() {
44 return 0xF;
45 }
46 static u16 IntMask() {
47 return (u16)~0xF;
48 }
49
50 operator u16() const {
51 return val;
52 }
53
54 bool operator<(const Fix12P4& oth) const {
55 return (u16) * this < (u16)oth;
56 }
57
58private:
59 u16 val;
60};
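
As the note above says, rasterizer coordinates are 12.4 fixed point: 12 integer bits and 4 fractional bits, so one unit is 1/16 of a pixel. A small sketch (not from the original file) of converting between float and this representation:

#include <cmath>
#include <cstdint>
#include <cstdio>

// 12.4 fixed point: stored in a u16, the low 4 bits hold the fraction.
static std::uint16_t FloatToFix12P4(float f) {
    return static_cast<std::uint16_t>(std::round(f * 16.0f));
}

static float Fix12P4ToFloat(std::uint16_t v) {
    return static_cast<float>(v) / 16.0f;
}

int main() {
    const std::uint16_t x = FloatToFix12P4(3.25f); // 0x34 == 52
    std::printf("raw=0x%X back=%.2f\n", static_cast<unsigned>(x), Fix12P4ToFloat(x));
}
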
61
62/**
63 * Calculate signed area of the triangle spanned by the three argument vertices.
64 * The sign denotes an orientation.
65 *
66 * @todo define orientation concretely.
67 */
68static int SignedArea(const Math::Vec2<Fix12P4>& vtx1, const Math::Vec2<Fix12P4>& vtx2,
69 const Math::Vec2<Fix12P4>& vtx3) {
70 const auto vec1 = Math::MakeVec(vtx2 - vtx1, 0);
71 const auto vec2 = Math::MakeVec(vtx3 - vtx1, 0);
72 // TODO: There is a very small chance this will overflow for sizeof(int) == 4
73 return Math::Cross(vec1, vec2).z;
74};
75
76/// Convert a 3D vector for cube map coordinates to 2D texture coordinates along with the selected face's physical address
77static std::tuple<float24, float24, PAddr> ConvertCubeCoord(float24 u, float24 v, float24 w,
78 const TexturingRegs& regs) {
79 const float abs_u = std::abs(u.ToFloat32());
80 const float abs_v = std::abs(v.ToFloat32());
81 const float abs_w = std::abs(w.ToFloat32());
82 float24 x, y, z;
83 PAddr addr;
84 if (abs_u > abs_v && abs_u > abs_w) {
85 if (u > float24::FromFloat32(0)) {
86 addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveX);
87 y = -v;
88 } else {
89 addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeX);
90 y = v;
91 }
92 x = -w;
93 z = u;
94 } else if (abs_v > abs_w) {
95 if (v > float24::FromFloat32(0)) {
96 addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveY);
97 x = u;
98 } else {
99 addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeY);
100 x = -u;
101 }
102 y = w;
103 z = v;
104 } else {
105 if (w > float24::FromFloat32(0)) {
106 addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveZ);
107 y = -v;
108 } else {
109 addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeZ);
110 y = v;
111 }
112 x = u;
113 z = w;
114 }
115 const float24 half = float24::FromFloat32(0.5f);
116 return std::make_tuple(x / z * half + half, y / z * half + half, addr);
117}
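
A hedged sketch of the same idea: the dominant component selects the cube face, the remaining two components are divided by it, and the result is remapped from [-1, 1] to [0, 1]. This version only handles the +X face and uses plain floats rather than float24.

#include <cmath>
#include <cstdio>

int main() {
    // Direction vector pointing mostly along +X.
    const float u = 0.9f, v = 0.2f, w = -0.3f;
    if (std::abs(u) > std::abs(v) && std::abs(u) > std::abs(w) && u > 0.0f) {
        // For +X the code above uses x = -w, y = -v, z = u.
        const float s = (-w / u) * 0.5f + 0.5f;
        const float t = (-v / u) * 0.5f + 0.5f;
        std::printf("+X face: s=%.3f t=%.3f\n", s, t); // s~0.667, t~0.389
    }
}
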
118
119MICROPROFILE_DEFINE(GPU_Rasterization, "GPU", "Rasterization", MP_RGB(50, 50, 240));
120
121/**
122 * Helper function for ProcessTriangle with the "reversed" flag to allow for implementing
123 * culling via recursion.
124 */
125static void ProcessTriangleInternal(const Vertex& v0, const Vertex& v1, const Vertex& v2,
126 bool reversed = false) {
127 const auto& regs = g_state.regs;
128 MICROPROFILE_SCOPE(GPU_Rasterization);
129
130 // vertex positions in rasterizer coordinates
131 static auto FloatToFix = [](float24 flt) {
132 // TODO: Rounding here is necessary to prevent garbage pixels at
133        // triangle borders. Is that the correct solution, though?
134 return Fix12P4(static_cast<unsigned short>(round(flt.ToFloat32() * 16.0f)));
135 };
136 static auto ScreenToRasterizerCoordinates = [](const Math::Vec3<float24>& vec) {
137 return Math::Vec3<Fix12P4>{FloatToFix(vec.x), FloatToFix(vec.y), FloatToFix(vec.z)};
138 };
139
140 Math::Vec3<Fix12P4> vtxpos[3]{ScreenToRasterizerCoordinates(v0.screenpos),
141 ScreenToRasterizerCoordinates(v1.screenpos),
142 ScreenToRasterizerCoordinates(v2.screenpos)};
143
144 if (regs.rasterizer.cull_mode == RasterizerRegs::CullMode::KeepAll) {
145 // Make sure we always end up with a triangle wound counter-clockwise
146 if (!reversed && SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) <= 0) {
147 ProcessTriangleInternal(v0, v2, v1, true);
148 return;
149 }
150 } else {
151 if (!reversed && regs.rasterizer.cull_mode == RasterizerRegs::CullMode::KeepClockWise) {
152 // Reverse vertex order and use the CCW code path.
153 ProcessTriangleInternal(v0, v2, v1, true);
154 return;
155 }
156
157 // Cull away triangles which are wound clockwise.
158 if (SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) <= 0)
159 return;
160 }
161
162 u16 min_x = std::min({vtxpos[0].x, vtxpos[1].x, vtxpos[2].x});
163 u16 min_y = std::min({vtxpos[0].y, vtxpos[1].y, vtxpos[2].y});
164 u16 max_x = std::max({vtxpos[0].x, vtxpos[1].x, vtxpos[2].x});
165 u16 max_y = std::max({vtxpos[0].y, vtxpos[1].y, vtxpos[2].y});
166
167 // Convert the scissor box coordinates to 12.4 fixed point
168 u16 scissor_x1 = (u16)(regs.rasterizer.scissor_test.x1 << 4);
169 u16 scissor_y1 = (u16)(regs.rasterizer.scissor_test.y1 << 4);
170 // x2,y2 have +1 added to cover the entire sub-pixel area
171 u16 scissor_x2 = (u16)((regs.rasterizer.scissor_test.x2 + 1) << 4);
172 u16 scissor_y2 = (u16)((regs.rasterizer.scissor_test.y2 + 1) << 4);
173
174 if (regs.rasterizer.scissor_test.mode == RasterizerRegs::ScissorMode::Include) {
175 // Calculate the new bounds
176 min_x = std::max(min_x, scissor_x1);
177 min_y = std::max(min_y, scissor_y1);
178 max_x = std::min(max_x, scissor_x2);
179 max_y = std::min(max_y, scissor_y2);
180 }
181
182 min_x &= Fix12P4::IntMask();
183 min_y &= Fix12P4::IntMask();
184 max_x = ((max_x + Fix12P4::FracMask()) & Fix12P4::IntMask());
185 max_y = ((max_y + Fix12P4::FracMask()) & Fix12P4::IntMask());
186
187    // Triangle filling rules: Pixels on a right-side edge or on flat bottom edges are not
188 // drawn. Pixels on any other triangle border are drawn. This is implemented with three bias
189 // values which are added to the barycentric coordinates w0, w1 and w2, respectively.
190 // NOTE: These are the PSP filling rules. Not sure if the 3DS uses the same ones...
191 auto IsRightSideOrFlatBottomEdge = [](const Math::Vec2<Fix12P4>& vtx,
192 const Math::Vec2<Fix12P4>& line1,
193 const Math::Vec2<Fix12P4>& line2) {
194 if (line1.y == line2.y) {
195 // just check if vertex is above us => bottom line parallel to x-axis
196 return vtx.y < line1.y;
197 } else {
198 // check if vertex is on our left => right side
199 // TODO: Not sure how likely this is to overflow
200 return (int)vtx.x < (int)line1.x +
201 ((int)line2.x - (int)line1.x) * ((int)vtx.y - (int)line1.y) /
202 ((int)line2.y - (int)line1.y);
203 }
204 };
205 int bias0 =
206 IsRightSideOrFlatBottomEdge(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) ? -1 : 0;
207 int bias1 =
208 IsRightSideOrFlatBottomEdge(vtxpos[1].xy(), vtxpos[2].xy(), vtxpos[0].xy()) ? -1 : 0;
209 int bias2 =
210 IsRightSideOrFlatBottomEdge(vtxpos[2].xy(), vtxpos[0].xy(), vtxpos[1].xy()) ? -1 : 0;
211
212 auto w_inverse = Math::MakeVec(v0.pos.w, v1.pos.w, v2.pos.w);
213
214 auto textures = regs.texturing.GetTextures();
215 auto tev_stages = regs.texturing.GetTevStages();
216
217 bool stencil_action_enable =
218 g_state.regs.framebuffer.output_merger.stencil_test.enable &&
219 g_state.regs.framebuffer.framebuffer.depth_format == FramebufferRegs::DepthFormat::D24S8;
220 const auto stencil_test = g_state.regs.framebuffer.output_merger.stencil_test;
221
222    // Enter the rasterization loop, starting at the center of the top-left pixel of the bounding box.
223 // TODO: Not sure if looping through x first might be faster
224 for (u16 y = min_y + 8; y < max_y; y += 0x10) {
225 for (u16 x = min_x + 8; x < max_x; x += 0x10) {
226
227 // Do not process the pixel if it's inside the scissor box and the scissor mode is set
228 // to Exclude
229 if (regs.rasterizer.scissor_test.mode == RasterizerRegs::ScissorMode::Exclude) {
230 if (x >= scissor_x1 && x < scissor_x2 && y >= scissor_y1 && y < scissor_y2)
231 continue;
232 }
233
234 // Calculate the barycentric coordinates w0, w1 and w2
235 int w0 = bias0 + SignedArea(vtxpos[1].xy(), vtxpos[2].xy(), {x, y});
236 int w1 = bias1 + SignedArea(vtxpos[2].xy(), vtxpos[0].xy(), {x, y});
237 int w2 = bias2 + SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), {x, y});
238 int wsum = w0 + w1 + w2;
239
240 // If current pixel is not covered by the current primitive
241 if (w0 < 0 || w1 < 0 || w2 < 0)
242 continue;
243
244 auto baricentric_coordinates =
245 Math::MakeVec(float24::FromFloat32(static_cast<float>(w0)),
246 float24::FromFloat32(static_cast<float>(w1)),
247 float24::FromFloat32(static_cast<float>(w2)));
248 float24 interpolated_w_inverse =
249 float24::FromFloat32(1.0f) / Math::Dot(w_inverse, baricentric_coordinates);
250
251 // interpolated_z = z / w
252 float interpolated_z_over_w =
253 (v0.screenpos[2].ToFloat32() * w0 + v1.screenpos[2].ToFloat32() * w1 +
254 v2.screenpos[2].ToFloat32() * w2) /
255 wsum;
256
257            // Not fully accurate. About 3 bits of precision are missing.
258 // Z-Buffer (z / w * scale + offset)
259 float depth_scale = float24::FromRaw(regs.rasterizer.viewport_depth_range).ToFloat32();
260 float depth_offset =
261 float24::FromRaw(regs.rasterizer.viewport_depth_near_plane).ToFloat32();
262 float depth = interpolated_z_over_w * depth_scale + depth_offset;
263
264 // Potentially switch to W-Buffer
265 if (regs.rasterizer.depthmap_enable ==
266 Pica::RasterizerRegs::DepthBuffering::WBuffering) {
267 // W-Buffer (z * scale + w * offset = (z / w * scale + offset) * w)
268 depth *= interpolated_w_inverse.ToFloat32() * wsum;
269 }
270
271 // Clamp the result
272 depth = MathUtil::Clamp(depth, 0.0f, 1.0f);
273
274 // Perspective correct attribute interpolation:
275 // Attribute values cannot be calculated by simple linear interpolation since
276 // they are not linear in screen space. For example, when interpolating a
277 // texture coordinate across two vertices, something simple like
278 // u = (u0*w0 + u1*w1)/(w0+w1)
279 // will not work. However, the attribute value divided by the
280            // clip-space w-coordinate (u/w) and the inverse w-coordinate (1/w) are linear
281 // in screenspace. Hence, we can linearly interpolate these two independently and
282 // calculate the interpolated attribute by dividing the results.
283 // I.e.
284 // u_over_w = ((u0/v0.pos.w)*w0 + (u1/v1.pos.w)*w1)/(w0+w1)
285 // one_over_w = (( 1/v0.pos.w)*w0 + ( 1/v1.pos.w)*w1)/(w0+w1)
286 // u = u_over_w / one_over_w
287 //
288            // The generalization to three vertices is straightforward in barycentric coordinates.
289 auto GetInterpolatedAttribute = [&](float24 attr0, float24 attr1, float24 attr2) {
290 auto attr_over_w = Math::MakeVec(attr0, attr1, attr2);
291 float24 interpolated_attr_over_w = Math::Dot(attr_over_w, baricentric_coordinates);
292 return interpolated_attr_over_w * interpolated_w_inverse;
293 };
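
A self-contained illustration of the scheme described in the comment above, following its u_over_w / one_over_w formulation rather than the exact code path; the attribute values, w values, and weights below are made up for the example.

#include <cstdio>

int main() {
    // Vertex attributes (e.g. a texture coordinate) and clip-space w.
    const float u0 = 0.0f, w0 = 1.0f;
    const float u1 = 1.0f, w1 = 4.0f;
    // Equal screen-space weights for a point halfway between the two vertices.
    const float b0 = 0.5f, b1 = 0.5f;

    // Naive linear interpolation (wrong under perspective):
    const float naive = u0 * b0 + u1 * b1; // 0.5

    // Interpolate u/w and 1/w separately, then divide:
    const float u_over_w = (u0 / w0) * b0 + (u1 / w1) * b1;       // 0.125
    const float one_over_w = (1.0f / w0) * b0 + (1.0f / w1) * b1; // 0.625
    const float correct = u_over_w / one_over_w;                  // 0.2

    std::printf("naive=%.3f perspective-correct=%.3f\n", naive, correct);
}
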
294
295 Math::Vec4<u8> primary_color{
296 (u8)(
297 GetInterpolatedAttribute(v0.color.r(), v1.color.r(), v2.color.r()).ToFloat32() *
298 255),
299 (u8)(
300 GetInterpolatedAttribute(v0.color.g(), v1.color.g(), v2.color.g()).ToFloat32() *
301 255),
302 (u8)(
303 GetInterpolatedAttribute(v0.color.b(), v1.color.b(), v2.color.b()).ToFloat32() *
304 255),
305 (u8)(
306 GetInterpolatedAttribute(v0.color.a(), v1.color.a(), v2.color.a()).ToFloat32() *
307 255),
308 };
309
310 Math::Vec2<float24> uv[3];
311 uv[0].u() = GetInterpolatedAttribute(v0.tc0.u(), v1.tc0.u(), v2.tc0.u());
312 uv[0].v() = GetInterpolatedAttribute(v0.tc0.v(), v1.tc0.v(), v2.tc0.v());
313 uv[1].u() = GetInterpolatedAttribute(v0.tc1.u(), v1.tc1.u(), v2.tc1.u());
314 uv[1].v() = GetInterpolatedAttribute(v0.tc1.v(), v1.tc1.v(), v2.tc1.v());
315 uv[2].u() = GetInterpolatedAttribute(v0.tc2.u(), v1.tc2.u(), v2.tc2.u());
316 uv[2].v() = GetInterpolatedAttribute(v0.tc2.v(), v1.tc2.v(), v2.tc2.v());
317
318 Math::Vec4<u8> texture_color[4]{};
319 for (int i = 0; i < 3; ++i) {
320 const auto& texture = textures[i];
321 if (!texture.enabled)
322 continue;
323
324 DEBUG_ASSERT(0 != texture.config.address);
325
326 int coordinate_i =
327 (i == 2 && regs.texturing.main_config.texture2_use_coord1) ? 1 : i;
328 float24 u = uv[coordinate_i].u();
329 float24 v = uv[coordinate_i].v();
330
331 // Only unit 0 respects the texturing type (according to 3DBrew)
332 // TODO: Refactor so cubemaps and shadowmaps can be handled
333 PAddr texture_address = texture.config.GetPhysicalAddress();
334 if (i == 0) {
335 switch (texture.config.type) {
336 case TexturingRegs::TextureConfig::Texture2D:
337 break;
338 case TexturingRegs::TextureConfig::TextureCube: {
339 auto w = GetInterpolatedAttribute(v0.tc0_w, v1.tc0_w, v2.tc0_w);
340 std::tie(u, v, texture_address) = ConvertCubeCoord(u, v, w, regs.texturing);
341 break;
342 }
343 case TexturingRegs::TextureConfig::Projection2D: {
344 auto tc0_w = GetInterpolatedAttribute(v0.tc0_w, v1.tc0_w, v2.tc0_w);
345 u /= tc0_w;
346 v /= tc0_w;
347 break;
348 }
349 default:
350 // TODO: Change to LOG_ERROR when more types are handled.
351 LOG_DEBUG(HW_GPU, "Unhandled texture type %x", (int)texture.config.type);
352 UNIMPLEMENTED();
353 break;
354 }
355 }
356
357 int s = (int)(u * float24::FromFloat32(static_cast<float>(texture.config.width)))
358 .ToFloat32();
359 int t = (int)(v * float24::FromFloat32(static_cast<float>(texture.config.height)))
360 .ToFloat32();
361
362 bool use_border_s = false;
363 bool use_border_t = false;
364
365 if (texture.config.wrap_s == TexturingRegs::TextureConfig::ClampToBorder) {
366 use_border_s = s < 0 || s >= static_cast<int>(texture.config.width);
367 } else if (texture.config.wrap_s == TexturingRegs::TextureConfig::ClampToBorder2) {
368 use_border_s = s >= static_cast<int>(texture.config.width);
369 }
370
371 if (texture.config.wrap_t == TexturingRegs::TextureConfig::ClampToBorder) {
372 use_border_t = t < 0 || t >= static_cast<int>(texture.config.height);
373 } else if (texture.config.wrap_t == TexturingRegs::TextureConfig::ClampToBorder2) {
374 use_border_t = t >= static_cast<int>(texture.config.height);
375 }
376
377 if (use_border_s || use_border_t) {
378 auto border_color = texture.config.border_color;
379 texture_color[i] = {border_color.r, border_color.g, border_color.b,
380 border_color.a};
381 } else {
382 // Textures are laid out from bottom to top, hence we invert the t coordinate.
383 // NOTE: This may not be the right place for the inversion.
384 // TODO: Check if this applies to ETC textures, too.
385 s = GetWrappedTexCoord(texture.config.wrap_s, s, texture.config.width);
386 t = texture.config.height - 1 -
387 GetWrappedTexCoord(texture.config.wrap_t, t, texture.config.height);
388
389 const u8* texture_data = Memory::GetPhysicalPointer(texture_address);
390 auto info =
391 Texture::TextureInfo::FromPicaRegister(texture.config, texture.format);
392
393 // TODO: Apply the min and mag filters to the texture
394 texture_color[i] = Texture::LookupTexture(texture_data, s, t, info);
395#if PICA_DUMP_TEXTURES
396 DebugUtils::DumpTexture(texture.config, texture_data);
397#endif
398 }
399 }
400
401 // sample procedural texture
402 if (regs.texturing.main_config.texture3_enable) {
403 const auto& proctex_uv = uv[regs.texturing.main_config.texture3_coordinates];
404 texture_color[3] = ProcTex(proctex_uv.u().ToFloat32(), proctex_uv.v().ToFloat32(),
405 g_state.regs.texturing, g_state.proctex);
406 }
407
408 // Texture environment - consists of 6 stages of color and alpha combining.
409 //
410 // Color combiners take three input color values from some source (e.g. interpolated
411 // vertex color, texture color, previous stage, etc), perform some very simple
412 // operations on each of them (e.g. inversion) and then calculate the output color
413 // with some basic arithmetic. Alpha combiners can be configured separately but work
414 // analogously.
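
As a hedged, standalone sketch of one such stage: a "modulate" color combiner multiplying the interpolated vertex color by the texture-0 sample, with per-channel arithmetic in the 0-255 range. The helper names here are invented and do not correspond to functions in this file.

#include <array>
#include <cstdint>
#include <cstdio>

using Color = std::array<std::uint8_t, 4>; // r, g, b, a

// One hypothetical TEV stage: out = (src0 * src1) / 255 per channel.
static Color ModulateStage(const Color& src0, const Color& src1) {
    Color out{};
    for (int i = 0; i < 4; ++i)
        out[i] = static_cast<std::uint8_t>(src0[i] * src1[i] / 255);
    return out;
}

int main() {
    const Color vertex_color{255, 128, 64, 255};
    const Color texture0{128, 128, 128, 255};
    const Color result = ModulateStage(vertex_color, texture0);
    std::printf("%d %d %d %d\n", result[0], result[1], result[2], result[3]); // 128 64 32 255
}
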
415 Math::Vec4<u8> combiner_output;
416 Math::Vec4<u8> combiner_buffer = {0, 0, 0, 0};
417 Math::Vec4<u8> next_combiner_buffer = {
418 regs.texturing.tev_combiner_buffer_color.r,
419 regs.texturing.tev_combiner_buffer_color.g,
420 regs.texturing.tev_combiner_buffer_color.b,
421 regs.texturing.tev_combiner_buffer_color.a,
422 };
423
424 Math::Vec4<u8> primary_fragment_color = {0, 0, 0, 0};
425 Math::Vec4<u8> secondary_fragment_color = {0, 0, 0, 0};
426
427 if (!g_state.regs.lighting.disable) {
428 Math::Quaternion<float> normquat = Math::Quaternion<float>{
429 {GetInterpolatedAttribute(v0.quat.x, v1.quat.x, v2.quat.x).ToFloat32(),
430 GetInterpolatedAttribute(v0.quat.y, v1.quat.y, v2.quat.y).ToFloat32(),
431 GetInterpolatedAttribute(v0.quat.z, v1.quat.z, v2.quat.z).ToFloat32()},
432 GetInterpolatedAttribute(v0.quat.w, v1.quat.w, v2.quat.w).ToFloat32(),
433 }.Normalized();
434
435 Math::Vec3<float> view{
436 GetInterpolatedAttribute(v0.view.x, v1.view.x, v2.view.x).ToFloat32(),
437 GetInterpolatedAttribute(v0.view.y, v1.view.y, v2.view.y).ToFloat32(),
438 GetInterpolatedAttribute(v0.view.z, v1.view.z, v2.view.z).ToFloat32(),
439 };
440 std::tie(primary_fragment_color, secondary_fragment_color) = ComputeFragmentsColors(
441 g_state.regs.lighting, g_state.lighting, normquat, view, texture_color);
442 }
443
444 for (unsigned tev_stage_index = 0; tev_stage_index < tev_stages.size();
445 ++tev_stage_index) {
446 const auto& tev_stage = tev_stages[tev_stage_index];
447 using Source = TexturingRegs::TevStageConfig::Source;
448
449 auto GetSource = [&](Source source) -> Math::Vec4<u8> {
450 switch (source) {
451 case Source::PrimaryColor:
452 return primary_color;
453
454 case Source::PrimaryFragmentColor:
455 return primary_fragment_color;
456
457 case Source::SecondaryFragmentColor:
458 return secondary_fragment_color;
459
460 case Source::Texture0:
461 return texture_color[0];
462
463 case Source::Texture1:
464 return texture_color[1];
465
466 case Source::Texture2:
467 return texture_color[2];
468
469 case Source::Texture3:
470 return texture_color[3];
471
472 case Source::PreviousBuffer:
473 return combiner_buffer;
474
475 case Source::Constant:
476 return {tev_stage.const_r, tev_stage.const_g, tev_stage.const_b,
477 tev_stage.const_a};
478
479 case Source::Previous:
480 return combiner_output;
481
482 default:
483 LOG_ERROR(HW_GPU, "Unknown color combiner source %d", (int)source);
484 UNIMPLEMENTED();
485 return {0, 0, 0, 0};
486 }
487 };
488
489 // color combiner
490 // NOTE: Not sure if the alpha combiner might use the color output of the previous
491 // stage as input. Hence, we currently don't directly write the result to
492 // combiner_output.rgb(), but instead store it in a temporary variable until
493 // alpha combining has been done.
494 Math::Vec3<u8> color_result[3] = {
495 GetColorModifier(tev_stage.color_modifier1, GetSource(tev_stage.color_source1)),
496 GetColorModifier(tev_stage.color_modifier2, GetSource(tev_stage.color_source2)),
497 GetColorModifier(tev_stage.color_modifier3, GetSource(tev_stage.color_source3)),
498 };
499 auto color_output = ColorCombine(tev_stage.color_op, color_result);
500
501 u8 alpha_output;
502 if (tev_stage.color_op == TexturingRegs::TevStageConfig::Operation::Dot3_RGBA) {
503 // result of Dot3_RGBA operation is also placed to the alpha component
504 alpha_output = color_output.x;
505 } else {
506 // alpha combiner
507 std::array<u8, 3> alpha_result = {{
508 GetAlphaModifier(tev_stage.alpha_modifier1,
509 GetSource(tev_stage.alpha_source1)),
510 GetAlphaModifier(tev_stage.alpha_modifier2,
511 GetSource(tev_stage.alpha_source2)),
512 GetAlphaModifier(tev_stage.alpha_modifier3,
513 GetSource(tev_stage.alpha_source3)),
514 }};
515 alpha_output = AlphaCombine(tev_stage.alpha_op, alpha_result);
516 }
517
518 combiner_output[0] =
519 std::min((unsigned)255, color_output.r() * tev_stage.GetColorMultiplier());
520 combiner_output[1] =
521 std::min((unsigned)255, color_output.g() * tev_stage.GetColorMultiplier());
522 combiner_output[2] =
523 std::min((unsigned)255, color_output.b() * tev_stage.GetColorMultiplier());
524 combiner_output[3] =
525 std::min((unsigned)255, alpha_output * tev_stage.GetAlphaMultiplier());
526
527 combiner_buffer = next_combiner_buffer;
528
529 if (regs.texturing.tev_combiner_buffer_input.TevStageUpdatesCombinerBufferColor(
530 tev_stage_index)) {
531 next_combiner_buffer.r() = combiner_output.r();
532 next_combiner_buffer.g() = combiner_output.g();
533 next_combiner_buffer.b() = combiner_output.b();
534 }
535
536 if (regs.texturing.tev_combiner_buffer_input.TevStageUpdatesCombinerBufferAlpha(
537 tev_stage_index)) {
538 next_combiner_buffer.a() = combiner_output.a();
539 }
540 }
541
542 const auto& output_merger = regs.framebuffer.output_merger;
543 // TODO: Does alpha testing happen before or after stencil?
544 if (output_merger.alpha_test.enable) {
545 bool pass = false;
546
547 switch (output_merger.alpha_test.func) {
548 case FramebufferRegs::CompareFunc::Never:
549 pass = false;
550 break;
551
552 case FramebufferRegs::CompareFunc::Always:
553 pass = true;
554 break;
555
556 case FramebufferRegs::CompareFunc::Equal:
557 pass = combiner_output.a() == output_merger.alpha_test.ref;
558 break;
559
560 case FramebufferRegs::CompareFunc::NotEqual:
561 pass = combiner_output.a() != output_merger.alpha_test.ref;
562 break;
563
564 case FramebufferRegs::CompareFunc::LessThan:
565 pass = combiner_output.a() < output_merger.alpha_test.ref;
566 break;
567
568 case FramebufferRegs::CompareFunc::LessThanOrEqual:
569 pass = combiner_output.a() <= output_merger.alpha_test.ref;
570 break;
571
572 case FramebufferRegs::CompareFunc::GreaterThan:
573 pass = combiner_output.a() > output_merger.alpha_test.ref;
574 break;
575
576 case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
577 pass = combiner_output.a() >= output_merger.alpha_test.ref;
578 break;
579 }
580
581 if (!pass)
582 continue;
583 }
584
585 // Apply fog combiner
586 // Not fully accurate. We'd have to know what data type is used to
587 // store the depth etc. Using float for now until we know more
588 // about Pica datatypes
589 if (regs.texturing.fog_mode == TexturingRegs::FogMode::Fog) {
590 const Math::Vec3<u8> fog_color = {
591 static_cast<u8>(regs.texturing.fog_color.r.Value()),
592 static_cast<u8>(regs.texturing.fog_color.g.Value()),
593 static_cast<u8>(regs.texturing.fog_color.b.Value()),
594 };
595
596 // Get index into fog LUT
597 float fog_index;
598 if (g_state.regs.texturing.fog_flip) {
599 fog_index = (1.0f - depth) * 128.0f;
600 } else {
601 fog_index = depth * 128.0f;
602 }
603
604 // Generate clamped fog factor from LUT for given fog index
605 float fog_i = MathUtil::Clamp(floorf(fog_index), 0.0f, 127.0f);
606 float fog_f = fog_index - fog_i;
607 const auto& fog_lut_entry = g_state.fog.lut[static_cast<unsigned int>(fog_i)];
608 float fog_factor = fog_lut_entry.ToFloat() + fog_lut_entry.DiffToFloat() * fog_f;
609 fog_factor = MathUtil::Clamp(fog_factor, 0.0f, 1.0f);
610
611 // Blend the fog
612 for (unsigned i = 0; i < 3; i++) {
613 combiner_output[i] = static_cast<u8>(fog_factor * combiner_output[i] +
614 (1.0f - fog_factor) * fog_color[i]);
615 }
616 }
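
A minimal sketch of the fog factor computation above, using a hypothetical 128-entry LUT whose factor falls off linearly from 1.0 to 0.0; the LUT contents and input depth are invented for illustration.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const float depth = 0.37f; // clamped depth from the depth computation above
    const bool fog_flip = false;

    const float fog_index = (fog_flip ? (1.0f - depth) : depth) * 128.0f; // 47.36
    const float fog_i = std::min(std::max(std::floor(fog_index), 0.0f), 127.0f);
    const float fog_f = fog_index - fog_i;
    const float lut_value = 1.0f - fog_i / 128.0f; // hypothetical entry value
    const float lut_diff = -1.0f / 128.0f;         // difference to the next entry
    float fog_factor = lut_value + lut_diff * fog_f;
    fog_factor = std::min(std::max(fog_factor, 0.0f), 1.0f);

    // Blend toward the fog color: out = factor * fragment + (1 - factor) * fog.
    const float fragment_r = 200.0f, fog_r = 80.0f;
    std::printf("factor=%.3f r=%.1f\n", fog_factor,
                fog_factor * fragment_r + (1.0f - fog_factor) * fog_r);
}
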
617
618 u8 old_stencil = 0;
619
620 auto UpdateStencil = [stencil_test, x, y,
621 &old_stencil](Pica::FramebufferRegs::StencilAction action) {
622 u8 new_stencil =
623 PerformStencilAction(action, old_stencil, stencil_test.reference_value);
624 if (g_state.regs.framebuffer.framebuffer.allow_depth_stencil_write != 0)
625 SetStencil(x >> 4, y >> 4, (new_stencil & stencil_test.write_mask) |
626 (old_stencil & ~stencil_test.write_mask));
627 };
628
629 if (stencil_action_enable) {
630 old_stencil = GetStencil(x >> 4, y >> 4);
631 u8 dest = old_stencil & stencil_test.input_mask;
632 u8 ref = stencil_test.reference_value & stencil_test.input_mask;
633
634 bool pass = false;
635 switch (stencil_test.func) {
636 case FramebufferRegs::CompareFunc::Never:
637 pass = false;
638 break;
639
640 case FramebufferRegs::CompareFunc::Always:
641 pass = true;
642 break;
643
644 case FramebufferRegs::CompareFunc::Equal:
645 pass = (ref == dest);
646 break;
647
648 case FramebufferRegs::CompareFunc::NotEqual:
649 pass = (ref != dest);
650 break;
651
652 case FramebufferRegs::CompareFunc::LessThan:
653 pass = (ref < dest);
654 break;
655
656 case FramebufferRegs::CompareFunc::LessThanOrEqual:
657 pass = (ref <= dest);
658 break;
659
660 case FramebufferRegs::CompareFunc::GreaterThan:
661 pass = (ref > dest);
662 break;
663
664 case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
665 pass = (ref >= dest);
666 break;
667 }
668
669 if (!pass) {
670 UpdateStencil(stencil_test.action_stencil_fail);
671 continue;
672 }
673 }
674
675 // Convert float to integer
676 unsigned num_bits =
677 FramebufferRegs::DepthBitsPerPixel(regs.framebuffer.framebuffer.depth_format);
678 u32 z = (u32)(depth * ((1 << num_bits) - 1));
679
680 if (output_merger.depth_test_enable) {
681 u32 ref_z = GetDepth(x >> 4, y >> 4);
682
683 bool pass = false;
684
685 switch (output_merger.depth_test_func) {
686 case FramebufferRegs::CompareFunc::Never:
687 pass = false;
688 break;
689
690 case FramebufferRegs::CompareFunc::Always:
691 pass = true;
692 break;
693
694 case FramebufferRegs::CompareFunc::Equal:
695 pass = z == ref_z;
696 break;
697
698 case FramebufferRegs::CompareFunc::NotEqual:
699 pass = z != ref_z;
700 break;
701
702 case FramebufferRegs::CompareFunc::LessThan:
703 pass = z < ref_z;
704 break;
705
706 case FramebufferRegs::CompareFunc::LessThanOrEqual:
707 pass = z <= ref_z;
708 break;
709
710 case FramebufferRegs::CompareFunc::GreaterThan:
711 pass = z > ref_z;
712 break;
713
714 case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
715 pass = z >= ref_z;
716 break;
717 }
718
719 if (!pass) {
720 if (stencil_action_enable)
721 UpdateStencil(stencil_test.action_depth_fail);
722 continue;
723 }
724 }
725
726 if (regs.framebuffer.framebuffer.allow_depth_stencil_write != 0 &&
727 output_merger.depth_write_enable) {
728
729 SetDepth(x >> 4, y >> 4, z);
730 }
731
732 // The stencil depth_pass action is executed even if depth testing is disabled
733 if (stencil_action_enable)
734 UpdateStencil(stencil_test.action_depth_pass);
735
736 auto dest = GetPixel(x >> 4, y >> 4);
737 Math::Vec4<u8> blend_output = combiner_output;
738
739 if (output_merger.alphablend_enable) {
740 auto params = output_merger.alpha_blending;
741
742 auto LookupFactor = [&](unsigned channel,
743 FramebufferRegs::BlendFactor factor) -> u8 {
744 DEBUG_ASSERT(channel < 4);
745
746 const Math::Vec4<u8> blend_const = {
747 static_cast<u8>(output_merger.blend_const.r),
748 static_cast<u8>(output_merger.blend_const.g),
749 static_cast<u8>(output_merger.blend_const.b),
750 static_cast<u8>(output_merger.blend_const.a),
751 };
752
753 switch (factor) {
754 case FramebufferRegs::BlendFactor::Zero:
755 return 0;
756
757 case FramebufferRegs::BlendFactor::One:
758 return 255;
759
760 case FramebufferRegs::BlendFactor::SourceColor:
761 return combiner_output[channel];
762
763 case FramebufferRegs::BlendFactor::OneMinusSourceColor:
764 return 255 - combiner_output[channel];
765
766 case FramebufferRegs::BlendFactor::DestColor:
767 return dest[channel];
768
769 case FramebufferRegs::BlendFactor::OneMinusDestColor:
770 return 255 - dest[channel];
771
772 case FramebufferRegs::BlendFactor::SourceAlpha:
773 return combiner_output.a();
774
775 case FramebufferRegs::BlendFactor::OneMinusSourceAlpha:
776 return 255 - combiner_output.a();
777
778 case FramebufferRegs::BlendFactor::DestAlpha:
779 return dest.a();
780
781 case FramebufferRegs::BlendFactor::OneMinusDestAlpha:
782 return 255 - dest.a();
783
784 case FramebufferRegs::BlendFactor::ConstantColor:
785 return blend_const[channel];
786
787 case FramebufferRegs::BlendFactor::OneMinusConstantColor:
788 return 255 - blend_const[channel];
789
790 case FramebufferRegs::BlendFactor::ConstantAlpha:
791 return blend_const.a();
792
793 case FramebufferRegs::BlendFactor::OneMinusConstantAlpha:
794 return 255 - blend_const.a();
795
796 case FramebufferRegs::BlendFactor::SourceAlphaSaturate:
797 // Returns 1.0 for the alpha channel
798 if (channel == 3)
799 return 255;
800 return std::min(combiner_output.a(), static_cast<u8>(255 - dest.a()));
801
802 default:
803 LOG_CRITICAL(HW_GPU, "Unknown blend factor %x", factor);
804 UNIMPLEMENTED();
805 break;
806 }
807
808 return combiner_output[channel];
809 };
810
811 auto srcfactor = Math::MakeVec(LookupFactor(0, params.factor_source_rgb),
812 LookupFactor(1, params.factor_source_rgb),
813 LookupFactor(2, params.factor_source_rgb),
814 LookupFactor(3, params.factor_source_a));
815
816 auto dstfactor = Math::MakeVec(LookupFactor(0, params.factor_dest_rgb),
817 LookupFactor(1, params.factor_dest_rgb),
818 LookupFactor(2, params.factor_dest_rgb),
819 LookupFactor(3, params.factor_dest_a));
820
821 blend_output = EvaluateBlendEquation(combiner_output, srcfactor, dest, dstfactor,
822 params.blend_equation_rgb);
823 blend_output.a() = EvaluateBlendEquation(combiner_output, srcfactor, dest,
824 dstfactor, params.blend_equation_a)
825 .a();
826 } else {
827 blend_output =
828 Math::MakeVec(LogicOp(combiner_output.r(), dest.r(), output_merger.logic_op),
829 LogicOp(combiner_output.g(), dest.g(), output_merger.logic_op),
830 LogicOp(combiner_output.b(), dest.b(), output_merger.logic_op),
831 LogicOp(combiner_output.a(), dest.a(), output_merger.logic_op));
832 }
833
834 const Math::Vec4<u8> result = {
835 output_merger.red_enable ? blend_output.r() : dest.r(),
836 output_merger.green_enable ? blend_output.g() : dest.g(),
837 output_merger.blue_enable ? blend_output.b() : dest.b(),
838 output_merger.alpha_enable ? blend_output.a() : dest.a(),
839 };
840
841 if (regs.framebuffer.framebuffer.allow_color_write != 0)
842 DrawPixel(x >> 4, y >> 4, result);
843 }
844 }
845}
846
847void ProcessTriangle(const Vertex& v0, const Vertex& v1, const Vertex& v2) {
848 ProcessTriangleInternal(v0, v1, v2);
849}
850
851} // namespace Rasterizer
852
853} // namespace Pica
diff --git a/src/video_core/swrasterizer/rasterizer.h b/src/video_core/swrasterizer/rasterizer.h
deleted file mode 100644
index 66cd6cfd4..000000000
--- a/src/video_core/swrasterizer/rasterizer.h
+++ /dev/null
@@ -1,48 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "video_core/shader/shader.h"
8
9namespace Pica {
10
11namespace Rasterizer {
12
13struct Vertex : Shader::OutputVertex {
14 Vertex(const OutputVertex& v) : OutputVertex(v) {}
15
16 // Attributes used to store intermediate results
17 // position after perspective divide
18 Math::Vec3<float24> screenpos;
19
20 // Linear interpolation
21 // factor: 0=this, 1=vtx
22 // Note: This function cannot be called after perspective divide
23 void Lerp(float24 factor, const Vertex& vtx) {
24 pos = pos * factor + vtx.pos * (float24::FromFloat32(1) - factor);
25 quat = quat * factor + vtx.quat * (float24::FromFloat32(1) - factor);
26 color = color * factor + vtx.color * (float24::FromFloat32(1) - factor);
27 tc0 = tc0 * factor + vtx.tc0 * (float24::FromFloat32(1) - factor);
28 tc1 = tc1 * factor + vtx.tc1 * (float24::FromFloat32(1) - factor);
29 tc0_w = tc0_w * factor + vtx.tc0_w * (float24::FromFloat32(1) - factor);
30 view = view * factor + vtx.view * (float24::FromFloat32(1) - factor);
31 tc2 = tc2 * factor + vtx.tc2 * (float24::FromFloat32(1) - factor);
32 }
33
34 // Linear interpolation
35 // factor: 0=v0, 1=v1
36 // Note: This function cannot be called after perspective divide
37 static Vertex Lerp(float24 factor, const Vertex& v0, const Vertex& v1) {
38 Vertex ret = v0;
39 ret.Lerp(factor, v1);
40 return ret;
41 }
42};
43
44void ProcessTriangle(const Vertex& v0, const Vertex& v1, const Vertex& v2);
45
46} // namespace Rasterizer
47
48} // namespace Pica
diff --git a/src/video_core/swrasterizer/swrasterizer.cpp b/src/video_core/swrasterizer/swrasterizer.cpp
deleted file mode 100644
index 402b705dd..000000000
--- a/src/video_core/swrasterizer/swrasterizer.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "video_core/swrasterizer/clipper.h"
6#include "video_core/swrasterizer/swrasterizer.h"
7
8namespace VideoCore {
9
10void SWRasterizer::AddTriangle(const Pica::Shader::OutputVertex& v0,
11 const Pica::Shader::OutputVertex& v1,
12 const Pica::Shader::OutputVertex& v2) {
13 Pica::Clipper::ProcessTriangle(v0, v1, v2);
14}
15}
diff --git a/src/video_core/swrasterizer/swrasterizer.h b/src/video_core/swrasterizer/swrasterizer.h
deleted file mode 100644
index 04ebd5312..000000000
--- a/src/video_core/swrasterizer/swrasterizer.h
+++ /dev/null
@@ -1,27 +0,0 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8#include "video_core/rasterizer_interface.h"
9
10namespace Pica {
11namespace Shader {
12struct OutputVertex;
13}
14}
15
16namespace VideoCore {
17
18class SWRasterizer : public RasterizerInterface {
19 void AddTriangle(const Pica::Shader::OutputVertex& v0, const Pica::Shader::OutputVertex& v1,
20 const Pica::Shader::OutputVertex& v2) override;
21 void DrawTriangles() override {}
22 void NotifyPicaRegisterChanged(u32 id) override {}
23 void FlushAll() override {}
24 void FlushRegion(PAddr addr, u64 size) override {}
25 void FlushAndInvalidateRegion(PAddr addr, u64 size) override {}
26};
27}
diff --git a/src/video_core/swrasterizer/texturing.cpp b/src/video_core/swrasterizer/texturing.cpp
deleted file mode 100644
index 79b1ce841..000000000
--- a/src/video_core/swrasterizer/texturing.cpp
+++ /dev/null
@@ -1,244 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6
7#include "common/assert.h"
8#include "common/common_types.h"
9#include "common/math_util.h"
10#include "common/vector_math.h"
11#include "video_core/regs_texturing.h"
12#include "video_core/swrasterizer/texturing.h"
13
14namespace Pica {
15namespace Rasterizer {
16
17using TevStageConfig = TexturingRegs::TevStageConfig;
18
19int GetWrappedTexCoord(TexturingRegs::TextureConfig::WrapMode mode, int val, unsigned size) {
20 switch (mode) {
21 case TexturingRegs::TextureConfig::ClampToEdge2:
22 // For negative coordinate, ClampToEdge2 behaves the same as Repeat
23 if (val < 0) {
24 return static_cast<int>(static_cast<unsigned>(val) % size);
25 }
26 // [[fallthrough]]
27 case TexturingRegs::TextureConfig::ClampToEdge:
28 val = std::max(val, 0);
29 val = std::min(val, static_cast<int>(size) - 1);
30 return val;
31
32 case TexturingRegs::TextureConfig::ClampToBorder:
33 return val;
34
35 case TexturingRegs::TextureConfig::ClampToBorder2:
36 // For ClampToBorder2, the case of positive coordinate beyond the texture size is already
37 // handled outside. Here we only handle the negative coordinate in the same way as Repeat.
38 case TexturingRegs::TextureConfig::Repeat2:
39 case TexturingRegs::TextureConfig::Repeat3:
40 case TexturingRegs::TextureConfig::Repeat:
41 return static_cast<int>(static_cast<unsigned>(val) % size);
42
43 case TexturingRegs::TextureConfig::MirroredRepeat: {
44 unsigned int coord = (static_cast<unsigned>(val) % (2 * size));
45 if (coord >= size)
46 coord = 2 * size - 1 - coord;
47 return static_cast<int>(coord);
48 }
49
50 default:
51 LOG_ERROR(HW_GPU, "Unknown texture coordinate wrapping mode %x", (int)mode);
52 UNIMPLEMENTED();
53 return 0;
54 }
55};
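
A short standalone sketch of the Repeat and MirroredRepeat arithmetic used above, showing how negative and out-of-range coordinates map back into [0, size):

#include <cstdio>

static int Repeat(int val, unsigned size) {
    return static_cast<int>(static_cast<unsigned>(val) % size);
}

static int MirroredRepeat(int val, unsigned size) {
    unsigned coord = static_cast<unsigned>(val) % (2 * size);
    if (coord >= size)
        coord = 2 * size - 1 - coord;
    return static_cast<int>(coord);
}

int main() {
    // For an 8-texel-wide texture:
    std::printf("Repeat(9, 8)  = %d\n", Repeat(9, 8));          // 1
    std::printf("Repeat(-1, 8) = %d\n", Repeat(-1, 8));         // 7 (unsigned wrap-around)
    std::printf("Mirror(9, 8)  = %d\n", MirroredRepeat(9, 8));  // 6
    std::printf("Mirror(-1, 8) = %d\n", MirroredRepeat(-1, 8)); // 0
}
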
56
57Math::Vec3<u8> GetColorModifier(TevStageConfig::ColorModifier factor,
58 const Math::Vec4<u8>& values) {
59 using ColorModifier = TevStageConfig::ColorModifier;
60
61 switch (factor) {
62 case ColorModifier::SourceColor:
63 return values.rgb();
64
65 case ColorModifier::OneMinusSourceColor:
66 return (Math::Vec3<u8>(255, 255, 255) - values.rgb()).Cast<u8>();
67
68 case ColorModifier::SourceAlpha:
69 return values.aaa();
70
71 case ColorModifier::OneMinusSourceAlpha:
72 return (Math::Vec3<u8>(255, 255, 255) - values.aaa()).Cast<u8>();
73
74 case ColorModifier::SourceRed:
75 return values.rrr();
76
77 case ColorModifier::OneMinusSourceRed:
78 return (Math::Vec3<u8>(255, 255, 255) - values.rrr()).Cast<u8>();
79
80 case ColorModifier::SourceGreen:
81 return values.ggg();
82
83 case ColorModifier::OneMinusSourceGreen:
84 return (Math::Vec3<u8>(255, 255, 255) - values.ggg()).Cast<u8>();
85
86 case ColorModifier::SourceBlue:
87 return values.bbb();
88
89 case ColorModifier::OneMinusSourceBlue:
90 return (Math::Vec3<u8>(255, 255, 255) - values.bbb()).Cast<u8>();
91 }
92
93 UNREACHABLE();
94};
95
96u8 GetAlphaModifier(TevStageConfig::AlphaModifier factor, const Math::Vec4<u8>& values) {
97 using AlphaModifier = TevStageConfig::AlphaModifier;
98
99 switch (factor) {
100 case AlphaModifier::SourceAlpha:
101 return values.a();
102
103 case AlphaModifier::OneMinusSourceAlpha:
104 return 255 - values.a();
105
106 case AlphaModifier::SourceRed:
107 return values.r();
108
109 case AlphaModifier::OneMinusSourceRed:
110 return 255 - values.r();
111
112 case AlphaModifier::SourceGreen:
113 return values.g();
114
115 case AlphaModifier::OneMinusSourceGreen:
116 return 255 - values.g();
117
118 case AlphaModifier::SourceBlue:
119 return values.b();
120
121 case AlphaModifier::OneMinusSourceBlue:
122 return 255 - values.b();
123 }
124
125 UNREACHABLE();
126};
127
128Math::Vec3<u8> ColorCombine(TevStageConfig::Operation op, const Math::Vec3<u8> input[3]) {
129 using Operation = TevStageConfig::Operation;
130
131 switch (op) {
132 case Operation::Replace:
133 return input[0];
134
135 case Operation::Modulate:
136 return ((input[0] * input[1]) / 255).Cast<u8>();
137
138 case Operation::Add: {
139 auto result = input[0] + input[1];
140 result.r() = std::min(255, result.r());
141 result.g() = std::min(255, result.g());
142 result.b() = std::min(255, result.b());
143 return result.Cast<u8>();
144 }
145
146 case Operation::AddSigned: {
147 // TODO(bunnei): Verify that the color conversion from (float) 0.5f to
148 // (byte) 128 is correct
149 auto result =
150 input[0].Cast<int>() + input[1].Cast<int>() - Math::MakeVec<int>(128, 128, 128);
151 result.r() = MathUtil::Clamp<int>(result.r(), 0, 255);
152 result.g() = MathUtil::Clamp<int>(result.g(), 0, 255);
153 result.b() = MathUtil::Clamp<int>(result.b(), 0, 255);
154 return result.Cast<u8>();
155 }
156
157 case Operation::Lerp:
158 return ((input[0] * input[2] +
159 input[1] * (Math::MakeVec<u8>(255, 255, 255) - input[2]).Cast<u8>()) /
160 255)
161 .Cast<u8>();
162
163 case Operation::Subtract: {
164 auto result = input[0].Cast<int>() - input[1].Cast<int>();
165 result.r() = std::max(0, result.r());
166 result.g() = std::max(0, result.g());
167 result.b() = std::max(0, result.b());
168 return result.Cast<u8>();
169 }
170
171 case Operation::MultiplyThenAdd: {
172 auto result = (input[0] * input[1] + 255 * input[2].Cast<int>()) / 255;
173 result.r() = std::min(255, result.r());
174 result.g() = std::min(255, result.g());
175 result.b() = std::min(255, result.b());
176 return result.Cast<u8>();
177 }
178
179 case Operation::AddThenMultiply: {
180 auto result = input[0] + input[1];
181 result.r() = std::min(255, result.r());
182 result.g() = std::min(255, result.g());
183 result.b() = std::min(255, result.b());
184 result = (result * input[2].Cast<int>()) / 255;
185 return result.Cast<u8>();
186 }
187 case Operation::Dot3_RGB:
188 case Operation::Dot3_RGBA: {
189        // Not fully accurate. The worst case seems to yield a +/-3 error. Some HW results
190        // indicate that the per-component computation can't have a precision higher than 1/256,
191        // yet dot3_rgb((0x80,g0,b0), (0x7F,g1,b1)) and dot3_rgb((0x80,g0,b0), (0x80,g1,b1)) give
192        // different results.
193 int result = ((input[0].r() * 2 - 255) * (input[1].r() * 2 - 255) + 128) / 256 +
194 ((input[0].g() * 2 - 255) * (input[1].g() * 2 - 255) + 128) / 256 +
195 ((input[0].b() * 2 - 255) * (input[1].b() * 2 - 255) + 128) / 256;
196 result = std::max(0, std::min(255, result));
197 return {(u8)result, (u8)result, (u8)result};
198 }
199 default:
200 LOG_ERROR(HW_GPU, "Unknown color combiner operation %d", (int)op);
201 UNIMPLEMENTED();
202 return {0, 0, 0};
203 }
204};
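
The Dot3 arithmetic above maps each 0-255 channel to roughly [-1, 1], takes a per-channel product with about 1/256 precision, sums, and clamps. A small standalone check of that formula, with input values chosen arbitrarily:

#include <algorithm>
#include <cstdio>

static int Dot3Channel(int a, int b) {
    return ((a * 2 - 255) * (b * 2 - 255) + 128) / 256;
}

int main() {
    // Two "colors" whose mapped vectors point in roughly the same direction.
    const int sum = Dot3Channel(255, 255) + Dot3Channel(128, 160) + Dot3Channel(64, 96);
    const int result = std::max(0, std::min(255, sum));
    std::printf("%d\n", result); // 255 (285 before clamping)
}
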
205
206u8 AlphaCombine(TevStageConfig::Operation op, const std::array<u8, 3>& input) {
207 switch (op) {
208 using Operation = TevStageConfig::Operation;
209 case Operation::Replace:
210 return input[0];
211
212 case Operation::Modulate:
213 return input[0] * input[1] / 255;
214
215 case Operation::Add:
216 return std::min(255, input[0] + input[1]);
217
218 case Operation::AddSigned: {
219 // TODO(bunnei): Verify that the color conversion from (float) 0.5f to (byte) 128 is correct
220 auto result = static_cast<int>(input[0]) + static_cast<int>(input[1]) - 128;
221 return static_cast<u8>(MathUtil::Clamp<int>(result, 0, 255));
222 }
223
224 case Operation::Lerp:
225 return (input[0] * input[2] + input[1] * (255 - input[2])) / 255;
226
227 case Operation::Subtract:
228 return std::max(0, (int)input[0] - (int)input[1]);
229
230 case Operation::MultiplyThenAdd:
231 return std::min(255, (input[0] * input[1] + 255 * input[2]) / 255);
232
233 case Operation::AddThenMultiply:
234 return (std::min(255, (input[0] + input[1])) * input[2]) / 255;
235
236 default:
237 LOG_ERROR(HW_GPU, "Unknown alpha combiner operation %d", (int)op);
238 UNIMPLEMENTED();
239 return 0;
240 }
241};
242
243} // namespace Rasterizer
244} // namespace Pica
diff --git a/src/video_core/swrasterizer/texturing.h b/src/video_core/swrasterizer/texturing.h
deleted file mode 100644
index 24f74a5a3..000000000
--- a/src/video_core/swrasterizer/texturing.h
+++ /dev/null
@@ -1,28 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8#include "common/vector_math.h"
9#include "video_core/regs_texturing.h"
10
11namespace Pica {
12namespace Rasterizer {
13
14int GetWrappedTexCoord(TexturingRegs::TextureConfig::WrapMode mode, int val, unsigned size);
15
16Math::Vec3<u8> GetColorModifier(TexturingRegs::TevStageConfig::ColorModifier factor,
17 const Math::Vec4<u8>& values);
18
19u8 GetAlphaModifier(TexturingRegs::TevStageConfig::AlphaModifier factor,
20 const Math::Vec4<u8>& values);
21
22Math::Vec3<u8> ColorCombine(TexturingRegs::TevStageConfig::Operation op,
23 const Math::Vec3<u8> input[3]);
24
25u8 AlphaCombine(TexturingRegs::TevStageConfig::Operation op, const std::array<u8, 3>& input);
26
27} // namespace Rasterizer
28} // namespace Pica
diff --git a/src/video_core/texture/etc1.cpp b/src/video_core/texture/etc1.cpp
deleted file mode 100644
index 43f7f56db..000000000
--- a/src/video_core/texture/etc1.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <array>
6#include "common/bit_field.h"
7#include "common/color.h"
8#include "common/common_types.h"
9#include "common/math_util.h"
10#include "common/vector_math.h"
11#include "video_core/texture/etc1.h"
12
13namespace Pica {
14namespace Texture {
15
16namespace {
17
18constexpr std::array<std::array<u8, 2>, 8> etc1_modifier_table = {{
19 {2, 8}, {5, 17}, {9, 29}, {13, 42}, {18, 60}, {24, 80}, {33, 106}, {47, 183},
20}};
21
22union ETC1Tile {
23 u64 raw;
24
25 // Each of these two is a collection of 16 bits (one per lookup value)
26 BitField<0, 16, u64> table_subindexes;
27 BitField<16, 16, u64> negation_flags;
28
29 unsigned GetTableSubIndex(unsigned index) const {
30 return (table_subindexes >> index) & 1;
31 }
32
33 bool GetNegationFlag(unsigned index) const {
34 return ((negation_flags >> index) & 1) == 1;
35 }
36
37 BitField<32, 1, u64> flip;
38 BitField<33, 1, u64> differential_mode;
39
40 BitField<34, 3, u64> table_index_2;
41 BitField<37, 3, u64> table_index_1;
42
43 union {
44 // delta value + base value
45 BitField<40, 3, s64> db;
46 BitField<43, 5, u64> b;
47
48 BitField<48, 3, s64> dg;
49 BitField<51, 5, u64> g;
50
51 BitField<56, 3, s64> dr;
52 BitField<59, 5, u64> r;
53 } differential;
54
55 union {
56 BitField<40, 4, u64> b2;
57 BitField<44, 4, u64> b1;
58
59 BitField<48, 4, u64> g2;
60 BitField<52, 4, u64> g1;
61
62 BitField<56, 4, u64> r2;
63 BitField<60, 4, u64> r1;
64 } separate;
65
66 const Math::Vec3<u8> GetRGB(unsigned int x, unsigned int y) const {
67 int texel = 4 * x + y;
68
69 if (flip)
70 std::swap(x, y);
71
72 // Lookup base value
73 Math::Vec3<int> ret;
74 if (differential_mode) {
75 ret.r() = static_cast<int>(differential.r);
76 ret.g() = static_cast<int>(differential.g);
77 ret.b() = static_cast<int>(differential.b);
78 if (x >= 2) {
79 ret.r() += static_cast<int>(differential.dr);
80 ret.g() += static_cast<int>(differential.dg);
81 ret.b() += static_cast<int>(differential.db);
82 }
83 ret.r() = Color::Convert5To8(ret.r());
84 ret.g() = Color::Convert5To8(ret.g());
85 ret.b() = Color::Convert5To8(ret.b());
86 } else {
87 if (x < 2) {
88 ret.r() = Color::Convert4To8(static_cast<u8>(separate.r1));
89 ret.g() = Color::Convert4To8(static_cast<u8>(separate.g1));
90 ret.b() = Color::Convert4To8(static_cast<u8>(separate.b1));
91 } else {
92 ret.r() = Color::Convert4To8(static_cast<u8>(separate.r2));
93 ret.g() = Color::Convert4To8(static_cast<u8>(separate.g2));
94 ret.b() = Color::Convert4To8(static_cast<u8>(separate.b2));
95 }
96 }
97
98 // Add modifier
99 unsigned table_index =
100 static_cast<int>((x < 2) ? table_index_1.Value() : table_index_2.Value());
101
102 int modifier = etc1_modifier_table[table_index][GetTableSubIndex(texel)];
103 if (GetNegationFlag(texel))
104 modifier *= -1;
105
106 ret.r() = MathUtil::Clamp(ret.r() + modifier, 0, 255);
107 ret.g() = MathUtil::Clamp(ret.g() + modifier, 0, 255);
108 ret.b() = MathUtil::Clamp(ret.b() + modifier, 0, 255);
109
110 return ret.Cast<u8>();
111 }
112};
113
114} // anonymous namespace
115
116Math::Vec3<u8> SampleETC1Subtile(u64 value, unsigned int x, unsigned int y) {
117 ETC1Tile tile{value};
118 return tile.GetRGB(x, y);
119}
120
121} // namespace Texture
122} // namespace Pica
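
For reference, the GetRGB routine removed above decodes a base color for each half of the 4x4 subtile (either the 5-bit base plus 3-bit delta "differential" encoding or two independent 4-bit colors) and then adds a signed per-texel modifier taken from etc1_modifier_table. A standalone sketch of just the modifier step, using plain arrays instead of the deleted BitField/Math::Vec types:

#include <algorithm>
#include <array>
#include <cstdint>

// The per-mode modifier pairs from the deleted etc1_modifier_table.
constexpr std::array<std::array<int, 2>, 8> kEtc1Modifiers = {{
    {2, 8}, {5, 17}, {9, 29}, {13, 42}, {18, 60}, {24, 80}, {33, 106}, {47, 183},
}};

// table_index is the 3-bit table selector for the texel's half-block,
// sub_index picks the small or large modifier, and negate flips its sign.
std::array<std::uint8_t, 3> ApplyEtc1Modifier(std::array<int, 3> base, unsigned table_index,
                                              unsigned sub_index, bool negate) {
    int modifier = kEtc1Modifiers[table_index & 7][sub_index & 1];
    if (negate)
        modifier = -modifier;
    std::array<std::uint8_t, 3> out{};
    for (int c = 0; c < 3; ++c)
        out[c] = static_cast<std::uint8_t>(std::clamp(base[c] + modifier, 0, 255));
    return out;
}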
diff --git a/src/video_core/texture/etc1.h b/src/video_core/texture/etc1.h
deleted file mode 100644
index e188b19df..000000000
--- a/src/video_core/texture/etc1.h
+++ /dev/null
@@ -1,16 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8#include "common/vector_math.h"
9
10namespace Pica {
11namespace Texture {
12
13Math::Vec3<u8> SampleETC1Subtile(u64 value, unsigned int x, unsigned int y);
14
15} // namespace Texture
16} // namespace Pica
diff --git a/src/video_core/texture/texture_decode.cpp b/src/video_core/texture/texture_decode.cpp
deleted file mode 100644
index 0818d652c..000000000
--- a/src/video_core/texture/texture_decode.cpp
+++ /dev/null
@@ -1,227 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/color.h"
7#include "common/logging/log.h"
8#include "common/math_util.h"
9#include "common/swap.h"
10#include "common/vector_math.h"
11#include "video_core/regs_texturing.h"
12#include "video_core/texture/etc1.h"
13#include "video_core/texture/texture_decode.h"
14#include "video_core/utils.h"
15
16using TextureFormat = Pica::TexturingRegs::TextureFormat;
17
18namespace Pica {
19namespace Texture {
20
21constexpr size_t TILE_SIZE = 8 * 8;
22constexpr size_t ETC1_SUBTILES = 2 * 2;
23
24size_t CalculateTileSize(TextureFormat format) {
25 switch (format) {
26 case TextureFormat::RGBA8:
27 return 4 * TILE_SIZE;
28
29 case TextureFormat::RGB8:
30 return 3 * TILE_SIZE;
31
32 case TextureFormat::RGB5A1:
33 case TextureFormat::RGB565:
34 case TextureFormat::RGBA4:
35 case TextureFormat::IA8:
36 case TextureFormat::RG8:
37 return 2 * TILE_SIZE;
38
39 case TextureFormat::I8:
40 case TextureFormat::A8:
41 case TextureFormat::IA4:
42 return 1 * TILE_SIZE;
43
44 case TextureFormat::I4:
45 case TextureFormat::A4:
46 return TILE_SIZE / 2;
47
48 case TextureFormat::ETC1:
49 return ETC1_SUBTILES * 8;
50
51 case TextureFormat::ETC1A4:
52 return ETC1_SUBTILES * 16;
53
54 default: // placeholder for yet unknown formats
55 UNIMPLEMENTED();
56 return 0;
57 }
58}
59
60Math::Vec4<u8> LookupTexture(const u8* source, unsigned int x, unsigned int y,
61 const TextureInfo& info, bool disable_alpha) {
62 // Coordinate in tiles
63 const unsigned int coarse_x = x / 8;
64 const unsigned int coarse_y = y / 8;
65
66 // Coordinate inside the tile
67 const unsigned int fine_x = x % 8;
68 const unsigned int fine_y = y % 8;
69
70 const u8* line = source + coarse_y * info.stride;
71 const u8* tile = line + coarse_x * CalculateTileSize(info.format);
72 return LookupTexelInTile(tile, fine_x, fine_y, info, disable_alpha);
73}
74
75Math::Vec4<u8> LookupTexelInTile(const u8* source, unsigned int x, unsigned int y,
76 const TextureInfo& info, bool disable_alpha) {
77 DEBUG_ASSERT(x < 8);
78 DEBUG_ASSERT(y < 8);
79
80 using VideoCore::MortonInterleave;
81
82 switch (info.format) {
83 case TextureFormat::RGBA8: {
84 auto res = Color::DecodeRGBA8(source + MortonInterleave(x, y) * 4);
85 return {res.r(), res.g(), res.b(), static_cast<u8>(disable_alpha ? 255 : res.a())};
86 }
87
88 case TextureFormat::RGB8: {
89 auto res = Color::DecodeRGB8(source + MortonInterleave(x, y) * 3);
90 return {res.r(), res.g(), res.b(), 255};
91 }
92
93 case TextureFormat::RGB5A1: {
94 auto res = Color::DecodeRGB5A1(source + MortonInterleave(x, y) * 2);
95 return {res.r(), res.g(), res.b(), static_cast<u8>(disable_alpha ? 255 : res.a())};
96 }
97
98 case TextureFormat::RGB565: {
99 auto res = Color::DecodeRGB565(source + MortonInterleave(x, y) * 2);
100 return {res.r(), res.g(), res.b(), 255};
101 }
102
103 case TextureFormat::RGBA4: {
104 auto res = Color::DecodeRGBA4(source + MortonInterleave(x, y) * 2);
105 return {res.r(), res.g(), res.b(), static_cast<u8>(disable_alpha ? 255 : res.a())};
106 }
107
108 case TextureFormat::IA8: {
109 const u8* source_ptr = source + MortonInterleave(x, y) * 2;
110
111 if (disable_alpha) {
112 // Show intensity as red, alpha as green
113 return {source_ptr[1], source_ptr[0], 0, 255};
114 } else {
115 return {source_ptr[1], source_ptr[1], source_ptr[1], source_ptr[0]};
116 }
117 }
118
119 case TextureFormat::RG8: {
120 auto res = Color::DecodeRG8(source + MortonInterleave(x, y) * 2);
121 return {res.r(), res.g(), 0, 255};
122 }
123
124 case TextureFormat::I8: {
125 const u8* source_ptr = source + MortonInterleave(x, y);
126 return {*source_ptr, *source_ptr, *source_ptr, 255};
127 }
128
129 case TextureFormat::A8: {
130 const u8* source_ptr = source + MortonInterleave(x, y);
131
132 if (disable_alpha) {
133 return {*source_ptr, *source_ptr, *source_ptr, 255};
134 } else {
135 return {0, 0, 0, *source_ptr};
136 }
137 }
138
139 case TextureFormat::IA4: {
140 const u8* source_ptr = source + MortonInterleave(x, y);
141
142 u8 i = Color::Convert4To8(((*source_ptr) & 0xF0) >> 4);
143 u8 a = Color::Convert4To8((*source_ptr) & 0xF);
144
145 if (disable_alpha) {
146 // Show intensity as red, alpha as green
147 return {i, a, 0, 255};
148 } else {
149 return {i, i, i, a};
150 }
151 }
152
153 case TextureFormat::I4: {
154 u32 morton_offset = MortonInterleave(x, y);
155 const u8* source_ptr = source + morton_offset / 2;
156
157 u8 i = (morton_offset % 2) ? ((*source_ptr & 0xF0) >> 4) : (*source_ptr & 0xF);
158 i = Color::Convert4To8(i);
159
160 return {i, i, i, 255};
161 }
162
163 case TextureFormat::A4: {
164 u32 morton_offset = MortonInterleave(x, y);
165 const u8* source_ptr = source + morton_offset / 2;
166
167 u8 a = (morton_offset % 2) ? ((*source_ptr & 0xF0) >> 4) : (*source_ptr & 0xF);
168 a = Color::Convert4To8(a);
169
170 if (disable_alpha) {
171 return {a, a, a, 255};
172 } else {
173 return {0, 0, 0, a};
174 }
175 }
176
177 case TextureFormat::ETC1:
178 case TextureFormat::ETC1A4: {
179 bool has_alpha = (info.format == TextureFormat::ETC1A4);
180 size_t subtile_size = has_alpha ? 16 : 8;
181
182 // ETC1 further subdivides each 8x8 tile into four 4x4 subtiles
183 constexpr unsigned int subtile_width = 4;
184 constexpr unsigned int subtile_height = 4;
185
186 unsigned int subtile_index = (x / subtile_width) + 2 * (y / subtile_height);
187 x %= subtile_width;
188 y %= subtile_height;
189
190 const u8* subtile_ptr = source + subtile_index * subtile_size;
191
192 u8 alpha = 255;
193 if (has_alpha) {
194 u64_le packed_alpha;
195 memcpy(&packed_alpha, subtile_ptr, sizeof(u64));
196 subtile_ptr += sizeof(u64);
197
198 alpha = Color::Convert4To8((packed_alpha >> (4 * (x * subtile_width + y))) & 0xF);
199 }
200
201 u64_le subtile_data;
202 memcpy(&subtile_data, subtile_ptr, sizeof(u64));
203
204 return Math::MakeVec(SampleETC1Subtile(subtile_data, x, y),
205 disable_alpha ? (u8)255 : alpha);
206 }
207
208 default:
209 LOG_ERROR(HW_GPU, "Unknown texture format: %x", (u32)info.format);
210 DEBUG_ASSERT(false);
211 return {};
212 }
213}
214
215TextureInfo TextureInfo::FromPicaRegister(const TexturingRegs::TextureConfig& config,
216 const TexturingRegs::TextureFormat& format) {
217 TextureInfo info;
218 info.physical_address = config.GetPhysicalAddress();
219 info.width = config.width;
220 info.height = config.height;
221 info.format = format;
222 info.SetDefaultStride();
223 return info;
224}
225
226} // namespace Texture
227} // namespace Pica
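
All of the per-texel cases above compute the position inside an 8x8 tile with VideoCore::MortonInterleave(x, y), which is declared in video_core/utils.h and not part of this diff. Assuming the usual Z-order (Morton) layout the PICA uses within a tile, with x bits in the even positions and y bits in the odd ones, an equivalent standalone computation would be:

#include <cstdint>

// Interleaves the low three bits of x and y into a texel index in 0-63.
std::uint32_t MortonInterleave8x8(std::uint32_t x, std::uint32_t y) {
    std::uint32_t offset = 0;
    for (int bit = 0; bit < 3; ++bit) {
        offset |= ((x >> bit) & 1u) << (2 * bit);      // x -> even bit positions
        offset |= ((y >> bit) & 1u) << (2 * bit + 1);  // y -> odd bit positions
    }
    return offset;
}

The format-specific cases then multiply this index by the bytes per texel (4 for RGBA8, 3 for RGB8, 2 for the 16-bit formats, and so on) to reach the texel's data.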
diff --git a/src/video_core/texture/texture_decode.h b/src/video_core/texture/texture_decode.h
deleted file mode 100644
index 8507cfeb8..000000000
--- a/src/video_core/texture/texture_decode.h
+++ /dev/null
@@ -1,60 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8#include "common/vector_math.h"
9#include "video_core/regs_texturing.h"
10
11namespace Pica {
12namespace Texture {
13
14/// Returns the byte size of an 8x8 tile of the specified texture format.
15size_t CalculateTileSize(TexturingRegs::TextureFormat format);
16
17struct TextureInfo {
18 PAddr physical_address;
19 unsigned int width;
20 unsigned int height;
21 ptrdiff_t stride;
22 TexturingRegs::TextureFormat format;
23
24 static TextureInfo FromPicaRegister(const TexturingRegs::TextureConfig& config,
25 const TexturingRegs::TextureFormat& format);
26
27 /// Calculates stride from format and width, assuming that the entire texture is contiguous.
28 void SetDefaultStride() {
29 stride = CalculateTileSize(format) * (width / 8);
30 }
31};
32
33/**
34 * Looks up the texel located at the given coordinates and returns an RGBA vector of its color.
35 * @param source Source pointer to read data from
36 * @param x,y Texture coordinates to read from
37 * @param info TextureInfo object describing the texture setup
38 * @param disable_alpha This is used for debug widgets which use this method to display textures
39 * without providing a good way to visualize alpha by themselves. If true, this will return 255 for
40 * the alpha component, and either drop the information entirely or store it in an "unused" color
41 * channel.
42 * @todo Eventually we should get rid of the disable_alpha parameter.
43 */
44Math::Vec4<u8> LookupTexture(const u8* source, unsigned int x, unsigned int y,
45 const TextureInfo& info, bool disable_alpha = false);
46
47/**
48 * Looks up a texel from a single 8x8 texture tile.
49 *
50 * @param source Pointer to the beginning of the tile.
51 * @param x, y In-tile coordinates to read from. Must be < 8.
52 * @param info TextureInfo describing the texture format.
53 * @param disable_alpha Used for debugging. Sets the result alpha to 255 and either discards the
54 * real alpha or inserts it in an otherwise unused channel.
55 */
56Math::Vec4<u8> LookupTexelInTile(const u8* source, unsigned int x, unsigned int y,
57 const TextureInfo& info, bool disable_alpha);
58
59} // namespace Texture
60} // namespace Pica
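
The addressing split between the two lookups declared above reduces to simple integer arithmetic: SetDefaultStride assumes one contiguous row of 8x8 tiles, and LookupTexture first finds the tile containing (x, y) before delegating to LookupTexelInTile. A standalone sketch of that arithmetic (plain integers, without the deleted TextureInfo struct):

#include <cstddef>

// Default stride: tiles per row times the byte size of one tile.
std::size_t DefaultStride(unsigned width, std::size_t tile_size_bytes) {
    return (width / 8) * tile_size_bytes;
}

// Byte offset of the 8x8 tile containing texel (x, y); the position inside
// the tile is resolved separately (LookupTexelInTile in the deleted code).
std::size_t TileByteOffset(unsigned x, unsigned y, std::size_t stride,
                           std::size_t tile_size_bytes) {
    return (y / 8) * stride + (x / 8) * tile_size_bytes;
}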
diff --git a/src/video_core/vertex_loader.cpp b/src/video_core/vertex_loader.cpp
deleted file mode 100644
index 37c5224a9..000000000
--- a/src/video_core/vertex_loader.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
1#include <memory>
2#include <boost/range/algorithm/fill.hpp>
3#include "common/alignment.h"
4#include "common/assert.h"
5#include "common/bit_field.h"
6#include "common/common_types.h"
7#include "common/logging/log.h"
8#include "common/vector_math.h"
9#include "core/memory.h"
10#include "video_core/debug_utils/debug_utils.h"
11#include "video_core/pica_state.h"
12#include "video_core/pica_types.h"
13#include "video_core/regs_pipeline.h"
14#include "video_core/shader/shader.h"
15#include "video_core/vertex_loader.h"
16
17namespace Pica {
18
19void VertexLoader::Setup(const PipelineRegs& regs) {
20    ASSERT_MSG(!is_setup, "VertexLoader is not intended to be set up more than once.");
21
22 const auto& attribute_config = regs.vertex_attributes;
23 num_total_attributes = attribute_config.GetNumTotalAttributes();
24
25 boost::fill(vertex_attribute_sources, 0xdeadbeef);
26
27 for (int i = 0; i < 16; i++) {
28 vertex_attribute_is_default[i] = attribute_config.IsDefaultAttribute(i);
29 }
30
31 // Setup attribute data from loaders
32 for (int loader = 0; loader < 12; ++loader) {
33 const auto& loader_config = attribute_config.attribute_loaders[loader];
34
35 u32 offset = 0;
36
37 // TODO: What happens if a loader overwrites a previous one's data?
38 for (unsigned component = 0; component < loader_config.component_count; ++component) {
39 if (component >= 12) {
40 LOG_ERROR(HW_GPU,
41 "Overflow in the vertex attribute loader %u trying to load component %u",
42 loader, component);
43 continue;
44 }
45
46 u32 attribute_index = loader_config.GetComponent(component);
47 if (attribute_index < 12) {
48 offset = Common::AlignUp(offset,
49 attribute_config.GetElementSizeInBytes(attribute_index));
50 vertex_attribute_sources[attribute_index] = loader_config.data_offset + offset;
51 vertex_attribute_strides[attribute_index] =
52 static_cast<u32>(loader_config.byte_count);
53 vertex_attribute_formats[attribute_index] =
54 attribute_config.GetFormat(attribute_index);
55 vertex_attribute_elements[attribute_index] =
56 attribute_config.GetNumElements(attribute_index);
57 offset += attribute_config.GetStride(attribute_index);
58 } else if (attribute_index < 16) {
59                // Attribute ids 12, 13, 14 and 15 signify 4, 8, 12 and 16-byte padding,
60 // respectively
61 offset = Common::AlignUp(offset, 4);
62 offset += (attribute_index - 11) * 4;
63 } else {
64 UNREACHABLE(); // This is truly unreachable due to the number of bits for each
65 // component
66 }
67 }
68 }
69
70 is_setup = true;
71}
72
73void VertexLoader::LoadVertex(u32 base_address, int index, int vertex,
74 Shader::AttributeBuffer& input,
75 DebugUtils::MemoryAccessTracker& memory_accesses) {
76    ASSERT_MSG(is_setup, "A VertexLoader needs to be set up before loading vertices.");
77
78 for (int i = 0; i < num_total_attributes; ++i) {
79 if (vertex_attribute_elements[i] != 0) {
80 // Load per-vertex data from the loader arrays
81 u32 source_addr =
82 base_address + vertex_attribute_sources[i] + vertex_attribute_strides[i] * vertex;
83
84 if (g_debug_context && Pica::g_debug_context->recorder) {
85 memory_accesses.AddAccess(
86 source_addr,
87 vertex_attribute_elements[i] *
88 ((vertex_attribute_formats[i] == PipelineRegs::VertexAttributeFormat::FLOAT)
89 ? 4
90 : (vertex_attribute_formats[i] ==
91 PipelineRegs::VertexAttributeFormat::SHORT)
92 ? 2
93 : 1));
94 }
95
96 switch (vertex_attribute_formats[i]) {
97 case PipelineRegs::VertexAttributeFormat::BYTE: {
98 const s8* srcdata =
99 reinterpret_cast<const s8*>(Memory::GetPhysicalPointer(source_addr));
100 for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
101 input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
102 }
103 break;
104 }
105 case PipelineRegs::VertexAttributeFormat::UBYTE: {
106 const u8* srcdata =
107 reinterpret_cast<const u8*>(Memory::GetPhysicalPointer(source_addr));
108 for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
109 input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
110 }
111 break;
112 }
113 case PipelineRegs::VertexAttributeFormat::SHORT: {
114 const s16* srcdata =
115 reinterpret_cast<const s16*>(Memory::GetPhysicalPointer(source_addr));
116 for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
117 input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
118 }
119 break;
120 }
121 case PipelineRegs::VertexAttributeFormat::FLOAT: {
122 const float* srcdata =
123 reinterpret_cast<const float*>(Memory::GetPhysicalPointer(source_addr));
124 for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
125 input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
126 }
127 break;
128 }
129 }
130
131 // Default attribute values set if array elements have < 4 components. This
132 // is *not* carried over from the default attribute settings even if they're
133 // enabled for this attribute.
134 for (unsigned int comp = vertex_attribute_elements[i]; comp < 4; ++comp) {
135 input.attr[i][comp] =
136 comp == 3 ? float24::FromFloat32(1.0f) : float24::FromFloat32(0.0f);
137 }
138
139 LOG_TRACE(HW_GPU, "Loaded %d components of attribute %x for vertex %x (index %x) from "
140 "0x%08x + 0x%08x + 0x%04x: %f %f %f %f",
141 vertex_attribute_elements[i], i, vertex, index, base_address,
142 vertex_attribute_sources[i], vertex_attribute_strides[i] * vertex,
143 input.attr[i][0].ToFloat32(), input.attr[i][1].ToFloat32(),
144 input.attr[i][2].ToFloat32(), input.attr[i][3].ToFloat32());
145 } else if (vertex_attribute_is_default[i]) {
146 // Load the default attribute if we're configured to do so
147 input.attr[i] = g_state.input_default_attributes.attr[i];
148 LOG_TRACE(HW_GPU,
149 "Loaded default attribute %x for vertex %x (index %x): (%f, %f, %f, %f)", i,
150 vertex, index, input.attr[i][0].ToFloat32(), input.attr[i][1].ToFloat32(),
151 input.attr[i][2].ToFloat32(), input.attr[i][3].ToFloat32());
152 } else {
153 // TODO(yuriks): In this case, no data gets loaded and the vertex
154 // remains with the last value it had. This isn't currently maintained
155 // as global state, however, and so won't work in Citra yet.
156 }
157 }
158}
159
160} // namespace Pica
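
One detail of the deleted Setup routine that is easy to miss: component entries with attribute ids 12-15 are not attributes but encoded padding of 4, 8, 12 and 16 bytes, while real attributes (ids 0-11) first align the running offset to their element size and then advance it by the attribute's stride. A standalone sketch of that bookkeeping (parameter names here are illustrative, not the deleted register accessors):

#include <cstdint>

std::uint32_t AlignUp(std::uint32_t value, std::uint32_t alignment) {
    return (value + alignment - 1) / alignment * alignment;
}

// Advances the running byte offset for one loader component.
std::uint32_t AdvanceLoaderOffset(std::uint32_t offset, unsigned attribute_index,
                                  std::uint32_t element_size, std::uint32_t attribute_stride) {
    if (attribute_index < 12) {
        offset = AlignUp(offset, element_size); // align, record the source offset here
        return offset + attribute_stride;       // then skip over the attribute
    }
    // Ids 12, 13, 14, 15 encode 4-, 8-, 12- and 16-byte padding respectively.
    offset = AlignUp(offset, 4);
    return offset + (attribute_index - 11) * 4;
}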
diff --git a/src/video_core/vertex_loader.h b/src/video_core/vertex_loader.h
deleted file mode 100644
index 02db10aee..000000000
--- a/src/video_core/vertex_loader.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#pragma once
2
3#include <array>
4#include "common/common_types.h"
5#include "video_core/regs_pipeline.h"
6
7namespace Pica {
8
9namespace DebugUtils {
10class MemoryAccessTracker;
11}
12
13namespace Shader {
14struct AttributeBuffer;
15}
16
17class VertexLoader {
18public:
19 VertexLoader() = default;
20 explicit VertexLoader(const PipelineRegs& regs) {
21 Setup(regs);
22 }
23
24 void Setup(const PipelineRegs& regs);
25 void LoadVertex(u32 base_address, int index, int vertex, Shader::AttributeBuffer& input,
26 DebugUtils::MemoryAccessTracker& memory_accesses);
27
28 int GetNumTotalAttributes() const {
29 return num_total_attributes;
30 }
31
32private:
33 std::array<u32, 16> vertex_attribute_sources;
34 std::array<u32, 16> vertex_attribute_strides{};
35 std::array<PipelineRegs::VertexAttributeFormat, 16> vertex_attribute_formats;
36 std::array<u32, 16> vertex_attribute_elements{};
37 std::array<bool, 16> vertex_attribute_is_default;
38 int num_total_attributes = 0;
39 bool is_setup = false;
40};
41
42} // namespace Pica
diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp
index 7186a7652..110f38a3e 100644
--- a/src/video_core/video_core.cpp
+++ b/src/video_core/video_core.cpp
@@ -4,7 +4,6 @@
4 4
5#include <memory> 5#include <memory>
6#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "video_core/pica.h"
8#include "video_core/renderer_base.h" 7#include "video_core/renderer_base.h"
9#include "video_core/renderer_opengl/renderer_opengl.h" 8#include "video_core/renderer_opengl/renderer_opengl.h"
10#include "video_core/video_core.h" 9#include "video_core/video_core.h"
@@ -24,8 +23,6 @@ std::atomic<bool> g_toggle_framelimit_enabled;
24 23
25/// Initialize the video core 24/// Initialize the video core
26bool Init(EmuWindow* emu_window) { 25bool Init(EmuWindow* emu_window) {
27 Pica::Init();
28
29 g_emu_window = emu_window; 26 g_emu_window = emu_window;
30 g_renderer = std::make_unique<RendererOpenGL>(); 27 g_renderer = std::make_unique<RendererOpenGL>();
31 g_renderer->SetWindow(g_emu_window); 28 g_renderer->SetWindow(g_emu_window);
@@ -40,8 +37,6 @@ bool Init(EmuWindow* emu_window) {
40 37
41/// Shutdown the video core 38/// Shutdown the video core
42void Shutdown() { 39void Shutdown() {
43 Pica::Shutdown();
44
45 g_renderer.reset(); 40 g_renderer.reset();
46 41
47 LOG_DEBUG(Render, "shutdown OK"); 42 LOG_DEBUG(Render, "shutdown OK");