-rw-r--r--  src/core/core.cpp | 10
-rw-r--r--  src/core/core.h | 4
-rw-r--r--  src/video_core/CMakeLists.txt | 6
-rw-r--r--  src/video_core/debug_utils/debug_utils.cpp | 49
-rw-r--r--  src/video_core/debug_utils/debug_utils.h | 157
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 31
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 16
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 90
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_manager.h | 4
-rw-r--r--  src/video_core/renderer_opengl/maxwell_to_gl.h | 22
-rw-r--r--  src/video_core/renderer_vulkan/vk_image.cpp | 106
-rw-r--r--  src/video_core/renderer_vulkan/vk_image.h | 84
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 127
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 83
-rw-r--r--  src/video_core/shader/decode/register_set_predicate.cpp | 60
-rw-r--r--  src/video_core/shader/decode/texture.cpp | 108
-rw-r--r--  src/video_core/shader/node.h | 1
-rw-r--r--  src/video_core/shader/shader_ir.h | 5
-rw-r--r--  src/yuzu/CMakeLists.txt | 5
-rw-r--r--  src/yuzu/debugger/graphics/graphics_breakpoint_observer.cpp | 27
-rw-r--r--  src/yuzu/debugger/graphics/graphics_breakpoint_observer.h | 33
-rw-r--r--  src/yuzu/debugger/graphics/graphics_breakpoints.cpp | 221
-rw-r--r--  src/yuzu/debugger/graphics/graphics_breakpoints.h | 45
-rw-r--r--  src/yuzu/debugger/graphics/graphics_breakpoints_p.h | 37
-rw-r--r--  src/yuzu/main.cpp | 11
-rw-r--r--  src/yuzu/main.h | 8
27 files changed, 618 insertions, 735 deletions
diff --git a/src/core/core.cpp b/src/core/core.cpp
index c45fb960c..d697b80ef 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -46,7 +46,6 @@
46#include "core/settings.h" 46#include "core/settings.h"
47#include "core/telemetry_session.h" 47#include "core/telemetry_session.h"
48#include "core/tools/freezer.h" 48#include "core/tools/freezer.h"
49#include "video_core/debug_utils/debug_utils.h"
50#include "video_core/renderer_base.h" 49#include "video_core/renderer_base.h"
51#include "video_core/video_core.h" 50#include "video_core/video_core.h"
52 51
@@ -341,7 +340,6 @@ struct System::Impl {
341 std::unique_ptr<Loader::AppLoader> app_loader; 340 std::unique_ptr<Loader::AppLoader> app_loader;
342 std::unique_ptr<VideoCore::RendererBase> renderer; 341 std::unique_ptr<VideoCore::RendererBase> renderer;
343 std::unique_ptr<Tegra::GPU> gpu_core; 342 std::unique_ptr<Tegra::GPU> gpu_core;
344 std::shared_ptr<Tegra::DebugContext> debug_context;
345 std::unique_ptr<Hardware::InterruptManager> interrupt_manager; 343 std::unique_ptr<Hardware::InterruptManager> interrupt_manager;
346 Memory::Memory memory; 344 Memory::Memory memory;
347 CpuCoreManager cpu_core_manager; 345 CpuCoreManager cpu_core_manager;
@@ -580,14 +578,6 @@ Loader::AppLoader& System::GetAppLoader() const {
580 return *impl->app_loader; 578 return *impl->app_loader;
581} 579}
582 580
583void System::SetGPUDebugContext(std::shared_ptr<Tegra::DebugContext> context) {
584 impl->debug_context = std::move(context);
585}
586
587Tegra::DebugContext* System::GetGPUDebugContext() const {
588 return impl->debug_context.get();
589}
590
591void System::SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs) { 581void System::SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs) {
592 impl->virtual_filesystem = std::move(vfs); 582 impl->virtual_filesystem = std::move(vfs);
593} 583}
diff --git a/src/core/core.h b/src/core/core.h
index 91184e433..e240c5c58 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -307,10 +307,6 @@ public:
307 Service::SM::ServiceManager& ServiceManager(); 307 Service::SM::ServiceManager& ServiceManager();
308 const Service::SM::ServiceManager& ServiceManager() const; 308 const Service::SM::ServiceManager& ServiceManager() const;
309 309
310 void SetGPUDebugContext(std::shared_ptr<Tegra::DebugContext> context);
311
312 Tegra::DebugContext* GetGPUDebugContext() const;
313
314 void SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs); 310 void SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs);
315 311
316 std::shared_ptr<FileSys::VfsFilesystem> GetFilesystem() const; 312 std::shared_ptr<FileSys::VfsFilesystem> GetFilesystem() const;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index e615b238e..65d7b9f93 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -4,8 +4,6 @@ add_library(video_core STATIC
4 buffer_cache/map_interval.h 4 buffer_cache/map_interval.h
5 dma_pusher.cpp 5 dma_pusher.cpp
6 dma_pusher.h 6 dma_pusher.h
7 debug_utils/debug_utils.cpp
8 debug_utils/debug_utils.h
9 engines/const_buffer_engine_interface.h 7 engines/const_buffer_engine_interface.h
10 engines/const_buffer_info.h 8 engines/const_buffer_info.h
11 engines/engine_upload.cpp 9 engines/engine_upload.cpp
@@ -159,6 +157,8 @@ if (ENABLE_VULKAN)
159 renderer_vulkan/vk_buffer_cache.h 157 renderer_vulkan/vk_buffer_cache.h
160 renderer_vulkan/vk_device.cpp 158 renderer_vulkan/vk_device.cpp
161 renderer_vulkan/vk_device.h 159 renderer_vulkan/vk_device.h
160 renderer_vulkan/vk_image.cpp
161 renderer_vulkan/vk_image.h
162 renderer_vulkan/vk_memory_manager.cpp 162 renderer_vulkan/vk_memory_manager.cpp
163 renderer_vulkan/vk_memory_manager.h 163 renderer_vulkan/vk_memory_manager.h
164 renderer_vulkan/vk_resource_manager.cpp 164 renderer_vulkan/vk_resource_manager.cpp
@@ -169,6 +169,8 @@ if (ENABLE_VULKAN)
169 renderer_vulkan/vk_scheduler.h 169 renderer_vulkan/vk_scheduler.h
170 renderer_vulkan/vk_shader_decompiler.cpp 170 renderer_vulkan/vk_shader_decompiler.cpp
171 renderer_vulkan/vk_shader_decompiler.h 171 renderer_vulkan/vk_shader_decompiler.h
172 renderer_vulkan/vk_staging_buffer_pool.cpp
173 renderer_vulkan/vk_staging_buffer_pool.h
172 renderer_vulkan/vk_stream_buffer.cpp 174 renderer_vulkan/vk_stream_buffer.cpp
173 renderer_vulkan/vk_stream_buffer.h 175 renderer_vulkan/vk_stream_buffer.h
174 renderer_vulkan/vk_swapchain.cpp 176 renderer_vulkan/vk_swapchain.cpp
diff --git a/src/video_core/debug_utils/debug_utils.cpp b/src/video_core/debug_utils/debug_utils.cpp
deleted file mode 100644
index f0ef67535..000000000
--- a/src/video_core/debug_utils/debug_utils.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2
3// Refer to the license.txt file included.
4
5#include <mutex>
6
7#include "video_core/debug_utils/debug_utils.h"
8
9namespace Tegra {
10
11void DebugContext::DoOnEvent(Event event, void* data) {
12 {
13 std::unique_lock lock{breakpoint_mutex};
14
15 // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will
16 // show on debug widgets
17
18 // TODO: Should stop the CPU thread here once we multithread emulation.
19
20 active_breakpoint = event;
21 at_breakpoint = true;
22
23 // Tell all observers that we hit a breakpoint
24 for (auto& breakpoint_observer : breakpoint_observers) {
25 breakpoint_observer->OnMaxwellBreakPointHit(event, data);
26 }
27
28 // Wait until another thread tells us to Resume()
29 resume_from_breakpoint.wait(lock, [&] { return !at_breakpoint; });
30 }
31}
32
33void DebugContext::Resume() {
34 {
35 std::lock_guard lock{breakpoint_mutex};
36
37 // Tell all observers that we are about to resume
38 for (auto& breakpoint_observer : breakpoint_observers) {
39 breakpoint_observer->OnMaxwellResume();
40 }
41
42 // Resume the waiting thread (i.e. OnEvent())
43 at_breakpoint = false;
44 }
45
46 resume_from_breakpoint.notify_one();
47}
48
49} // namespace Tegra
diff --git a/src/video_core/debug_utils/debug_utils.h b/src/video_core/debug_utils/debug_utils.h
deleted file mode 100644
index ac3a2eb01..000000000
--- a/src/video_core/debug_utils/debug_utils.h
+++ /dev/null
@@ -1,157 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <condition_variable>
9#include <list>
10#include <memory>
11#include <mutex>
12
13namespace Tegra {
14
15class DebugContext {
16public:
17 enum class Event {
18 FirstEvent = 0,
19
20 MaxwellCommandLoaded = FirstEvent,
21 MaxwellCommandProcessed,
22 IncomingPrimitiveBatch,
23 FinishedPrimitiveBatch,
24
25 NumEvents
26 };
27
28 /**
29 * Inherit from this class to be notified of events registered to some debug context.
30 * Most importantly this is used for our debugger GUI.
31 *
32 * To implement event handling, override the OnMaxwellBreakPointHit and OnMaxwellResume methods.
33 * @warning All BreakPointObservers need to be on the same thread to guarantee thread-safe state
34 * access
35 * @todo Evaluate an alternative interface, in which there is only one managing observer and
36 * multiple child observers running (by design) on the same thread.
37 */
38 class BreakPointObserver {
39 public:
40 /// Constructs the object such that it observes events of the given DebugContext.
41 explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context)
42 : context_weak(debug_context) {
43 std::unique_lock lock{debug_context->breakpoint_mutex};
44 debug_context->breakpoint_observers.push_back(this);
45 }
46
47 virtual ~BreakPointObserver() {
48 auto context = context_weak.lock();
49 if (context) {
50 {
51 std::unique_lock lock{context->breakpoint_mutex};
52 context->breakpoint_observers.remove(this);
53 }
54
55 // If we are the last observer to be destroyed, tell the debugger context that
56 // it is free to continue. In particular, this is required for a proper yuzu
57 // shutdown, when the emulation thread is waiting at a breakpoint.
58 if (context->breakpoint_observers.empty())
59 context->Resume();
60 }
61 }
62
63 /**
64 * Action to perform when a breakpoint was reached.
65 * @param event Type of event which triggered the breakpoint
66 * @param data Optional data pointer (if unused, this is a nullptr)
67 * @note This function will perform nothing unless it is overridden in the child class.
68 */
69 virtual void OnMaxwellBreakPointHit(Event event, void* data) {}
70
71 /**
72 * Action to perform when emulation is resumed from a breakpoint.
73 * @note This function will perform nothing unless it is overridden in the child class.
74 */
75 virtual void OnMaxwellResume() {}
76
77 protected:
78 /**
79 * Weak context pointer. This need not be valid, so when requesting a shared_ptr via
80 * context_weak.lock(), always compare the result against nullptr.
81 */
82 std::weak_ptr<DebugContext> context_weak;
83 };
84
85 /**
86 * Simple structure defining a breakpoint state
87 */
88 struct BreakPoint {
89 bool enabled = false;
90 };
91
92 /**
93 * Static constructor used to create a shared_ptr of a DebugContext.
94 */
95 static std::shared_ptr<DebugContext> Construct() {
96 return std::shared_ptr<DebugContext>(new DebugContext);
97 }
98
99 /**
100 * Used by the emulation core when a given event has happened. If a breakpoint has been set
101 * for this event, OnEvent calls the event handlers of the registered breakpoint observers.
102 * The current thread then is halted until Resume() is called from another thread (or until
103 * emulation is stopped).
104 * @param event Event which has happened
105 * @param data Optional data pointer (pass nullptr if unused). Needs to remain valid until
106 * Resume() is called.
107 */
108 void OnEvent(Event event, void* data) {
109 // This check is left in the header to allow the compiler to inline it.
110 if (!breakpoints[(int)event].enabled)
111 return;
112 // For the rest of event handling, call a separate function.
113 DoOnEvent(event, data);
114 }
115
116 void DoOnEvent(Event event, void* data);
117
118 /**
119 * Resume from the current breakpoint.
120 * @warning Calling this from the same thread that OnEvent was called in will cause a deadlock.
121 * Calling from any other thread is safe.
122 */
123 void Resume();
124
125 /**
126 * Delete all set breakpoints and resume emulation.
127 */
128 void ClearBreakpoints() {
129 for (auto& bp : breakpoints) {
130 bp.enabled = false;
131 }
132 Resume();
133 }
134
135 // TODO: Evaluate if access to these members should be hidden behind a public interface.
136 std::array<BreakPoint, static_cast<int>(Event::NumEvents)> breakpoints;
137 Event active_breakpoint{};
138 bool at_breakpoint = false;
139
140private:
141 /**
142 * Private default constructor to make sure people always construct this through Construct()
143 * instead.
144 */
145 DebugContext() = default;
146
147 /// Mutex protecting current breakpoint state and the observer list.
148 std::mutex breakpoint_mutex;
149
150 /// Used by OnEvent to wait for resumption.
151 std::condition_variable resume_from_breakpoint;
152
153 /// List of registered observers
154 std::list<BreakPointObserver*> breakpoint_observers;
155};
156
157} // namespace Tegra
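
The two deleted files above implemented the graphics breakpoint machinery: the emulation thread called OnEvent(), took breakpoint_mutex, notified every registered BreakPointObserver, and then blocked on a condition variable until the GUI called Resume(). The deletion goes together with the removal of the graphics_breakpoints* widgets under src/yuzu later in this diff. A minimal, self-contained sketch of that wait/resume handshake follows; the class and names are illustrative, not yuzu API, and it only assumes standard C++17.

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Minimal wait/resume handshake in the style of the removed DebugContext.
class MiniBreakpoint {
public:
    void OnEvent() {
        std::unique_lock lock{mutex};
        at_breakpoint = true;
        std::cout << "emu: halted at breakpoint\n";
        // Block until Resume() clears the flag from another thread.
        resume.wait(lock, [this] { return !at_breakpoint; });
        std::cout << "emu: resumed\n";
    }

    void Resume() {
        {
            std::lock_guard lock{mutex};
            at_breakpoint = false;
        }
        resume.notify_one();
    }

private:
    std::mutex mutex;
    std::condition_variable resume;
    bool at_breakpoint = false;
};

int main() {
    MiniBreakpoint bp;
    std::thread emu([&] { bp.OnEvent(); });
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    bp.Resume(); // in yuzu this was triggered from a debugger widget
    emu.join();
}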
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index e1cb8b0b0..1d1f780e7 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -7,7 +7,6 @@
7#include "common/assert.h" 7#include "common/assert.h"
8#include "core/core.h" 8#include "core/core.h"
9#include "core/core_timing.h" 9#include "core/core_timing.h"
10#include "video_core/debug_utils/debug_utils.h"
11#include "video_core/engines/maxwell_3d.h" 10#include "video_core/engines/maxwell_3d.h"
12#include "video_core/engines/shader_type.h" 11#include "video_core/engines/shader_type.h"
13#include "video_core/memory_manager.h" 12#include "video_core/memory_manager.h"
@@ -273,8 +272,6 @@ void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u3
273} 272}
274 273
275void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { 274void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
276 auto debug_context = system.GetGPUDebugContext();
277
278 const u32 method = method_call.method; 275 const u32 method = method_call.method;
279 276
280 if (method == cb_data_state.current) { 277 if (method == cb_data_state.current) {
@@ -315,10 +312,6 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
315 ASSERT_MSG(method < Regs::NUM_REGS, 312 ASSERT_MSG(method < Regs::NUM_REGS,
316 "Invalid Maxwell3D register, increase the size of the Regs structure"); 313 "Invalid Maxwell3D register, increase the size of the Regs structure");
317 314
318 if (debug_context) {
319 debug_context->OnEvent(Tegra::DebugContext::Event::MaxwellCommandLoaded, nullptr);
320 }
321
322 if (regs.reg_array[method] != method_call.argument) { 315 if (regs.reg_array[method] != method_call.argument) {
323 regs.reg_array[method] = method_call.argument; 316 regs.reg_array[method] = method_call.argument;
324 const std::size_t dirty_reg = dirty_pointers[method]; 317 const std::size_t dirty_reg = dirty_pointers[method];
@@ -424,10 +417,6 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
424 default: 417 default:
425 break; 418 break;
426 } 419 }
427
428 if (debug_context) {
429 debug_context->OnEvent(Tegra::DebugContext::Event::MaxwellCommandProcessed, nullptr);
430 }
431} 420}
432 421
433void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) { 422void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
@@ -485,12 +474,6 @@ void Maxwell3D::FlushMMEInlineDraw() {
485 ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); 474 ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
486 ASSERT(mme_draw.instance_count == mme_draw.gl_end_count); 475 ASSERT(mme_draw.instance_count == mme_draw.gl_end_count);
487 476
488 auto debug_context = system.GetGPUDebugContext();
489
490 if (debug_context) {
491 debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr);
492 }
493
494 // Both instance configuration registers can not be set at the same time. 477 // Both instance configuration registers can not be set at the same time.
495 ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont, 478 ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont,
496 "Illegal combination of instancing parameters"); 479 "Illegal combination of instancing parameters");
@@ -500,10 +483,6 @@ void Maxwell3D::FlushMMEInlineDraw() {
500 rasterizer.DrawMultiBatch(is_indexed); 483 rasterizer.DrawMultiBatch(is_indexed);
501 } 484 }
502 485
503 if (debug_context) {
504 debug_context->OnEvent(Tegra::DebugContext::Event::FinishedPrimitiveBatch, nullptr);
505 }
506
507 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if 486 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
508 // the game is trying to draw indexed or direct mode. This needs to be verified on HW still - 487 // the game is trying to draw indexed or direct mode. This needs to be verified on HW still -
509 // it's possible that it is incorrect and that there is some other register used to specify the 488 // it's possible that it is incorrect and that there is some other register used to specify the
@@ -650,12 +629,6 @@ void Maxwell3D::DrawArrays() {
650 regs.vertex_buffer.count); 629 regs.vertex_buffer.count);
651 ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); 630 ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
652 631
653 auto debug_context = system.GetGPUDebugContext();
654
655 if (debug_context) {
656 debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr);
657 }
658
659 // Both instance configuration registers can not be set at the same time. 632 // Both instance configuration registers can not be set at the same time.
660 ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont, 633 ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont,
661 "Illegal combination of instancing parameters"); 634 "Illegal combination of instancing parameters");
@@ -673,10 +646,6 @@ void Maxwell3D::DrawArrays() {
673 rasterizer.DrawBatch(is_indexed); 646 rasterizer.DrawBatch(is_indexed);
674 } 647 }
675 648
676 if (debug_context) {
677 debug_context->OnEvent(Tegra::DebugContext::Event::FinishedPrimitiveBatch, nullptr);
678 }
679
680 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if 649 // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if
681 // the game is trying to draw indexed or direct mode. This needs to be verified on HW still - 650 // the game is trying to draw indexed or direct mode. This needs to be verified on HW still -
682 // it's possible that it is incorrect and that there is some other register used to specify the 651 // it's possible that it is incorrect and that there is some other register used to specify the
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index dfb12cd2d..57b57c647 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -1051,7 +1051,7 @@ union Instruction {
1051 BitField<40, 1, R2pMode> mode; 1051 BitField<40, 1, R2pMode> mode;
1052 BitField<41, 2, u64> byte; 1052 BitField<41, 2, u64> byte;
1053 BitField<20, 7, u64> immediate_mask; 1053 BitField<20, 7, u64> immediate_mask;
1054 } r2p; 1054 } p2r_r2p;
1055 1055
1056 union { 1056 union {
1057 BitField<39, 3, u64> pred39; 1057 BitField<39, 3, u64> pred39;
@@ -1239,7 +1239,7 @@ union Instruction {
1239 BitField<35, 1, u64> ndv_flag; 1239 BitField<35, 1, u64> ndv_flag;
1240 BitField<49, 1, u64> nodep_flag; 1240 BitField<49, 1, u64> nodep_flag;
1241 BitField<50, 1, u64> dc_flag; 1241 BitField<50, 1, u64> dc_flag;
1242 BitField<54, 2, u64> info; 1242 BitField<54, 2, u64> offset_mode;
1243 BitField<56, 2, u64> component; 1243 BitField<56, 2, u64> component;
1244 1244
1245 bool UsesMiscMode(TextureMiscMode mode) const { 1245 bool UsesMiscMode(TextureMiscMode mode) const {
@@ -1251,9 +1251,9 @@ union Instruction {
1251 case TextureMiscMode::DC: 1251 case TextureMiscMode::DC:
1252 return dc_flag != 0; 1252 return dc_flag != 0;
1253 case TextureMiscMode::AOFFI: 1253 case TextureMiscMode::AOFFI:
1254 return info == 1; 1254 return offset_mode == 1;
1255 case TextureMiscMode::PTP: 1255 case TextureMiscMode::PTP:
1256 return info == 2; 1256 return offset_mode == 2;
1257 default: 1257 default:
1258 break; 1258 break;
1259 } 1259 }
@@ -1265,7 +1265,7 @@ union Instruction {
1265 BitField<35, 1, u64> ndv_flag; 1265 BitField<35, 1, u64> ndv_flag;
1266 BitField<49, 1, u64> nodep_flag; 1266 BitField<49, 1, u64> nodep_flag;
1267 BitField<50, 1, u64> dc_flag; 1267 BitField<50, 1, u64> dc_flag;
1268 BitField<33, 2, u64> info; 1268 BitField<33, 2, u64> offset_mode;
1269 BitField<37, 2, u64> component; 1269 BitField<37, 2, u64> component;
1270 1270
1271 bool UsesMiscMode(TextureMiscMode mode) const { 1271 bool UsesMiscMode(TextureMiscMode mode) const {
@@ -1277,9 +1277,9 @@ union Instruction {
1277 case TextureMiscMode::DC: 1277 case TextureMiscMode::DC:
1278 return dc_flag != 0; 1278 return dc_flag != 0;
1279 case TextureMiscMode::AOFFI: 1279 case TextureMiscMode::AOFFI:
1280 return info == 1; 1280 return offset_mode == 1;
1281 case TextureMiscMode::PTP: 1281 case TextureMiscMode::PTP:
1282 return info == 2; 1282 return offset_mode == 2;
1283 default: 1283 default:
1284 break; 1284 break;
1285 } 1285 }
@@ -1801,6 +1801,7 @@ public:
1801 PSET, 1801 PSET,
1802 CSETP, 1802 CSETP,
1803 R2P_IMM, 1803 R2P_IMM,
1804 P2R_IMM,
1804 XMAD_IMM, 1805 XMAD_IMM,
1805 XMAD_CR, 1806 XMAD_CR,
1806 XMAD_RC, 1807 XMAD_RC,
@@ -2106,6 +2107,7 @@ private:
2106 INST("0101000010010---", Id::PSETP, Type::PredicateSetPredicate, "PSETP"), 2107 INST("0101000010010---", Id::PSETP, Type::PredicateSetPredicate, "PSETP"),
2107 INST("010100001010----", Id::CSETP, Type::PredicateSetPredicate, "CSETP"), 2108 INST("010100001010----", Id::CSETP, Type::PredicateSetPredicate, "CSETP"),
2108 INST("0011100-11110---", Id::R2P_IMM, Type::RegisterSetPredicate, "R2P_IMM"), 2109 INST("0011100-11110---", Id::R2P_IMM, Type::RegisterSetPredicate, "R2P_IMM"),
2110 INST("0011100-11101---", Id::P2R_IMM, Type::RegisterSetPredicate, "P2R_IMM"),
2109 INST("0011011-00------", Id::XMAD_IMM, Type::Xmad, "XMAD_IMM"), 2111 INST("0011011-00------", Id::XMAD_IMM, Type::Xmad, "XMAD_IMM"),
2110 INST("0100111---------", Id::XMAD_CR, Type::Xmad, "XMAD_CR"), 2112 INST("0100111---------", Id::XMAD_CR, Type::Xmad, "XMAD_CR"),
2111 INST("010100010-------", Id::XMAD_RC, Type::Xmad, "XMAD_RC"), 2113 INST("010100010-------", Id::XMAD_RC, Type::Xmad, "XMAD_RC"),
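
The shader_bytecode.h hunks rename the two-bit `info` field to `offset_mode` (bits 54-55 for TLD4, bits 33-34 for the bindless form, value 1 = AOFFI, value 2 = PTP) and add the P2R_IMM opcode alongside R2P_IMM. A standalone sketch of that style of bitfield decode, using plain shifts instead of the project's BitField template; the bit positions come from the diff, while the helper name and the sample encoding are illustrative only.

#include <cstdint>
#include <iostream>

// Extract `count` bits starting at bit `pos` from a 64-bit instruction word.
constexpr std::uint64_t Bits(std::uint64_t insn, unsigned pos, unsigned count) {
    return (insn >> pos) & ((std::uint64_t{1} << count) - 1);
}

enum class OffsetMode : std::uint64_t { None = 0, Aoffi = 1, Ptp = 2 };

int main() {
    // Hypothetical TLD4 word with offset_mode (bits 54-55) set to PTP.
    const std::uint64_t insn = std::uint64_t{2} << 54;
    const auto mode = static_cast<OffsetMode>(Bits(insn, 54, 2));
    std::cout << "AOFFI: " << (mode == OffsetMode::Aoffi) << '\n';
    std::cout << "PTP:   " << (mode == OffsetMode::Ptp) << '\n';
}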
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index dbb08dd80..672051102 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -271,6 +271,9 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
271 case Maxwell::ShaderProgram::Geometry: 271 case Maxwell::ShaderProgram::Geometry:
272 shader_program_manager->UseTrivialGeometryShader(); 272 shader_program_manager->UseTrivialGeometryShader();
273 break; 273 break;
274 case Maxwell::ShaderProgram::Fragment:
275 shader_program_manager->UseTrivialFragmentShader();
276 break;
274 default: 277 default:
275 break; 278 break;
276 } 279 }
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 0389c2143..a311dbcfe 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -48,10 +48,10 @@ class ExprDecompiler;
48 48
49enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; 49enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat };
50 50
51struct TextureAoffi {}; 51struct TextureOffset {};
52struct TextureDerivates {}; 52struct TextureDerivates {};
53using TextureArgument = std::pair<Type, Node>; 53using TextureArgument = std::pair<Type, Node>;
54using TextureIR = std::variant<TextureAoffi, TextureDerivates, TextureArgument>; 54using TextureIR = std::variant<TextureOffset, TextureDerivates, TextureArgument>;
55 55
56constexpr u32 MAX_CONSTBUFFER_ELEMENTS = 56constexpr u32 MAX_CONSTBUFFER_ELEMENTS =
57 static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float)); 57 static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float));
@@ -1077,7 +1077,7 @@ private:
1077 } 1077 }
1078 1078
1079 std::string GenerateTexture(Operation operation, const std::string& function_suffix, 1079 std::string GenerateTexture(Operation operation, const std::string& function_suffix,
1080 const std::vector<TextureIR>& extras, bool sepparate_dc = false) { 1080 const std::vector<TextureIR>& extras, bool separate_dc = false) {
1081 constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"}; 1081 constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"};
1082 1082
1083 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); 1083 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
@@ -1090,10 +1090,12 @@ private:
1090 std::string expr = "texture" + function_suffix; 1090 std::string expr = "texture" + function_suffix;
1091 if (!meta->aoffi.empty()) { 1091 if (!meta->aoffi.empty()) {
1092 expr += "Offset"; 1092 expr += "Offset";
1093 } else if (!meta->ptp.empty()) {
1094 expr += "Offsets";
1093 } 1095 }
1094 expr += '(' + GetSampler(meta->sampler) + ", "; 1096 expr += '(' + GetSampler(meta->sampler) + ", ";
1095 expr += coord_constructors.at(count + (has_array ? 1 : 0) + 1097 expr += coord_constructors.at(count + (has_array ? 1 : 0) +
1096 (has_shadow && !sepparate_dc ? 1 : 0) - 1); 1098 (has_shadow && !separate_dc ? 1 : 0) - 1);
1097 expr += '('; 1099 expr += '(';
1098 for (std::size_t i = 0; i < count; ++i) { 1100 for (std::size_t i = 0; i < count; ++i) {
1099 expr += Visit(operation[i]).AsFloat(); 1101 expr += Visit(operation[i]).AsFloat();
@@ -1106,7 +1108,7 @@ private:
1106 expr += ", float(" + Visit(meta->array).AsInt() + ')'; 1108 expr += ", float(" + Visit(meta->array).AsInt() + ')';
1107 } 1109 }
1108 if (has_shadow) { 1110 if (has_shadow) {
1109 if (sepparate_dc) { 1111 if (separate_dc) {
1110 expr += "), " + Visit(meta->depth_compare).AsFloat(); 1112 expr += "), " + Visit(meta->depth_compare).AsFloat();
1111 } else { 1113 } else {
1112 expr += ", " + Visit(meta->depth_compare).AsFloat() + ')'; 1114 expr += ", " + Visit(meta->depth_compare).AsFloat() + ')';
@@ -1118,8 +1120,12 @@ private:
1118 for (const auto& variant : extras) { 1120 for (const auto& variant : extras) {
1119 if (const auto argument = std::get_if<TextureArgument>(&variant)) { 1121 if (const auto argument = std::get_if<TextureArgument>(&variant)) {
1120 expr += GenerateTextureArgument(*argument); 1122 expr += GenerateTextureArgument(*argument);
1121 } else if (std::holds_alternative<TextureAoffi>(variant)) { 1123 } else if (std::holds_alternative<TextureOffset>(variant)) {
1122 expr += GenerateTextureAoffi(meta->aoffi); 1124 if (!meta->aoffi.empty()) {
1125 expr += GenerateTextureAoffi(meta->aoffi);
1126 } else if (!meta->ptp.empty()) {
1127 expr += GenerateTexturePtp(meta->ptp);
1128 }
1123 } else if (std::holds_alternative<TextureDerivates>(variant)) { 1129 } else if (std::holds_alternative<TextureDerivates>(variant)) {
1124 expr += GenerateTextureDerivates(meta->derivates); 1130 expr += GenerateTextureDerivates(meta->derivates);
1125 } else { 1131 } else {
@@ -1160,6 +1166,20 @@ private:
1160 return expr; 1166 return expr;
1161 } 1167 }
1162 1168
1169 std::string ReadTextureOffset(const Node& value) {
1170 if (const auto immediate = std::get_if<ImmediateNode>(&*value)) {
1171 // Inline the string as an immediate integer in GLSL (AOFFI arguments are required
1172 // to be constant by the standard).
1173 return std::to_string(static_cast<s32>(immediate->GetValue()));
1174 } else if (device.HasVariableAoffi()) {
1175 // Avoid using variable AOFFI on unsupported devices.
1176 return Visit(value).AsInt();
1177 } else {
1178 // Insert 0 on devices not supporting variable AOFFI.
1179 return "0";
1180 }
1181 }
1182
1163 std::string GenerateTextureAoffi(const std::vector<Node>& aoffi) { 1183 std::string GenerateTextureAoffi(const std::vector<Node>& aoffi) {
1164 if (aoffi.empty()) { 1184 if (aoffi.empty()) {
1165 return {}; 1185 return {};
@@ -1170,18 +1190,7 @@ private:
1170 expr += '('; 1190 expr += '(';
1171 1191
1172 for (std::size_t index = 0; index < aoffi.size(); ++index) { 1192 for (std::size_t index = 0; index < aoffi.size(); ++index) {
1173 const auto operand{aoffi.at(index)}; 1193 expr += ReadTextureOffset(aoffi.at(index));
1174 if (const auto immediate = std::get_if<ImmediateNode>(&*operand)) {
1175 // Inline the string as an immediate integer in GLSL (AOFFI arguments are required
1176 // to be constant by the standard).
1177 expr += std::to_string(static_cast<s32>(immediate->GetValue()));
1178 } else if (device.HasVariableAoffi()) {
1179 // Avoid using variable AOFFI on unsupported devices.
1180 expr += Visit(operand).AsInt();
1181 } else {
1182 // Insert 0 on devices not supporting variable AOFFI.
1183 expr += '0';
1184 }
1185 if (index + 1 < aoffi.size()) { 1194 if (index + 1 < aoffi.size()) {
1186 expr += ", "; 1195 expr += ", ";
1187 } 1196 }
@@ -1191,6 +1200,20 @@ private:
1191 return expr; 1200 return expr;
1192 } 1201 }
1193 1202
1203 std::string GenerateTexturePtp(const std::vector<Node>& ptp) {
1204 static constexpr std::size_t num_vectors = 4;
1205 ASSERT(ptp.size() == num_vectors * 2);
1206
1207 std::string expr = ", ivec2[](";
1208 for (std::size_t vector = 0; vector < num_vectors; ++vector) {
1209 const bool has_next = vector + 1 < num_vectors;
1210 expr += fmt::format("ivec2({}, {}){}", ReadTextureOffset(ptp.at(vector * 2)),
1211 ReadTextureOffset(ptp.at(vector * 2 + 1)), has_next ? ", " : "");
1212 }
1213 expr += ')';
1214 return expr;
1215 }
1216
1194 std::string GenerateTextureDerivates(const std::vector<Node>& derivates) { 1217 std::string GenerateTextureDerivates(const std::vector<Node>& derivates) {
1195 if (derivates.empty()) { 1218 if (derivates.empty()) {
1196 return {}; 1219 return {};
@@ -1689,7 +1712,7 @@ private:
1689 ASSERT(meta); 1712 ASSERT(meta);
1690 1713
1691 std::string expr = GenerateTexture( 1714 std::string expr = GenerateTexture(
1692 operation, "", {TextureAoffi{}, TextureArgument{Type::Float, meta->bias}}); 1715 operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}});
1693 if (meta->sampler.IsShadow()) { 1716 if (meta->sampler.IsShadow()) {
1694 expr = "vec4(" + expr + ')'; 1717 expr = "vec4(" + expr + ')';
1695 } 1718 }
@@ -1701,7 +1724,7 @@ private:
1701 ASSERT(meta); 1724 ASSERT(meta);
1702 1725
1703 std::string expr = GenerateTexture( 1726 std::string expr = GenerateTexture(
1704 operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureAoffi{}}); 1727 operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}});
1705 if (meta->sampler.IsShadow()) { 1728 if (meta->sampler.IsShadow()) {
1706 expr = "vec4(" + expr + ')'; 1729 expr = "vec4(" + expr + ')';
1707 } 1730 }
@@ -1709,21 +1732,19 @@ private:
1709 } 1732 }
1710 1733
1711 Expression TextureGather(Operation operation) { 1734 Expression TextureGather(Operation operation) {
1712 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); 1735 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1713 ASSERT(meta);
1714 1736
1715 const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int; 1737 const auto type = meta.sampler.IsShadow() ? Type::Float : Type::Int;
1716 if (meta->sampler.IsShadow()) { 1738 const bool separate_dc = meta.sampler.IsShadow();
1717 return {GenerateTexture(operation, "Gather", {TextureAoffi{}}, true) + 1739
1718 GetSwizzle(meta->element), 1740 std::vector<TextureIR> ir;
1719 Type::Float}; 1741 if (meta.sampler.IsShadow()) {
1742 ir = {TextureOffset{}};
1720 } else { 1743 } else {
1721 return {GenerateTexture(operation, "Gather", 1744 ir = {TextureOffset{}, TextureArgument{type, meta.component}};
1722 {TextureAoffi{}, TextureArgument{type, meta->component}},
1723 false) +
1724 GetSwizzle(meta->element),
1725 Type::Float};
1726 } 1745 }
1746 return {GenerateTexture(operation, "Gather", ir, separate_dc) + GetSwizzle(meta.element),
1747 Type::Float};
1727 } 1748 }
1728 1749
1729 Expression TextureQueryDimensions(Operation operation) { 1750 Expression TextureQueryDimensions(Operation operation) {
@@ -1794,7 +1815,8 @@ private:
1794 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); 1815 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
1795 ASSERT(meta); 1816 ASSERT(meta);
1796 1817
1797 std::string expr = GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureAoffi{}}); 1818 std::string expr =
1819 GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}});
1798 return {std::move(expr) + GetSwizzle(meta->element), Type::Float}; 1820 return {std::move(expr) + GetSwizzle(meta->element), Type::Float};
1799 } 1821 }
1800 1822
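
GenerateTexturePtp above turns the eight packed PTP offsets into the ivec2[4] argument that GLSL's textureGatherOffsets expects, reusing ReadTextureOffset for each component. A small sketch that assembles the same string shape from plain integers; MakePtpArgument and the literal offsets are illustrative, and only the output format mirrors the decompiler.

#include <array>
#include <iostream>
#include <string>

// Build the ", ivec2[](...)" suffix consumed by textureGatherOffsets,
// mirroring the shape produced by GenerateTexturePtp.
std::string MakePtpArgument(const std::array<int, 8>& offsets) {
    std::string expr = ", ivec2[](";
    for (std::size_t vector = 0; vector < 4; ++vector) {
        expr += "ivec2(" + std::to_string(offsets[vector * 2]) + ", " +
                std::to_string(offsets[vector * 2 + 1]) + ')';
        if (vector + 1 < 4) {
            expr += ", ";
        }
    }
    expr += ')';
    return expr;
}

int main() {
    // Four (x, y) texel offsets, one per gathered sample.
    std::cout << "textureGatherOffsets(sampler, coords"
              << MakePtpArgument({0, 0, 1, 0, 0, 1, 1, 1}) << ")\n";
}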
diff --git a/src/video_core/renderer_opengl/gl_shader_manager.h b/src/video_core/renderer_opengl/gl_shader_manager.h
index 3703e7018..478c165ce 100644
--- a/src/video_core/renderer_opengl/gl_shader_manager.h
+++ b/src/video_core/renderer_opengl/gl_shader_manager.h
@@ -50,6 +50,10 @@ public:
50 current_state.geometry_shader = 0; 50 current_state.geometry_shader = 0;
51 } 51 }
52 52
53 void UseTrivialFragmentShader() {
54 current_state.fragment_shader = 0;
55 }
56
53private: 57private:
54 struct PipelineState { 58 struct PipelineState {
55 bool operator==(const PipelineState& rhs) const { 59 bool operator==(const PipelineState& rhs) const {
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index 9ed738171..ea4f35663 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -120,6 +120,8 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) {
120 return GL_POINTS; 120 return GL_POINTS;
121 case Maxwell::PrimitiveTopology::Lines: 121 case Maxwell::PrimitiveTopology::Lines:
122 return GL_LINES; 122 return GL_LINES;
123 case Maxwell::PrimitiveTopology::LineLoop:
124 return GL_LINE_LOOP;
123 case Maxwell::PrimitiveTopology::LineStrip: 125 case Maxwell::PrimitiveTopology::LineStrip:
124 return GL_LINE_STRIP; 126 return GL_LINE_STRIP;
125 case Maxwell::PrimitiveTopology::Triangles: 127 case Maxwell::PrimitiveTopology::Triangles:
@@ -130,11 +132,23 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) {
130 return GL_TRIANGLE_FAN; 132 return GL_TRIANGLE_FAN;
131 case Maxwell::PrimitiveTopology::Quads: 133 case Maxwell::PrimitiveTopology::Quads:
132 return GL_QUADS; 134 return GL_QUADS;
133 default: 135 case Maxwell::PrimitiveTopology::QuadStrip:
134 LOG_CRITICAL(Render_OpenGL, "Unimplemented topology={}", static_cast<u32>(topology)); 136 return GL_QUAD_STRIP;
135 UNREACHABLE(); 137 case Maxwell::PrimitiveTopology::Polygon:
136 return {}; 138 return GL_POLYGON;
139 case Maxwell::PrimitiveTopology::LinesAdjacency:
140 return GL_LINES_ADJACENCY;
141 case Maxwell::PrimitiveTopology::LineStripAdjacency:
142 return GL_LINE_STRIP_ADJACENCY;
143 case Maxwell::PrimitiveTopology::TrianglesAdjacency:
144 return GL_TRIANGLES_ADJACENCY;
145 case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
146 return GL_TRIANGLE_STRIP_ADJACENCY;
147 case Maxwell::PrimitiveTopology::Patches:
148 return GL_PATCHES;
137 } 149 }
150 UNREACHABLE_MSG("Invalid topology={}", static_cast<int>(topology));
151 return GL_POINTS;
138} 152}
139 153
140inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, 154inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode,
diff --git a/src/video_core/renderer_vulkan/vk_image.cpp b/src/video_core/renderer_vulkan/vk_image.cpp
new file mode 100644
index 000000000..4bcbef959
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_image.cpp
@@ -0,0 +1,106 @@
1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <memory>
6#include <vector>
7
8#include "common/assert.h"
9#include "video_core/renderer_vulkan/declarations.h"
10#include "video_core/renderer_vulkan/vk_device.h"
11#include "video_core/renderer_vulkan/vk_image.h"
12#include "video_core/renderer_vulkan/vk_scheduler.h"
13
14namespace Vulkan {
15
16VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler,
17 const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask)
18 : device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask},
19 image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} {
20 UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0,
21 "Queue family tracking is not implemented");
22
23 const auto dev = device.GetLogical();
24 image = dev.createImageUnique(image_ci, nullptr, device.GetDispatchLoader());
25
26 const u32 num_ranges = image_num_layers * image_num_levels;
27 barriers.resize(num_ranges);
28 subrange_states.resize(num_ranges, {{}, image_ci.initialLayout});
29}
30
31VKImage::~VKImage() = default;
32
33void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
34 vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
35 vk::ImageLayout new_layout) {
36 if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) {
37 return;
38 }
39
40 std::size_t cursor = 0;
41 for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) {
42 for (u32 level_it = 0; level_it < num_levels; ++level_it, ++cursor) {
43 const u32 layer = base_layer + layer_it;
44 const u32 level = base_level + level_it;
45 auto& state = GetSubrangeState(layer, level);
46 barriers[cursor] = vk::ImageMemoryBarrier(
47 state.access, new_access, state.layout, new_layout, VK_QUEUE_FAMILY_IGNORED,
48 VK_QUEUE_FAMILY_IGNORED, *image, {aspect_mask, level, 1, layer, 1});
49 state.access = new_access;
50 state.layout = new_layout;
51 }
52 }
53
54 scheduler.RequestOutsideRenderPassOperationContext();
55
56 scheduler.Record([barriers = barriers, cursor](auto cmdbuf, auto& dld) {
57 // TODO(Rodrigo): Implement a way to use the latest stage across subresources.
58 constexpr auto stage_stub = vk::PipelineStageFlagBits::eAllCommands;
59 cmdbuf.pipelineBarrier(stage_stub, stage_stub, {}, 0, nullptr, 0, nullptr,
60 static_cast<u32>(cursor), barriers.data(), dld);
61 });
62}
63
64bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
65 vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept {
66 const bool is_full_range = base_layer == 0 && num_layers == image_num_layers &&
67 base_level == 0 && num_levels == image_num_levels;
68 if (!is_full_range) {
69 state_diverged = true;
70 }
71
72 if (!state_diverged) {
73 auto& state = GetSubrangeState(0, 0);
74 if (state.access != new_access || state.layout != new_layout) {
75 return true;
76 }
77 }
78
79 for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) {
80 for (u32 level_it = 0; level_it < num_levels; ++level_it) {
81 const u32 layer = base_layer + layer_it;
82 const u32 level = base_level + level_it;
83 auto& state = GetSubrangeState(layer, level);
84 if (state.access != new_access || state.layout != new_layout) {
85 return true;
86 }
87 }
88 }
89 return false;
90}
91
92void VKImage::CreatePresentView() {
93 // Image type has to be 2D to be presented.
94 const vk::ImageViewCreateInfo image_view_ci({}, *image, vk::ImageViewType::e2D, format, {},
95 {aspect_mask, 0, 1, 0, 1});
96 const auto dev = device.GetLogical();
97 const auto& dld = device.GetDispatchLoader();
98 present_view = dev.createImageViewUnique(image_view_ci, nullptr, dld);
99}
100
101VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
102 return subrange_states[static_cast<std::size_t>(layer * image_num_levels) +
103 static_cast<std::size_t>(level)];
104}
105
106} // namespace Vulkan \ No newline at end of file
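
VKImage keeps one SubrangeState (access flags and layout) per layer/level pair and flattens the pair with layer * image_num_levels + level, so Transition() can walk any rectangle of layers and levels and record a barrier only where the state actually changes. A sketch of just that indexing, with illustrative sizes and plain ints standing in for the Vulkan flag types:

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

struct SubrangeState {
    int access = 0;
    int layout = 0;
};

int main() {
    constexpr std::size_t num_layers = 6; // e.g. a cubemap
    constexpr std::size_t num_levels = 4; // mip levels
    std::vector<SubrangeState> states(num_layers * num_levels);

    // Flat index used by GetSubrangeState: layer-major, level-minor.
    const auto index = [&](std::size_t layer, std::size_t level) {
        return layer * num_levels + level;
    };

    // Touch a sub-rectangle (layers 2..3, levels 1..2), as Transition() does.
    for (std::size_t layer = 2; layer < 4; ++layer) {
        for (std::size_t level = 1; level < 3; ++level) {
            states[index(layer, level)].layout = 1;
        }
    }

    assert(states[index(2, 1)].layout == 1);
    assert(states[index(0, 0)].layout == 0);
    std::cout << "tracked " << states.size() << " subresources\n";
}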
diff --git a/src/video_core/renderer_vulkan/vk_image.h b/src/video_core/renderer_vulkan/vk_image.h
new file mode 100644
index 000000000..b78242512
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_image.h
@@ -0,0 +1,84 @@
1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <vector>
9
10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/declarations.h"
12
13namespace Vulkan {
14
15class VKDevice;
16class VKScheduler;
17
18class VKImage {
19public:
20 explicit VKImage(const VKDevice& device, VKScheduler& scheduler,
21 const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask);
22 ~VKImage();
23
24 /// Records in the passed command buffer an image transition and updates the state of the image.
25 void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
26 vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
27 vk::ImageLayout new_layout);
28
29 /// Returns a view compatible with presentation, the image has to be 2D.
30 vk::ImageView GetPresentView() {
31 if (!present_view) {
32 CreatePresentView();
33 }
34 return *present_view;
35 }
36
37 /// Returns the Vulkan image handler.
38 vk::Image GetHandle() const {
39 return *image;
40 }
41
42 /// Returns the Vulkan format for this image.
43 vk::Format GetFormat() const {
44 return format;
45 }
46
47 /// Returns the Vulkan aspect mask.
48 vk::ImageAspectFlags GetAspectMask() const {
49 return aspect_mask;
50 }
51
52private:
53 struct SubrangeState final {
54 vk::AccessFlags access{}; ///< Current access bits.
55 vk::ImageLayout layout = vk::ImageLayout::eUndefined; ///< Current image layout.
56 };
57
58 bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
59 vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept;
60
61 /// Creates a presentation view.
62 void CreatePresentView();
63
64 /// Returns the subrange state for a layer and layer.
65 SubrangeState& GetSubrangeState(u32 layer, u32 level) noexcept;
66
67 const VKDevice& device; ///< Device handler.
68 VKScheduler& scheduler; ///< Device scheduler.
69
70 const vk::Format format; ///< Vulkan format.
71 const vk::ImageAspectFlags aspect_mask; ///< Vulkan aspect mask.
72 const u32 image_num_layers; ///< Number of layers.
73 const u32 image_num_levels; ///< Number of mipmap levels.
74
75 UniqueImage image; ///< Image handle.
76 UniqueImageView present_view; ///< Image view compatible with presentation.
77
78 std::vector<vk::ImageMemoryBarrier> barriers; ///< Pool of barriers.
79 std::vector<SubrangeState> subrange_states; ///< Current subrange state.
80
81 bool state_diverged = false; ///< True when subresources mismatch in layout.
82};
83
84} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
new file mode 100644
index 000000000..171d78afc
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -0,0 +1,127 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <unordered_map>
7#include <utility>
8#include <vector>
9
10#include "common/bit_util.h"
11#include "common/common_types.h"
12#include "video_core/renderer_vulkan/vk_device.h"
13#include "video_core/renderer_vulkan/vk_resource_manager.h"
14#include "video_core/renderer_vulkan/vk_scheduler.h"
15#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
16
17namespace Vulkan {
18
19VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence,
20 u64 last_epoch)
21 : buffer{std::move(buffer)}, watch{fence}, last_epoch{last_epoch} {}
22
23VKStagingBufferPool::StagingBuffer::StagingBuffer(StagingBuffer&& rhs) noexcept {
24 buffer = std::move(rhs.buffer);
25 watch = std::move(rhs.watch);
26 last_epoch = rhs.last_epoch;
27}
28
29VKStagingBufferPool::StagingBuffer::~StagingBuffer() = default;
30
31VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator=(
32 StagingBuffer&& rhs) noexcept {
33 buffer = std::move(rhs.buffer);
34 watch = std::move(rhs.watch);
35 last_epoch = rhs.last_epoch;
36 return *this;
37}
38
39VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
40 VKScheduler& scheduler)
41 : device{device}, memory_manager{memory_manager}, scheduler{scheduler},
42 is_device_integrated{device.IsIntegrated()} {}
43
44VKStagingBufferPool::~VKStagingBufferPool() = default;
45
46VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) {
47 if (const auto buffer = TryGetReservedBuffer(size, host_visible)) {
48 return *buffer;
49 }
50 return CreateStagingBuffer(size, host_visible);
51}
52
53void VKStagingBufferPool::TickFrame() {
54 ++epoch;
55 current_delete_level = (current_delete_level + 1) % NumLevels;
56
57 ReleaseCache(true);
58 if (!is_device_integrated) {
59 ReleaseCache(false);
60 }
61}
62
63VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
64 for (auto& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) {
65 if (entry.watch.TryWatch(scheduler.GetFence())) {
66 entry.last_epoch = epoch;
67 return &*entry.buffer;
68 }
69 }
70 return nullptr;
71}
72
73VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
74 const auto usage =
75 vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
76 vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer;
77 const u32 log2 = Common::Log2Ceil64(size);
78 const vk::BufferCreateInfo buffer_ci({}, 1ULL << log2, usage, vk::SharingMode::eExclusive, 0,
79 nullptr);
80 const auto dev = device.GetLogical();
81 auto buffer = std::make_unique<VKBuffer>();
82 buffer->handle = dev.createBufferUnique(buffer_ci, nullptr, device.GetDispatchLoader());
83 buffer->commit = memory_manager.Commit(*buffer->handle, host_visible);
84
85 auto& entries = GetCache(host_visible)[log2].entries;
86 return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer;
87}
88
89VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
90 return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers;
91}
92
93void VKStagingBufferPool::ReleaseCache(bool host_visible) {
94 auto& cache = GetCache(host_visible);
95 const u64 size = ReleaseLevel(cache, current_delete_level);
96 if (size == 0) {
97 return;
98 }
99}
100
101u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) {
102 static constexpr u64 epochs_to_destroy = 180;
103 static constexpr std::size_t deletions_per_tick = 16;
104
105 auto& staging = cache[log2];
106 auto& entries = staging.entries;
107 const std::size_t old_size = entries.size();
108
109 const auto is_deleteable = [this](const auto& entry) {
110 return entry.last_epoch + epochs_to_destroy < epoch && !entry.watch.IsUsed();
111 };
112 const std::size_t begin_offset = staging.delete_index;
113 const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
114 const auto begin = std::begin(entries) + begin_offset;
115 const auto end = std::begin(entries) + end_offset;
116 entries.erase(std::remove_if(begin, end, is_deleteable), end);
117
118 const std::size_t new_size = entries.size();
119 staging.delete_index += deletions_per_tick;
120 if (staging.delete_index >= new_size) {
121 staging.delete_index = 0;
122 }
123
124 return (1ULL << log2) * (old_size - new_size);
125}
126
127} // namespace Vulkan
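
The staging pool buckets buffers by the ceiling of log2 of the requested size: GetUnusedBuffer reuses an entry from cache[Log2Ceil64(size)] when its fence watch is free, otherwise CreateStagingBuffer allocates a 1 << log2 byte buffer for that bucket. ReleaseLevel then visits at most 16 entries of one bucket per frame and drops those untouched for 180 epochs, keeping cleanup cost bounded. The sizing arithmetic on its own, with Log2Ceil64 reimplemented as a plain loop so the sketch is self-contained (the real code takes it from common/bit_util.h):

#include <cstdint>
#include <iostream>

// Smallest n such that (1 << n) >= size, for size >= 1.
std::uint32_t Log2Ceil64(std::uint64_t size) {
    std::uint32_t log2 = 0;
    while ((std::uint64_t{1} << log2) < size) {
        ++log2;
    }
    return log2;
}

int main() {
    for (std::uint64_t size : {1ULL, 4096ULL, 4097ULL, 65536ULL}) {
        const std::uint32_t log2 = Log2Ceil64(size);
        std::cout << "request " << size << " bytes -> bucket " << log2
                  << " (buffer of " << (std::uint64_t{1} << log2) << " bytes)\n";
    }
}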
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
new file mode 100644
index 000000000..02310375f
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -0,0 +1,83 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <climits>
8#include <unordered_map>
9#include <utility>
10#include <vector>
11
12#include "common/common_types.h"
13
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/vk_memory_manager.h"
16
17namespace Vulkan {
18
19class VKDevice;
20class VKFenceWatch;
21class VKScheduler;
22
23struct VKBuffer final {
24 UniqueBuffer handle;
25 VKMemoryCommit commit;
26};
27
28class VKStagingBufferPool final {
29public:
30 explicit VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
31 VKScheduler& scheduler);
32 ~VKStagingBufferPool();
33
34 VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible);
35
36 void TickFrame();
37
38private:
39 struct StagingBuffer final {
40 explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, u64 last_epoch);
41 StagingBuffer(StagingBuffer&& rhs) noexcept;
42 StagingBuffer(const StagingBuffer&) = delete;
43 ~StagingBuffer();
44
45 StagingBuffer& operator=(StagingBuffer&& rhs) noexcept;
46
47 std::unique_ptr<VKBuffer> buffer;
48 VKFenceWatch watch;
49 u64 last_epoch = 0;
50 };
51
52 struct StagingBuffers final {
53 std::vector<StagingBuffer> entries;
54 std::size_t delete_index = 0;
55 };
56
57 static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT;
58 using StagingBuffersCache = std::array<StagingBuffers, NumLevels>;
59
60 VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible);
61
62 VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible);
63
64 StagingBuffersCache& GetCache(bool host_visible);
65
66 void ReleaseCache(bool host_visible);
67
68 u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2);
69
70 const VKDevice& device;
71 VKMemoryManager& memory_manager;
72 VKScheduler& scheduler;
73 const bool is_device_integrated;
74
75 StagingBuffersCache host_staging_buffers;
76 StagingBuffersCache device_staging_buffers;
77
78 u64 epoch = 0;
79
80 std::size_t current_delete_level = 0;
81};
82
83} // namespace Vulkan
diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp
index e6c9d287e..8d54cce34 100644
--- a/src/video_core/shader/decode/register_set_predicate.cpp
+++ b/src/video_core/shader/decode/register_set_predicate.cpp
@@ -13,37 +13,65 @@ namespace VideoCommon::Shader {
13using Tegra::Shader::Instruction; 13using Tegra::Shader::Instruction;
14using Tegra::Shader::OpCode; 14using Tegra::Shader::OpCode;
15 15
16namespace {
17constexpr u64 NUM_PROGRAMMABLE_PREDICATES = 7;
18}
19
16u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) { 20u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
17 const Instruction instr = {program_code[pc]}; 21 const Instruction instr = {program_code[pc]};
18 const auto opcode = OpCode::Decode(instr); 22 const auto opcode = OpCode::Decode(instr);
19 23
20 UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr); 24 UNIMPLEMENTED_IF(instr.p2r_r2p.mode != Tegra::Shader::R2pMode::Pr);
21 25
22 const Node apply_mask = [&]() { 26 const Node apply_mask = [&] {
23 switch (opcode->get().GetId()) { 27 switch (opcode->get().GetId()) {
24 case OpCode::Id::R2P_IMM: 28 case OpCode::Id::R2P_IMM:
25 return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); 29 case OpCode::Id::P2R_IMM:
30 return Immediate(static_cast<u32>(instr.p2r_r2p.immediate_mask));
26 default: 31 default:
27 UNREACHABLE(); 32 UNREACHABLE();
28 return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); 33 return Immediate(0);
29 } 34 }
30 }(); 35 }();
31 const Node mask = GetRegister(instr.gpr8);
32 const auto offset = static_cast<u32>(instr.r2p.byte) * 8;
33 36
34 constexpr u32 programmable_preds = 7; 37 const auto offset = static_cast<u32>(instr.p2r_r2p.byte) * 8;
35 for (u64 pred = 0; pred < programmable_preds; ++pred) { 38
36 const auto shift = static_cast<u32>(pred); 39 switch (opcode->get().GetId()) {
40 case OpCode::Id::R2P_IMM: {
41 const Node mask = GetRegister(instr.gpr8);
37 42
38 const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); 43 for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) {
39 const Node condition = 44 const auto shift = static_cast<u32>(pred);
40 Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0));
41 45
42 const Node value_compare = BitfieldExtract(mask, offset + shift, 1); 46 const Node apply_compare = BitfieldExtract(apply_mask, shift, 1);
43 const Node value = Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0)); 47 const Node condition =
48 Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0));
44 49
45 const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); 50 const Node value_compare = BitfieldExtract(mask, offset + shift, 1);
46 bb.push_back(Conditional(condition, {code})); 51 const Node value =
52 Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0));
53
54 const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value);
55 bb.push_back(Conditional(condition, {code}));
56 }
57 break;
58 }
59 case OpCode::Id::P2R_IMM: {
60 Node value = Immediate(0);
61 for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) {
62 Node bit = Operation(OperationCode::Select, GetPredicate(pred), Immediate(1U << pred),
63 Immediate(0));
64 value = Operation(OperationCode::UBitwiseOr, std::move(value), std::move(bit));
65 }
66 value = Operation(OperationCode::UBitwiseAnd, std::move(value), apply_mask);
67 value = BitfieldInsert(GetRegister(instr.gpr8), std::move(value), offset, 8);
68
69 SetRegister(bb, instr.gpr0, std::move(value));
70 break;
71 }
72 default:
73 UNIMPLEMENTED_MSG("Unhandled P2R/R2R instruction: {}", opcode->get().GetName());
74 break;
47 } 75 }
48 76
49 return pc; 77 return pc;
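
The new P2R_IMM path is the inverse of R2P_IMM: each set programmable predicate contributes bit 1 << i, the result is ANDed with the immediate mask and inserted into the destination register at bit offset byte * 8. The same arithmetic on plain integers, outside the shader IR; the predicate values, mask and register contents below are made up for illustration.

#include <array>
#include <cstdint>
#include <iostream>

int main() {
    // Seven programmable predicates, P0..P6.
    const std::array<bool, 7> preds{true, false, true, true, false, false, true};
    const std::uint32_t immediate_mask = 0xFF; // from the instruction
    const std::uint32_t byte_offset = 1 * 8;   // instr.p2r_r2p.byte == 1
    std::uint32_t dest = 0xAABBCCDD;           // previous register contents

    // OR together one bit per set predicate, then apply the mask.
    std::uint32_t value = 0;
    for (std::size_t i = 0; i < preds.size(); ++i) {
        value |= preds[i] ? (1U << i) : 0U;
    }
    value &= immediate_mask;

    // Equivalent of BitfieldInsert(dest, value, byte_offset, 8).
    const std::uint32_t field_mask = 0xFFu << byte_offset;
    dest = (dest & ~field_mask) | ((value << byte_offset) & field_mask);

    std::cout << std::hex << "packed predicates: 0x" << value
              << ", register: 0x" << dest << '\n'; // 0x4d, 0xaabb4ddd
}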
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index dff01a541..4b14cdf58 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -89,59 +89,62 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
89 [[fallthrough]]; 89 [[fallthrough]];
90 } 90 }
91 case OpCode::Id::TLD4: { 91 case OpCode::Id::TLD4: {
92 ASSERT(instr.tld4.array == 0);
93 UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), 92 UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV),
94 "NDV is not implemented"); 93 "NDV is not implemented");
95 UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP),
96 "PTP is not implemented");
97
98 const auto texture_type = instr.tld4.texture_type.Value(); 94 const auto texture_type = instr.tld4.texture_type.Value();
99 const bool depth_compare = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::DC) 95 const bool depth_compare = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::DC)
100 : instr.tld4.UsesMiscMode(TextureMiscMode::DC); 96 : instr.tld4.UsesMiscMode(TextureMiscMode::DC);
101 const bool is_array = instr.tld4.array != 0; 97 const bool is_array = instr.tld4.array != 0;
102 const bool is_aoffi = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::AOFFI) 98 const bool is_aoffi = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::AOFFI)
103 : instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI); 99 : instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI);
104 WriteTexInstructionFloat( 100 const bool is_ptp = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::PTP)
105 bb, instr, 101 : instr.tld4.UsesMiscMode(TextureMiscMode::PTP);
106 GetTld4Code(instr, texture_type, depth_compare, is_array, is_aoffi, is_bindless)); 102 WriteTexInstructionFloat(bb, instr,
103 GetTld4Code(instr, texture_type, depth_compare, is_array, is_aoffi,
104 is_ptp, is_bindless));
107 break; 105 break;
108 } 106 }
109 case OpCode::Id::TLD4S: { 107 case OpCode::Id::TLD4S: {
110 const bool uses_aoffi = instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI); 108 constexpr std::size_t num_coords = 2;
111 UNIMPLEMENTED_IF_MSG(uses_aoffi, "AOFFI is not implemented"); 109 const bool is_aoffi = instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI);
112 110 const bool is_depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
113 const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
114 const Node op_a = GetRegister(instr.gpr8); 111 const Node op_a = GetRegister(instr.gpr8);
115 const Node op_b = GetRegister(instr.gpr20); 112 const Node op_b = GetRegister(instr.gpr20);
116 113
117 // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. 114 // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction.
118 std::vector<Node> coords; 115 std::vector<Node> coords;
119 Node dc_reg; 116 std::vector<Node> aoffi;
120 if (depth_compare) { 117 Node depth_compare;
118 if (is_depth_compare) {
121 // Note: TLD4S coordinate encoding works just like TEXS's 119 // Note: TLD4S coordinate encoding works just like TEXS's
122 const Node op_y = GetRegister(instr.gpr8.Value() + 1); 120 const Node op_y = GetRegister(instr.gpr8.Value() + 1);
123 coords.push_back(op_a); 121 coords.push_back(op_a);
124 coords.push_back(op_y); 122 coords.push_back(op_y);
125 dc_reg = uses_aoffi ? GetRegister(instr.gpr20.Value() + 1) : op_b; 123 if (is_aoffi) {
124 aoffi = GetAoffiCoordinates(op_b, num_coords, true);
125 depth_compare = GetRegister(instr.gpr20.Value() + 1);
126 } else {
127 depth_compare = op_b;
128 }
126 } else { 129 } else {
130 // There's no depth compare
127 coords.push_back(op_a); 131 coords.push_back(op_a);
128 if (uses_aoffi) { 132 if (is_aoffi) {
129 const Node op_y = GetRegister(instr.gpr8.Value() + 1); 133 coords.push_back(GetRegister(instr.gpr8.Value() + 1));
130 coords.push_back(op_y); 134 aoffi = GetAoffiCoordinates(op_b, num_coords, true);
131 } else { 135 } else {
132 coords.push_back(op_b); 136 coords.push_back(op_b);
133 } 137 }
134 dc_reg = {};
135 } 138 }
136 const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); 139 const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
137 140
138 const SamplerInfo info{TextureType::Texture2D, false, depth_compare}; 141 const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare};
139 const Sampler& sampler = *GetSampler(instr.sampler, info); 142 const Sampler& sampler = *GetSampler(instr.sampler, info);
140 143
141 Node4 values; 144 Node4 values;
142 for (u32 element = 0; element < values.size(); ++element) { 145 for (u32 element = 0; element < values.size(); ++element) {
143 auto coords_copy = coords; 146 auto coords_copy = coords;
144 MetaTexture meta{sampler, {}, dc_reg, {}, {}, {}, {}, component, element}; 147 MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {}, {}, {}, component, element};
145 values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); 148 values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
146 } 149 }
147 150
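
For reference, the TLD4S branch above chooses its source registers as follows: the first coordinate always comes from gpr8, the second from gpr8+1 or gpr20 depending on whether depth compare or AOFFI occupy gpr20, and when both AOFFI and DC are present the depth-compare reference moves to gpr20+1. A hedged sketch of that selection, with registers modeled as plain indices and the names Tld4sOperands/SelectTld4sRegisters invented for illustration:

#include <optional>
#include <vector>

// Hypothetical stand-in: registers are modeled as plain gpr indices.
struct Tld4sOperands {
    std::vector<int> coords;          // coordinate source registers (TEXS-style packing)
    std::optional<int> aoffi_reg;     // packed AOFFI offsets, if any
    std::optional<int> depth_compare; // depth-compare reference, if any
};

Tld4sOperands SelectTld4sRegisters(int gpr8, int gpr20, bool is_depth_compare, bool is_aoffi) {
    Tld4sOperands out;
    if (is_depth_compare) {
        out.coords = {gpr8, gpr8 + 1};
        if (is_aoffi) {
            out.aoffi_reg = gpr20;
            out.depth_compare = gpr20 + 1;
        } else {
            out.depth_compare = gpr20;
        }
    } else {
        out.coords.push_back(gpr8);
        if (is_aoffi) {
            out.coords.push_back(gpr8 + 1);
            out.aoffi_reg = gpr20;
        } else {
            out.coords.push_back(gpr20);
        }
    }
    return out;
}
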
@@ -190,7 +193,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
190 } 193 }
191 194
192 for (u32 element = 0; element < values.size(); ++element) { 195 for (u32 element = 0; element < values.size(); ++element) {
193 MetaTexture meta{*sampler, {}, {}, {}, derivates, {}, {}, {}, element}; 196 MetaTexture meta{*sampler, {}, {}, {}, {}, derivates, {}, {}, {}, element};
194 values[element] = Operation(OperationCode::TextureGradient, std::move(meta), coords); 197 values[element] = Operation(OperationCode::TextureGradient, std::move(meta), coords);
195 } 198 }
196 199
@@ -230,7 +233,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
230 if (!instr.txq.IsComponentEnabled(element)) { 233 if (!instr.txq.IsComponentEnabled(element)) {
231 continue; 234 continue;
232 } 235 }
233 MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, element}; 236 MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, {}, element};
234 const Node value = 237 const Node value =
235 Operation(OperationCode::TextureQueryDimensions, meta, 238 Operation(OperationCode::TextureQueryDimensions, meta,
236 GetRegister(instr.gpr8.Value() + (is_bindless ? 1 : 0))); 239 GetRegister(instr.gpr8.Value() + (is_bindless ? 1 : 0)));
@@ -299,7 +302,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
299 continue; 302 continue;
300 } 303 }
301 auto params = coords; 304 auto params = coords;
302 MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, element}; 305 MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, {}, element};
303 const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); 306 const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
304 SetTemporary(bb, indexer++, value); 307 SetTemporary(bb, indexer++, value);
305 } 308 }
@@ -367,7 +370,7 @@ const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler,
367 if (it != used_samplers.end()) { 370 if (it != used_samplers.end()) {
368 ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && 371 ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
369 it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer); 372 it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer);
370 return &(*it); 373 return &*it;
371 } 374 }
372 375
373 // Otherwise create a new mapping for this sampler 376 // Otherwise create a new mapping for this sampler
@@ -397,7 +400,7 @@ const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg,
397 if (it != used_samplers.end()) { 400 if (it != used_samplers.end()) {
398 ASSERT(it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && 401 ASSERT(it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
399 it->IsShadow() == info.is_shadow); 402 it->IsShadow() == info.is_shadow);
400 return &(*it); 403 return &*it;
401 } 404 }
402 405
403 // Otherwise create a new mapping for this sampler 406 // Otherwise create a new mapping for this sampler
@@ -538,7 +541,7 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
538 541
539 for (u32 element = 0; element < values.size(); ++element) { 542 for (u32 element = 0; element < values.size(); ++element) {
540 auto copy_coords = coords; 543 auto copy_coords = coords;
541 MetaTexture meta{*sampler, array, depth_compare, aoffi, {}, bias, lod, {}, element}; 544 MetaTexture meta{*sampler, array, depth_compare, aoffi, {}, {}, bias, lod, {}, element};
542 values[element] = Operation(read_method, meta, std::move(copy_coords)); 545 values[element] = Operation(read_method, meta, std::move(copy_coords));
543 } 546 }
544 547
@@ -635,7 +638,9 @@ Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
635} 638}
636 639
637Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare, 640Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,
638 bool is_array, bool is_aoffi, bool is_bindless) { 641 bool is_array, bool is_aoffi, bool is_ptp, bool is_bindless) {
642 ASSERT_MSG(!(is_aoffi && is_ptp), "AOFFI and PTP can't be enabled at the same time");
643
639 const std::size_t coord_count = GetCoordCount(texture_type); 644 const std::size_t coord_count = GetCoordCount(texture_type);
640 645
641 // If enabled, the array index is always stored in the gpr8 field 646 // If enabled, the array index is always stored in the gpr8 field
@@ -661,12 +666,15 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
661 return values; 666 return values;
662 } 667 }
663 668
664 std::vector<Node> aoffi; 669 std::vector<Node> aoffi, ptp;
665 if (is_aoffi) { 670 if (is_aoffi) {
666 aoffi = GetAoffiCoordinates(GetRegister(parameter_register++), coord_count, true); 671 aoffi = GetAoffiCoordinates(GetRegister(parameter_register++), coord_count, true);
672 } else if (is_ptp) {
673 ptp = GetPtpCoordinates(
674 {GetRegister(parameter_register++), GetRegister(parameter_register++)});
667 } 675 }
668 676
669 Node dc{}; 677 Node dc;
670 if (depth_compare) { 678 if (depth_compare) {
671 dc = GetRegister(parameter_register++); 679 dc = GetRegister(parameter_register++);
672 } 680 }
@@ -676,8 +684,8 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
676 684
677 for (u32 element = 0; element < values.size(); ++element) { 685 for (u32 element = 0; element < values.size(); ++element) {
678 auto coords_copy = coords; 686 auto coords_copy = coords;
679 MetaTexture meta{*sampler, GetRegister(array_register), dc, aoffi, {}, {}, {}, component, 687 MetaTexture meta{
680 element}; 688 *sampler, GetRegister(array_register), dc, aoffi, ptp, {}, {}, {}, component, element};
681 values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); 689 values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
682 } 690 }
683 691
@@ -710,7 +718,7 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
710 Node4 values; 718 Node4 values;
711 for (u32 element = 0; element < values.size(); ++element) { 719 for (u32 element = 0; element < values.size(); ++element) {
712 auto coords_copy = coords; 720 auto coords_copy = coords;
713 MetaTexture meta{sampler, array_register, {}, {}, {}, {}, lod, {}, element}; 721 MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element};
714 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); 722 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
715 } 723 }
716 724
@@ -760,7 +768,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
760 Node4 values; 768 Node4 values;
761 for (u32 element = 0; element < values.size(); ++element) { 769 for (u32 element = 0; element < values.size(); ++element) {
762 auto coords_copy = coords; 770 auto coords_copy = coords;
763 MetaTexture meta{sampler, array, {}, {}, {}, {}, lod, {}, element}; 771 MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element};
764 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); 772 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
765 } 773 }
766 return values; 774 return values;
@@ -825,4 +833,38 @@ std::vector<Node> ShaderIR::GetAoffiCoordinates(Node aoffi_reg, std::size_t coor
825 return aoffi; 833 return aoffi;
826} 834}
827 835
836std::vector<Node> ShaderIR::GetPtpCoordinates(std::array<Node, 2> ptp_regs) {
837 static constexpr u32 num_entries = 8;
838
839 std::vector<Node> ptp;
840 ptp.reserve(num_entries);
841
842 const auto global_size = static_cast<s64>(global_code.size());
843 const std::optional low = TrackImmediate(ptp_regs[0], global_code, global_size);
844 const std::optional high = TrackImmediate(ptp_regs[1], global_code, global_size);
845 if (!low || !high) {
846 for (u32 entry = 0; entry < num_entries; ++entry) {
847 const u32 reg = entry / 4;
848 const u32 offset = entry % 4;
849 const Node value = BitfieldExtract(ptp_regs[reg], offset * 8, 6);
850 const Node condition =
851 Operation(OperationCode::LogicalIGreaterEqual, value, Immediate(32));
852 const Node negative = Operation(OperationCode::IAdd, value, Immediate(-64));
853 ptp.push_back(Operation(OperationCode::Select, condition, negative, value));
854 }
855 return ptp;
856 }
857
858 const u64 immediate = (static_cast<u64>(*high) << 32) | static_cast<u64>(*low);
859 for (u32 entry = 0; entry < num_entries; ++entry) {
860 s32 value = (immediate >> (entry * 8)) & 0b111111;
861 if (value >= 32) {
862 value -= 64;
863 }
864 ptp.push_back(Immediate(value));
865 }
866
867 return ptp;
868}
869
828} // namespace VideoCommon::Shader 870} // namespace VideoCommon::Shader
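
The new GetPtpCoordinates helper decodes eight 6-bit offsets that PTP packs into two 32-bit registers (one offset per byte, low 6 bits each), encoded in two's complement so that values of 32 or more wrap to negative offsets. When both registers track back to immediates, the decode reduces to the scalar loop sketched below; DecodePtpOffsets is a hypothetical stand-alone version of that fast path.

#include <array>
#include <cstdint>

// Hypothetical scalar version of the immediate fast path in GetPtpCoordinates.
std::array<int, 8> DecodePtpOffsets(std::uint64_t packed) {
    std::array<int, 8> offsets{};
    for (unsigned entry = 0; entry < 8; ++entry) {
        int value = static_cast<int>((packed >> (entry * 8)) & 0b111111);
        if (value >= 32) {
            value -= 64; // sign-extend the 6-bit two's complement field
        }
        offsets[entry] = value;
    }
    return offsets;
}
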
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index abd40f582..4d2f4d6a8 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -374,6 +374,7 @@ struct MetaTexture {
374 Node array; 374 Node array;
375 Node depth_compare; 375 Node depth_compare;
376 std::vector<Node> aoffi; 376 std::vector<Node> aoffi;
377 std::vector<Node> ptp;
377 std::vector<Node> derivates; 378 std::vector<Node> derivates;
378 Node bias; 379 Node bias;
379 Node lod; 380 Node lod;
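
Because MetaTexture is brace-initialized positionally at every call site, inserting ptp between aoffi and derivates is what forces the extra {} placeholders seen throughout texture.cpp above. A simplified stand-in (struct and field types invented for illustration) showing the effect:

#include <vector>

// Simplified stand-in for MetaTexture; only the member order matters here.
struct Meta {
    int sampler;
    std::vector<int> aoffi;
    std::vector<int> ptp;       // newly inserted member
    std::vector<int> derivates;
    int element;
};

void Example() {
    // Before the change a call site could write {0, {}, {}, 3}; afterwards it
    // needs one more empty braced initializer so 'derivates' and 'element'
    // still land in the right slots.
    [[maybe_unused]] Meta meta{0, {}, {}, {}, 3};
}
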
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 04ae5f822..baed06ccd 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -350,7 +350,8 @@ private:
350 bool is_array); 350 bool is_array);
351 351
352 Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, 352 Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
353 bool depth_compare, bool is_array, bool is_aoffi, bool is_bindless); 353 bool depth_compare, bool is_array, bool is_aoffi, bool is_ptp,
354 bool is_bindless);
354 355
355 Node4 GetTldCode(Tegra::Shader::Instruction instr); 356 Node4 GetTldCode(Tegra::Shader::Instruction instr);
356 357
@@ -363,6 +364,8 @@ private:
363 364
364 std::vector<Node> GetAoffiCoordinates(Node aoffi_reg, std::size_t coord_count, bool is_tld4); 365 std::vector<Node> GetAoffiCoordinates(Node aoffi_reg, std::size_t coord_count, bool is_tld4);
365 366
367 std::vector<Node> GetPtpCoordinates(std::array<Node, 2> ptp_regs);
368
366 Node4 GetTextureCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, 369 Node4 GetTextureCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
367 Tegra::Shader::TextureProcessMode process_mode, std::vector<Node> coords, 370 Tegra::Shader::TextureProcessMode process_mode, std::vector<Node> coords,
368 Node array, Node depth_compare, u32 bias_offset, std::vector<Node> aoffi, 371 Node array, Node depth_compare, u32 bias_offset, std::vector<Node> aoffi,
diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt
index ff1c1d985..11ae1e66e 100644
--- a/src/yuzu/CMakeLists.txt
+++ b/src/yuzu/CMakeLists.txt
@@ -78,11 +78,6 @@ add_executable(yuzu
78 configuration/configure_web.cpp 78 configuration/configure_web.cpp
79 configuration/configure_web.h 79 configuration/configure_web.h
80 configuration/configure_web.ui 80 configuration/configure_web.ui
81 debugger/graphics/graphics_breakpoint_observer.cpp
82 debugger/graphics/graphics_breakpoint_observer.h
83 debugger/graphics/graphics_breakpoints.cpp
84 debugger/graphics/graphics_breakpoints.h
85 debugger/graphics/graphics_breakpoints_p.h
86 debugger/console.cpp 81 debugger/console.cpp
87 debugger/console.h 82 debugger/console.h
88 debugger/profiler.cpp 83 debugger/profiler.cpp
diff --git a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.cpp b/src/yuzu/debugger/graphics/graphics_breakpoint_observer.cpp
deleted file mode 100644
index 5f459ccfb..000000000
--- a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <QMetaType>
6#include "yuzu/debugger/graphics/graphics_breakpoint_observer.h"
7
8BreakPointObserverDock::BreakPointObserverDock(std::shared_ptr<Tegra::DebugContext> debug_context,
9 const QString& title, QWidget* parent)
10 : QDockWidget(title, parent), BreakPointObserver(debug_context) {
11 qRegisterMetaType<Tegra::DebugContext::Event>("Tegra::DebugContext::Event");
12
13 connect(this, &BreakPointObserverDock::Resumed, this, &BreakPointObserverDock::OnResumed);
14
15 // NOTE: This signal is emitted from a non-GUI thread, but connect() takes
16 // care of delaying its handling to the GUI thread.
17 connect(this, &BreakPointObserverDock::BreakPointHit, this,
18 &BreakPointObserverDock::OnBreakPointHit, Qt::BlockingQueuedConnection);
19}
20
21void BreakPointObserverDock::OnMaxwellBreakPointHit(Tegra::DebugContext::Event event, void* data) {
22 emit BreakPointHit(event, data);
23}
24
25void BreakPointObserverDock::OnMaxwellResume() {
26 emit Resumed();
27}
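
The deleted observer dock relied on a common Qt pattern: a callback that fires on a non-GUI thread only emits a signal, and a connection made with Qt::BlockingQueuedConnection delivers the slot on the GUI thread while the emitter blocks until it returns. A minimal sketch of that pattern, with the class and receiver names invented for illustration:

#include <QObject>

// Hypothetical forwarder: the worker-thread callback only emits a signal.
class BreakpointForwarder : public QObject {
    Q_OBJECT
public:
    void OnHitFromWorkerThread() {
        emit BreakPointHit(); // safe to call from any thread
    }
signals:
    void BreakPointHit();
};

// During setup on the GUI thread (receiver and slot are hypothetical):
//   connect(&forwarder, &BreakpointForwarder::BreakPointHit,
//           gui_widget, &GuiWidget::OnBreakPointHit, Qt::BlockingQueuedConnection);
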
diff --git a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.h b/src/yuzu/debugger/graphics/graphics_breakpoint_observer.h
deleted file mode 100644
index ab32f0115..000000000
--- a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.h
+++ /dev/null
@@ -1,33 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <QDockWidget>
8#include "video_core/debug_utils/debug_utils.h"
9
10/**
11 * Utility class which forwards calls to OnMaxwellBreakPointHit and OnMaxwellResume to public slots.
12 * This is because the Maxwell breakpoint callbacks are called from a non-GUI thread, while
13 * the widget usually wants to perform reactions in the GUI thread.
14 */
15class BreakPointObserverDock : public QDockWidget,
16 protected Tegra::DebugContext::BreakPointObserver {
17 Q_OBJECT
18
19public:
20 BreakPointObserverDock(std::shared_ptr<Tegra::DebugContext> debug_context, const QString& title,
21 QWidget* parent = nullptr);
22
23 void OnMaxwellBreakPointHit(Tegra::DebugContext::Event event, void* data) override;
24 void OnMaxwellResume() override;
25
26signals:
27 void Resumed();
28 void BreakPointHit(Tegra::DebugContext::Event event, void* data);
29
30private:
31 virtual void OnBreakPointHit(Tegra::DebugContext::Event event, void* data) = 0;
32 virtual void OnResumed() = 0;
33};
diff --git a/src/yuzu/debugger/graphics/graphics_breakpoints.cpp b/src/yuzu/debugger/graphics/graphics_breakpoints.cpp
deleted file mode 100644
index 1c80082a4..000000000
--- a/src/yuzu/debugger/graphics/graphics_breakpoints.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <QLabel>
6#include <QMetaType>
7#include <QPushButton>
8#include <QTreeView>
9#include <QVBoxLayout>
10#include "common/assert.h"
11#include "yuzu/debugger/graphics/graphics_breakpoints.h"
12#include "yuzu/debugger/graphics/graphics_breakpoints_p.h"
13
14BreakPointModel::BreakPointModel(std::shared_ptr<Tegra::DebugContext> debug_context,
15 QObject* parent)
16 : QAbstractListModel(parent), context_weak(debug_context),
17 at_breakpoint(debug_context->at_breakpoint),
18 active_breakpoint(debug_context->active_breakpoint) {}
19
20int BreakPointModel::columnCount(const QModelIndex& parent) const {
21 return 1;
22}
23
24int BreakPointModel::rowCount(const QModelIndex& parent) const {
25 return static_cast<int>(Tegra::DebugContext::Event::NumEvents);
26}
27
28QVariant BreakPointModel::data(const QModelIndex& index, int role) const {
29 const auto event = static_cast<Tegra::DebugContext::Event>(index.row());
30
31 switch (role) {
32 case Qt::DisplayRole: {
33 if (index.column() == 0) {
34 return DebugContextEventToString(event);
35 }
36 break;
37 }
38
39 case Qt::CheckStateRole: {
40 if (index.column() == 0)
41 return data(index, Role_IsEnabled).toBool() ? Qt::Checked : Qt::Unchecked;
42 break;
43 }
44
45 case Qt::BackgroundRole: {
46 if (at_breakpoint && index.row() == static_cast<int>(active_breakpoint)) {
47 return QBrush(QColor(0xE0, 0xE0, 0x10));
48 }
49 break;
50 }
51
52 case Role_IsEnabled: {
53 auto context = context_weak.lock();
54 return context && context->breakpoints[(int)event].enabled;
55 }
56
57 default:
58 break;
59 }
60 return QVariant();
61}
62
63Qt::ItemFlags BreakPointModel::flags(const QModelIndex& index) const {
64 if (!index.isValid())
65 return 0;
66
67 Qt::ItemFlags flags = Qt::ItemIsEnabled;
68 if (index.column() == 0)
69 flags |= Qt::ItemIsUserCheckable;
70 return flags;
71}
72
73bool BreakPointModel::setData(const QModelIndex& index, const QVariant& value, int role) {
74 const auto event = static_cast<Tegra::DebugContext::Event>(index.row());
75
76 switch (role) {
77 case Qt::CheckStateRole: {
78 if (index.column() != 0)
79 return false;
80
81 auto context = context_weak.lock();
82 if (!context)
83 return false;
84
85 context->breakpoints[(int)event].enabled = value == Qt::Checked;
86 QModelIndex changed_index = createIndex(index.row(), 0);
87 emit dataChanged(changed_index, changed_index);
88 return true;
89 }
90 }
91
92 return false;
93}
94
95void BreakPointModel::OnBreakPointHit(Tegra::DebugContext::Event event) {
96 auto context = context_weak.lock();
97 if (!context)
98 return;
99
100 active_breakpoint = context->active_breakpoint;
101 at_breakpoint = context->at_breakpoint;
102 emit dataChanged(createIndex(static_cast<int>(event), 0),
103 createIndex(static_cast<int>(event), 0));
104}
105
106void BreakPointModel::OnResumed() {
107 auto context = context_weak.lock();
108 if (!context)
109 return;
110
111 at_breakpoint = context->at_breakpoint;
112 emit dataChanged(createIndex(static_cast<int>(active_breakpoint), 0),
113 createIndex(static_cast<int>(active_breakpoint), 0));
114 active_breakpoint = context->active_breakpoint;
115}
116
117QString BreakPointModel::DebugContextEventToString(Tegra::DebugContext::Event event) {
118 switch (event) {
119 case Tegra::DebugContext::Event::MaxwellCommandLoaded:
120 return tr("Maxwell command loaded");
121 case Tegra::DebugContext::Event::MaxwellCommandProcessed:
122 return tr("Maxwell command processed");
123 case Tegra::DebugContext::Event::IncomingPrimitiveBatch:
124 return tr("Incoming primitive batch");
125 case Tegra::DebugContext::Event::FinishedPrimitiveBatch:
126 return tr("Finished primitive batch");
127 case Tegra::DebugContext::Event::NumEvents:
128 break;
129 }
130
131 return tr("Unknown debug context event");
132}
133
134GraphicsBreakPointsWidget::GraphicsBreakPointsWidget(
135 std::shared_ptr<Tegra::DebugContext> debug_context, QWidget* parent)
136 : QDockWidget(tr("Maxwell Breakpoints"), parent), Tegra::DebugContext::BreakPointObserver(
137 debug_context) {
138 setObjectName(QStringLiteral("TegraBreakPointsWidget"));
139
140 status_text = new QLabel(tr("Emulation running"));
141 resume_button = new QPushButton(tr("Resume"));
142 resume_button->setEnabled(false);
143
144 breakpoint_model = new BreakPointModel(debug_context, this);
145 breakpoint_list = new QTreeView;
146 breakpoint_list->setRootIsDecorated(false);
147 breakpoint_list->setHeaderHidden(true);
148 breakpoint_list->setModel(breakpoint_model);
149
150 qRegisterMetaType<Tegra::DebugContext::Event>("Tegra::DebugContext::Event");
151
152 connect(breakpoint_list, &QTreeView::doubleClicked, this,
153 &GraphicsBreakPointsWidget::OnItemDoubleClicked);
154
155 connect(resume_button, &QPushButton::clicked, this,
156 &GraphicsBreakPointsWidget::OnResumeRequested);
157
158 connect(this, &GraphicsBreakPointsWidget::BreakPointHit, this,
159 &GraphicsBreakPointsWidget::OnBreakPointHit, Qt::BlockingQueuedConnection);
160 connect(this, &GraphicsBreakPointsWidget::Resumed, this, &GraphicsBreakPointsWidget::OnResumed);
161
162 connect(this, &GraphicsBreakPointsWidget::BreakPointHit, breakpoint_model,
163 &BreakPointModel::OnBreakPointHit, Qt::BlockingQueuedConnection);
164 connect(this, &GraphicsBreakPointsWidget::Resumed, breakpoint_model,
165 &BreakPointModel::OnResumed);
166
167 connect(this, &GraphicsBreakPointsWidget::BreakPointsChanged,
168 [this](const QModelIndex& top_left, const QModelIndex& bottom_right) {
169 breakpoint_model->dataChanged(top_left, bottom_right);
170 });
171
172 QWidget* main_widget = new QWidget;
173 auto main_layout = new QVBoxLayout;
174 {
175 auto sub_layout = new QHBoxLayout;
176 sub_layout->addWidget(status_text);
177 sub_layout->addWidget(resume_button);
178 main_layout->addLayout(sub_layout);
179 }
180 main_layout->addWidget(breakpoint_list);
181 main_widget->setLayout(main_layout);
182
183 setWidget(main_widget);
184}
185
186void GraphicsBreakPointsWidget::OnMaxwellBreakPointHit(Event event, void* data) {
187 // Process in GUI thread
188 emit BreakPointHit(event, data);
189}
190
191void GraphicsBreakPointsWidget::OnBreakPointHit(Tegra::DebugContext::Event event, void* data) {
192 status_text->setText(tr("Emulation halted at breakpoint"));
193 resume_button->setEnabled(true);
194}
195
196void GraphicsBreakPointsWidget::OnMaxwellResume() {
197 // Process in GUI thread
198 emit Resumed();
199}
200
201void GraphicsBreakPointsWidget::OnResumed() {
202 status_text->setText(tr("Emulation running"));
203 resume_button->setEnabled(false);
204}
205
206void GraphicsBreakPointsWidget::OnResumeRequested() {
207 if (auto context = context_weak.lock())
208 context->Resume();
209}
210
211void GraphicsBreakPointsWidget::OnItemDoubleClicked(const QModelIndex& index) {
212 if (!index.isValid())
213 return;
214
215 QModelIndex check_index = breakpoint_list->model()->index(index.row(), 0);
216 QVariant enabled = breakpoint_list->model()->data(check_index, Qt::CheckStateRole);
217 QVariant new_state = Qt::Unchecked;
218 if (enabled == Qt::Unchecked)
219 new_state = Qt::Checked;
220 breakpoint_list->model()->setData(check_index, new_state, Qt::CheckStateRole);
221}
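
The deleted BreakPointModel is a standard checkable list model: data() answers Qt::CheckStateRole from a backing enabled flag, setData() flips that flag and emits dataChanged, and flags() adds Qt::ItemIsUserCheckable. A compact sketch of the same pattern, with a hypothetical fixed row count in place of the debug-context events:

#include <array>
#include <QAbstractListModel>

// Minimal checkable list model (hypothetical example, not the deleted class).
class CheckListModel : public QAbstractListModel {
    Q_OBJECT
public:
    int rowCount(const QModelIndex& = QModelIndex()) const override {
        return static_cast<int>(enabled.size());
    }
    QVariant data(const QModelIndex& index, int role) const override {
        if (role == Qt::CheckStateRole)
            return enabled[index.row()] ? Qt::Checked : Qt::Unchecked;
        if (role == Qt::DisplayRole)
            return QStringLiteral("Entry %1").arg(index.row());
        return {};
    }
    bool setData(const QModelIndex& index, const QVariant& value, int role) override {
        if (role != Qt::CheckStateRole)
            return false;
        enabled[index.row()] = (value == Qt::Checked);
        emit dataChanged(index, index, {Qt::CheckStateRole});
        return true;
    }
    Qt::ItemFlags flags(const QModelIndex&) const override {
        return Qt::ItemIsEnabled | Qt::ItemIsUserCheckable;
    }

private:
    std::array<bool, 4> enabled{}; // placeholder row count
};
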
diff --git a/src/yuzu/debugger/graphics/graphics_breakpoints.h b/src/yuzu/debugger/graphics/graphics_breakpoints.h
deleted file mode 100644
index a920a2ae5..000000000
--- a/src/yuzu/debugger/graphics/graphics_breakpoints.h
+++ /dev/null
@@ -1,45 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <QDockWidget>
9#include "video_core/debug_utils/debug_utils.h"
10
11class QLabel;
12class QPushButton;
13class QTreeView;
14
15class BreakPointModel;
16
17class GraphicsBreakPointsWidget : public QDockWidget, Tegra::DebugContext::BreakPointObserver {
18 Q_OBJECT
19
20 using Event = Tegra::DebugContext::Event;
21
22public:
23 explicit GraphicsBreakPointsWidget(std::shared_ptr<Tegra::DebugContext> debug_context,
24 QWidget* parent = nullptr);
25
26 void OnMaxwellBreakPointHit(Tegra::DebugContext::Event event, void* data) override;
27 void OnMaxwellResume() override;
28
29signals:
30 void Resumed();
31 void BreakPointHit(Tegra::DebugContext::Event event, void* data);
32 void BreakPointsChanged(const QModelIndex& topLeft, const QModelIndex& bottomRight);
33
34private:
35 void OnBreakPointHit(Tegra::DebugContext::Event event, void* data);
36 void OnItemDoubleClicked(const QModelIndex&);
37 void OnResumeRequested();
38 void OnResumed();
39
40 QLabel* status_text;
41 QPushButton* resume_button;
42
43 BreakPointModel* breakpoint_model;
44 QTreeView* breakpoint_list;
45};
diff --git a/src/yuzu/debugger/graphics/graphics_breakpoints_p.h b/src/yuzu/debugger/graphics/graphics_breakpoints_p.h
deleted file mode 100644
index fb488e38f..000000000
--- a/src/yuzu/debugger/graphics/graphics_breakpoints_p.h
+++ /dev/null
@@ -1,37 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <QAbstractListModel>
9#include "video_core/debug_utils/debug_utils.h"
10
11class BreakPointModel : public QAbstractListModel {
12 Q_OBJECT
13
14public:
15 enum {
16 Role_IsEnabled = Qt::UserRole,
17 };
18
19 BreakPointModel(std::shared_ptr<Tegra::DebugContext> context, QObject* parent);
20
21 int columnCount(const QModelIndex& parent = QModelIndex()) const override;
22 int rowCount(const QModelIndex& parent = QModelIndex()) const override;
23 QVariant data(const QModelIndex& index, int role = Qt::DisplayRole) const override;
24 Qt::ItemFlags flags(const QModelIndex& index) const override;
25
26 bool setData(const QModelIndex& index, const QVariant& value, int role = Qt::EditRole) override;
27
28 void OnBreakPointHit(Tegra::DebugContext::Event event);
29 void OnResumed();
30
31private:
32 static QString DebugContextEventToString(Tegra::DebugContext::Event event);
33
34 std::weak_ptr<Tegra::DebugContext> context_weak;
35 bool at_breakpoint;
36 Tegra::DebugContext::Event active_breakpoint;
37};
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 867f8e913..b21fbf826 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -93,7 +93,6 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
93#include "core/perf_stats.h" 93#include "core/perf_stats.h"
94#include "core/settings.h" 94#include "core/settings.h"
95#include "core/telemetry_session.h" 95#include "core/telemetry_session.h"
96#include "video_core/debug_utils/debug_utils.h"
97#include "yuzu/about_dialog.h" 96#include "yuzu/about_dialog.h"
98#include "yuzu/bootmanager.h" 97#include "yuzu/bootmanager.h"
99#include "yuzu/compatdb.h" 98#include "yuzu/compatdb.h"
@@ -101,7 +100,6 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
101#include "yuzu/configuration/config.h" 100#include "yuzu/configuration/config.h"
102#include "yuzu/configuration/configure_dialog.h" 101#include "yuzu/configuration/configure_dialog.h"
103#include "yuzu/debugger/console.h" 102#include "yuzu/debugger/console.h"
104#include "yuzu/debugger/graphics/graphics_breakpoints.h"
105#include "yuzu/debugger/profiler.h" 103#include "yuzu/debugger/profiler.h"
106#include "yuzu/debugger/wait_tree.h" 104#include "yuzu/debugger/wait_tree.h"
107#include "yuzu/discord.h" 105#include "yuzu/discord.h"
@@ -187,8 +185,6 @@ GMainWindow::GMainWindow()
187 provider(std::make_unique<FileSys::ManualContentProvider>()) { 185 provider(std::make_unique<FileSys::ManualContentProvider>()) {
188 InitializeLogging(); 186 InitializeLogging();
189 187
190 debug_context = Tegra::DebugContext::Construct();
191
192 setAcceptDrops(true); 188 setAcceptDrops(true);
193 ui.setupUi(this); 189 ui.setupUi(this);
194 statusBar()->hide(); 190 statusBar()->hide();
@@ -495,11 +491,6 @@ void GMainWindow::InitializeDebugWidgets() {
495 debug_menu->addAction(microProfileDialog->toggleViewAction()); 491 debug_menu->addAction(microProfileDialog->toggleViewAction());
496#endif 492#endif
497 493
498 graphicsBreakpointsWidget = new GraphicsBreakPointsWidget(debug_context, this);
499 addDockWidget(Qt::RightDockWidgetArea, graphicsBreakpointsWidget);
500 graphicsBreakpointsWidget->hide();
501 debug_menu->addAction(graphicsBreakpointsWidget->toggleViewAction());
502
503 waitTreeWidget = new WaitTreeWidget(this); 494 waitTreeWidget = new WaitTreeWidget(this);
504 addDockWidget(Qt::LeftDockWidgetArea, waitTreeWidget); 495 addDockWidget(Qt::LeftDockWidgetArea, waitTreeWidget);
505 waitTreeWidget->hide(); 496 waitTreeWidget->hide();
@@ -869,8 +860,6 @@ bool GMainWindow::LoadROM(const QString& filename) {
869 Core::System& system{Core::System::GetInstance()}; 860 Core::System& system{Core::System::GetInstance()};
870 system.SetFilesystem(vfs); 861 system.SetFilesystem(vfs);
871 862
872 system.SetGPUDebugContext(debug_context);
873
874 system.SetAppletFrontendSet({ 863 system.SetAppletFrontendSet({
875 nullptr, // Parental Controls 864 nullptr, // Parental Controls
876 std::make_unique<QtErrorDisplay>(*this), // 865 std::make_unique<QtErrorDisplay>(*this), //
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index 7f46bea2b..a56f9a981 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -22,7 +22,6 @@ class Config;
22class EmuThread; 22class EmuThread;
23class GameList; 23class GameList;
24class GImageInfo; 24class GImageInfo;
25class GraphicsBreakPointsWidget;
26class GRenderWindow; 25class GRenderWindow;
27class LoadingScreen; 26class LoadingScreen;
28class MicroProfileDialog; 27class MicroProfileDialog;
@@ -42,10 +41,6 @@ class ManualContentProvider;
42class VfsFilesystem; 41class VfsFilesystem;
43} // namespace FileSys 42} // namespace FileSys
44 43
45namespace Tegra {
46class DebugContext;
47}
48
49enum class EmulatedDirectoryTarget { 44enum class EmulatedDirectoryTarget {
50 NAND, 45 NAND,
51 SDMC, 46 SDMC,
@@ -223,8 +218,6 @@ private:
223 218
224 Ui::MainWindow ui; 219 Ui::MainWindow ui;
225 220
226 std::shared_ptr<Tegra::DebugContext> debug_context;
227
228 GRenderWindow* render_window; 221 GRenderWindow* render_window;
229 GameList* game_list; 222 GameList* game_list;
230 LoadingScreen* loading_screen; 223 LoadingScreen* loading_screen;
@@ -255,7 +248,6 @@ private:
255 // Debugger panes 248 // Debugger panes
256 ProfilerWidget* profilerWidget; 249 ProfilerWidget* profilerWidget;
257 MicroProfileDialog* microProfileDialog; 250 MicroProfileDialog* microProfileDialog;
258 GraphicsBreakPointsWidget* graphicsBreakpointsWidget;
259 WaitTreeWidget* waitTreeWidget; 251 WaitTreeWidget* waitTreeWidget;
260 252
261 QAction* actions_recent_files[max_recent_files_item]; 253 QAction* actions_recent_files[max_recent_files_item];