Diffstat (limited to 'src/video_core/gpu.h')
 src/video_core/gpu.h | 55 +++++++++++++------------------------------------------
 1 file changed, 13 insertions(+), 42 deletions(-)
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 42c91954f..74d55e074 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -89,57 +89,20 @@ class Maxwell3D;
 class KeplerCompute;
 } // namespace Engines
 
-enum class EngineID {
-    FERMI_TWOD_A = 0x902D, // 2D Engine
-    MAXWELL_B = 0xB197,    // 3D Engine
-    KEPLER_COMPUTE_B = 0xB1C0,
-    KEPLER_INLINE_TO_MEMORY_B = 0xA140,
-    MAXWELL_DMA_COPY_A = 0xB0B5,
-};
+namespace Control {
+struct ChannelState;
+}
 
 class MemoryManager;
 
 class GPU final {
 public:
-    struct MethodCall {
-        u32 method{};
-        u32 argument{};
-        u32 subchannel{};
-        u32 method_count{};
-
-        explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
-            : method(method_), argument(argument_), subchannel(subchannel_),
-              method_count(method_count_) {}
-
-        [[nodiscard]] bool IsLastCall() const {
-            return method_count <= 1;
-        }
-    };
-
-    enum class FenceOperation : u32 {
-        Acquire = 0,
-        Increment = 1,
-    };
-
-    union FenceAction {
-        u32 raw;
-        BitField<0, 1, FenceOperation> op;
-        BitField<8, 24, u32> syncpoint_id;
-    };
-
     explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
     ~GPU();
 
     /// Binds a renderer to the GPU.
     void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);
 
-    /// Calls a GPU method.
-    void CallMethod(const MethodCall& method_call);
-
-    /// Calls a GPU multivalue method.
-    void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
-                         u32 methods_pending);
-
     /// Flush all current written commands into the host GPU for execution.
     void FlushCommands();
     /// Synchronizes CPU writes with Host GPU memory.
@@ -147,6 +110,14 @@ public:
     /// Signal the ending of command list.
     void OnCommandListEnd();
 
+    std::shared_ptr<Control::ChannelState> AllocateChannel();
+
+    void InitChannel(Control::ChannelState& to_init);
+
+    void BindChannel(s32 channel_id);
+
+    void ReleaseChannel(Control::ChannelState& to_release);
+
     /// Request a host GPU memory flush from the CPU.
     [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
 
@@ -226,7 +197,7 @@ public:
     void ReleaseContext();
 
     /// Push GPU command entries to be processed
-    void PushGPUEntries(Tegra::CommandList&& entries);
+    void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);
 
     /// Push GPU command buffer entries to be processed
     void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
@@ -248,7 +219,7 @@ public:
 
 private:
     struct Impl;
-    std::unique_ptr<Impl> impl;
+    mutable std::unique_ptr<Impl> impl;
 };
 
 } // namespace Tegra
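
For context on the new interface: this change removes the single global method-call path (MethodCall, CallMethod, CallMultiMethod) and replaces it with per-channel state, where command lists are tagged with the channel that submitted them. The sketch below is a hypothetical usage example inferred only from the declarations in this diff; the include path for ChannelState, the bind_id member, and the exact call order are assumptions, not code from the tree.

// Hypothetical usage of the channel API declared above (assumptions noted).
#include <memory>
#include <utility>

#include "video_core/gpu.h"
// Assumed header location for Tegra::Control::ChannelState.
#include "video_core/control/channel_state.h"

// Allocate a fresh channel, run one command list on it, then release it.
void SubmitOnFreshChannel(Tegra::GPU& gpu, Tegra::CommandList&& entries) {
    // The GPU hands out shared ownership of the per-channel state.
    std::shared_ptr<Tegra::Control::ChannelState> channel = gpu.AllocateChannel();

    // Set up the channel's engine state before any work is submitted.
    gpu.InitChannel(*channel);

    // Select this channel as the active one; using bind_id as the identifier
    // is an assumption based on the s32 channel_id parameters above.
    gpu.BindChannel(channel->bind_id);

    // Command lists are now pushed against an explicit channel id, so the
    // GPU can interleave work from several channels.
    gpu.PushGPUEntries(channel->bind_id, std::move(entries));

    // Drop the channel once the guest is done with it.
    gpu.ReleaseChannel(*channel);
}

One consequence of tagging entries with a channel id is that PushGPUEntries no longer assumes a single implicit command stream, which is what allows engine state such as Maxwell3D and KeplerCompute to live per channel rather than as GPU-wide singletons.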