Diffstat (limited to 'src/video_core/gpu.h')
 -rw-r--r--  src/video_core/gpu.h | 93
 1 file changed, 32 insertions(+), 61 deletions(-)
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index b939ba315..0a4a8b14f 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -89,73 +89,58 @@ class Maxwell3D;
 class KeplerCompute;
 } // namespace Engines
 
-enum class EngineID {
-    FERMI_TWOD_A = 0x902D, // 2D Engine
-    MAXWELL_B = 0xB197, // 3D Engine
-    KEPLER_COMPUTE_B = 0xB1C0,
-    KEPLER_INLINE_TO_MEMORY_B = 0xA140,
-    MAXWELL_DMA_COPY_A = 0xB0B5,
-};
+namespace Control {
+struct ChannelState;
+}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
 
 class MemoryManager;
 
 class GPU final {
 public:
-    struct MethodCall {
-        u32 method{};
-        u32 argument{};
-        u32 subchannel{};
-        u32 method_count{};
-
-        explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
-            : method(method_), argument(argument_), subchannel(subchannel_),
-              method_count(method_count_) {}
-
-        [[nodiscard]] bool IsLastCall() const {
-            return method_count <= 1;
-        }
-    };
-
-    enum class FenceOperation : u32 {
-        Acquire = 0,
-        Increment = 1,
-    };
-
-    union FenceAction {
-        u32 raw;
-        BitField<0, 1, FenceOperation> op;
-        BitField<8, 24, u32> syncpoint_id;
-    };
-
     explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
     ~GPU();
 
     /// Binds a renderer to the GPU.
     void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);
 
-    /// Calls a GPU method.
-    void CallMethod(const MethodCall& method_call);
-
-    /// Calls a GPU multivalue method.
-    void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
-                         u32 methods_pending);
-
     /// Flush all current written commands into the host GPU for execution.
     void FlushCommands();
     /// Synchronizes CPU writes with Host GPU memory.
-    void SyncGuestHost();
+    void InvalidateGPUCache();
     /// Signal the ending of command list.
     void OnCommandListEnd();
 
+    std::shared_ptr<Control::ChannelState> AllocateChannel();
+
+    void InitChannel(Control::ChannelState& to_init);
+
+    void BindChannel(s32 channel_id);
+
+    void ReleaseChannel(Control::ChannelState& to_release);
+
+    void InitAddressSpace(Tegra::MemoryManager& memory_manager);
+
     /// Request a host GPU memory flush from the CPU.
     [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
 
     /// Obtains current flush request fence id.
-    [[nodiscard]] u64 CurrentFlushRequestFence() const;
+    [[nodiscard]] u64 CurrentSyncRequestFence() const;
+
+    void WaitForSyncOperation(u64 fence);
 
     /// Tick pending requests within the GPU.
     void TickWork();
 
+    /// Gets a mutable reference to the Host1x interface
+    [[nodiscard]] Host1x::Host1x& Host1x();
+
+    /// Gets an immutable reference to the Host1x interface.
+    [[nodiscard]] const Host1x::Host1x& Host1x() const;
+
     /// Returns a reference to the Maxwell3D GPU engine.
     [[nodiscard]] Engines::Maxwell3D& Maxwell3D();
 
@@ -168,12 +153,6 @@ public:
     /// Returns a reference to the KeplerCompute GPU engine.
     [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const;
 
-    /// Returns a reference to the GPU memory manager.
-    [[nodiscard]] Tegra::MemoryManager& MemoryManager();
-
-    /// Returns a const reference to the GPU memory manager.
-    [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const;
-
     /// Returns a reference to the GPU DMA pusher.
     [[nodiscard]] Tegra::DmaPusher& DmaPusher();
 
@@ -192,17 +171,6 @@ public:
     /// Returns a const reference to the shader notifier.
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
-    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value);
-
-    void IncrementSyncPoint(u32 syncpoint_id);
-
-    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
-
-    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
-
-    [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
-
     [[nodiscard]] u64 GetTicks() const;
 
     [[nodiscard]] bool IsAsync() const;
@@ -211,6 +179,9 @@ public:
 
     void RendererFrameEndNotify();
 
+    void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+                            std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences);
+
     /// Performs any additional setup necessary in order to begin GPU emulation.
     /// This can be used to launch any necessary threads and register any necessary
     /// core timing events.
@@ -226,7 +197,7 @@ public:
     void ReleaseContext();
 
     /// Push GPU command entries to be processed
-    void PushGPUEntries(Tegra::CommandList&& entries);
+    void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);
 
     /// Push GPU command buffer entries to be processed
     void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
@@ -248,7 +219,7 @@ public:
 
 private:
     struct Impl;
-    std::unique_ptr<Impl> impl;
+    mutable std::unique_ptr<Impl> impl;
 };
 
 } // namespace Tegra
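
Usage note: a minimal caller-side sketch of the reworked interface. Instead of the removed per-method GPU::CallMethod/CallMultiMethod path, a caller now allocates a Control::ChannelState, initializes and binds it, and pushes a Tegra::CommandList tagged with the channel id. The include paths and the ChannelState member used for the id (bind_id) are assumptions for illustration only; they are not visible in this header diff.

// Hypothetical sketch of the new channel API declared in gpu.h; include paths
// and ChannelState::bind_id are assumed, not taken from this diff.
#include <memory>
#include <utility>

#include "video_core/control/channel_state.h" // assumed location of Control::ChannelState
#include "video_core/gpu.h"

// Allocate, initialize and bind a channel, then submit a command list through
// it; submission is now per-channel rather than per-method.
void SubmitOnFreshChannel(Tegra::GPU& gpu, Tegra::MemoryManager& memory_manager,
                          Tegra::CommandList&& entries) {
    const std::shared_ptr<Tegra::Control::ChannelState> channel = gpu.AllocateChannel();
    gpu.InitAddressSpace(memory_manager); // attach the guest GPU address space
    gpu.InitChannel(*channel);            // set up the channel's engine/FIFO state
    gpu.BindChannel(channel->bind_id);    // assumed id field on ChannelState
    gpu.PushGPUEntries(channel->bind_id, std::move(entries));
}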