summary | refs | log | tree | commit | diff
path: root/src/core
diff options
context:
space:
mode:
author: Fernando Sahmkow <2021-11-05 15:52:31 +0100>
committer: Fernando Sahmkow <2022-10-06 21:00:51 +0200>
commit: 139ea93512aeead8a4aee3910a3de86eb109a838 (patch)
tree: 857643fc08617b7035656a51728c399f30c8c2cb /src/core
parent: NVASGPU: Fix Remap. (diff)
download: yuzu-139ea93512aeead8a4aee3910a3de86eb109a838.tar.gz
yuzu-139ea93512aeead8a4aee3910a3de86eb109a838.tar.xz
yuzu-139ea93512aeead8a4aee3910a3de86eb109a838.zip
VideoCore: implement channels on gpu caches.
Diffstat (limited to 'src/core')
-rw-r--r--src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp34
-rw-r--r--src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h14
-rw-r--r--src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp34
-rw-r--r--src/core/hle/service/nvdrv/devices/nvhost_gpu.h9
-rw-r--r--src/core/hle/service/nvdrv/devices/nvmap.cpp2
-rw-r--r--src/core/hle/service/nvdrv/nvdrv.cpp2
6 files changed, 65 insertions, 30 deletions
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index b1c683511..9946ce624 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -10,13 +10,17 @@
10#include "core/hle/service/nvdrv/core/container.h" 10#include "core/hle/service/nvdrv/core/container.h"
11#include "core/hle/service/nvdrv/core/nvmap.h" 11#include "core/hle/service/nvdrv/core/nvmap.h"
12#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" 12#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
13#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
14#include "core/hle/service/nvdrv/nvdrv.h"
15#include "video_core/control/channel_state.h"
13#include "video_core/memory_manager.h" 16#include "video_core/memory_manager.h"
14#include "video_core/rasterizer_interface.h" 17#include "video_core/rasterizer_interface.h"
15 18
16namespace Service::Nvidia::Devices { 19namespace Service::Nvidia::Devices {
17 20
18nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, NvCore::Container& core) 21nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core)
19 : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {} 22 : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()},
23 gmmu{std::make_shared<Tegra::MemoryManager>(system)} {}
20nvhost_as_gpu::~nvhost_as_gpu() = default; 24nvhost_as_gpu::~nvhost_as_gpu() = default;
21 25
22NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 26NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -102,9 +106,9 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
102 106
103 const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)}; 107 const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
104 if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) { 108 if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
105 params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size); 109 params.offset = *(gmmu->AllocateFixed(params.offset, size));
106 } else { 110 } else {
107 params.offset = system.GPU().MemoryManager().Allocate(size, params.align); 111 params.offset = gmmu->Allocate(size, params.align);
108 } 112 }
109 113
110 auto result = NvResult::Success; 114 auto result = NvResult::Success;
@@ -124,8 +128,7 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
124 LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, 128 LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
125 params.pages, params.page_size); 129 params.pages, params.page_size);
126 130
127 system.GPU().MemoryManager().Unmap(params.offset, 131 gmmu->Unmap(params.offset, static_cast<std::size_t>(params.pages) * params.page_size);
128 static_cast<std::size_t>(params.pages) * params.page_size);
129 132
130 std::memcpy(output.data(), &params, output.size()); 133 std::memcpy(output.data(), &params, output.size());
131 return NvResult::Success; 134 return NvResult::Success;
@@ -148,7 +151,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
148 // If nvmap handle is null, we should unmap instead. 151 // If nvmap handle is null, we should unmap instead.
149 const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10}; 152 const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
150 const auto size{static_cast<u64>(entry.pages) << 0x10}; 153 const auto size{static_cast<u64>(entry.pages) << 0x10};
151 system.GPU().MemoryManager().Unmap(offset, size); 154 gmmu->Unmap(offset, size);
152 continue; 155 continue;
153 } 156 }
154 157
@@ -162,8 +165,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
162 const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10}; 165 const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
163 const auto size{static_cast<u64>(entry.pages) << 0x10}; 166 const auto size{static_cast<u64>(entry.pages) << 0x10};
164 const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10}; 167 const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
165 const auto addr{ 168 const auto addr{gmmu->Map(object->address + map_offset, offset, size)};
166 system.GPU().MemoryManager().Map(object->address + map_offset, offset, size)};
167 169
168 if (!addr) { 170 if (!addr) {
169 LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); 171 LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
@@ -186,13 +188,12 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
186 params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size, 188 params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
187 params.offset); 189 params.offset);
188 190
189 auto& gpu = system.GPU();
190 if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) { 191 if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
191 if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) { 192 if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
192 const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)}; 193 const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
193 const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)}; 194 const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};
194 195
195 if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) { 196 if (!gmmu->Map(cpu_addr, gpu_addr, params.mapping_size)) {
196 LOG_CRITICAL(Service_NVDRV, 197 LOG_CRITICAL(Service_NVDRV,
197 "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, " 198 "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
198 "mapping_size = {}, offset={}", 199 "mapping_size = {}, offset={}",
@@ -238,9 +239,9 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
238 239
239 const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None}; 240 const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
240 if (is_alloc) { 241 if (is_alloc) {
241 params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size); 242 params.offset = gmmu->MapAllocate(physical_address, size, page_size);
242 } else { 243 } else {
243 params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size); 244 params.offset = gmmu->Map(physical_address, params.offset, size);
244 } 245 }
245 246
246 auto result = NvResult::Success; 247 auto result = NvResult::Success;
@@ -262,7 +263,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
262 LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); 263 LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
263 264
264 if (const auto size{RemoveBufferMap(params.offset)}; size) { 265 if (const auto size{RemoveBufferMap(params.offset)}; size) {
265 system.GPU().MemoryManager().Unmap(params.offset, *size); 266 gmmu->Unmap(params.offset, *size);
266 } else { 267 } else {
267 LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset); 268 LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
268 } 269 }
@@ -274,9 +275,10 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
274NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) { 275NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
275 IoctlBindChannel params{}; 276 IoctlBindChannel params{};
276 std::memcpy(&params, input.data(), input.size()); 277 std::memcpy(&params, input.data(), input.size());
277 LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd); 278 LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
278 279
279 channel = params.fd; 280 auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
281 gpu_channel_device->channel_state->memory_manager = gmmu;
280 return NvResult::Success; 282 return NvResult::Success;
281} 283}
282 284
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 67d2f1e87..4ecae3caf 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -13,6 +13,14 @@
13#include "common/swap.h" 13#include "common/swap.h"
14#include "core/hle/service/nvdrv/devices/nvdevice.h" 14#include "core/hle/service/nvdrv/devices/nvdevice.h"
15 15
16namespace Tegra {
17class MemoryManager;
18} // namespace Tegra
19
20namespace Service::Nvidia {
21class Module;
22}
23
16namespace Service::Nvidia::NvCore { 24namespace Service::Nvidia::NvCore {
17class Container; 25class Container;
18class NvMap; 26class NvMap;
@@ -34,7 +42,7 @@ DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);
34 42
35class nvhost_as_gpu final : public nvdevice { 43class nvhost_as_gpu final : public nvdevice {
36public: 44public:
37 explicit nvhost_as_gpu(Core::System& system_, NvCore::Container& core); 45 explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core);
38 ~nvhost_as_gpu() override; 46 ~nvhost_as_gpu() override;
39 47
40 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 48 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -187,9 +195,13 @@ private:
187 void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); 195 void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
188 std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr); 196 std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
189 197
198 Module& module;
199
190 NvCore::Container& container; 200 NvCore::Container& container;
191 NvCore::NvMap& nvmap; 201 NvCore::NvMap& nvmap;
192 202
203 std::shared_ptr<Tegra::MemoryManager> gmmu;
204
193 // This is expected to be ordered, therefore we must use a map, not unordered_map 205 // This is expected to be ordered, therefore we must use a map, not unordered_map
194 std::map<GPUVAddr, BufferMap> buffer_mappings; 206 std::map<GPUVAddr, BufferMap> buffer_mappings;
195}; 207};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index cb54ee5a4..38d45cb79 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -11,12 +11,14 @@
11#include "core/hle/service/nvdrv/devices/nvhost_gpu.h" 11#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
12#include "core/hle/service/nvdrv/nvdrv.h" 12#include "core/hle/service/nvdrv/nvdrv.h"
13#include "core/memory.h" 13#include "core/memory.h"
14#include "video_core/control/channel_state.h"
15#include "video_core/engines/puller.h"
14#include "video_core/gpu.h" 16#include "video_core/gpu.h"
15 17
16namespace Service::Nvidia::Devices { 18namespace Service::Nvidia::Devices {
17namespace { 19namespace {
18Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) { 20Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) {
19 Tegra::GPU::FenceAction result{}; 21 Tegra::Engines::Puller::FenceAction result{};
20 result.op.Assign(op); 22 result.op.Assign(op);
21 result.syncpoint_id.Assign(syncpoint_id); 23 result.syncpoint_id.Assign(syncpoint_id);
22 return {result.raw}; 24 return {result.raw};
@@ -26,7 +28,8 @@ Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoi
26nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_, 28nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
27 NvCore::Container& core_) 29 NvCore::Container& core_)
28 : nvdevice{system_}, events_interface{events_interface_}, core{core_}, 30 : nvdevice{system_}, events_interface{events_interface_}, core{core_},
29 syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} { 31 syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
32 channel_state{system.GPU().AllocateChannel()} {
30 channel_fence.id = syncpoint_manager.AllocateSyncpoint(); 33 channel_fence.id = syncpoint_manager.AllocateSyncpoint();
31 channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); 34 channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
32 sm_exception_breakpoint_int_report_event = 35 sm_exception_breakpoint_int_report_event =
@@ -180,6 +183,12 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
180 params.num_entries, params.flags, params.unk0, params.unk1, params.unk2, 183 params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
181 params.unk3); 184 params.unk3);
182 185
186 if (channel_state->initiated) {
187 LOG_CRITICAL(Service_NVDRV, "Already allocated!");
188 return NvResult::AlreadyAllocated;
189 }
190
191 system.GPU().InitChannel(*channel_state);
183 channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id); 192 channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
184 193
185 params.fence_out = channel_fence; 194 params.fence_out = channel_fence;
@@ -206,7 +215,7 @@ static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) {
206 {fence.value}, 215 {fence.value},
207 Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, 216 Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
208 Tegra::SubmissionMode::Increasing), 217 Tegra::SubmissionMode::Increasing),
209 BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id), 218 BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id),
210 }; 219 };
211} 220}
212 221
@@ -220,7 +229,8 @@ static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence
220 for (u32 count = 0; count < add_increment; ++count) { 229 for (u32 count = 0; count < add_increment; ++count) {
221 result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, 230 result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
222 Tegra::SubmissionMode::Increasing)); 231 Tegra::SubmissionMode::Increasing));
223 result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id)); 232 result.emplace_back(
233 BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id));
224 } 234 }
225 235
226 return result; 236 return result;
@@ -247,11 +257,13 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>
247 257
248 auto& gpu = system.GPU(); 258 auto& gpu = system.GPU();
249 259
260 const auto bind_id = channel_state->bind_id;
261
250 params.fence_out.id = channel_fence.id; 262 params.fence_out.id = channel_fence.id;
251 263
252 if (params.flags.add_wait.Value() && 264 if (params.flags.add_wait.Value() &&
253 !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) { 265 !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
254 gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)}); 266 gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
255 } 267 }
256 268
257 if (params.flags.add_increment.Value() || params.flags.increment.Value()) { 269 if (params.flags.add_increment.Value() || params.flags.increment.Value()) {
@@ -262,15 +274,15 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>
262 params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id); 274 params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id);
263 } 275 }
264 276
265 gpu.PushGPUEntries(std::move(entries)); 277 gpu.PushGPUEntries(bind_id, std::move(entries));
266 278
267 if (params.flags.add_increment.Value()) { 279 if (params.flags.add_increment.Value()) {
268 if (params.flags.suppress_wfi) { 280 if (params.flags.suppress_wfi) {
269 gpu.PushGPUEntries(Tegra::CommandList{ 281 gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildIncrementCommandList(
270 BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())}); 282 params.fence_out, params.AddIncrementValue())});
271 } else { 283 } else {
272 gpu.PushGPUEntries(Tegra::CommandList{ 284 gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildIncrementWithWfiCommandList(
273 BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())}); 285 params.fence_out, params.AddIncrementValue())});
274 } 286 }
275 } 287 }
276 288
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 440c0c42d..3a65ed06d 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -13,6 +13,12 @@
13#include "core/hle/service/nvdrv/nvdata.h" 13#include "core/hle/service/nvdrv/nvdata.h"
14#include "video_core/dma_pusher.h" 14#include "video_core/dma_pusher.h"
15 15
16namespace Tegra {
17namespace Control {
18struct ChannelState;
19}
20} // namespace Tegra
21
16namespace Service::Nvidia { 22namespace Service::Nvidia {
17 23
18namespace NvCore { 24namespace NvCore {
@@ -26,6 +32,7 @@ class EventInterface;
26 32
27namespace Service::Nvidia::Devices { 33namespace Service::Nvidia::Devices {
28 34
35class nvhost_as_gpu;
29class nvmap; 36class nvmap;
30class nvhost_gpu final : public nvdevice { 37class nvhost_gpu final : public nvdevice {
31public: 38public:
@@ -46,6 +53,7 @@ public:
46 Kernel::KEvent* QueryEvent(u32 event_id) override; 53 Kernel::KEvent* QueryEvent(u32 event_id) override;
47 54
48private: 55private:
56 friend class nvhost_as_gpu;
49 enum class CtxObjects : u32_le { 57 enum class CtxObjects : u32_le {
50 Ctx2D = 0x902D, 58 Ctx2D = 0x902D,
51 Ctx3D = 0xB197, 59 Ctx3D = 0xB197,
@@ -204,6 +212,7 @@ private:
204 NvCore::Container& core; 212 NvCore::Container& core;
205 NvCore::SyncpointManager& syncpoint_manager; 213 NvCore::SyncpointManager& syncpoint_manager;
206 NvCore::NvMap& nvmap; 214 NvCore::NvMap& nvmap;
215 std::shared_ptr<Tegra::Control::ChannelState> channel_state;
207 NvFence channel_fence; 216 NvFence channel_fence;
208 217
209 // Events 218 // Events
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 57f58055d..279997e81 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -168,7 +168,7 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output)
168 IocFromIdParams params; 168 IocFromIdParams params;
169 std::memcpy(&params, input.data(), sizeof(params)); 169 std::memcpy(&params, input.data(), sizeof(params));
170 170
171 LOG_DEBUG(Service_NVDRV, "called, id:{}"); 171 LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);
172 172
173 // Handles and IDs are always the same value in nvmap however IDs can be used globally given the 173 // Handles and IDs are always the same value in nvmap however IDs can be used globally given the
174 // right permissions. 174 // right permissions.
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 208de0b75..b39a4c6db 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -74,7 +74,7 @@ Module::Module(Core::System& system)
74 : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} { 74 : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} {
75 builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) { 75 builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
76 std::shared_ptr<Devices::nvdevice> device = 76 std::shared_ptr<Devices::nvdevice> device =
77 std::make_shared<Devices::nvhost_as_gpu>(system, container); 77 std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);
78 return open_files.emplace(fd, device).first; 78 return open_files.emplace(fd, device).first;
79 }; 79 };
80 builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) { 80 builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) {