path: root/src
author    Fernando Sahmkow    2021-11-04 12:51:17 +0100
committer Fernando Sahmkow    2022-10-06 21:00:51 +0200
commit    de0e8eff429b4374c18e3325ad3747db55bddddd (patch)
tree      6700091146d5282c1efbee40c94b6573c5b2895f /src
parent    NVDRV: Refactor and add new NvMap. (diff)
download  yuzu-de0e8eff429b4374c18e3325ad3747db55bddddd.tar.gz
          yuzu-de0e8eff429b4374c18e3325ad3747db55bddddd.tar.xz
          yuzu-de0e8eff429b4374c18e3325ad3747db55bddddd.zip
NVDRV: Implement new NvMap
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.cpp                   119
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.h                      12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp          14
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.h            12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp         54
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h           12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp            2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp             9
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.h               7
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp           5
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.h             3
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp   26
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h     11
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.cpp             5
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.h               3
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp                 220
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.h                    55
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.cpp                          15
18 files changed, 307 insertions, 277 deletions
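
For orientation before the per-file diffs: the recurring change across the device files below is that each nvdrv device stops holding its own std::shared_ptr<nvmap> and instead takes the NvCore::Container by reference, caching the shared NvMap instance via GetNvMapFile() and resolving handles through it (GetHandle / GetHandleAddress). The snippet below is a minimal, self-contained sketch of that access pattern only; the NvCore types here are illustrative stand-ins with placeholder bodies, not the real classes introduced by this commit.

    #include <cstdint>

    // Illustrative stand-ins: only the shape of the accessors mirrors the diff.
    using VAddr = std::uint64_t;

    namespace NvCore {
    class NvMap {
    public:
        // Mirrors NvMap::GetHandleAddress(): returns 0 for an unknown handle.
        VAddr GetHandleAddress(std::uint32_t handle) const {
            return handle != 0 ? static_cast<VAddr>(handle) * 0x1000 : 0; // placeholder lookup
        }
    };

    class Container {
    public:
        NvMap& GetNvMapFile() { return nvmap_file; }

    private:
        NvMap nvmap_file;
    };
    } // namespace NvCore

    // The constructor/member pattern each device adopts in this commit: keep a
    // reference to the container and cache a reference to its shared NvMap.
    class example_device {
    public:
        explicit example_device(NvCore::Container& core)
            : container{core}, nvmap{core.GetNvMapFile()} {}

        VAddr Resolve(std::uint32_t handle) const {
            return nvmap.GetHandleAddress(handle);
        }

    private:
        NvCore::Container& container; // retained as in the diff; real devices also use it for other services
        NvCore::NvMap& nvmap;
    };
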
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index d3f227f52..281381cc4 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -17,7 +17,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
17 std::scoped_lock lock(mutex); 17 std::scoped_lock lock(mutex);
18 18
19 // Handles cannot be allocated twice 19 // Handles cannot be allocated twice
20 if (allocated) [[unlikely]] 20 if (allocated)
21 return NvResult::AccessDenied; 21 return NvResult::AccessDenied;
22 22
23 flags = pFlags; 23 flags = pFlags;
@@ -61,33 +61,34 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
61 61
62NvMap::NvMap() = default; 62NvMap::NvMap() = default;
63 63
64void NvMap::AddHandle(std::shared_ptr<Handle> handleDesc) { 64void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
65 std::scoped_lock lock(handles_lock); 65 std::scoped_lock lock(handles_lock);
66 66
67 handles.emplace(handleDesc->id, std::move(handleDesc)); 67 handles.emplace(handle_description->id, std::move(handle_description));
68} 68}
69 69
70void NvMap::UnmapHandle(Handle& handleDesc) { 70void NvMap::UnmapHandle(Handle& handle_description) {
71 // Remove pending unmap queue entry if needed 71 // Remove pending unmap queue entry if needed
72 if (handleDesc.unmap_queue_entry) { 72 if (handle_description.unmap_queue_entry) {
73 unmap_queue.erase(*handleDesc.unmap_queue_entry); 73 unmap_queue.erase(*handle_description.unmap_queue_entry);
74 handleDesc.unmap_queue_entry.reset(); 74 handle_description.unmap_queue_entry.reset();
75 } 75 }
76 76
77 // Free and unmap the handle from the SMMU 77 // Free and unmap the handle from the SMMU
78 /* 78 /*
79 state.soc->smmu.Unmap(handleDesc.pin_virt_address, static_cast<u32>(handleDesc.aligned_size)); 79 state.soc->smmu.Unmap(handle_description.pin_virt_address,
80 smmuAllocator.Free(handleDesc.pin_virt_address, static_cast<u32>(handleDesc.aligned_size)); 80 static_cast<u32>(handle_description.aligned_size));
81 handleDesc.pin_virt_address = 0; 81 smmuAllocator.Free(handle_description.pin_virt_address,
82 static_cast<u32>(handle_description.aligned_size)); handle_description.pin_virt_address = 0;
82 */ 83 */
83} 84}
84 85
85bool NvMap::TryRemoveHandle(const Handle& handleDesc) { 86bool NvMap::TryRemoveHandle(const Handle& handle_description) {
86 // No dupes left, we can remove from handle map 87 // No dupes left, we can remove from handle map
87 if (handleDesc.dupes == 0 && handleDesc.internal_dupes == 0) { 88 if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) {
88 std::scoped_lock lock(handles_lock); 89 std::scoped_lock lock(handles_lock);
89 90
90 auto it{handles.find(handleDesc.id)}; 91 auto it{handles.find(handle_description.id)};
91 if (it != handles.end()) 92 if (it != handles.end())
92 handles.erase(it); 93 handles.erase(it);
93 94
@@ -102,10 +103,10 @@ NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_ou
102 return NvResult::BadValue; 103 return NvResult::BadValue;
103 104
104 u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)}; 105 u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
105 auto handleDesc{std::make_shared<Handle>(size, id)}; 106 auto handle_description{std::make_shared<Handle>(size, id)};
106 AddHandle(handleDesc); 107 AddHandle(handle_description);
107 108
108 result_out = handleDesc; 109 result_out = handle_description;
109 return NvResult::Success; 110 return NvResult::Success;
110} 111}
111 112
@@ -118,73 +119,83 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
118 } 119 }
119} 120}
120 121
122VAddr NvMap::GetHandleAddress(Handle::Id handle) {
123 std::scoped_lock lock(handles_lock);
124 try {
125 return handles.at(handle)->address;
126 } catch ([[maybe_unused]] std::out_of_range& e) {
127 return 0;
128 }
129}
130
121u32 NvMap::PinHandle(NvMap::Handle::Id handle) { 131u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
122 UNIMPLEMENTED_MSG("pinning"); 132 UNIMPLEMENTED_MSG("pinning");
123 return 0; 133 return 0;
124 /* 134 /*
125 auto handleDesc{GetHandle(handle)}; 135 auto handle_description{GetHandle(handle)};
126 if (!handleDesc) 136 if (!handle_description)
127 [[unlikely]] return 0; 137 [[unlikely]] return 0;
128 138
129 std::scoped_lock lock(handleDesc->mutex); 139 std::scoped_lock lock(handle_description->mutex);
130 if (!handleDesc->pins) { 140 if (!handle_description->pins) {
131 // If we're in the unmap queue we can just remove ourselves and return since we're already 141 // If we're in the unmap queue we can just remove ourselves and return since we're already
132 // mapped 142 // mapped
133 { 143 {
134 // Lock now to prevent our queue entry from being removed for allocation in-between the 144 // Lock now to prevent our queue entry from being removed for allocation in-between the
135 // following check and erase 145 // following check and erase
136 std::scoped_lock queueLock(unmap_queue_lock); 146 std::scoped_lock queueLock(unmap_queue_lock);
137 if (handleDesc->unmap_queue_entry) { 147 if (handle_description->unmap_queue_entry) {
138 unmap_queue.erase(*handleDesc->unmap_queue_entry); 148 unmap_queue.erase(*handle_description->unmap_queue_entry);
139 handleDesc->unmap_queue_entry.reset(); 149 handle_description->unmap_queue_entry.reset();
140 150
141 handleDesc->pins++; 151 handle_description->pins++;
142 return handleDesc->pin_virt_address; 152 return handle_description->pin_virt_address;
143 } 153 }
144 } 154 }
145 155
146 // If not then allocate some space and map it 156 // If not then allocate some space and map it
147 u32 address{}; 157 u32 address{};
148 while (!(address = smmuAllocator.Allocate(static_cast<u32>(handleDesc->aligned_size)))) { 158 while (!(address =
159 smmuAllocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
149 // Free handles until the allocation succeeds 160 // Free handles until the allocation succeeds
150 std::scoped_lock queueLock(unmap_queue_lock); 161 std::scoped_lock queueLock(unmap_queue_lock);
151 if (auto freeHandleDesc{unmap_queue.front()}) { 162 if (auto freeHandleDesc{unmap_queue.front()}) {
152 // Handles in the unmap queue are guaranteed not to be pinned so don't bother 163 // Handles in the unmap queue are guaranteed not to be pinned so don't bother
153 // checking if they are before unmapping 164 // checking if they are before unmapping
154 std::scoped_lock freeLock(freeHandleDesc->mutex); 165 std::scoped_lock freeLock(freeHandleDesc->mutex);
155 if (handleDesc->pin_virt_address) 166 if (handle_description->pin_virt_address)
156 UnmapHandle(*freeHandleDesc); 167 UnmapHandle(*freeHandleDesc);
157 } else { 168 } else {
158 LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); 169 LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
159 } 170 }
160 } 171 }
161 172
162 state.soc->smmu.Map(address, handleDesc->GetPointer(), 173 state.soc->smmu.Map(address, handle_description->GetPointer(),
163 static_cast<u32>(handleDesc->aligned_size)); 174 static_cast<u32>(handle_description->aligned_size));
164 handleDesc->pin_virt_address = address; 175 handle_description->pin_virt_address = address;
165 } 176 }
166 177
167 handleDesc->pins++; 178 handle_description->pins++;
168 return handleDesc->pin_virt_address; 179 return handle_description->pin_virt_address;
169 */ 180 */
170} 181}
171 182
172void NvMap::UnpinHandle(Handle::Id handle) { 183void NvMap::UnpinHandle(Handle::Id handle) {
173 UNIMPLEMENTED_MSG("Unpinning"); 184 UNIMPLEMENTED_MSG("Unpinning");
174 /* 185 /*
175 auto handleDesc{GetHandle(handle)}; 186 auto handle_description{GetHandle(handle)};
176 if (!handleDesc) 187 if (!handle_description)
177 return; 188 return;
178 189
179 std::scoped_lock lock(handleDesc->mutex); 190 std::scoped_lock lock(handle_description->mutex);
180 if (--handleDesc->pins < 0) { 191 if (--handle_description->pins < 0) {
181 LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!"); 192 LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
182 } else if (!handleDesc->pins) { 193 } else if (!handle_description->pins) {
183 std::scoped_lock queueLock(unmap_queue_lock); 194 std::scoped_lock queueLock(unmap_queue_lock);
184 195
185 // Add to the unmap queue allowing this handle's memory to be freed if needed 196 // Add to the unmap queue allowing this handle's memory to be freed if needed
186 unmap_queue.push_back(handleDesc); 197 unmap_queue.push_back(handle_description);
187 handleDesc->unmap_queue_entry = std::prev(unmap_queue.end()); 198 handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
188 } 199 }
189 */ 200 */
190} 201}
@@ -195,39 +206,39 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
195 206
196 // We use a weak ptr here so we can tell when the handle has been freed and report that back to 207 // We use a weak ptr here so we can tell when the handle has been freed and report that back to
197 // guest 208 // guest
198 if (auto handleDesc = hWeak.lock()) { 209 if (auto handle_description = hWeak.lock()) {
199 std::scoped_lock lock(handleDesc->mutex); 210 std::scoped_lock lock(handle_description->mutex);
200 211
201 if (internal_session) { 212 if (internal_session) {
202 if (--handleDesc->internal_dupes < 0) 213 if (--handle_description->internal_dupes < 0)
203 LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!"); 214 LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
204 } else { 215 } else {
205 if (--handleDesc->dupes < 0) { 216 if (--handle_description->dupes < 0) {
206 LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); 217 LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
207 } else if (handleDesc->dupes == 0) { 218 } else if (handle_description->dupes == 0) {
208 // Force unmap the handle 219 // Force unmap the handle
209 if (handleDesc->pin_virt_address) { 220 if (handle_description->pin_virt_address) {
210 std::scoped_lock queueLock(unmap_queue_lock); 221 std::scoped_lock queueLock(unmap_queue_lock);
211 UnmapHandle(*handleDesc); 222 UnmapHandle(*handle_description);
212 } 223 }
213 224
214 handleDesc->pins = 0; 225 handle_description->pins = 0;
215 } 226 }
216 } 227 }
217 228
218 // Try to remove the shared ptr to the handle from the map, if nothing else is using the 229 // Try to remove the shared ptr to the handle from the map, if nothing else is using the
219 // handle then it will now be freed when `handleDesc` goes out of scope 230 // handle then it will now be freed when `handle_description` goes out of scope
220 if (TryRemoveHandle(*handleDesc)) 231 if (TryRemoveHandle(*handle_description))
221 LOG_ERROR(Service_NVDRV, "Removed nvmap handle: {}", handle); 232 LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
222 else 233 else
223 LOG_ERROR(Service_NVDRV, 234 LOG_DEBUG(Service_NVDRV,
224 "Tried to free nvmap handle: {} but didn't as it still has duplicates", 235 "Tried to free nvmap handle: {} but didn't as it still has duplicates",
225 handle); 236 handle);
226 237
227 freeInfo = { 238 freeInfo = {
228 .address = handleDesc->address, 239 .address = handle_description->address,
229 .size = handleDesc->size, 240 .size = handle_description->size,
230 .was_uncached = handleDesc->flags.map_uncached.Value() != 0, 241 .was_uncached = handle_description->flags.map_uncached.Value() != 0,
231 }; 242 };
232 } else { 243 } else {
233 return std::nullopt; 244 return std::nullopt;
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index e47aa755d..994c70e6f 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -59,6 +59,8 @@ public:
59 u8 kind{}; //!< Used for memory compression 59 u8 kind{}; //!< Used for memory compression
60 bool allocated{}; //!< If the handle has been allocated with `Alloc` 60 bool allocated{}; //!< If the handle has been allocated with `Alloc`
61 61
62 u64 dma_map_addr{}; //! remove me after implementing pinning.
63
62 Handle(u64 size, Id id); 64 Handle(u64 size, Id id);
63 65
64 /** 66 /**
@@ -101,16 +103,16 @@ private:
101 103
102 /** 104 /**
103 * @brief Unmaps and frees the SMMU memory region a handle is mapped to 105 * @brief Unmaps and frees the SMMU memory region a handle is mapped to
104 * @note Both `unmap_queue_lock` and `handleDesc.mutex` MUST be locked when calling this 106 * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
105 */ 107 */
106 void UnmapHandle(Handle& handleDesc); 108 void UnmapHandle(Handle& handle_description);
107 109
108 /** 110 /**
109 * @brief Removes a handle from the map taking its dupes into account 111 * @brief Removes a handle from the map taking its dupes into account
110 * @note handleDesc.mutex MUST be locked when calling this 112 * @note handle_description.mutex MUST be locked when calling this
111 * @return If the handle was removed from the map 113 * @return If the handle was removed from the map
112 */ 114 */
113 bool TryRemoveHandle(const Handle& handleDesc); 115 bool TryRemoveHandle(const Handle& handle_description);
114 116
115public: 117public:
116 /** 118 /**
@@ -131,6 +133,8 @@ public:
131 133
132 std::shared_ptr<Handle> GetHandle(Handle::Id handle); 134 std::shared_ptr<Handle> GetHandle(Handle::Id handle);
133 135
136 VAddr GetHandleAddress(Handle::Id handle);
137
134 /** 138 /**
135 * @brief Maps a handle into the SMMU address space 139 * @brief Maps a handle into the SMMU address space
136 * @note This operation is refcounted, the number of calls to this must eventually match the 140 * @note This operation is refcounted, the number of calls to this must eventually match the
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index 604711914..b1c0e9eb2 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -5,15 +5,16 @@
5#include "common/logging/log.h" 5#include "common/logging/log.h"
6#include "core/core.h" 6#include "core/core.h"
7#include "core/core_timing.h" 7#include "core/core_timing.h"
8#include "core/hle/service/nvdrv/core/container.h"
9#include "core/hle/service/nvdrv/core/nvmap.h"
8#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" 10#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
9#include "core/hle/service/nvdrv/devices/nvmap.h"
10#include "core/perf_stats.h" 11#include "core/perf_stats.h"
11#include "video_core/gpu.h" 12#include "video_core/gpu.h"
12 13
13namespace Service::Nvidia::Devices { 14namespace Service::Nvidia::Devices {
14 15
15nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) 16nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core)
16 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} 17 : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
17nvdisp_disp0::~nvdisp_disp0() = default; 18nvdisp_disp0::~nvdisp_disp0() = default;
18 19
19NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 20NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -40,7 +41,7 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {}
40void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, 41void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
41 u32 height, u32 stride, android::BufferTransformFlags transform, 42 u32 height, u32 stride, android::BufferTransformFlags transform,
42 const Common::Rectangle<int>& crop_rect) { 43 const Common::Rectangle<int>& crop_rect) {
43 const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); 44 const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
44 LOG_TRACE(Service, 45 LOG_TRACE(Service,
45 "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", 46 "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
46 addr, offset, width, height, stride, format); 47 addr, offset, width, height, stride, format);
@@ -54,4 +55,9 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form
54 system.GetPerfStats().BeginSystemFrame(); 55 system.GetPerfStats().BeginSystemFrame();
55} 56}
56 57
58Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) {
59 LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id);
60 return nullptr;
61}
62
57} // namespace Service::Nvidia::Devices 63} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
index 67b105e02..1ca9b2e74 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -11,13 +11,18 @@
11#include "core/hle/service/nvflinger/buffer_transform_flags.h" 11#include "core/hle/service/nvflinger/buffer_transform_flags.h"
12#include "core/hle/service/nvflinger/pixel_format.h" 12#include "core/hle/service/nvflinger/pixel_format.h"
13 13
14namespace Service::Nvidia::NvCore {
15class Container;
16class NvMap;
17} // namespace Service::Nvidia::NvCore
18
14namespace Service::Nvidia::Devices { 19namespace Service::Nvidia::Devices {
15 20
16class nvmap; 21class nvmap;
17 22
18class nvdisp_disp0 final : public nvdevice { 23class nvdisp_disp0 final : public nvdevice {
19public: 24public:
20 explicit nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_); 25 explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core);
21 ~nvdisp_disp0() override; 26 ~nvdisp_disp0() override;
22 27
23 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 28 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -35,8 +40,11 @@ public:
35 u32 stride, android::BufferTransformFlags transform, 40 u32 stride, android::BufferTransformFlags transform,
36 const Common::Rectangle<int>& crop_rect); 41 const Common::Rectangle<int>& crop_rect);
37 42
43 Kernel::KEvent* QueryEvent(u32 event_id) override;
44
38private: 45private:
39 std::shared_ptr<nvmap> nvmap_dev; 46 NvCore::Container& container;
47 NvCore::NvMap& nvmap;
40}; 48};
41 49
42} // namespace Service::Nvidia::Devices 50} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 9867a648d..9283d6aec 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -7,15 +7,16 @@
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/logging/log.h" 8#include "common/logging/log.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/service/nvdrv/core/container.h"
11#include "core/hle/service/nvdrv/core/nvmap.h"
10#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" 12#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
11#include "core/hle/service/nvdrv/devices/nvmap.h"
12#include "video_core/memory_manager.h" 13#include "video_core/memory_manager.h"
13#include "video_core/rasterizer_interface.h" 14#include "video_core/rasterizer_interface.h"
14 15
15namespace Service::Nvidia::Devices { 16namespace Service::Nvidia::Devices {
16 17
17nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) 18nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, NvCore::Container& core)
18 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} 19 : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
19nvhost_as_gpu::~nvhost_as_gpu() = default; 20nvhost_as_gpu::~nvhost_as_gpu() = default;
20 21
21NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 22NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -143,7 +144,7 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
143 LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", 144 LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
144 entry.offset, entry.nvmap_handle, entry.pages); 145 entry.offset, entry.nvmap_handle, entry.pages);
145 146
146 const auto object{nvmap_dev->GetObject(entry.nvmap_handle)}; 147 const auto object{nvmap.GetHandle(entry.nvmap_handle)};
147 if (!object) { 148 if (!object) {
148 LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle); 149 LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
149 result = NvResult::InvalidState; 150 result = NvResult::InvalidState;
@@ -153,7 +154,8 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
153 const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10}; 154 const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
154 const auto size{static_cast<u64>(entry.pages) << 0x10}; 155 const auto size{static_cast<u64>(entry.pages) << 0x10};
155 const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10}; 156 const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
156 const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)}; 157 const auto addr{
158 system.GPU().MemoryManager().Map(object->address + map_offset, offset, size)};
157 159
158 if (!addr) { 160 if (!addr) {
159 LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); 161 LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
@@ -176,24 +178,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
176 params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size, 178 params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
177 params.offset); 179 params.offset);
178 180
179 const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
180 if (!object) {
181 LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
182 std::memcpy(output.data(), &params, output.size());
183 return NvResult::InvalidState;
184 }
185
186 // The real nvservices doesn't make a distinction between handles and ids, and
187 // object can only have one handle and it will be the same as its id. Assert that this is the
188 // case to prevent unexpected behavior.
189 ASSERT(object->id == params.nvmap_handle);
190 auto& gpu = system.GPU(); 181 auto& gpu = system.GPU();
191
192 u64 page_size{params.page_size};
193 if (!page_size) {
194 page_size = object->align;
195 }
196
197 if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) { 182 if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
198 if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) { 183 if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
199 const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)}; 184 const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
@@ -220,10 +205,24 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
220 } 205 }
221 } 206 }
222 207
223 // We can only map objects that have already been assigned a CPU address. 208 const auto object{nvmap.GetHandle(params.nvmap_handle)};
224 ASSERT(object->status == nvmap::Object::Status::Allocated); 209 if (!object) {
210 LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
211 std::memcpy(output.data(), &params, output.size());
212 return NvResult::InvalidState;
213 }
214
215 // The real nvservices doesn't make a distinction between handles and ids, and
216 // object can only have one handle and it will be the same as its id. Assert that this is the
217 // case to prevent unexpected behavior.
218 ASSERT(object->id == params.nvmap_handle);
219
220 u64 page_size{params.page_size};
221 if (!page_size) {
222 page_size = object->align;
223 }
225 224
226 const auto physical_address{object->addr + params.buffer_offset}; 225 const auto physical_address{object->address + params.buffer_offset};
227 u64 size{params.mapping_size}; 226 u64 size{params.mapping_size};
228 if (!size) { 227 if (!size) {
229 size = object->size; 228 size = object->size;
@@ -363,4 +362,9 @@ std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
363 return std::nullopt; 362 return std::nullopt;
364} 363}
365 364
365Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) {
366 LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id);
367 return nullptr;
368}
369
366} // namespace Service::Nvidia::Devices 370} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 555843a6f..67d2f1e87 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -13,6 +13,11 @@
13#include "common/swap.h" 13#include "common/swap.h"
14#include "core/hle/service/nvdrv/devices/nvdevice.h" 14#include "core/hle/service/nvdrv/devices/nvdevice.h"
15 15
16namespace Service::Nvidia::NvCore {
17class Container;
18class NvMap;
19} // namespace Service::Nvidia::NvCore
20
16namespace Service::Nvidia::Devices { 21namespace Service::Nvidia::Devices {
17 22
18constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16; 23constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16;
@@ -29,7 +34,7 @@ DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);
29 34
30class nvhost_as_gpu final : public nvdevice { 35class nvhost_as_gpu final : public nvdevice {
31public: 36public:
32 explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_); 37 explicit nvhost_as_gpu(Core::System& system_, NvCore::Container& core);
33 ~nvhost_as_gpu() override; 38 ~nvhost_as_gpu() override;
34 39
35 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 40 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -42,6 +47,8 @@ public:
42 void OnOpen(DeviceFD fd) override; 47 void OnOpen(DeviceFD fd) override;
43 void OnClose(DeviceFD fd) override; 48 void OnClose(DeviceFD fd) override;
44 49
50 Kernel::KEvent* QueryEvent(u32 event_id) override;
51
45private: 52private:
46 class BufferMap final { 53 class BufferMap final {
47 public: 54 public:
@@ -180,7 +187,8 @@ private:
180 void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); 187 void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
181 std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr); 188 std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
182 189
183 std::shared_ptr<nvmap> nvmap_dev; 190 NvCore::Container& container;
191 NvCore::NvMap& nvmap;
184 192
185 // This is expected to be ordered, therefore we must use a map, not unordered_map 193 // This is expected to be ordered, therefore we must use a map, not unordered_map
186 std::map<GPUVAddr, BufferMap> buffer_mappings; 194 std::map<GPUVAddr, BufferMap> buffer_mappings;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 5e2155e6c..51c40f620 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -279,6 +279,8 @@ Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) {
279 ASSERT(events_interface.events[slot]); 279 ASSERT(events_interface.events[slot]);
280 return events_interface.events[slot]; 280 return events_interface.events[slot];
281 } 281 }
282 // Is this possible in hardware?
283 ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id);
282 return nullptr; 284 return nullptr;
283} 285}
284 286
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index a480bfc47..e7921ade2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -6,6 +6,7 @@
6#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "core/core.h" 7#include "core/core.h"
8#include "core/hle/service/nvdrv/core/container.h" 8#include "core/hle/service/nvdrv/core/container.h"
9#include "core/hle/service/nvdrv/core/nvmap.h"
9#include "core/hle/service/nvdrv/core/syncpoint_manager.h" 10#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
10#include "core/hle/service/nvdrv/devices/nvhost_gpu.h" 11#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
11#include "core/hle/service/nvdrv/nvdrv.h" 12#include "core/hle/service/nvdrv/nvdrv.h"
@@ -22,10 +23,10 @@ Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoi
22} 23}
23} // namespace 24} // namespace
24 25
25nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 26nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
26 EventInterface& events_interface_, NvCore::Container& core_) 27 NvCore::Container& core_)
27 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, events_interface{events_interface_}, 28 : nvdevice{system_}, events_interface{events_interface_}, core{core_},
28 core{core_}, syncpoint_manager{core_.GetSyncpointManager()} { 29 syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} {
29 channel_fence.id = syncpoint_manager.AllocateSyncpoint(); 30 channel_fence.id = syncpoint_manager.AllocateSyncpoint();
30 channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); 31 channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
31 sm_exception_breakpoint_int_report_event = 32 sm_exception_breakpoint_int_report_event =
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 4f73a7bae..440c0c42d 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -17,6 +17,7 @@ namespace Service::Nvidia {
17 17
18namespace NvCore { 18namespace NvCore {
19class Container; 19class Container;
20class NvMap;
20class SyncpointManager; 21class SyncpointManager;
21} // namespace NvCore 22} // namespace NvCore
22 23
@@ -28,8 +29,8 @@ namespace Service::Nvidia::Devices {
28class nvmap; 29class nvmap;
29class nvhost_gpu final : public nvdevice { 30class nvhost_gpu final : public nvdevice {
30public: 31public:
31 explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 32 explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
32 EventInterface& events_interface_, NvCore::Container& core); 33 NvCore::Container& core);
33 ~nvhost_gpu() override; 34 ~nvhost_gpu() override;
34 35
35 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 36 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -199,10 +200,10 @@ private:
199 NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output); 200 NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
200 NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output); 201 NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
201 202
202 std::shared_ptr<nvmap> nvmap_dev;
203 EventInterface& events_interface; 203 EventInterface& events_interface;
204 NvCore::Container& core; 204 NvCore::Container& core;
205 NvCore::SyncpointManager& syncpoint_manager; 205 NvCore::SyncpointManager& syncpoint_manager;
206 NvCore::NvMap& nvmap;
206 NvFence channel_fence; 207 NvFence channel_fence;
207 208
208 // Events 209 // Events
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index 2c9158c7c..aa1a00832 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -10,9 +10,8 @@
10 10
11namespace Service::Nvidia::Devices { 11namespace Service::Nvidia::Devices {
12 12
13nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 13nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core)
14 NvCore::Container& core) 14 : nvhost_nvdec_common{system_, core} {}
15 : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {}
16nvhost_nvdec::~nvhost_nvdec() = default; 15nvhost_nvdec::~nvhost_nvdec() = default;
17 16
18NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 17NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index 04da4a913..fef4b3216 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices {
10 10
11class nvhost_nvdec final : public nvhost_nvdec_common { 11class nvhost_nvdec final : public nvhost_nvdec_common {
12public: 12public:
13 explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 13 explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core);
14 NvCore::Container& core);
15 ~nvhost_nvdec() override; 14 ~nvhost_nvdec() override;
16 15
17 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 16 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 5a9c59f37..e76c9e5ed 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -9,9 +9,9 @@
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "core/core.h" 10#include "core/core.h"
11#include "core/hle/service/nvdrv/core/container.h" 11#include "core/hle/service/nvdrv/core/container.h"
12#include "core/hle/service/nvdrv/core/nvmap.h"
12#include "core/hle/service/nvdrv/core/syncpoint_manager.h" 13#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
13#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" 14#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
14#include "core/hle/service/nvdrv/devices/nvmap.h"
15#include "core/memory.h" 15#include "core/memory.h"
16#include "video_core/memory_manager.h" 16#include "video_core/memory_manager.h"
17#include "video_core/renderer_base.h" 17#include "video_core/renderer_base.h"
@@ -45,10 +45,9 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
45} 45}
46} // Anonymous namespace 46} // Anonymous namespace
47 47
48nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 48nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_)
49 NvCore::Container& core_) 49 : nvdevice{system_}, core{core_},
50 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, core{core_}, 50 syncpoint_manager{core.GetSyncpointManager()}, nvmap{core.GetNvMapFile()} {}
51 syncpoint_manager{core.GetSyncpointManager()} {}
52nvhost_nvdec_common::~nvhost_nvdec_common() = default; 51nvhost_nvdec_common::~nvhost_nvdec_common() = default;
53 52
54NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) { 53NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
@@ -90,10 +89,10 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
90 } 89 }
91 } 90 }
92 for (const auto& cmd_buffer : command_buffers) { 91 for (const auto& cmd_buffer : command_buffers) {
93 const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id); 92 const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
94 ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); 93 ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
95 Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); 94 Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
96 system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(), 95 system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
97 cmdlist.size() * sizeof(u32)); 96 cmdlist.size() * sizeof(u32));
98 gpu.PushCommandBuffer(fd_to_id[fd], cmdlist); 97 gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
99 } 98 }
@@ -125,6 +124,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
125 124
126NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) { 125NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
127 IoctlGetWaitbase params{}; 126 IoctlGetWaitbase params{};
127 LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
128 std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase)); 128 std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
129 params.value = 0; // Seems to be hard coded at 0 129 params.value = 0; // Seems to be hard coded at 0
130 std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase)); 130 std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
@@ -141,7 +141,7 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
141 auto& gpu = system.GPU(); 141 auto& gpu = system.GPU();
142 142
143 for (auto& cmd_buffer : cmd_buffer_handles) { 143 for (auto& cmd_buffer : cmd_buffer_handles) {
144 auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)}; 144 auto object{nvmap.GetHandle(cmd_buffer.map_handle)};
145 if (!object) { 145 if (!object) {
146 LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle); 146 LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
147 std::memcpy(output.data(), &params, output.size()); 147 std::memcpy(output.data(), &params, output.size());
@@ -150,7 +150,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
150 if (object->dma_map_addr == 0) { 150 if (object->dma_map_addr == 0) {
151 // NVDEC and VIC memory is in the 32-bit address space 151 // NVDEC and VIC memory is in the 32-bit address space
152 // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space 152 // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
153 const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size); 153 const GPUVAddr low_addr =
154 gpu.MemoryManager().MapAllocate32(object->address, object->size);
154 object->dma_map_addr = static_cast<u32>(low_addr); 155 object->dma_map_addr = static_cast<u32>(low_addr);
155 // Ensure that the dma_map_addr is indeed in the lower 32-bit address space. 156 // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
156 ASSERT(object->dma_map_addr == low_addr); 157 ASSERT(object->dma_map_addr == low_addr);
@@ -158,7 +159,7 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
158 if (!object->dma_map_addr) { 159 if (!object->dma_map_addr) {
159 LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size); 160 LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
160 } else { 161 } else {
161 cmd_buffer.map_address = object->dma_map_addr; 162 cmd_buffer.map_address = static_cast<u32_le>(object->dma_map_addr);
162 } 163 }
163 } 164 }
164 std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer)); 165 std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
@@ -184,4 +185,9 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
184 return NvResult::Success; 185 return NvResult::Success;
185} 186}
186 187
188Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
189 LOG_CRITICAL(Service_NVDRV, "Unknown HOSTX1 Event {}", event_id);
190 return nullptr;
191}
192
187} // namespace Service::Nvidia::Devices 193} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index cccc94a58..74231d5c5 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -11,17 +11,16 @@
11namespace Service::Nvidia { 11namespace Service::Nvidia {
12 12
13namespace NvCore { 13namespace NvCore {
14class SyncpointManager;
15class Container; 14class Container;
15class NvMap;
16class SyncpointManager;
16} // namespace NvCore 17} // namespace NvCore
17 18
18namespace Devices { 19namespace Devices {
19class nvmap;
20 20
21class nvhost_nvdec_common : public nvdevice { 21class nvhost_nvdec_common : public nvdevice {
22public: 22public:
23 explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 23 explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core);
24 NvCore::Container& core);
25 ~nvhost_nvdec_common() override; 24 ~nvhost_nvdec_common() override;
26 25
27protected: 26protected:
@@ -114,12 +113,14 @@ protected:
114 NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output); 113 NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
115 NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output); 114 NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
116 115
116 Kernel::KEvent* QueryEvent(u32 event_id) override;
117
117 std::unordered_map<DeviceFD, u32> fd_to_id{}; 118 std::unordered_map<DeviceFD, u32> fd_to_id{};
118 s32_le nvmap_fd{}; 119 s32_le nvmap_fd{};
119 u32_le submit_timeout{}; 120 u32_le submit_timeout{};
120 std::shared_ptr<nvmap> nvmap_dev;
121 NvCore::Container& core; 121 NvCore::Container& core;
122 NvCore::SyncpointManager& syncpoint_manager; 122 NvCore::SyncpointManager& syncpoint_manager;
123 NvCore::NvMap& nvmap;
123 std::array<u32, MaxSyncPoints> device_syncpoints{}; 124 std::array<u32, MaxSyncPoints> device_syncpoints{};
124}; 125};
125}; // namespace Devices 126}; // namespace Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index 66558c331..358e89aa8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -8,9 +8,8 @@
8#include "video_core/renderer_base.h" 8#include "video_core/renderer_base.h"
9 9
10namespace Service::Nvidia::Devices { 10namespace Service::Nvidia::Devices {
11nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 11nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core)
12 NvCore::Container& core) 12 : nvhost_nvdec_common{system_, core} {}
13 : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {}
14 13
15nvhost_vic::~nvhost_vic() = default; 14nvhost_vic::~nvhost_vic() = default;
16 15
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index 6f9838b2d..252b1e6f2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices {
9 9
10class nvhost_vic final : public nvhost_nvdec_common { 10class nvhost_vic final : public nvhost_nvdec_common {
11public: 11public:
12 explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 12 explicit nvhost_vic(Core::System& system_, NvCore::Container& core);
13 NvCore::Container& core);
14 ~nvhost_vic(); 13 ~nvhost_vic();
15 14
16 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 15 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index d8518149d..2aee68f5c 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -2,19 +2,24 @@
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <algorithm> 4#include <algorithm>
5#include <bit>
5#include <cstring> 6#include <cstring>
6 7
8#include "common/alignment.h"
7#include "common/assert.h" 9#include "common/assert.h"
8#include "common/logging/log.h" 10#include "common/logging/log.h"
11#include "core/core.h"
12#include "core/hle/service/nvdrv/core/container.h"
13#include "core/hle/service/nvdrv/core/nvmap.h"
9#include "core/hle/service/nvdrv/devices/nvmap.h" 14#include "core/hle/service/nvdrv/devices/nvmap.h"
15#include "core/memory.h"
16
17using Core::Memory::YUZU_PAGESIZE;
10 18
11namespace Service::Nvidia::Devices { 19namespace Service::Nvidia::Devices {
12 20
13nvmap::nvmap(Core::System& system_) : nvdevice{system_} { 21nvmap::nvmap(Core::System& system_, NvCore::Container& container_)
14 // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to 22 : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {}
15 // represent this.
16 CreateObject(0);
17}
18 23
19nvmap::~nvmap() = default; 24nvmap::~nvmap() = default;
20 25
@@ -63,38 +68,32 @@ void nvmap::OnOpen(DeviceFD fd) {}
63void nvmap::OnClose(DeviceFD fd) {} 68void nvmap::OnClose(DeviceFD fd) {}
64 69
65VAddr nvmap::GetObjectAddress(u32 handle) const { 70VAddr nvmap::GetObjectAddress(u32 handle) const {
66 auto object = GetObject(handle); 71 auto obj = file.GetHandle(handle);
67 ASSERT(object); 72 if (obj) {
68 ASSERT(object->status == Object::Status::Allocated); 73 return obj->address;
69 return object->addr; 74 }
75 return 0;
70} 76}
71 77
72u32 nvmap::CreateObject(u32 size) { 78std::shared_ptr<NvCore::NvMap::Handle> nvmap::GetObject(u32 handle) const {
73 // Create a new nvmap object and obtain a handle to it. 79 return file.GetHandle(handle);
74 auto object = std::make_shared<Object>();
75 object->id = next_id++;
76 object->size = size;
77 object->status = Object::Status::Created;
78 object->refcount = 1;
79
80 const u32 handle = next_handle++;
81
82 handles.insert_or_assign(handle, std::move(object));
83
84 return handle;
85} 80}
86 81
87NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) { 82NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
88 IocCreateParams params; 83 IocCreateParams params;
89 std::memcpy(&params, input.data(), sizeof(params)); 84 std::memcpy(&params, input.data(), sizeof(params));
90 LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size); 85 LOG_WARNING(Service_NVDRV, "called, size=0x{:08X}", params.size);
91 86
92 if (!params.size) { 87 std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
93 LOG_ERROR(Service_NVDRV, "Size is 0"); 88 auto result =
94 return NvResult::BadValue; 89 file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description);
90 if (result != NvResult::Success) {
91 LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
92 return result;
95 } 93 }
96 94 handle_description->orig_size = params.size; // Orig size is the unaligned size
97 params.handle = CreateObject(params.size); 95 params.handle = handle_description->id;
96 LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);
98 97
99 std::memcpy(output.data(), &params, sizeof(params)); 98 std::memcpy(output.data(), &params, sizeof(params));
100 return NvResult::Success; 99 return NvResult::Success;
@@ -103,42 +102,42 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output)
103NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) { 102NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
104 IocAllocParams params; 103 IocAllocParams params;
105 std::memcpy(&params, input.data(), sizeof(params)); 104 std::memcpy(&params, input.data(), sizeof(params));
106 LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr); 105 LOG_WARNING(Service_NVDRV, "called, addr={:X}", params.address);
107 106
108 if (!params.handle) { 107 if (!params.handle) {
109 LOG_ERROR(Service_NVDRV, "Handle is 0"); 108 LOG_CRITICAL(Service_NVDRV, "Handle is 0");
110 return NvResult::BadValue; 109 return NvResult::BadValue;
111 } 110 }
112 111
113 if ((params.align - 1) & params.align) { 112 if ((params.align - 1) & params.align) {
114 LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align); 113 LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
115 return NvResult::BadValue; 114 return NvResult::BadValue;
116 } 115 }
117 116
118 const u32 min_alignment = 0x1000; 117 // Force page size alignment at a minimum
119 if (params.align < min_alignment) { 118 if (params.align < YUZU_PAGESIZE) {
120 params.align = min_alignment; 119 params.align = YUZU_PAGESIZE;
121 } 120 }
122 121
123 auto object = GetObject(params.handle); 122 auto handle_description{file.GetHandle(params.handle)};
124 if (!object) { 123 if (!handle_description) {
125 LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); 124 LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
126 return NvResult::BadValue; 125 return NvResult::BadValue;
127 } 126 }
128 127
129 if (object->status == Object::Status::Allocated) { 128 if (handle_description->allocated) {
130 LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle); 129 LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
131 return NvResult::InsufficientMemory; 130 return NvResult::InsufficientMemory;
132 } 131 }
133 132
134 object->flags = params.flags; 133 const auto result =
135 object->align = params.align; 134 handle_description->Alloc(params.flags, params.align, params.kind, params.address);
136 object->kind = params.kind; 135 if (result != NvResult::Success) {
137 object->addr = params.addr; 136 LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
138 object->status = Object::Status::Allocated; 137 return result;
139 138 }
140 std::memcpy(output.data(), &params, sizeof(params)); 139 std::memcpy(output.data(), &params, sizeof(params));
141 return NvResult::Success; 140 return result;
142} 141}
143 142
144NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) { 143NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -147,19 +146,20 @@ NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output)
147 146
148 LOG_WARNING(Service_NVDRV, "called"); 147 LOG_WARNING(Service_NVDRV, "called");
149 148
149 // See the comment in FromId for extra info on this function
150 if (!params.handle) { 150 if (!params.handle) {
151 LOG_ERROR(Service_NVDRV, "Handle is zero"); 151 LOG_CRITICAL(Service_NVDRV, "Error!");
152 return NvResult::BadValue; 152 return NvResult::BadValue;
153 } 153 }
154 154
155 auto object = GetObject(params.handle); 155 auto handle_description{file.GetHandle(params.handle)};
156 if (!object) { 156 if (!handle_description) {
157 LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); 157 LOG_CRITICAL(Service_NVDRV, "Error!");
158 return NvResult::BadValue; 158 return NvResult::AccessDenied; // This will always return EPERM irrespective of if the
159 // handle exists or not
159 } 160 }
160 161
161 params.id = object->id; 162 params.id = handle_description->id;
162
163 std::memcpy(output.data(), &params, sizeof(params)); 163 std::memcpy(output.data(), &params, sizeof(params));
164 return NvResult::Success; 164 return NvResult::Success;
165} 165}
@@ -168,26 +168,29 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output)
168 IocFromIdParams params; 168 IocFromIdParams params;
169 std::memcpy(&params, input.data(), sizeof(params)); 169 std::memcpy(&params, input.data(), sizeof(params));
170 170
171 LOG_WARNING(Service_NVDRV, "(STUBBED) called"); 171 LOG_WARNING(Service_NVDRV, "called, id:{}");
172 172
173 auto itr = std::find_if(handles.begin(), handles.end(), 173 // Handles and IDs are always the same value in nvmap however IDs can be used globally given the
174 [&](const auto& entry) { return entry.second->id == params.id; }); 174 // right permissions.
175 if (itr == handles.end()) { 175 // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and
176 LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); 176 // so this function just does simple validation and passes through the handle id.
177 if (!params.id) {
178 LOG_CRITICAL(Service_NVDRV, "Error!");
177 return NvResult::BadValue; 179 return NvResult::BadValue;
178 } 180 }
179 181
180 auto& object = itr->second; 182 auto handle_description{file.GetHandle(params.id)};
181 if (object->status != Object::Status::Allocated) { 183 if (!handle_description) {
182 LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); 184 LOG_CRITICAL(Service_NVDRV, "Error!");
183 return NvResult::BadValue; 185 return NvResult::BadValue;
184 } 186 }
185 187
186 itr->second->refcount++; 188 auto result = handle_description->Duplicate(false);
187 189 if (result != NvResult::Success) {
188 // Return the existing handle instead of creating a new one. 190 LOG_CRITICAL(Service_NVDRV, "Error!");
189 params.handle = itr->first; 191 return result;
190 192 }
193 params.handle = handle_description->id;
191 std::memcpy(output.data(), &params, sizeof(params)); 194 std::memcpy(output.data(), &params, sizeof(params));
192 return NvResult::Success; 195 return NvResult::Success;
193} 196}
@@ -198,35 +201,43 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
198 IocParamParams params; 201 IocParamParams params;
199 std::memcpy(&params, input.data(), sizeof(params)); 202 std::memcpy(&params, input.data(), sizeof(params));
200 203
201 LOG_DEBUG(Service_NVDRV, "(STUBBED) called type={}", params.param); 204 LOG_WARNING(Service_NVDRV, "called type={}", params.param);
202 205
203 auto object = GetObject(params.handle); 206 if (!params.handle) {
204 if (!object) { 207 LOG_CRITICAL(Service_NVDRV, "Error!");
205 LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
206 return NvResult::BadValue; 208 return NvResult::BadValue;
207 } 209 }
208 210
209 if (object->status != Object::Status::Allocated) { 211 auto handle_description{file.GetHandle(params.handle)};
210 LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle); 212 if (!handle_description) {
213 LOG_CRITICAL(Service_NVDRV, "Error!");
211 return NvResult::BadValue; 214 return NvResult::BadValue;
212 } 215 }
213 216
214 switch (static_cast<ParamTypes>(params.param)) { 217 switch (params.param) {
215 case ParamTypes::Size: 218 case HandleParameterType::Size:
216 params.result = object->size; 219 params.result = static_cast<u32_le>(handle_description->orig_size);
220 break;
221 case HandleParameterType::Alignment:
222 params.result = static_cast<u32_le>(handle_description->align);
217 break; 223 break;
218 case ParamTypes::Alignment: 224 case HandleParameterType::Base:
219 params.result = object->align; 225 params.result = static_cast<u32_le>(-22); // posix EINVAL
220 break; 226 break;
221 case ParamTypes::Heap: 227 case HandleParameterType::Heap:
222 // TODO(Subv): Seems to be a hardcoded value? 228 if (handle_description->allocated)
223 params.result = 0x40000000; 229 params.result = 0x40000000;
230 else
231 params.result = 0x40000000;
224 break; 232 break;
225 case ParamTypes::Kind: 233 case HandleParameterType::Kind:
226 params.result = object->kind; 234 params.result = handle_description->kind;
235 break;
236 case HandleParameterType::IsSharedMemMapped:
237 params.result = handle_description->is_shared_mem_mapped;
227 break; 238 break;
228 default: 239 default:
229 UNIMPLEMENTED(); 240 return NvResult::BadValue;
230 } 241 }
231 242
232 std::memcpy(output.data(), &params, sizeof(params)); 243 std::memcpy(output.data(), &params, sizeof(params));
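
The rewritten parameter query maps each HandleParameterType onto a field of the core handle. A compact sketch of that dispatch, with an illustrative HandleInfo struct standing in for NvCore::NvMap::Handle (field names follow the diff; everything else is a stand-in):

#include <cstdint>
#include <optional>

enum class HandleParameterType : std::uint32_t {
    Size = 1,
    Alignment = 2,
    Base = 3,
    Heap = 4,
    Kind = 5,
    IsSharedMemMapped = 6,
};

struct HandleInfo {
    std::uint64_t orig_size{};
    std::uint32_t align{};
    std::uint8_t kind{};
    bool allocated{};
    bool is_shared_mem_mapped{};
};

// Returns the ioctl result value, or nullopt for an unknown parameter (BadValue).
std::optional<std::uint32_t> QueryParam(const HandleInfo& h, HandleParameterType param) {
    switch (param) {
    case HandleParameterType::Size:
        return static_cast<std::uint32_t>(h.orig_size);
    case HandleParameterType::Alignment:
        return h.align;
    case HandleParameterType::Base:
        return static_cast<std::uint32_t>(-22); // POSIX EINVAL, as in the diff
    case HandleParameterType::Heap:
        return 0x40000000; // IOVMM heap mask; the diff sets this in both the allocated and unallocated branches
    case HandleParameterType::Kind:
        return h.kind;
    case HandleParameterType::IsSharedMemMapped:
        return h.is_shared_mem_mapped;
    default:
        return std::nullopt;
    }
}

Note that Base reports -EINVAL and the Heap value does not yet depend on the allocation state, so the allocated check in the diff currently picks the same constant either way.
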
@@ -234,46 +245,25 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
234} 245}
235 246
236NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) { 247NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
237 // TODO(Subv): These flags are unconfirmed.
238 enum FreeFlags {
239 Freed = 0,
240 NotFreedYet = 1,
241 };
242
243 IocFreeParams params; 248 IocFreeParams params;
244 std::memcpy(&params, input.data(), sizeof(params)); 249 std::memcpy(&params, input.data(), sizeof(params));
245 250
246 LOG_DEBUG(Service_NVDRV, "(STUBBED) called"); 251 LOG_WARNING(Service_NVDRV, "called");
247 252
248 auto itr = handles.find(params.handle); 253 if (!params.handle) {
249 if (itr == handles.end()) { 254 LOG_CRITICAL(Service_NVDRV, "Null handle freed?");
250 LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle); 255 return NvResult::Success;
251 return NvResult::BadValue;
252 }
253 if (!itr->second->refcount) {
254 LOG_ERROR(
255 Service_NVDRV,
256 "There is no references to this object. The object is already freed. handle={:08X}",
257 params.handle);
258 return NvResult::BadValue;
259 } 256 }
260 257
261 itr->second->refcount--; 258 if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
262 259 params.address = freeInfo->address;
263 params.size = itr->second->size; 260 params.size = static_cast<u32>(freeInfo->size);
264 261 params.flags = NvCore::NvMap::Handle::Flags{.map_uncached = freeInfo->was_uncached};
265 if (itr->second->refcount == 0) {
266 params.flags = Freed;
267 // The address of the nvmap is written to the output if we're finally freeing it, otherwise
268 // 0 is written.
269 params.address = itr->second->addr;
270 } else { 262 } else {
271 params.flags = NotFreedYet; 263 // This is possible when there are internal dups or other duplicates.
272 params.address = 0; 264 LOG_CRITICAL(Service_NVDRV, "Not freed");
273 } 265 }
274 266
275 handles.erase(params.handle);
276
277 std::memcpy(output.data(), &params, sizeof(params)); 267 std::memcpy(output.data(), &params, sizeof(params));
278 return NvResult::Success; 268 return NvResult::Success;
279} 269}
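
A rough model of the new free path (refcounting simplified; only FreeInfo's fields are taken from the diff): FreeHandle yields a FreeInfo only when the last duplicate is released, and that is the one case where the ioctl reports the address, size and cache flag back to the guest; otherwise the handle stays alive and the service just logs that nothing was freed.

#include <cstdint>
#include <map>
#include <optional>

struct FreeInfo {
    std::uint64_t address{};
    std::uint64_t size{};
    bool was_uncached{};
};

struct TrackedHandle {
    std::uint32_t dupes{1};
    FreeInfo info{};
};

class NvMapFileModel {
public:
    // Creation omitted; assume handles already holds the live handles.
    std::optional<FreeInfo> FreeHandle(std::uint32_t handle, bool /*internal_session*/) {
        const auto it = handles.find(handle);
        if (it == handles.end()) {
            return std::nullopt;
        }
        if (--it->second.dupes > 0) {
            return std::nullopt; // other dups still alive: "Not freed"
        }
        const FreeInfo info = it->second.info;
        handles.erase(it);
        return info; // last reference gone: report address/size/flags to the caller
    }

private:
    std::map<std::uint32_t, TrackedHandle> handles;
};
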
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index d5360d6e5..c22eb57a4 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -9,15 +9,23 @@
9#include "common/common_funcs.h" 9#include "common/common_funcs.h"
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "common/swap.h" 11#include "common/swap.h"
12#include "core/hle/service/nvdrv/core/nvmap.h"
12#include "core/hle/service/nvdrv/devices/nvdevice.h" 13#include "core/hle/service/nvdrv/devices/nvdevice.h"
13 14
15namespace Service::Nvidia::NvCore {
16class Container;
17} // namespace Service::Nvidia::NvCore
18
14namespace Service::Nvidia::Devices { 19namespace Service::Nvidia::Devices {
15 20
16class nvmap final : public nvdevice { 21class nvmap final : public nvdevice {
17public: 22public:
18 explicit nvmap(Core::System& system_); 23 explicit nvmap(Core::System& system_, NvCore::Container& container);
19 ~nvmap() override; 24 ~nvmap() override;
20 25
26 nvmap(nvmap const&) = delete;
27 nvmap& operator=(nvmap const&) = delete;
28
21 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 29 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
22 std::vector<u8>& output) override; 30 std::vector<u8>& output) override;
23 NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 31 NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -31,27 +39,16 @@ public:
31 /// Returns the allocated address of an nvmap object given its handle. 39 /// Returns the allocated address of an nvmap object given its handle.
32 VAddr GetObjectAddress(u32 handle) const; 40 VAddr GetObjectAddress(u32 handle) const;
33 41
34 /// Represents an nvmap object. 42 std::shared_ptr<NvCore::NvMap::Handle> GetObject(u32 handle) const;
35 struct Object {
36 enum class Status { Created, Allocated };
37 u32 id;
38 u32 size;
39 u32 flags;
40 u32 align;
41 u8 kind;
42 VAddr addr;
43 Status status;
44 u32 refcount;
45 u32 dma_map_addr;
46 };
47 43
48 std::shared_ptr<Object> GetObject(u32 handle) const { 44 enum class HandleParameterType : u32_le {
49 auto itr = handles.find(handle); 45 Size = 1,
50 if (itr != handles.end()) { 46 Alignment = 2,
51 return itr->second; 47 Base = 3,
52 } 48 Heap = 4,
53 return {}; 49 Kind = 5,
54 } 50 IsSharedMemMapped = 6
51 };
55 52
56private: 53private:
57 /// Id to use for the next handle that is created. 54 /// Id to use for the next handle that is created.
@@ -60,9 +57,6 @@ private:
60 /// Id to use for the next object that is created. 57 /// Id to use for the next object that is created.
61 u32 next_id = 0; 58 u32 next_id = 0;
62 59
63 /// Mapping of currently allocated handles to the objects they represent.
64 std::unordered_map<u32, std::shared_ptr<Object>> handles;
65
66 struct IocCreateParams { 60 struct IocCreateParams {
67 // Input 61 // Input
68 u32_le size{}; 62 u32_le size{};
@@ -83,11 +77,11 @@ private:
83 // Input 77 // Input
84 u32_le handle{}; 78 u32_le handle{};
85 u32_le heap_mask{}; 79 u32_le heap_mask{};
86 u32_le flags{}; 80 NvCore::NvMap::Handle::Flags flags{};
87 u32_le align{}; 81 u32_le align{};
88 u8 kind{}; 82 u8 kind{};
89 INSERT_PADDING_BYTES(7); 83 INSERT_PADDING_BYTES(7);
90 u64_le addr{}; 84 u64_le address{};
91 }; 85 };
92 static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size"); 86 static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size");
93 87
@@ -96,14 +90,14 @@ private:
96 INSERT_PADDING_BYTES(4); 90 INSERT_PADDING_BYTES(4);
97 u64_le address{}; 91 u64_le address{};
98 u32_le size{}; 92 u32_le size{};
99 u32_le flags{}; 93 NvCore::NvMap::Handle::Flags flags{};
100 }; 94 };
101 static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size"); 95 static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size");
102 96
103 struct IocParamParams { 97 struct IocParamParams {
104 // Input 98 // Input
105 u32_le handle{}; 99 u32_le handle{};
106 u32_le param{}; 100 HandleParameterType param{};
107 // Output 101 // Output
108 u32_le result{}; 102 u32_le result{};
109 }; 103 };
@@ -117,14 +111,15 @@ private:
117 }; 111 };
118 static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); 112 static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
119 113
120 u32 CreateObject(u32 size);
121
122 NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output); 114 NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
123 NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output); 115 NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
124 NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output); 116 NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
125 NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output); 117 NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
126 NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output); 118 NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
127 NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output); 119 NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);
120
121 NvCore::Container& container;
122 NvCore::NvMap& file;
128}; 123};
129 124
130} // namespace Service::Nvidia::Devices 125} // namespace Service::Nvidia::Devices
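
The parameter structs keep their byte-for-byte layouts while swapping raw u32 fields for the typed Flags and HandleParameterType. A quick layout sanity check with plain fixed-width stand-ins (yuzu's u32_le/u64_le swap types lay out the same way on little-endian hosts; the handle/padding prefix of IocFreeParams is inferred from its 24-byte assert rather than shown in this hunk):

#include <cstdint>

struct IocAllocParamsSketch {
    std::uint32_t handle{};
    std::uint32_t heap_mask{};
    std::uint32_t flags{};   // NvCore::NvMap::Handle::Flags in the real header
    std::uint32_t align{};
    std::uint8_t kind{};
    std::uint8_t padding[7]{};
    std::uint64_t address{}; // renamed from 'addr' in this commit
};
static_assert(sizeof(IocAllocParamsSketch) == 32, "IocAllocParams has wrong size");

struct IocFreeParamsSketch {
    std::uint32_t handle{};
    std::uint8_t padding[4]{};
    std::uint64_t address{};
    std::uint32_t size{};
    std::uint32_t flags{};
};
static_assert(sizeof(IocFreeParamsSketch) == 24, "IocFreeParams has wrong size");

struct IocParamParamsSketch {
    std::uint32_t handle{};
    std::uint32_t param{};   // HandleParameterType in the real header
    std::uint32_t result{};
};
static_assert(sizeof(IocParamParamsSketch) == 12, "IocParamParams has wrong size");
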
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 824c0e290..f4914d539 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -138,21 +138,18 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
138 138
139Module::Module(Core::System& system) 139Module::Module(Core::System& system)
140 : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} { 140 : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} {
141 auto nvmap_dev = std::make_shared<Devices::nvmap>(system); 141 devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, container);
142 devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev);
143 devices["/dev/nvhost-gpu"] = 142 devices["/dev/nvhost-gpu"] =
144 std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, events_interface, container); 143 std::make_shared<Devices::nvhost_gpu>(system, events_interface, container);
145 devices["/dev/nvhost-ctrl-gpu"] = 144 devices["/dev/nvhost-ctrl-gpu"] =
146 std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface); 145 std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
147 devices["/dev/nvmap"] = nvmap_dev; 146 devices["/dev/nvmap"] = std::make_shared<Devices::nvmap>(system, container);
148 devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev); 147 devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, container);
149 devices["/dev/nvhost-ctrl"] = 148 devices["/dev/nvhost-ctrl"] =
150 std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container); 149 std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
151 devices["/dev/nvhost-nvdec"] = 150 devices["/dev/nvhost-nvdec"] = std::make_shared<Devices::nvhost_nvdec>(system, container);
152 std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, container);
153 devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system); 151 devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
154 devices["/dev/nvhost-vic"] = 152 devices["/dev/nvhost-vic"] = std::make_shared<Devices::nvhost_vic>(system, container);
155 std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, container);
156} 153}
157 154
158Module::~Module() = default; 155Module::~Module() = default;
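
The nvdrv.cpp hunk shows the net effect of the series: Module no longer builds a Devices::nvmap first and threads its shared_ptr into the other devices; each device now takes the NvCore::Container and resolves the shared NvMap state from it. An illustrative-only reduction of that wiring, where every name other than those visible in the diff (including the GetNvMapFile accessor) is a stand-in:

#include <memory>
#include <string>
#include <unordered_map>

namespace sketch {

struct NvMapCore {}; // stands in for NvCore::NvMap

class Container { // stands in for NvCore::Container
public:
    NvMapCore& GetNvMapFile() { return nvmap; } // accessor name is an assumption
private:
    NvMapCore nvmap;
};

class Device { // stands in for Devices::nvdevice
public:
    explicit Device(Container& container_)
        : container{container_}, file{container_.GetNvMapFile()} {}
    virtual ~Device() = default;

protected:
    Container& container; // NvCore::Container& container;
    NvMapCore& file;      // NvCore::NvMap& file;
};

class NvMapDevice : public Device {
public:
    using Device::Device;
};

class Module {
public:
    Module() {
        // Every device shares the same container-backed NvMap state, so
        // /dev/nvmap no longer has to be created first and handed around.
        devices["/dev/nvmap"] = std::make_shared<NvMapDevice>(container);
    }

private:
    Container container;
    std::unordered_map<std::string, std::shared_ptr<Device>> devices;
};

} // namespace sketch
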