author     2023-12-29 09:50:04 +0100
committer  2024-01-18 21:12:30 -0500
commit     96fd1348aea9d70cb502a94cbd0412be6edb0189
tree       b264d2437dd4fda8c8257566bb5a5d8eb096131a /src/core
parent     SMMU: Fix Unregister on MultiAddress
GPU SMMU: Expand to 34 bits
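Reading the diff below: the SMMU device address space now spans 34 bits, and `PinHandle` gains a `low_area_pin` path for handles that apparently still need an address that fits in 32 bits. On such a pin, the handle's wider `d_address` is aliased into the Host1x GMMU at a `u32` virtual address obtained from `host1x.Allocator()`, cached in `pin_virt_address`, and returned to the caller; `UnmapHandle` later frees both the GMMU mapping and the allocation. The following is a minimal standalone sketch of that pattern only — `LowAreaAllocator`, `Gmmu`, `Handle`, `PinLowArea`, and `UnmapLowArea` are hypothetical stand-ins invented for illustration, not yuzu's actual types or API.

```cpp
// Sketch of the "low area" pin pattern: lazily create a 32-bit alias for a
// device address that no longer fits in 32 bits, and tear it down on unmap.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <unordered_map>

using DAddr = std::uint64_t; // device (SMMU) address, up to 34 bits after this change
using GpuVa = std::uint32_t; // 32-bit "low area" virtual address

class LowAreaAllocator { // stand-in for host1x.Allocator()
public:
    GpuVa Allocate(std::uint32_t size) {
        const GpuVa address = next_;
        next_ += size;
        return address;
    }
    void Free(GpuVa, std::uint32_t) {} // toy bump allocator: nothing to reclaim

private:
    GpuVa next_ = 0x1000;
};

class Gmmu { // stand-in for host1x.GMMU()
public:
    struct Mapping {
        DAddr target;
        std::size_t size;
    };
    void Map(GpuVa va, DAddr da, std::size_t size) { mappings_[va] = Mapping{da, size}; }
    void Unmap(GpuVa va, std::size_t /*size*/) { mappings_.erase(va); }

private:
    std::unordered_map<GpuVa, Mapping> mappings_;
};

struct Handle {
    DAddr d_address = 0;        // where the SMMU already mapped the buffer
    std::size_t aligned_size = 0;
    GpuVa pin_virt_address = 0; // cached low-area alias; 0 means "not mapped"
};

// Mirrors the map_low_area lambda added to PinHandle: create the alias once per handle.
DAddr PinLowArea(Handle& handle, LowAreaAllocator& allocator, Gmmu& gmmu) {
    if (handle.pin_virt_address == 0) {
        const GpuVa va = allocator.Allocate(static_cast<std::uint32_t>(handle.aligned_size));
        gmmu.Map(va, handle.d_address, handle.aligned_size);
        handle.pin_virt_address = va;
    }
    return static_cast<DAddr>(handle.pin_virt_address);
}

// Mirrors the new block in UnmapHandle: drop the alias before the SMMU mapping goes away.
void UnmapLowArea(Handle& handle, LowAreaAllocator& allocator, Gmmu& gmmu) {
    if (handle.pin_virt_address != 0) {
        gmmu.Unmap(handle.pin_virt_address, handle.aligned_size);
        allocator.Free(handle.pin_virt_address, static_cast<std::uint32_t>(handle.aligned_size));
        handle.pin_virt_address = 0;
    }
}

int main() {
    LowAreaAllocator allocator;
    Gmmu gmmu;
    Handle handle{/*d_address=*/0x3'0000'0000ULL, /*aligned_size=*/0x1000}; // above 4 GiB

    const DAddr low = PinLowArea(handle, allocator, gmmu);
    std::cout << std::hex << "low-area alias: 0x" << low << '\n'; // fits in 32 bits
    UnmapLowArea(handle, allocator, gmmu);
}
```

Under these assumptions the handle keeps a single cached alias however many times it is pinned, which matches the `pin_virt_address == 0` guard in the diff below.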
Diffstat (limited to 'src/core')

 src/core/hle/service/nvdrv/core/nvmap.cpp                  | 38 ++++++++++++++++++++++++++++++++++----
 src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp |  1 -
 2 files changed, 34 insertions(+), 5 deletions(-)
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index e4168a37c..0b2ddd980 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -80,6 +80,15 @@ void NvMap::UnmapHandle(Handle& handle_description) {
         handle_description.unmap_queue_entry.reset();
     }
 
+    // Free and unmap the handle from Host1x GMMU
+    if (handle_description.pin_virt_address) {
+        host1x.GMMU().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
+                            handle_description.aligned_size);
+        host1x.Allocator().Free(handle_description.pin_virt_address,
+                                static_cast<u32>(handle_description.aligned_size));
+        handle_description.pin_virt_address = 0;
+    }
+
     // Free and unmap the handle from the SMMU
     auto& smmu = host1x.MemoryManager();
     smmu.Unmap(handle_description.d_address, handle_description.aligned_size);
@@ -141,6 +150,17 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are
     }
 
     std::scoped_lock lock(handle_description->mutex);
+    const auto map_low_area = [&] {
+        if (handle_description->pin_virt_address == 0) {
+            auto& gmmu_allocator = host1x.Allocator();
+            auto& gmmu = host1x.GMMU();
+            u32 address =
+                gmmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size));
+            gmmu.Map(static_cast<GPUVAddr>(address), handle_description->d_address,
+                     handle_description->aligned_size);
+            handle_description->pin_virt_address = address;
+        }
+    };
     if (!handle_description->pins) {
         // If we're in the unmap queue we can just remove ourselves and return since we're already
         // mapped
@@ -152,6 +172,12 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are
                 unmap_queue.erase(*handle_description->unmap_queue_entry);
                 handle_description->unmap_queue_entry.reset();
 
+                if (low_area_pin) {
+                    map_low_area();
+                    handle_description->pins++;
+                    return static_cast<DAddr>(handle_description->pin_virt_address);
+                }
+
                 handle_description->pins++;
                 return handle_description->d_address;
             }
@@ -162,10 +188,7 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are
         DAddr address{};
         auto& smmu = host1x.MemoryManager();
         auto* session = core.GetSession(session_id);
-
-        auto allocate = std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1);
-        //: std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1);
-        while ((address = allocate(static_cast<size_t>(handle_description->aligned_size))) == 0) {
+        while ((address = smmu.Allocate(handle_description->aligned_size)) == 0) {
             // Free handles until the allocation succeeds
             std::scoped_lock queueLock(unmap_queue_lock);
             if (auto freeHandleDesc{unmap_queue.front()}) {
@@ -185,7 +208,14 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are
                  session->smmu_id);
     }
 
+    if (low_area_pin) {
+        map_low_area();
+    }
+
     handle_description->pins++;
+    if (low_area_pin) {
+        return static_cast<DAddr>(handle_description->pin_virt_address);
+    }
     return handle_description->d_address;
 }
 
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 78bc5f3c4..0b6aa9993 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -95,7 +95,6 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
     offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
 
     auto& gpu = system.GPU();
-    //auto& device_memory = system.Host1x().MemoryManager();
     auto* session = core.GetSession(sessions[fd]);
 
     if (gpu.UseNvdec()) {