author     Fernando Sahmkow    2022-01-30 22:26:01 +0100
committer  Fernando Sahmkow    2022-10-06 21:00:52 +0200
commit     2931101e6f5aa755566ef40f6e6dc71909fd3e92 (patch)
tree       76e847786e355e24a136562d42177b895a03315e /src/core/hle
parent     VideoCore: Refactor syncing. (diff)
NVDRV: Refactor Host1x
Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/service/nvdrv/core/container.cpp              |  8
-rw-r--r--  src/core/hle/service/nvdrv/core/container.h                | 10
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.cpp                  | 33
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.h                    | 18
-rw-r--r--  src/core/hle/service/nvdrv/core/syncpoint_manager.cpp      |  6
-rw-r--r--  src/core/hle/service/nvdrv/core/syncpoint_manager.h        | 12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | 37
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.cpp                       |  2
8 files changed, 62 insertions, 64 deletions
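
Note: the hunks below mostly re-wire constructor parameters. NvMap, SyncpointManager and the NvCore Container now receive a Tegra::Host1x::Host1x& instead of a Tegra::GPU&, and nvdrv obtains that reference from system.Host1x(). The following minimal, compilable sketch shows only that wiring for orientation; the empty Host1x body is a stand-in stub, not the real class from video_core/host1x/host1x.h, and the member lists are abbreviated.

// Sketch of the new dependency wiring (stand-in types, abbreviated members).
namespace Tegra::Host1x {
class Host1x {}; // stub; the real class lives in video_core/host1x/host1x.h
} // namespace Tegra::Host1x

namespace Service::Nvidia::NvCore {

class NvMap {
public:
    explicit NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
private:
    Tegra::Host1x::Host1x& host1x; // previously NvMap held no such reference
};

class SyncpointManager {
public:
    explicit SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
private:
    Tegra::Host1x::Host1x& host1x; // previously Tegra::GPU& gpu
};

// Container forwards the same Host1x reference to both members, as in container.cpp below.
struct ContainerImpl {
    explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {}
    NvMap file;
    SyncpointManager manager;
};

} // namespace Service::Nvidia::NvCore
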
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
index 97b5b2c86..fbd66f001 100644
--- a/src/core/hle/service/nvdrv/core/container.cpp
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -6,18 +6,18 @@
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::NvCore {
 
 struct ContainerImpl {
-    ContainerImpl(Tegra::GPU& gpu_) : file{}, manager{gpu_} {}
+    ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {}
     NvMap file;
     SyncpointManager manager;
 };
 
-Container::Container(Tegra::GPU& gpu_) {
-    impl = std::make_unique<ContainerImpl>(gpu_);
+Container::Container(Tegra::Host1x::Host1x& host1x_) {
+    impl = std::make_unique<ContainerImpl>(host1x_);
 }
 
 Container::~Container() = default;
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
index 91ac2305a..da75d74ff 100644
--- a/src/core/hle/service/nvdrv/core/container.h
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -8,8 +8,12 @@
 #include <memory>
 
 namespace Tegra {
-class GPU;
-}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+} // namespace Tegra
 
 namespace Service::Nvidia::NvCore {
 
@@ -20,7 +24,7 @@ struct ContainerImpl;
 
 class Container {
 public:
-    Container(Tegra::GPU& gpu_);
+    Container(Tegra::Host1x::Host1x& host1x);
     ~Container();
 
     NvMap& GetNvMapFile();
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index 1126daeb5..9acec7ba6 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -7,6 +7,7 @@
 #include "common/logging/log.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/memory.h"
+#include "video_core/host1x/host1x.h"
 
 using Core::Memory::YUZU_PAGESIZE;
 
@@ -61,7 +62,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
     return NvResult::Success;
 }
 
-NvMap::NvMap() = default;
+NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
 
 void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
     std::scoped_lock lock(handles_lock);
@@ -77,12 +78,11 @@ void NvMap::UnmapHandle(Handle& handle_description) {
     }
 
     // Free and unmap the handle from the SMMU
-    /*
-    state.soc->smmu.Unmap(handle_description.pin_virt_address,
-                          static_cast<u32>(handle_description.aligned_size));
-    smmuAllocator.Free(handle_description.pin_virt_address,
-                       static_cast<u32>(handle_description.aligned_size)); handle_description.pin_virt_address = 0;
-    */
+    host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
+                                 handle_description.aligned_size);
+    host1x.Allocator().Free(handle_description.pin_virt_address,
+                            static_cast<u32>(handle_description.aligned_size));
+    handle_description.pin_virt_address = 0;
 }
 
 bool NvMap::TryRemoveHandle(const Handle& handle_description) {
@@ -131,12 +131,9 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
 }
 
 u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
-    UNIMPLEMENTED_MSG("pinning");
-    return 0;
-    /*
-    auto handle_description{GetHandle(handle)};
-    if (!handle_description)
-        [[unlikely]] return 0;
+    auto handle_description{GetHandle(handle)};
+    if (!handle_description) [[unlikely]]
+        return 0;
 
     std::scoped_lock lock(handle_description->mutex);
     if (!handle_description->pins) {
@@ -157,8 +154,10 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
 
         // If not then allocate some space and map it
         u32 address{};
+        auto& smmu_allocator = host1x.Allocator();
+        auto& smmu_memory_manager = host1x.MemoryManager();
         while (!(address =
-                     smmuAllocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
+                     smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
             // Free handles until the allocation succeeds
             std::scoped_lock queueLock(unmap_queue_lock);
             if (auto freeHandleDesc{unmap_queue.front()}) {
@@ -172,19 +171,16 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
             }
         }
 
-        state.soc->smmu.Map(address, handle_description->GetPointer(),
-                            static_cast<u32>(handle_description->aligned_size));
+        smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
+                                handle_description->aligned_size);
         handle_description->pin_virt_address = address;
     }
 
     handle_description->pins++;
     return handle_description->pin_virt_address;
-    */
 }
 
 void NvMap::UnpinHandle(Handle::Id handle) {
-    UNIMPLEMENTED_MSG("Unpinning");
-    /*
     auto handle_description{GetHandle(handle)};
     if (!handle_description)
         return;
@@ -199,7 +195,6 @@ void NvMap::UnpinHandle(Handle::Id handle) {
         unmap_queue.push_back(handle_description);
         handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
     }
-    */
 }
 
 std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index 5e6c73589..5acdc961e 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -15,6 +15,14 @@
 #include "common/common_types.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 
+namespace Tegra {
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+} // namespace Tegra
+
 namespace Service::Nvidia::NvCore {
 /**
  * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
@@ -90,15 +98,17 @@ public:
     };
 
 private:
-    std::list<std::shared_ptr<Handle>> unmap_queue;
-    std::mutex unmap_queue_lock; //!< Protects access to `unmap_queue`
+    std::list<std::shared_ptr<Handle>> unmap_queue{};
+    std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
 
-    std::unordered_map<Handle::Id, std::shared_ptr<Handle>> handles; //!< Main owning map of handles
+    std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
+        handles{}; //!< Main owning map of handles
     std::mutex handles_lock; //!< Protects access to `handles`
 
     static constexpr u32 HandleIdIncrement{
         4}; //!< Each new handle ID is an increment of 4 from the previous
     std::atomic<u32> next_handle_id{HandleIdIncrement};
+    Tegra::Host1x::Host1x& host1x;
 
     void AddHandle(std::shared_ptr<Handle> handle);
 
@@ -125,7 +135,7 @@ public:
         bool was_uncached; //!< If the handle was allocated as uncached
     };
 
-    NvMap();
+    NvMap(Tegra::Host1x::Host1x& host1x);
 
     /**
      * @brief Creates an unallocated handle of the given size
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
index ff6cbb37e..61e00448c 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -3,16 +3,16 @@
 
 #include "common/assert.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::NvCore {
 
-SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {}
+SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
 
 SyncpointManager::~SyncpointManager() = default;
 
 u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) {
-    syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id);
+    syncpoints[syncpoint_id].min = host1x.GetSyncpointManager().GetHostSyncpointValue(syncpoint_id);
     return GetSyncpointMin(syncpoint_id);
 }
 
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
index cf7f0b4be..f332edc6e 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.h
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
@@ -10,14 +10,18 @@
 #include "core/hle/service/nvdrv/nvdata.h"
 
 namespace Tegra {
-class GPU;
-}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+} // namespace Tegra
 
 namespace Service::Nvidia::NvCore {
 
 class SyncpointManager final {
 public:
-    explicit SyncpointManager(Tegra::GPU& gpu_);
+    explicit SyncpointManager(Tegra::Host1x::Host1x& host1x);
     ~SyncpointManager();
 
     /**
@@ -78,7 +82,7 @@ private:
 
     std::array<Syncpoint, MaxSyncPoints> syncpoints{};
 
-    Tegra::GPU& gpu;
+    Tegra::Host1x::Host1x& host1x;
 };
 
 } // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 77e6a1cd6..b17589aa3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -13,6 +13,7 @@
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
 #include "core/memory.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_base.h"
 
@@ -140,29 +141,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
 
     SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
 
-    auto& gpu = system.GPU();
-
     for (auto& cmd_buffer : cmd_buffer_handles) {
-        auto object{nvmap.GetHandle(cmd_buffer.map_handle)};
-        if (!object) {
-            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
-            std::memcpy(output.data(), &params, output.size());
-            return NvResult::InvalidState;
-        }
-        if (object->dma_map_addr == 0) {
-            // NVDEC and VIC memory is in the 32-bit address space
-            // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
-            const GPUVAddr low_addr =
-                gpu.MemoryManager().MapAllocate32(object->address, object->size);
-            object->dma_map_addr = static_cast<u32>(low_addr);
-            // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
-            ASSERT(object->dma_map_addr == low_addr);
-        }
-        if (!object->dma_map_addr) {
-            LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
-        } else {
-            cmd_buffer.map_address = static_cast<u32_le>(object->dma_map_addr);
-        }
+        cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
     }
     std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
     std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
@@ -172,11 +152,16 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
 }
 
 NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
-    // This is intntionally stubbed.
-    // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame
-    // addresses, and risk invalidating data before the async GPU thread is done with it
+    IoctlMapBuffer params{};
+    std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
+    std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
+
+    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
+    for (auto& cmd_buffer : cmd_buffer_handles) {
+        nvmap.UnpinHandle(cmd_buffer.map_handle);
+    }
+
     std::memset(output.data(), 0, output.size());
-    LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
     return NvResult::Success;
 }
 
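
Note: MapBuffer/UnmapBuffer now reduce to a pin/unpin pass over the handle list. That is safe for in-flight decodes because pins are reference counted and UnpinHandle only queues the mapping for later reclamation rather than unmapping it immediately (see the nvmap.cpp hunks above). A condensed, self-contained sketch of that life-cycle follows; the types are simplified stand-ins, not the real NvMap implementation.

// Simplified pin/unpin life-cycle (hypothetical PinTracker, stand-in Handle).
#include <cstdint>
#include <list>
#include <memory>

struct Handle {
    std::uint32_t pins{};             // reference count of active pinners
    std::uint32_t pin_virt_address{}; // SMMU address while pinned
};

class PinTracker {
public:
    std::uint32_t Pin(std::shared_ptr<Handle> h) {
        if (h->pins == 0 && h->pin_virt_address == 0) {
            h->pin_virt_address = 0x1000; // placeholder for the SMMU allocate + map step
        }
        ++h->pins;
        return h->pin_virt_address;
    }

    void Unpin(std::shared_ptr<Handle> h) {
        if (--h->pins == 0) {
            // Not unmapped right away: the mapping is queued so it can be reused,
            // and is only reclaimed when the allocator runs out of space.
            unmap_queue.push_back(std::move(h));
        }
    }

private:
    std::list<std::shared_ptr<Handle>> unmap_queue;
};
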
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index b39a4c6db..8a9f3c717 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -71,7 +71,7 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 }
 
 Module::Module(Core::System& system)
-    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} {
+    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
     builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
         std::shared_ptr<Devices::nvdevice> device =
             std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);