Diffstat (limited to 'src/core')
65 files changed, 2191 insertions, 649 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index a630c257f..f75b5e10a 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
| @@ -37,6 +37,8 @@ add_library(core STATIC | |||
| 37 | debugger/gdbstub_arch.h | 37 | debugger/gdbstub_arch.h |
| 38 | debugger/gdbstub.cpp | 38 | debugger/gdbstub.cpp |
| 39 | debugger/gdbstub.h | 39 | debugger/gdbstub.h |
| 40 | device_memory_manager.h | ||
| 41 | device_memory_manager.inc | ||
| 40 | device_memory.cpp | 42 | device_memory.cpp |
| 41 | device_memory.h | 43 | device_memory.h |
| 42 | file_sys/fssystem/fs_i_storage.h | 44 | file_sys/fssystem/fs_i_storage.h |
| @@ -627,6 +629,8 @@ add_library(core STATIC | |||
| 627 | hle/service/ns/pdm_qry.h | 629 | hle/service/ns/pdm_qry.h |
| 628 | hle/service/nvdrv/core/container.cpp | 630 | hle/service/nvdrv/core/container.cpp |
| 629 | hle/service/nvdrv/core/container.h | 631 | hle/service/nvdrv/core/container.h |
| 632 | hle/service/nvdrv/core/heap_mapper.cpp | ||
| 633 | hle/service/nvdrv/core/heap_mapper.h | ||
| 630 | hle/service/nvdrv/core/nvmap.cpp | 634 | hle/service/nvdrv/core/nvmap.cpp |
| 631 | hle/service/nvdrv/core/nvmap.h | 635 | hle/service/nvdrv/core/nvmap.h |
| 632 | hle/service/nvdrv/core/syncpoint_manager.cpp | 636 | hle/service/nvdrv/core/syncpoint_manager.cpp |
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 33afc6049..dd9de948c 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
| @@ -28,6 +28,7 @@ | |||
| 28 | #include "core/file_sys/savedata_factory.h" | 28 | #include "core/file_sys/savedata_factory.h" |
| 29 | #include "core/file_sys/vfs_concat.h" | 29 | #include "core/file_sys/vfs_concat.h" |
| 30 | #include "core/file_sys/vfs_real.h" | 30 | #include "core/file_sys/vfs_real.h" |
| 31 | #include "core/gpu_dirty_memory_manager.h" | ||
| 31 | #include "core/hle/kernel/k_memory_manager.h" | 32 | #include "core/hle/kernel/k_memory_manager.h" |
| 32 | #include "core/hle/kernel/k_process.h" | 33 | #include "core/hle/kernel/k_process.h" |
| 33 | #include "core/hle/kernel/k_resource_limit.h" | 34 | #include "core/hle/kernel/k_resource_limit.h" |
| @@ -606,6 +607,9 @@ struct System::Impl { | |||
| 606 | std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{}; | 607 | std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{}; |
| 607 | std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_cpu{}; | 608 | std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_cpu{}; |
| 608 | 609 | ||
| 610 | std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> | ||
| 611 | gpu_dirty_memory_managers; | ||
| 612 | |||
| 609 | std::deque<std::vector<u8>> user_channel; | 613 | std::deque<std::vector<u8>> user_channel; |
| 610 | }; | 614 | }; |
| 611 | 615 | ||
| @@ -692,8 +696,14 @@ size_t System::GetCurrentHostThreadID() const { | |||
| 692 | return impl->kernel.GetCurrentHostThreadID(); | 696 | return impl->kernel.GetCurrentHostThreadID(); |
| 693 | } | 697 | } |
| 694 | 698 | ||
| 695 | void System::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) { | 699 | std::span<GPUDirtyMemoryManager> System::GetGPUDirtyMemoryManager() { |
| 696 | return this->ApplicationProcess()->GatherGPUDirtyMemory(callback); | 700 | return impl->gpu_dirty_memory_managers; |
| 701 | } | ||
| 702 | |||
| 703 | void System::GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback) { | ||
| 704 | for (auto& manager : impl->gpu_dirty_memory_managers) { | ||
| 705 | manager.Gather(callback); | ||
| 706 | } | ||
| 697 | } | 707 | } |
| 698 | 708 | ||
| 699 | PerfStatsResults System::GetAndResetPerfStats() { | 709 | PerfStatsResults System::GetAndResetPerfStats() { |
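Note on the interface change above: the dirty-memory callback now receives physical addresses and is taken by non-const lvalue reference. A minimal caller sketch (the GPU-side consumer is not part of this excerpt, so `InvalidateGpuPhysicalRange` and `system` are illustrative names only):

// Hypothetical consumer of System::GatherGPUDirtyMemory after this change.
std::function<void(PAddr, size_t)> on_dirty = [](PAddr addr, size_t size) {
    InvalidateGpuPhysicalRange(addr, size); // forward each coalesced physical range to the GPU side
};
system.GatherGPUDirtyMemory(on_dirty); // must be an lvalue, the parameter is a reference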
diff --git a/src/core/core.h b/src/core/core.h
index 7d49e5028..183410602 100644
--- a/src/core/core.h
+++ b/src/core/core.h
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <functional> | 8 | #include <functional> |
| 9 | #include <memory> | 9 | #include <memory> |
| 10 | #include <mutex> | 10 | #include <mutex> |
| 11 | #include <span> | ||
| 11 | #include <string> | 12 | #include <string> |
| 12 | #include <vector> | 13 | #include <vector> |
| 13 | 14 | ||
| @@ -112,6 +113,7 @@ class CpuManager; | |||
| 112 | class Debugger; | 113 | class Debugger; |
| 113 | class DeviceMemory; | 114 | class DeviceMemory; |
| 114 | class ExclusiveMonitor; | 115 | class ExclusiveMonitor; |
| 116 | class GPUDirtyMemoryManager; | ||
| 115 | class PerfStats; | 117 | class PerfStats; |
| 116 | class Reporter; | 118 | class Reporter; |
| 117 | class SpeedLimiter; | 119 | class SpeedLimiter; |
| @@ -220,7 +222,9 @@ public: | |||
| 220 | /// Prepare the core emulation for a reschedule | 222 | /// Prepare the core emulation for a reschedule |
| 221 | void PrepareReschedule(u32 core_index); | 223 | void PrepareReschedule(u32 core_index); |
| 222 | 224 | ||
| 223 | void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback); | 225 | std::span<GPUDirtyMemoryManager> GetGPUDirtyMemoryManager(); |
| 226 | |||
| 227 | void GatherGPUDirtyMemory(std::function<void(PAddr, size_t)>& callback); | ||
| 224 | 228 | ||
| 225 | [[nodiscard]] size_t GetCurrentHostThreadID() const; | 229 | [[nodiscard]] size_t GetCurrentHostThreadID() const; |
| 226 | 230 | ||
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index 13388b73e..11bf0e326 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
| @@ -32,6 +32,12 @@ public: | |||
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | template <typename T> | 34 | template <typename T> |
| 35 | PAddr GetRawPhysicalAddr(const T* ptr) const { | ||
| 36 | return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) - | ||
| 37 | reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())); | ||
| 38 | } | ||
| 39 | |||
| 40 | template <typename T> | ||
| 35 | T* GetPointer(Common::PhysicalAddress addr) { | 41 | T* GetPointer(Common::PhysicalAddress addr) { |
| 36 | return reinterpret_cast<T*>(buffer.BackingBasePointer() + | 42 | return reinterpret_cast<T*>(buffer.BackingBasePointer() + |
| 37 | (GetInteger(addr) - DramMemoryMap::Base)); | 43 | (GetInteger(addr) - DramMemoryMap::Base)); |
| @@ -43,6 +49,16 @@ public: | |||
| 43 | (GetInteger(addr) - DramMemoryMap::Base)); | 49 | (GetInteger(addr) - DramMemoryMap::Base)); |
| 44 | } | 50 | } |
| 45 | 51 | ||
| 52 | template <typename T> | ||
| 53 | T* GetPointerFromRaw(PAddr addr) { | ||
| 54 | return reinterpret_cast<T*>(buffer.BackingBasePointer() + addr); | ||
| 55 | } | ||
| 56 | |||
| 57 | template <typename T> | ||
| 58 | const T* GetPointerFromRaw(PAddr addr) const { | ||
| 59 | return reinterpret_cast<T*>(buffer.BackingBasePointer() + addr); | ||
| 60 | } | ||
| 61 | |||
| 46 | Common::HostMemory buffer; | 62 | Common::HostMemory buffer; |
| 47 | }; | 63 | }; |
| 48 | 64 | ||
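The new "raw" helpers added here operate on plain offsets from the backing buffer (unlike GetPointer, they do not apply the DramMemoryMap::Base bias), so they invert each other. A small sketch of the intended round trip, assuming an existing DeviceMemory instance and a hypothetical mapped address:

u8* host_ptr = device_memory.GetPointer<u8>(some_dram_address); // some_dram_address is illustrative
const PAddr raw = device_memory.GetRawPhysicalAddr(host_ptr);   // offset from the backing base pointer
u8* same_ptr = device_memory.GetPointerFromRaw<u8>(raw);        // maps that offset back to a host pointer
// same_ptr == host_ptr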
diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h
new file mode 100644
index 000000000..ffeed46cc
--- /dev/null
+++ b/src/core/device_memory_manager.h
| @@ -0,0 +1,211 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <atomic> | ||
| 8 | #include <deque> | ||
| 9 | #include <memory> | ||
| 10 | #include <mutex> | ||
| 11 | |||
| 12 | #include "common/common_types.h" | ||
| 13 | #include "common/scratch_buffer.h" | ||
| 14 | #include "common/virtual_buffer.h" | ||
| 15 | |||
| 16 | namespace Core { | ||
| 17 | |||
| 18 | constexpr size_t DEVICE_PAGEBITS = 12ULL; | ||
| 19 | constexpr size_t DEVICE_PAGESIZE = 1ULL << DEVICE_PAGEBITS; | ||
| 20 | constexpr size_t DEVICE_PAGEMASK = DEVICE_PAGESIZE - 1ULL; | ||
| 21 | |||
| 22 | class DeviceMemory; | ||
| 23 | |||
| 24 | namespace Memory { | ||
| 25 | class Memory; | ||
| 26 | } | ||
| 27 | |||
| 28 | template <typename DTraits> | ||
| 29 | struct DeviceMemoryManagerAllocator; | ||
| 30 | |||
| 31 | struct Asid { | ||
| 32 | size_t id; | ||
| 33 | }; | ||
| 34 | |||
| 35 | template <typename Traits> | ||
| 36 | class DeviceMemoryManager { | ||
| 37 | using DeviceInterface = typename Traits::DeviceInterface; | ||
| 38 | using DeviceMethods = typename Traits::DeviceMethods; | ||
| 39 | |||
| 40 | public: | ||
| 41 | DeviceMemoryManager(const DeviceMemory& device_memory); | ||
| 42 | ~DeviceMemoryManager(); | ||
| 43 | |||
| 44 | void BindInterface(DeviceInterface* device_inter); | ||
| 45 | |||
| 46 | DAddr Allocate(size_t size); | ||
| 47 | void AllocateFixed(DAddr start, size_t size); | ||
| 48 | void Free(DAddr start, size_t size); | ||
| 49 | |||
| 50 | void Map(DAddr address, VAddr virtual_address, size_t size, Asid asid, bool track = false); | ||
| 51 | |||
| 52 | void Unmap(DAddr address, size_t size); | ||
| 53 | |||
| 54 | void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, Asid asid); | ||
| 55 | void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, Asid asid) { | ||
| 56 | std::scoped_lock lk(mapping_guard); | ||
| 57 | TrackContinuityImpl(address, virtual_address, size, asid); | ||
| 58 | } | ||
| 59 | |||
| 60 | // Write / Read | ||
| 61 | template <typename T> | ||
| 62 | T* GetPointer(DAddr address); | ||
| 63 | |||
| 64 | template <typename T> | ||
| 65 | const T* GetPointer(DAddr address) const; | ||
| 66 | |||
| 67 | template <typename Func> | ||
| 68 | void ApplyOpOnPAddr(PAddr address, Common::ScratchBuffer<u32>& buffer, Func&& operation) { | ||
| 69 | DAddr subbits = static_cast<DAddr>(address & page_mask); | ||
| 70 | const u32 base = compressed_device_addr[(address >> page_bits)]; | ||
| 71 | if ((base >> MULTI_FLAG_BITS) == 0) [[likely]] { | ||
| 72 | const DAddr d_address = (static_cast<DAddr>(base) << page_bits) + subbits; | ||
| 73 | operation(d_address); | ||
| 74 | return; | ||
| 75 | } | ||
| 76 | InnerGatherDeviceAddresses(buffer, address); | ||
| 77 | for (u32 value : buffer) { | ||
| 78 | operation((static_cast<DAddr>(value) << page_bits) + subbits); | ||
| 79 | } | ||
| 80 | } | ||
| 81 | |||
| 82 | template <typename Func> | ||
| 83 | void ApplyOpOnPointer(const u8* p, Common::ScratchBuffer<u32>& buffer, Func&& operation) { | ||
| 84 | PAddr address = GetRawPhysicalAddr<u8>(p); | ||
| 85 | ApplyOpOnPAddr(address, buffer, operation); | ||
| 86 | } | ||
| 87 | |||
| 88 | PAddr GetPhysicalRawAddressFromDAddr(DAddr address) const { | ||
| 89 | PAddr subbits = static_cast<PAddr>(address & page_mask); | ||
| 90 | auto paddr = compressed_physical_ptr[(address >> page_bits)]; | ||
| 91 | if (paddr == 0) { | ||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | return (static_cast<PAddr>(paddr - 1) << page_bits) + subbits; | ||
| 95 | } | ||
| 96 | |||
| 97 | template <typename T> | ||
| 98 | void Write(DAddr address, T value); | ||
| 99 | |||
| 100 | template <typename T> | ||
| 101 | T Read(DAddr address) const; | ||
| 102 | |||
| 103 | u8* GetSpan(const DAddr src_addr, const std::size_t size); | ||
| 104 | const u8* GetSpan(const DAddr src_addr, const std::size_t size) const; | ||
| 105 | |||
| 106 | void ReadBlock(DAddr address, void* dest_pointer, size_t size); | ||
| 107 | void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size); | ||
| 108 | void WriteBlock(DAddr address, const void* src_pointer, size_t size); | ||
| 109 | void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size); | ||
| 110 | |||
| 111 | Asid RegisterProcess(Memory::Memory* memory); | ||
| 112 | void UnregisterProcess(Asid id); | ||
| 113 | |||
| 114 | void UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta); | ||
| 115 | |||
| 116 | static constexpr size_t AS_BITS = Traits::device_virtual_bits; | ||
| 117 | |||
| 118 | private: | ||
| 119 | static constexpr size_t device_virtual_bits = Traits::device_virtual_bits; | ||
| 120 | static constexpr size_t device_as_size = 1ULL << device_virtual_bits; | ||
| 121 | static constexpr size_t physical_min_bits = 32; | ||
| 122 | static constexpr size_t physical_max_bits = 33; | ||
| 123 | static constexpr size_t page_bits = 12; | ||
| 124 | static constexpr size_t page_size = 1ULL << page_bits; | ||
| 125 | static constexpr size_t page_mask = page_size - 1ULL; | ||
| 126 | static constexpr u32 physical_address_base = 1U << page_bits; | ||
| 127 | static constexpr u32 MULTI_FLAG_BITS = 31; | ||
| 128 | static constexpr u32 MULTI_FLAG = 1U << MULTI_FLAG_BITS; | ||
| 129 | static constexpr u32 MULTI_MASK = ~MULTI_FLAG; | ||
| 130 | |||
| 131 | template <typename T> | ||
| 132 | T* GetPointerFromRaw(PAddr addr) { | ||
| 133 | return reinterpret_cast<T*>(physical_base + addr); | ||
| 134 | } | ||
| 135 | |||
| 136 | template <typename T> | ||
| 137 | const T* GetPointerFromRaw(PAddr addr) const { | ||
| 138 | return reinterpret_cast<T*>(physical_base + addr); | ||
| 139 | } | ||
| 140 | |||
| 141 | template <typename T> | ||
| 142 | PAddr GetRawPhysicalAddr(const T* ptr) const { | ||
| 143 | return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) - physical_base); | ||
| 144 | } | ||
| 145 | |||
| 146 | void WalkBlock(const DAddr addr, const std::size_t size, auto on_unmapped, auto on_memory, | ||
| 147 | auto increment); | ||
| 148 | |||
| 149 | void InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer, PAddr address); | ||
| 150 | |||
| 151 | std::unique_ptr<DeviceMemoryManagerAllocator<Traits>> impl; | ||
| 152 | |||
| 153 | const uintptr_t physical_base; | ||
| 154 | DeviceInterface* device_inter; | ||
| 155 | Common::VirtualBuffer<u32> compressed_physical_ptr; | ||
| 156 | Common::VirtualBuffer<u32> compressed_device_addr; | ||
| 157 | Common::VirtualBuffer<u32> continuity_tracker; | ||
| 158 | |||
| 159 | // Process memory interfaces | ||
| 160 | |||
| 161 | std::deque<size_t> id_pool; | ||
| 162 | std::deque<Memory::Memory*> registered_processes; | ||
| 163 | |||
| 164 | // Memory protection management | ||
| 165 | |||
| 166 | static constexpr size_t guest_max_as_bits = 39; | ||
| 167 | static constexpr size_t guest_as_size = 1ULL << guest_max_as_bits; | ||
| 168 | static constexpr size_t guest_mask = guest_as_size - 1ULL; | ||
| 169 | static constexpr size_t asid_start_bit = guest_max_as_bits; | ||
| 170 | |||
| 171 | std::pair<Asid, VAddr> ExtractCPUBacking(size_t page_index) { | ||
| 172 | auto content = cpu_backing_address[page_index]; | ||
| 173 | const VAddr address = content & guest_mask; | ||
| 174 | const Asid asid{static_cast<size_t>(content >> asid_start_bit)}; | ||
| 175 | return std::make_pair(asid, address); | ||
| 176 | } | ||
| 177 | |||
| 178 | void InsertCPUBacking(size_t page_index, VAddr address, Asid asid) { | ||
| 179 | cpu_backing_address[page_index] = address | (asid.id << asid_start_bit); | ||
| 180 | } | ||
| 181 | |||
| 182 | Common::VirtualBuffer<VAddr> cpu_backing_address; | ||
| 183 | static constexpr size_t subentries = 8 / sizeof(u8); | ||
| 184 | static constexpr size_t subentries_mask = subentries - 1; | ||
| 185 | class CounterEntry final { | ||
| 186 | public: | ||
| 187 | CounterEntry() = default; | ||
| 188 | |||
| 189 | std::atomic_uint8_t& Count(std::size_t page) { | ||
| 190 | return values[page & subentries_mask]; | ||
| 191 | } | ||
| 192 | |||
| 193 | const std::atomic_uint8_t& Count(std::size_t page) const { | ||
| 194 | return values[page & subentries_mask]; | ||
| 195 | } | ||
| 196 | |||
| 197 | private: | ||
| 198 | std::array<std::atomic_uint8_t, subentries> values{}; | ||
| 199 | }; | ||
| 200 | static_assert(sizeof(CounterEntry) == subentries * sizeof(u8), | ||
| 201 | "CounterEntry should be 8 bytes!"); | ||
| 202 | |||
| 203 | static constexpr size_t num_counter_entries = | ||
| 204 | (1ULL << (device_virtual_bits - page_bits)) / subentries; | ||
| 205 | using CachedPages = std::array<CounterEntry, num_counter_entries>; | ||
| 206 | std::unique_ptr<CachedPages> cached_pages; | ||
| 207 | std::mutex counter_guard; | ||
| 208 | std::mutex mapping_guard; | ||
| 209 | }; | ||
| 210 | |||
| 211 | } // namespace Core | ||
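DeviceMemoryManager is templated on a traits type; from this header and the .inc below, Traits has to supply device_virtual_bits plus DeviceInterface and DeviceMethods types. A hypothetical sketch of such a traits type follows; the real traits used by the GPU/nvdrv side are not part of this diff, and the values and bodies here are placeholders:

struct ExampleDeviceTraits {
    // Width of the device address space; 34 bits is only an illustrative value.
    static constexpr size_t device_virtual_bits = 34;

    // Bound via BindInterface(); Unmap(), ReadBlock() and WriteBlock() call into it.
    struct DeviceInterface {
        void FlushRegion(DAddr addr, size_t size) { /* flush caches for the range */ }
        void InvalidateRegion(DAddr addr, size_t size) { /* invalidate caches for the range */ }
    };

    // UpdatePagesCachedCount() calls DeviceMethods::MarkRegionCaching().
    struct DeviceMethods {
        static void MarkRegionCaching(Core::Memory::Memory* memory, VAddr vaddr, size_t size,
                                      bool caching) { /* toggle host-memory caching */ }
    };
};

using ExampleDeviceMemoryManager = Core::DeviceMemoryManager<ExampleDeviceTraits>;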
diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc
new file mode 100644
index 000000000..eab8a2731
--- /dev/null
+++ b/src/core/device_memory_manager.inc
| @@ -0,0 +1,581 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include <atomic> | ||
| 5 | #include <limits> | ||
| 6 | #include <memory> | ||
| 7 | #include <type_traits> | ||
| 8 | |||
| 9 | #include "common/address_space.h" | ||
| 10 | #include "common/address_space.inc" | ||
| 11 | #include "common/alignment.h" | ||
| 12 | #include "common/assert.h" | ||
| 13 | #include "common/div_ceil.h" | ||
| 14 | #include "common/scope_exit.h" | ||
| 15 | #include "common/settings.h" | ||
| 16 | #include "core/device_memory.h" | ||
| 17 | #include "core/device_memory_manager.h" | ||
| 18 | #include "core/memory.h" | ||
| 19 | |||
| 20 | namespace Core { | ||
| 21 | |||
| 22 | namespace { | ||
| 23 | |||
| 24 | class MultiAddressContainer { | ||
| 25 | public: | ||
| 26 | MultiAddressContainer() = default; | ||
| 27 | ~MultiAddressContainer() = default; | ||
| 28 | |||
| 29 | void GatherValues(u32 start_entry, Common::ScratchBuffer<u32>& buffer) { | ||
| 30 | buffer.resize(8); | ||
| 31 | buffer.resize(0); | ||
| 32 | size_t index = 0; | ||
| 33 | const auto add_value = [&](u32 value) { | ||
| 34 | buffer.resize(index + 1); | ||
| 35 | buffer[index++] = value; | ||
| 36 | }; | ||
| 37 | |||
| 38 | u32 iter_entry = start_entry; | ||
| 39 | Entry* current = &storage[iter_entry - 1]; | ||
| 40 | add_value(current->value); | ||
| 41 | while (current->next_entry != 0) { | ||
| 42 | iter_entry = current->next_entry; | ||
| 43 | current = &storage[iter_entry - 1]; | ||
| 44 | add_value(current->value); | ||
| 45 | } | ||
| 46 | } | ||
| 47 | |||
| 48 | u32 Register(u32 value) { | ||
| 49 | return RegisterImplementation(value); | ||
| 50 | } | ||
| 51 | |||
| 52 | void Register(u32 value, u32 start_entry) { | ||
| 53 | auto entry_id = RegisterImplementation(value); | ||
| 54 | u32 iter_entry = start_entry; | ||
| 55 | Entry* current = &storage[iter_entry - 1]; | ||
| 56 | while (current->next_entry != 0) { | ||
| 57 | iter_entry = current->next_entry; | ||
| 58 | current = &storage[iter_entry - 1]; | ||
| 59 | } | ||
| 60 | current->next_entry = entry_id; | ||
| 61 | } | ||
| 62 | |||
| 63 | std::pair<bool, u32> Unregister(u32 value, u32 start_entry) { | ||
| 64 | u32 iter_entry = start_entry; | ||
| 65 | Entry* previous{}; | ||
| 66 | Entry* current = &storage[iter_entry - 1]; | ||
| 67 | Entry* next{}; | ||
| 68 | bool more_than_one_remaining = false; | ||
| 69 | u32 result_start{start_entry}; | ||
| 70 | size_t count = 0; | ||
| 71 | while (current->value != value) { | ||
| 72 | count++; | ||
| 73 | previous = current; | ||
| 74 | iter_entry = current->next_entry; | ||
| 75 | current = &storage[iter_entry - 1]; | ||
| 76 | } | ||
| 77 | // Find next | ||
| 78 | u32 next_entry = current->next_entry; | ||
| 79 | if (next_entry != 0) { | ||
| 80 | next = &storage[next_entry - 1]; | ||
| 81 | more_than_one_remaining = next->next_entry != 0 || previous != nullptr; | ||
| 82 | } | ||
| 83 | if (previous) { | ||
| 84 | previous->next_entry = next_entry; | ||
| 85 | } else { | ||
| 86 | result_start = next_entry; | ||
| 87 | } | ||
| 88 | free_entries.emplace_back(iter_entry); | ||
| 89 | return std::make_pair(more_than_one_remaining || count > 1, result_start); | ||
| 90 | } | ||
| 91 | |||
| 92 | u32 ReleaseEntry(u32 start_entry) { | ||
| 93 | Entry* current = &storage[start_entry - 1]; | ||
| 94 | free_entries.emplace_back(start_entry); | ||
| 95 | return current->value; | ||
| 96 | } | ||
| 97 | |||
| 98 | private: | ||
| 99 | u32 RegisterImplementation(u32 value) { | ||
| 100 | auto entry_id = GetNewEntry(); | ||
| 101 | auto& entry = storage[entry_id - 1]; | ||
| 102 | entry.next_entry = 0; | ||
| 103 | entry.value = value; | ||
| 104 | return entry_id; | ||
| 105 | } | ||
| 106 | u32 GetNewEntry() { | ||
| 107 | if (!free_entries.empty()) { | ||
| 108 | u32 result = free_entries.front(); | ||
| 109 | free_entries.pop_front(); | ||
| 110 | return result; | ||
| 111 | } | ||
| 112 | storage.emplace_back(); | ||
| 113 | u32 new_entry = static_cast<u32>(storage.size()); | ||
| 114 | return new_entry; | ||
| 115 | } | ||
| 116 | |||
| 117 | struct Entry { | ||
| 118 | u32 next_entry{}; | ||
| 119 | u32 value{}; | ||
| 120 | }; | ||
| 121 | |||
| 122 | std::deque<Entry> storage; | ||
| 123 | std::deque<u32> free_entries; | ||
| 124 | }; | ||
| 125 | |||
| 126 | struct EmptyAllocator { | ||
| 127 | EmptyAllocator([[maybe_unused]] DAddr address) {} | ||
| 128 | }; | ||
| 129 | |||
| 130 | } // namespace | ||
| 131 | |||
| 132 | template <typename DTraits> | ||
| 133 | struct DeviceMemoryManagerAllocator { | ||
| 134 | static constexpr size_t device_virtual_bits = DTraits::device_virtual_bits; | ||
| 135 | static constexpr DAddr first_address = 1ULL << Memory::YUZU_PAGEBITS; | ||
| 136 | static constexpr DAddr max_device_area = 1ULL << device_virtual_bits; | ||
| 137 | |||
| 138 | DeviceMemoryManagerAllocator() : main_allocator(first_address) {} | ||
| 139 | |||
| 140 | Common::FlatAllocator<DAddr, 0, device_virtual_bits> main_allocator; | ||
| 141 | MultiAddressContainer multi_dev_address; | ||
| 142 | |||
| 143 | /// Returns true when vaddr -> vaddr+size is fully contained in the buffer | ||
| 144 | template <bool pin_area> | ||
| 145 | [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept { | ||
| 146 | return addr >= 0 && addr + size <= max_device_area; | ||
| 147 | } | ||
| 148 | |||
| 149 | DAddr Allocate(size_t size) { | ||
| 150 | return main_allocator.Allocate(size); | ||
| 151 | } | ||
| 152 | |||
| 153 | void AllocateFixed(DAddr b_address, size_t b_size) { | ||
| 154 | main_allocator.AllocateFixed(b_address, b_size); | ||
| 155 | } | ||
| 156 | |||
| 157 | void Free(DAddr b_address, size_t b_size) { | ||
| 158 | main_allocator.Free(b_address, b_size); | ||
| 159 | } | ||
| 160 | }; | ||
| 161 | |||
| 162 | template <typename Traits> | ||
| 163 | DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_) | ||
| 164 | : physical_base{reinterpret_cast<const uintptr_t>(device_memory_.buffer.BackingBasePointer())}, | ||
| 165 | device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS), | ||
| 166 | compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() == | ||
| 167 | Settings::MemoryLayout::Memory_4Gb | ||
| 168 | ? physical_min_bits | ||
| 169 | : physical_max_bits) - | ||
| 170 | Memory::YUZU_PAGEBITS)), | ||
| 171 | continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS), | ||
| 172 | cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) { | ||
| 173 | impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>(); | ||
| 174 | cached_pages = std::make_unique<CachedPages>(); | ||
| 175 | |||
| 176 | const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS; | ||
| 177 | for (size_t i = 0; i < total_virtual; i++) { | ||
| 178 | compressed_physical_ptr[i] = 0; | ||
| 179 | continuity_tracker[i] = 1; | ||
| 180 | cpu_backing_address[i] = 0; | ||
| 181 | } | ||
| 182 | const size_t total_phys = 1ULL << ((Settings::values.memory_layout_mode.GetValue() == | ||
| 183 | Settings::MemoryLayout::Memory_4Gb | ||
| 184 | ? physical_min_bits | ||
| 185 | : physical_max_bits) - | ||
| 186 | Memory::YUZU_PAGEBITS); | ||
| 187 | for (size_t i = 0; i < total_phys; i++) { | ||
| 188 | compressed_device_addr[i] = 0; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
| 192 | template <typename Traits> | ||
| 193 | DeviceMemoryManager<Traits>::~DeviceMemoryManager() = default; | ||
| 194 | |||
| 195 | template <typename Traits> | ||
| 196 | void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* device_inter_) { | ||
| 197 | device_inter = device_inter_; | ||
| 198 | } | ||
| 199 | |||
| 200 | template <typename Traits> | ||
| 201 | DAddr DeviceMemoryManager<Traits>::Allocate(size_t size) { | ||
| 202 | return impl->Allocate(size); | ||
| 203 | } | ||
| 204 | |||
| 205 | template <typename Traits> | ||
| 206 | void DeviceMemoryManager<Traits>::AllocateFixed(DAddr start, size_t size) { | ||
| 207 | return impl->AllocateFixed(start, size); | ||
| 208 | } | ||
| 209 | |||
| 210 | template <typename Traits> | ||
| 211 | void DeviceMemoryManager<Traits>::Free(DAddr start, size_t size) { | ||
| 212 | impl->Free(start, size); | ||
| 213 | } | ||
| 214 | |||
| 215 | template <typename Traits> | ||
| 216 | void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size_t size, | ||
| 217 | Asid asid, bool track) { | ||
| 218 | Core::Memory::Memory* process_memory = registered_processes[asid.id]; | ||
| 219 | size_t start_page_d = address >> Memory::YUZU_PAGEBITS; | ||
| 220 | size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; | ||
| 221 | std::scoped_lock lk(mapping_guard); | ||
| 222 | for (size_t i = 0; i < num_pages; i++) { | ||
| 223 | const VAddr new_vaddress = virtual_address + i * Memory::YUZU_PAGESIZE; | ||
| 224 | auto* ptr = process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress)); | ||
| 225 | if (ptr == nullptr) [[unlikely]] { | ||
| 226 | compressed_physical_ptr[start_page_d + i] = 0; | ||
| 227 | continue; | ||
| 228 | } | ||
| 229 | auto phys_addr = static_cast<u32>(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U; | ||
| 230 | compressed_physical_ptr[start_page_d + i] = phys_addr; | ||
| 231 | InsertCPUBacking(start_page_d + i, new_vaddress, asid); | ||
| 232 | const u32 base_dev = compressed_device_addr[phys_addr - 1U]; | ||
| 233 | const u32 new_dev = static_cast<u32>(start_page_d + i); | ||
| 234 | if (base_dev == 0) [[likely]] { | ||
| 235 | compressed_device_addr[phys_addr - 1U] = new_dev; | ||
| 236 | continue; | ||
| 237 | } | ||
| 238 | u32 start_id = base_dev & MULTI_MASK; | ||
| 239 | if ((base_dev >> MULTI_FLAG_BITS) == 0) { | ||
| 240 | start_id = impl->multi_dev_address.Register(base_dev); | ||
| 241 | compressed_device_addr[phys_addr - 1U] = MULTI_FLAG | start_id; | ||
| 242 | } | ||
| 243 | impl->multi_dev_address.Register(new_dev, start_id); | ||
| 244 | } | ||
| 245 | if (track) { | ||
| 246 | TrackContinuityImpl(address, virtual_address, size, asid); | ||
| 247 | } | ||
| 248 | } | ||
| 249 | |||
| 250 | template <typename Traits> | ||
| 251 | void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) { | ||
| 252 | size_t start_page_d = address >> Memory::YUZU_PAGEBITS; | ||
| 253 | size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; | ||
| 254 | device_inter->InvalidateRegion(address, size); | ||
| 255 | std::scoped_lock lk(mapping_guard); | ||
| 256 | for (size_t i = 0; i < num_pages; i++) { | ||
| 257 | auto phys_addr = compressed_physical_ptr[start_page_d + i]; | ||
| 258 | compressed_physical_ptr[start_page_d + i] = 0; | ||
| 259 | cpu_backing_address[start_page_d + i] = 0; | ||
| 260 | if (phys_addr != 0) [[likely]] { | ||
| 261 | const u32 base_dev = compressed_device_addr[phys_addr - 1U]; | ||
| 262 | if ((base_dev >> MULTI_FLAG_BITS) == 0) [[likely]] { | ||
| 263 | compressed_device_addr[phys_addr - 1] = 0; | ||
| 264 | continue; | ||
| 265 | } | ||
| 266 | const auto [more_entries, new_start] = impl->multi_dev_address.Unregister( | ||
| 267 | static_cast<u32>(start_page_d + i), base_dev & MULTI_MASK); | ||
| 268 | if (!more_entries) { | ||
| 269 | compressed_device_addr[phys_addr - 1] = | ||
| 270 | impl->multi_dev_address.ReleaseEntry(new_start); | ||
| 271 | continue; | ||
| 272 | } | ||
| 273 | compressed_device_addr[phys_addr - 1] = new_start | MULTI_FLAG; | ||
| 274 | } | ||
| 275 | } | ||
| 276 | } | ||
| 277 | template <typename Traits> | ||
| 278 | void DeviceMemoryManager<Traits>::TrackContinuityImpl(DAddr address, VAddr virtual_address, | ||
| 279 | size_t size, Asid asid) { | ||
| 280 | Core::Memory::Memory* process_memory = registered_processes[asid.id]; | ||
| 281 | size_t start_page_d = address >> Memory::YUZU_PAGEBITS; | ||
| 282 | size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; | ||
| 283 | uintptr_t last_ptr = 0; | ||
| 284 | size_t page_count = 1; | ||
| 285 | for (size_t i = num_pages; i > 0; i--) { | ||
| 286 | size_t index = i - 1; | ||
| 287 | const VAddr new_vaddress = virtual_address + index * Memory::YUZU_PAGESIZE; | ||
| 288 | const uintptr_t new_ptr = reinterpret_cast<uintptr_t>( | ||
| 289 | process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress))); | ||
| 290 | if (new_ptr + page_size == last_ptr) { | ||
| 291 | page_count++; | ||
| 292 | } else { | ||
| 293 | page_count = 1; | ||
| 294 | } | ||
| 295 | last_ptr = new_ptr; | ||
| 296 | continuity_tracker[start_page_d + index] = static_cast<u32>(page_count); | ||
| 297 | } | ||
| 298 | } | ||
| 299 | template <typename Traits> | ||
| 300 | u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) { | ||
| 301 | size_t page_index = src_addr >> page_bits; | ||
| 302 | size_t subbits = src_addr & page_mask; | ||
| 303 | if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) { | ||
| 304 | return GetPointer<u8>(src_addr); | ||
| 305 | } | ||
| 306 | return nullptr; | ||
| 307 | } | ||
| 308 | |||
| 309 | template <typename Traits> | ||
| 310 | const u8* DeviceMemoryManager<Traits>::GetSpan(const DAddr src_addr, const std::size_t size) const { | ||
| 311 | size_t page_index = src_addr >> page_bits; | ||
| 312 | size_t subbits = src_addr & page_mask; | ||
| 313 | if ((static_cast<size_t>(continuity_tracker[page_index]) << page_bits) >= size + subbits) { | ||
| 314 | return GetPointer<u8>(src_addr); | ||
| 315 | } | ||
| 316 | return nullptr; | ||
| 317 | } | ||
| 318 | |||
| 319 | template <typename Traits> | ||
| 320 | void DeviceMemoryManager<Traits>::InnerGatherDeviceAddresses(Common::ScratchBuffer<u32>& buffer, | ||
| 321 | PAddr address) { | ||
| 322 | size_t phys_addr = address >> page_bits; | ||
| 323 | std::scoped_lock lk(mapping_guard); | ||
| 324 | u32 backing = compressed_device_addr[phys_addr]; | ||
| 325 | if ((backing >> MULTI_FLAG_BITS) != 0) { | ||
| 326 | impl->multi_dev_address.GatherValues(backing & MULTI_MASK, buffer); | ||
| 327 | return; | ||
| 328 | } | ||
| 329 | buffer.resize(1); | ||
| 330 | buffer[0] = backing; | ||
| 331 | } | ||
| 332 | |||
| 333 | template <typename Traits> | ||
| 334 | template <typename T> | ||
| 335 | T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) { | ||
| 336 | const size_t index = address >> Memory::YUZU_PAGEBITS; | ||
| 337 | const size_t offset = address & Memory::YUZU_PAGEMASK; | ||
| 338 | auto phys_addr = compressed_physical_ptr[index]; | ||
| 339 | if (phys_addr == 0) [[unlikely]] { | ||
| 340 | return nullptr; | ||
| 341 | } | ||
| 342 | return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) + | ||
| 343 | offset); | ||
| 344 | } | ||
| 345 | |||
| 346 | template <typename Traits> | ||
| 347 | template <typename T> | ||
| 348 | const T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) const { | ||
| 349 | const size_t index = address >> Memory::YUZU_PAGEBITS; | ||
| 350 | const size_t offset = address & Memory::YUZU_PAGEMASK; | ||
| 351 | auto phys_addr = compressed_physical_ptr[index]; | ||
| 352 | if (phys_addr == 0) [[unlikely]] { | ||
| 353 | return nullptr; | ||
| 354 | } | ||
| 355 | return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) + | ||
| 356 | offset); | ||
| 357 | } | ||
| 358 | |||
| 359 | template <typename Traits> | ||
| 360 | template <typename T> | ||
| 361 | void DeviceMemoryManager<Traits>::Write(DAddr address, T value) { | ||
| 362 | T* ptr = GetPointer<T>(address); | ||
| 363 | if (!ptr) [[unlikely]] { | ||
| 364 | return; | ||
| 365 | } | ||
| 366 | std::memcpy(ptr, &value, sizeof(T)); | ||
| 367 | } | ||
| 368 | |||
| 369 | template <typename Traits> | ||
| 370 | template <typename T> | ||
| 371 | T DeviceMemoryManager<Traits>::Read(DAddr address) const { | ||
| 372 | const T* ptr = GetPointer<T>(address); | ||
| 373 | T result{}; | ||
| 374 | if (!ptr) [[unlikely]] { | ||
| 375 | return result; | ||
| 376 | } | ||
| 377 | std::memcpy(&result, ptr, sizeof(T)); | ||
| 378 | return result; | ||
| 379 | } | ||
| 380 | |||
| 381 | template <typename Traits> | ||
| 382 | void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto on_unmapped, | ||
| 383 | auto on_memory, auto increment) { | ||
| 384 | std::size_t remaining_size = size; | ||
| 385 | std::size_t page_index = addr >> Memory::YUZU_PAGEBITS; | ||
| 386 | std::size_t page_offset = addr & Memory::YUZU_PAGEMASK; | ||
| 387 | |||
| 388 | while (remaining_size) { | ||
| 389 | const size_t next_pages = static_cast<std::size_t>(continuity_tracker[page_index]); | ||
| 390 | const std::size_t copy_amount = | ||
| 391 | std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size); | ||
| 392 | const auto current_vaddr = | ||
| 393 | static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset); | ||
| 394 | SCOPE_EXIT({ | ||
| 395 | page_index += next_pages; | ||
| 396 | page_offset = 0; | ||
| 397 | increment(copy_amount); | ||
| 398 | remaining_size -= copy_amount; | ||
| 399 | }); | ||
| 400 | |||
| 401 | auto phys_addr = compressed_physical_ptr[page_index]; | ||
| 402 | if (phys_addr == 0) { | ||
| 403 | on_unmapped(copy_amount, current_vaddr); | ||
| 404 | continue; | ||
| 405 | } | ||
| 406 | auto* mem_ptr = GetPointerFromRaw<u8>( | ||
| 407 | (static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset); | ||
| 408 | on_memory(copy_amount, mem_ptr); | ||
| 409 | } | ||
| 410 | } | ||
| 411 | |||
| 412 | template <typename Traits> | ||
| 413 | void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) { | ||
| 414 | device_inter->FlushRegion(address, size); | ||
| 415 | WalkBlock( | ||
| 416 | address, size, | ||
| 417 | [&](size_t copy_amount, DAddr current_vaddr) { | ||
| 418 | LOG_ERROR( | ||
| 419 | HW_Memory, | ||
| 420 | "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 421 | current_vaddr, address, size); | ||
| 422 | std::memset(dest_pointer, 0, copy_amount); | ||
| 423 | }, | ||
| 424 | [&](size_t copy_amount, const u8* const src_ptr) { | ||
| 425 | std::memcpy(dest_pointer, src_ptr, copy_amount); | ||
| 426 | }, | ||
| 427 | [&](const std::size_t copy_amount) { | ||
| 428 | dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount; | ||
| 429 | }); | ||
| 430 | } | ||
| 431 | |||
| 432 | template <typename Traits> | ||
| 433 | void DeviceMemoryManager<Traits>::WriteBlock(DAddr address, const void* src_pointer, size_t size) { | ||
| 434 | WalkBlock( | ||
| 435 | address, size, | ||
| 436 | [&](size_t copy_amount, DAddr current_vaddr) { | ||
| 437 | LOG_ERROR( | ||
| 438 | HW_Memory, | ||
| 439 | "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 440 | current_vaddr, address, size); | ||
| 441 | }, | ||
| 442 | [&](size_t copy_amount, u8* const dst_ptr) { | ||
| 443 | std::memcpy(dst_ptr, src_pointer, copy_amount); | ||
| 444 | }, | ||
| 445 | [&](const std::size_t copy_amount) { | ||
| 446 | src_pointer = static_cast<const u8*>(src_pointer) + copy_amount; | ||
| 447 | }); | ||
| 448 | device_inter->InvalidateRegion(address, size); | ||
| 449 | } | ||
| 450 | |||
| 451 | template <typename Traits> | ||
| 452 | void DeviceMemoryManager<Traits>::ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size) { | ||
| 453 | WalkBlock( | ||
| 454 | address, size, | ||
| 455 | [&](size_t copy_amount, DAddr current_vaddr) { | ||
| 456 | LOG_ERROR( | ||
| 457 | HW_Memory, | ||
| 458 | "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 459 | current_vaddr, address, size); | ||
| 460 | std::memset(dest_pointer, 0, copy_amount); | ||
| 461 | }, | ||
| 462 | [&](size_t copy_amount, const u8* const src_ptr) { | ||
| 463 | std::memcpy(dest_pointer, src_ptr, copy_amount); | ||
| 464 | }, | ||
| 465 | [&](const std::size_t copy_amount) { | ||
| 466 | dest_pointer = static_cast<u8*>(dest_pointer) + copy_amount; | ||
| 467 | }); | ||
| 468 | } | ||
| 469 | |||
| 470 | template <typename Traits> | ||
| 471 | void DeviceMemoryManager<Traits>::WriteBlockUnsafe(DAddr address, const void* src_pointer, | ||
| 472 | size_t size) { | ||
| 473 | WalkBlock( | ||
| 474 | address, size, | ||
| 475 | [&](size_t copy_amount, DAddr current_vaddr) { | ||
| 476 | LOG_ERROR( | ||
| 477 | HW_Memory, | ||
| 478 | "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | ||
| 479 | current_vaddr, address, size); | ||
| 480 | }, | ||
| 481 | [&](size_t copy_amount, u8* const dst_ptr) { | ||
| 482 | std::memcpy(dst_ptr, src_pointer, copy_amount); | ||
| 483 | }, | ||
| 484 | [&](const std::size_t copy_amount) { | ||
| 485 | src_pointer = static_cast<const u8*>(src_pointer) + copy_amount; | ||
| 486 | }); | ||
| 487 | } | ||
| 488 | |||
| 489 | template <typename Traits> | ||
| 490 | Asid DeviceMemoryManager<Traits>::RegisterProcess(Memory::Memory* memory_device_inter) { | ||
| 491 | size_t new_id{}; | ||
| 492 | if (!id_pool.empty()) { | ||
| 493 | new_id = id_pool.front(); | ||
| 494 | id_pool.pop_front(); | ||
| 495 | registered_processes[new_id] = memory_device_inter; | ||
| 496 | } else { | ||
| 497 | registered_processes.emplace_back(memory_device_inter); | ||
| 498 | new_id = registered_processes.size() - 1U; | ||
| 499 | } | ||
| 500 | return Asid{new_id}; | ||
| 501 | } | ||
| 502 | |||
| 503 | template <typename Traits> | ||
| 504 | void DeviceMemoryManager<Traits>::UnregisterProcess(Asid asid) { | ||
| 505 | registered_processes[asid.id] = nullptr; | ||
| 506 | id_pool.push_front(asid.id); | ||
| 507 | } | ||
| 508 | |||
| 509 | template <typename Traits> | ||
| 510 | void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) { | ||
| 511 | std::unique_lock<std::mutex> lk(counter_guard, std::defer_lock); | ||
| 512 | const auto Lock = [&] { | ||
| 513 | if (!lk) { | ||
| 514 | lk.lock(); | ||
| 515 | } | ||
| 516 | }; | ||
| 517 | u64 uncache_begin = 0; | ||
| 518 | u64 cache_begin = 0; | ||
| 519 | u64 uncache_bytes = 0; | ||
| 520 | u64 cache_bytes = 0; | ||
| 521 | const auto MarkRegionCaching = &DeviceMemoryManager<Traits>::DeviceMethods::MarkRegionCaching; | ||
| 522 | |||
| 523 | std::atomic_thread_fence(std::memory_order_acquire); | ||
| 524 | const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE); | ||
| 525 | size_t page = addr >> Memory::YUZU_PAGEBITS; | ||
| 526 | auto [asid, base_vaddress] = ExtractCPUBacking(page); | ||
| 527 | size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS; | ||
| 528 | auto* memory_device_inter = registered_processes[asid.id]; | ||
| 529 | for (; page != page_end; ++page) { | ||
| 530 | std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page); | ||
| 531 | |||
| 532 | if (delta > 0) { | ||
| 533 | ASSERT_MSG(count.load(std::memory_order::relaxed) < std::numeric_limits<u8>::max(), | ||
| 534 | "Count may overflow!"); | ||
| 535 | } else if (delta < 0) { | ||
| 536 | ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!"); | ||
| 537 | } else { | ||
| 538 | ASSERT_MSG(false, "Delta must be non-zero!"); | ||
| 539 | } | ||
| 540 | |||
| 541 | // Adds or subtracts 1, as count is a unsigned 8-bit value | ||
| 542 | count.fetch_add(static_cast<u8>(delta), std::memory_order_release); | ||
| 543 | |||
| 544 | // Assume delta is either -1 or 1 | ||
| 545 | if (count.load(std::memory_order::relaxed) == 0) { | ||
| 546 | if (uncache_bytes == 0) { | ||
| 547 | uncache_begin = vpage; | ||
| 548 | } | ||
| 549 | uncache_bytes += Memory::YUZU_PAGESIZE; | ||
| 550 | } else if (uncache_bytes > 0) { | ||
| 551 | Lock(); | ||
| 552 | MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, | ||
| 553 | uncache_bytes, false); | ||
| 554 | uncache_bytes = 0; | ||
| 555 | } | ||
| 556 | if (count.load(std::memory_order::relaxed) == 1 && delta > 0) { | ||
| 557 | if (cache_bytes == 0) { | ||
| 558 | cache_begin = vpage; | ||
| 559 | } | ||
| 560 | cache_bytes += Memory::YUZU_PAGESIZE; | ||
| 561 | } else if (cache_bytes > 0) { | ||
| 562 | Lock(); | ||
| 563 | MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, | ||
| 564 | true); | ||
| 565 | cache_bytes = 0; | ||
| 566 | } | ||
| 567 | vpage++; | ||
| 568 | } | ||
| 569 | if (uncache_bytes > 0) { | ||
| 570 | Lock(); | ||
| 571 | MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, | ||
| 572 | false); | ||
| 573 | } | ||
| 574 | if (cache_bytes > 0) { | ||
| 575 | Lock(); | ||
| 576 | MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, | ||
| 577 | true); | ||
| 578 | } | ||
| 579 | } | ||
| 580 | |||
| 581 | } // namespace Core | ||
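Taken together, a rough usage sketch of the interface implemented above. The instance names (dmm, gpu_device, process_memory) and guest_vaddr are assumptions for illustration; they are not defined in this diff:

// dmm is a DeviceMemoryManager<Traits>; a device interface must be bound before Unmap/Read/Write blocks.
dmm.BindInterface(&gpu_device);
const Core::Asid asid = dmm.RegisterProcess(&process_memory);

const size_t map_size = 4 * Core::DEVICE_PAGESIZE;
const DAddr daddr = dmm.Allocate(map_size);   // reserve a device-address range
dmm.Map(daddr, guest_vaddr, map_size, asid);  // back it with the process's guest pages

dmm.Write<u32>(daddr, 0xCAFEBABE);            // resolved through the compressed page tables
const u32 readback = dmm.Read<u32>(daddr);    // 0xCAFEBABE if the first page was mapped

dmm.Unmap(daddr, map_size);
dmm.Free(daddr, map_size);
dmm.UnregisterProcess(asid);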
diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp
index 4a3dbc6a3..612122224 100644
--- a/src/core/file_sys/patch_manager.cpp
+++ b/src/core/file_sys/patch_manager.cpp
| @@ -466,12 +466,12 @@ VirtualFile PatchManager::PatchRomFS(const NCA* base_nca, VirtualFile base_romfs | |||
| 466 | return romfs; | 466 | return romfs; |
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | PatchManager::PatchVersionNames PatchManager::GetPatchVersionNames(VirtualFile update_raw) const { | 469 | std::vector<Patch> PatchManager::GetPatches(VirtualFile update_raw) const { |
| 470 | if (title_id == 0) { | 470 | if (title_id == 0) { |
| 471 | return {}; | 471 | return {}; |
| 472 | } | 472 | } |
| 473 | 473 | ||
| 474 | std::map<std::string, std::string, std::less<>> out; | 474 | std::vector<Patch> out; |
| 475 | const auto& disabled = Settings::values.disabled_addons[title_id]; | 475 | const auto& disabled = Settings::values.disabled_addons[title_id]; |
| 476 | 476 | ||
| 477 | // Game Updates | 477 | // Game Updates |
| @@ -482,20 +482,28 @@ PatchManager::PatchVersionNames PatchManager::GetPatchVersionNames(VirtualFile u | |||
| 482 | 482 | ||
| 483 | const auto update_disabled = | 483 | const auto update_disabled = |
| 484 | std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend(); | 484 | std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend(); |
| 485 | const auto update_label = update_disabled ? "[D] Update" : "Update"; | 485 | Patch update_patch = {.enabled = !update_disabled, |
| 486 | .name = "Update", | ||
| 487 | .version = "", | ||
| 488 | .type = PatchType::Update, | ||
| 489 | .program_id = title_id, | ||
| 490 | .title_id = title_id}; | ||
| 486 | 491 | ||
| 487 | if (nacp != nullptr) { | 492 | if (nacp != nullptr) { |
| 488 | out.insert_or_assign(update_label, nacp->GetVersionString()); | 493 | update_patch.version = nacp->GetVersionString(); |
| 494 | out.push_back(update_patch); | ||
| 489 | } else { | 495 | } else { |
| 490 | if (content_provider.HasEntry(update_tid, ContentRecordType::Program)) { | 496 | if (content_provider.HasEntry(update_tid, ContentRecordType::Program)) { |
| 491 | const auto meta_ver = content_provider.GetEntryVersion(update_tid); | 497 | const auto meta_ver = content_provider.GetEntryVersion(update_tid); |
| 492 | if (meta_ver.value_or(0) == 0) { | 498 | if (meta_ver.value_or(0) == 0) { |
| 493 | out.insert_or_assign(update_label, ""); | 499 | out.push_back(update_patch); |
| 494 | } else { | 500 | } else { |
| 495 | out.insert_or_assign(update_label, FormatTitleVersion(*meta_ver)); | 501 | update_patch.version = FormatTitleVersion(*meta_ver); |
| 502 | out.push_back(update_patch); | ||
| 496 | } | 503 | } |
| 497 | } else if (update_raw != nullptr) { | 504 | } else if (update_raw != nullptr) { |
| 498 | out.insert_or_assign(update_label, "PACKED"); | 505 | update_patch.version = "PACKED"; |
| 506 | out.push_back(update_patch); | ||
| 499 | } | 507 | } |
| 500 | } | 508 | } |
| 501 | 509 | ||
| @@ -539,7 +547,12 @@ PatchManager::PatchVersionNames PatchManager::GetPatchVersionNames(VirtualFile u | |||
| 539 | 547 | ||
| 540 | const auto mod_disabled = | 548 | const auto mod_disabled = |
| 541 | std::find(disabled.begin(), disabled.end(), mod->GetName()) != disabled.end(); | 549 | std::find(disabled.begin(), disabled.end(), mod->GetName()) != disabled.end(); |
| 542 | out.insert_or_assign(mod_disabled ? "[D] " + mod->GetName() : mod->GetName(), types); | 550 | out.push_back({.enabled = !mod_disabled, |
| 551 | .name = mod->GetName(), | ||
| 552 | .version = types, | ||
| 553 | .type = PatchType::Mod, | ||
| 554 | .program_id = title_id, | ||
| 555 | .title_id = title_id}); | ||
| 543 | } | 556 | } |
| 544 | } | 557 | } |
| 545 | 558 | ||
| @@ -557,7 +570,12 @@ PatchManager::PatchVersionNames PatchManager::GetPatchVersionNames(VirtualFile u | |||
| 557 | if (!types.empty()) { | 570 | if (!types.empty()) { |
| 558 | const auto mod_disabled = | 571 | const auto mod_disabled = |
| 559 | std::find(disabled.begin(), disabled.end(), "SDMC") != disabled.end(); | 572 | std::find(disabled.begin(), disabled.end(), "SDMC") != disabled.end(); |
| 560 | out.insert_or_assign(mod_disabled ? "[D] SDMC" : "SDMC", types); | 573 | out.push_back({.enabled = !mod_disabled, |
| 574 | .name = "SDMC", | ||
| 575 | .version = types, | ||
| 576 | .type = PatchType::Mod, | ||
| 577 | .program_id = title_id, | ||
| 578 | .title_id = title_id}); | ||
| 561 | } | 579 | } |
| 562 | } | 580 | } |
| 563 | 581 | ||
| @@ -584,7 +602,12 @@ PatchManager::PatchVersionNames PatchManager::GetPatchVersionNames(VirtualFile u | |||
| 584 | 602 | ||
| 585 | const auto dlc_disabled = | 603 | const auto dlc_disabled = |
| 586 | std::find(disabled.begin(), disabled.end(), "DLC") != disabled.end(); | 604 | std::find(disabled.begin(), disabled.end(), "DLC") != disabled.end(); |
| 587 | out.insert_or_assign(dlc_disabled ? "[D] DLC" : "DLC", std::move(list)); | 605 | out.push_back({.enabled = !dlc_disabled, |
| 606 | .name = "DLC", | ||
| 607 | .version = std::move(list), | ||
| 608 | .type = PatchType::DLC, | ||
| 609 | .program_id = title_id, | ||
| 610 | .title_id = dlc_match.back().title_id}); | ||
| 588 | } | 611 | } |
| 589 | 612 | ||
| 590 | return out; | 613 | return out; |
diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h
index 03e9c7301..2601b8217 100644
--- a/src/core/file_sys/patch_manager.h
+++ b/src/core/file_sys/patch_manager.h
| @@ -26,12 +26,22 @@ class ContentProvider; | |||
| 26 | class NCA; | 26 | class NCA; |
| 27 | class NACP; | 27 | class NACP; |
| 28 | 28 | ||
| 29 | enum class PatchType { Update, DLC, Mod }; | ||
| 30 | |||
| 31 | struct Patch { | ||
| 32 | bool enabled; | ||
| 33 | std::string name; | ||
| 34 | std::string version; | ||
| 35 | PatchType type; | ||
| 36 | u64 program_id; | ||
| 37 | u64 title_id; | ||
| 38 | }; | ||
| 39 | |||
| 29 | // A centralized class to manage patches to games. | 40 | // A centralized class to manage patches to games. |
| 30 | class PatchManager { | 41 | class PatchManager { |
| 31 | public: | 42 | public: |
| 32 | using BuildID = std::array<u8, 0x20>; | 43 | using BuildID = std::array<u8, 0x20>; |
| 33 | using Metadata = std::pair<std::unique_ptr<NACP>, VirtualFile>; | 44 | using Metadata = std::pair<std::unique_ptr<NACP>, VirtualFile>; |
| 34 | using PatchVersionNames = std::map<std::string, std::string, std::less<>>; | ||
| 35 | 45 | ||
| 36 | explicit PatchManager(u64 title_id_, | 46 | explicit PatchManager(u64 title_id_, |
| 37 | const Service::FileSystem::FileSystemController& fs_controller_, | 47 | const Service::FileSystem::FileSystemController& fs_controller_, |
| @@ -66,9 +76,8 @@ public: | |||
| 66 | VirtualFile packed_update_raw = nullptr, | 76 | VirtualFile packed_update_raw = nullptr, |
| 67 | bool apply_layeredfs = true) const; | 77 | bool apply_layeredfs = true) const; |
| 68 | 78 | ||
| 69 | // Returns a vector of pairs between patch names and patch versions. | 79 | // Returns a vector of patches |
| 70 | // i.e. Update 3.2.2 will return {"Update", "3.2.2"} | 80 | [[nodiscard]] std::vector<Patch> GetPatches(VirtualFile update_raw = nullptr) const; |
| 71 | [[nodiscard]] PatchVersionNames GetPatchVersionNames(VirtualFile update_raw = nullptr) const; | ||
| 72 | 81 | ||
| 73 | // If the game update exists, returns the u32 version field in its Meta-type NCA. If that fails, | 82 | // If the game update exists, returns the u32 version field in its Meta-type NCA. If that fails, |
| 74 | // it will fallback to the Meta-type NCA of the base game. If that fails, the result will be | 83 | // it will fallback to the Meta-type NCA of the base game. If that fails, the result will be |
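Callers that previously consumed the name-to-version map can now derive the old "[D] " labels from the enabled flag instead of parsing the key. A brief, illustrative sketch assuming an existing PatchManager instance and a hypothetical entries vector of strings:

for (const FileSys::Patch& patch : patch_manager.GetPatches()) {
    const std::string label = patch.enabled ? patch.name : "[D] " + patch.name;
    // e.g. "Update 3.2.2" or "[D] DLC 1, 2", depending on patch.type and patch.version
    entries.push_back(label + " " + patch.version);
}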
diff --git a/src/core/gpu_dirty_memory_manager.h b/src/core/gpu_dirty_memory_manager.h
index 9687531e8..cc8fc176f 100644
--- a/src/core/gpu_dirty_memory_manager.h
+++ b/src/core/gpu_dirty_memory_manager.h
| @@ -10,7 +10,7 @@ | |||
| 10 | #include <utility> | 10 | #include <utility> |
| 11 | #include <vector> | 11 | #include <vector> |
| 12 | 12 | ||
| 13 | #include "core/memory.h" | 13 | #include "core/device_memory_manager.h" |
| 14 | 14 | ||
| 15 | namespace Core { | 15 | namespace Core { |
| 16 | 16 | ||
| @@ -23,7 +23,7 @@ public: | |||
| 23 | 23 | ||
| 24 | ~GPUDirtyMemoryManager() = default; | 24 | ~GPUDirtyMemoryManager() = default; |
| 25 | 25 | ||
| 26 | void Collect(VAddr address, size_t size) { | 26 | void Collect(PAddr address, size_t size) { |
| 27 | TransformAddress t = BuildTransform(address, size); | 27 | TransformAddress t = BuildTransform(address, size); |
| 28 | TransformAddress tmp, original; | 28 | TransformAddress tmp, original; |
| 29 | do { | 29 | do { |
| @@ -47,7 +47,7 @@ public: | |||
| 47 | std::memory_order_relaxed)); | 47 | std::memory_order_relaxed)); |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | void Gather(std::function<void(VAddr, size_t)>& callback) { | 50 | void Gather(std::function<void(PAddr, size_t)>& callback) { |
| 51 | { | 51 | { |
| 52 | std::scoped_lock lk(guard); | 52 | std::scoped_lock lk(guard); |
| 53 | TransformAddress t = current.exchange(default_transform, std::memory_order_relaxed); | 53 | TransformAddress t = current.exchange(default_transform, std::memory_order_relaxed); |
| @@ -65,7 +65,7 @@ public: | |||
| 65 | mask = mask >> empty_bits; | 65 | mask = mask >> empty_bits; |
| 66 | 66 | ||
| 67 | const size_t continuous_bits = std::countr_one(mask); | 67 | const size_t continuous_bits = std::countr_one(mask); |
| 68 | callback((static_cast<VAddr>(transform.address) << page_bits) + offset, | 68 | callback((static_cast<PAddr>(transform.address) << page_bits) + offset, |
| 69 | continuous_bits << align_bits); | 69 | continuous_bits << align_bits); |
| 70 | mask = continuous_bits < align_size ? (mask >> continuous_bits) : 0; | 70 | mask = continuous_bits < align_size ? (mask >> continuous_bits) : 0; |
| 71 | offset += continuous_bits << align_bits; | 71 | offset += continuous_bits << align_bits; |
| @@ -80,7 +80,7 @@ private: | |||
| 80 | u32 mask; | 80 | u32 mask; |
| 81 | }; | 81 | }; |
| 82 | 82 | ||
| 83 | constexpr static size_t page_bits = Memory::YUZU_PAGEBITS - 1; | 83 | constexpr static size_t page_bits = DEVICE_PAGEBITS - 1; |
| 84 | constexpr static size_t page_size = 1ULL << page_bits; | 84 | constexpr static size_t page_size = 1ULL << page_bits; |
| 85 | constexpr static size_t page_mask = page_size - 1; | 85 | constexpr static size_t page_mask = page_size - 1; |
| 86 | 86 | ||
| @@ -89,7 +89,7 @@ private: | |||
| 89 | constexpr static size_t align_mask = align_size - 1; | 89 | constexpr static size_t align_mask = align_size - 1; |
| 90 | constexpr static TransformAddress default_transform = {.address = ~0U, .mask = 0U}; | 90 | constexpr static TransformAddress default_transform = {.address = ~0U, .mask = 0U}; |
| 91 | 91 | ||
| 92 | bool IsValid(VAddr address) { | 92 | bool IsValid(PAddr address) { |
| 93 | return address < (1ULL << 39); | 93 | return address < (1ULL << 39); |
| 94 | } | 94 | } |
| 95 | 95 | ||
| @@ -103,7 +103,7 @@ private: | |||
| 103 | return mask; | 103 | return mask; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | TransformAddress BuildTransform(VAddr address, size_t size) { | 106 | TransformAddress BuildTransform(PAddr address, size_t size) { |
| 107 | const size_t minor_address = address & page_mask; | 107 | const size_t minor_address = address & page_mask; |
| 108 | const size_t minor_bit = minor_address >> align_bits; | 108 | const size_t minor_bit = minor_address >> align_bits; |
| 109 | const size_t top_bit = (minor_address + size + align_mask) >> align_bits; | 109 | const size_t top_bit = (minor_address + size + align_mask) >> align_bits; |
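With this change the manager tracks device/physical addresses end to end. A minimal sketch of the Collect/Gather flow as declared above; the address value and the callback body are illustrative:

Core::GPUDirtyMemoryManager dirty_manager;
dirty_manager.Collect(/*address=*/0x200000, /*size=*/0x180); // record a dirty physical range

std::function<void(PAddr, size_t)> on_range = [](PAddr addr, size_t size) {
    // consume each coalesced dirty range, e.g. invalidate the matching GPU caches
};
dirty_manager.Gather(on_range); // Gather() takes an lvalue reference, so the callback needs a name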
diff --git a/src/core/guest_memory.h b/src/core/guest_memory.h
new file mode 100644
index 000000000..7ee18c126
--- /dev/null
+++ b/src/core/guest_memory.h
| @@ -0,0 +1,214 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <iterator> | ||
| 7 | #include <memory> | ||
| 8 | #include <optional> | ||
| 9 | #include <span> | ||
| 10 | #include <vector> | ||
| 11 | |||
| 12 | #include "common/assert.h" | ||
| 13 | #include "common/scratch_buffer.h" | ||
| 14 | |||
| 15 | namespace Core::Memory { | ||
| 16 | |||
| 17 | enum GuestMemoryFlags : u32 { | ||
| 18 | Read = 1 << 0, | ||
| 19 | Write = 1 << 1, | ||
| 20 | Safe = 1 << 2, | ||
| 21 | Cached = 1 << 3, | ||
| 22 | |||
| 23 | SafeRead = Read | Safe, | ||
| 24 | SafeWrite = Write | Safe, | ||
| 25 | SafeReadWrite = SafeRead | SafeWrite, | ||
| 26 | SafeReadCachedWrite = SafeReadWrite | Cached, | ||
| 27 | |||
| 28 | UnsafeRead = Read, | ||
| 29 | UnsafeWrite = Write, | ||
| 30 | UnsafeReadWrite = UnsafeRead | UnsafeWrite, | ||
| 31 | UnsafeReadCachedWrite = UnsafeReadWrite | Cached, | ||
| 32 | }; | ||
| 33 | |||
| 34 | namespace { | ||
| 35 | template <typename M, typename T, GuestMemoryFlags FLAGS> | ||
| 36 | class GuestMemory { | ||
| 37 | using iterator = T*; | ||
| 38 | using const_iterator = const T*; | ||
| 39 | using value_type = T; | ||
| 40 | using element_type = T; | ||
| 41 | using iterator_category = std::contiguous_iterator_tag; | ||
| 42 | |||
| 43 | public: | ||
| 44 | GuestMemory() = delete; | ||
| 45 | explicit GuestMemory(M& memory, u64 addr, std::size_t size, | ||
| 46 | Common::ScratchBuffer<T>* backup = nullptr) | ||
| 47 | : m_memory{memory}, m_addr{addr}, m_size{size} { | ||
| 48 | static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write); | ||
| 49 | if constexpr (FLAGS & GuestMemoryFlags::Read) { | ||
| 50 | Read(addr, size, backup); | ||
| 51 | } | ||
| 52 | } | ||
| 53 | |||
| 54 | ~GuestMemory() = default; | ||
| 55 | |||
| 56 | T* data() noexcept { | ||
| 57 | return m_data_span.data(); | ||
| 58 | } | ||
| 59 | |||
| 60 | const T* data() const noexcept { | ||
| 61 | return m_data_span.data(); | ||
| 62 | } | ||
| 63 | |||
| 64 | size_t size() const noexcept { | ||
| 65 | return m_size; | ||
| 66 | } | ||
| 67 | |||
| 68 | size_t size_bytes() const noexcept { | ||
| 69 | return this->size() * sizeof(T); | ||
| 70 | } | ||
| 71 | |||
| 72 | [[nodiscard]] T* begin() noexcept { | ||
| 73 | return this->data(); | ||
| 74 | } | ||
| 75 | |||
| 76 | [[nodiscard]] const T* begin() const noexcept { | ||
| 77 | return this->data(); | ||
| 78 | } | ||
| 79 | |||
| 80 | [[nodiscard]] T* end() noexcept { | ||
| 81 | return this->data() + this->size(); | ||
| 82 | } | ||
| 83 | |||
| 84 | [[nodiscard]] const T* end() const noexcept { | ||
| 85 | return this->data() + this->size(); | ||
| 86 | } | ||
| 87 | |||
| 88 | T& operator[](size_t index) noexcept { | ||
| 89 | return m_data_span[index]; | ||
| 90 | } | ||
| 91 | |||
| 92 | const T& operator[](size_t index) const noexcept { | ||
| 93 | return m_data_span[index]; | ||
| 94 | } | ||
| 95 | |||
| 96 | void SetAddressAndSize(u64 addr, std::size_t size) noexcept { | ||
| 97 | m_addr = addr; | ||
| 98 | m_size = size; | ||
| 99 | m_addr_changed = true; | ||
| 100 | } | ||
| 101 | |||
| 102 | std::span<T> Read(u64 addr, std::size_t size, | ||
| 103 | Common::ScratchBuffer<T>* backup = nullptr) noexcept { | ||
| 104 | m_addr = addr; | ||
| 105 | m_size = size; | ||
| 106 | if (m_size == 0) { | ||
| 107 | m_is_data_copy = true; | ||
| 108 | return {}; | ||
| 109 | } | ||
| 110 | |||
| 111 | if (this->TrySetSpan()) { | ||
| 112 | if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 113 | m_memory.FlushRegion(m_addr, this->size_bytes()); | ||
| 114 | } | ||
| 115 | } else { | ||
| 116 | if (backup) { | ||
| 117 | backup->resize_destructive(this->size()); | ||
| 118 | m_data_span = *backup; | ||
| 119 | } else { | ||
| 120 | m_data_copy.resize(this->size()); | ||
| 121 | m_data_span = std::span(m_data_copy); | ||
| 122 | } | ||
| 123 | m_is_data_copy = true; | ||
| 124 | m_span_valid = true; | ||
| 125 | if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 126 | m_memory.ReadBlock(m_addr, this->data(), this->size_bytes()); | ||
| 127 | } else { | ||
| 128 | m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes()); | ||
| 129 | } | ||
| 130 | } | ||
| 131 | return m_data_span; | ||
| 132 | } | ||
| 133 | |||
| 134 | void Write(std::span<T> write_data) noexcept { | ||
| 135 | if constexpr (FLAGS & GuestMemoryFlags::Cached) { | ||
| 136 | m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes()); | ||
| 137 | } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 138 | m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes()); | ||
| 139 | } else { | ||
| 140 | m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes()); | ||
| 141 | } | ||
| 142 | } | ||
| 143 | |||
| 144 | bool TrySetSpan() noexcept { | ||
| 145 | if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) { | ||
| 146 | m_data_span = {reinterpret_cast<T*>(ptr), this->size()}; | ||
| 147 | m_span_valid = true; | ||
| 148 | return true; | ||
| 149 | } | ||
| 150 | return false; | ||
| 151 | } | ||
| 152 | |||
| 153 | protected: | ||
| 154 | bool IsDataCopy() const noexcept { | ||
| 155 | return m_is_data_copy; | ||
| 156 | } | ||
| 157 | |||
| 158 | bool AddressChanged() const noexcept { | ||
| 159 | return m_addr_changed; | ||
| 160 | } | ||
| 161 | |||
| 162 | M& m_memory; | ||
| 163 | u64 m_addr{}; | ||
| 164 | size_t m_size{}; | ||
| 165 | std::span<T> m_data_span{}; | ||
| 166 | std::vector<T> m_data_copy{}; | ||
| 167 | bool m_span_valid{false}; | ||
| 168 | bool m_is_data_copy{false}; | ||
| 169 | bool m_addr_changed{false}; | ||
| 170 | }; | ||
| 171 | |||
| 172 | template <typename M, typename T, GuestMemoryFlags FLAGS> | ||
| 173 | class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> { | ||
| 174 | public: | ||
| 175 | GuestMemoryScoped() = delete; | ||
| 176 | explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size, | ||
| 177 | Common::ScratchBuffer<T>* backup = nullptr) | ||
| 178 | : GuestMemory<M, T, FLAGS>(memory, addr, size, backup) { | ||
| 179 | if constexpr (!(FLAGS & GuestMemoryFlags::Read)) { | ||
| 180 | if (!this->TrySetSpan()) { | ||
| 181 | if (backup) { | ||
| 182 | this->m_data_span = *backup; | ||
| 183 | this->m_span_valid = true; | ||
| 184 | this->m_is_data_copy = true; | ||
| 185 | } | ||
| 186 | } | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 190 | ~GuestMemoryScoped() { | ||
| 191 | if constexpr (FLAGS & GuestMemoryFlags::Write) { | ||
| 192 | if (this->size() == 0) [[unlikely]] { | ||
| 193 | return; | ||
| 194 | } | ||
| 195 | |||
| 196 | if (this->AddressChanged() || this->IsDataCopy()) { | ||
| 197 | ASSERT(this->m_span_valid); | ||
| 198 | if constexpr (FLAGS & GuestMemoryFlags::Cached) { | ||
| 199 | this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes()); | ||
| 200 | } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 201 | this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes()); | ||
| 202 | } else { | ||
| 203 | this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes()); | ||
| 204 | } | ||
| 205 | } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || | ||
| 206 | (FLAGS & GuestMemoryFlags::Cached)) { | ||
| 207 | this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes()); | ||
| 208 | } | ||
| 209 | } | ||
| 210 | } | ||
| 211 | }; | ||
| 212 | } // namespace | ||
| 213 | |||
| 214 | } // namespace Core::Memory | ||
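The new guest_memory.h wraps a guest region in a RAII view: the constructor either aliases host memory directly through GetSpan (flushing the GPU cache first for Safe reads) or copies the data into a caller-supplied ScratchBuffer or an internal vector, and GuestMemoryScoped writes buffered data back, or invalidates the aliased region, when it goes out of scope with the Write flag set. A minimal usage sketch follows; it assumes a CpuGuestMemory alias binding GuestMemoryScoped to Core::Memory::Memory, as used by hle_ipc.cpp later in this diff, and the function name and element type are invented for illustration.

    // Illustrative sketch only, not part of this commit. Assumes something like:
    //   template <typename T, GuestMemoryFlags FLAGS>
    //   using CpuGuestMemory = GuestMemoryScoped<Core::Memory::Memory, T, FLAGS>;
    #include "core/guest_memory.h"
    #include "core/memory.h"

    void DoubleGuestWords(Core::Memory::Memory& memory, u64 guest_addr, std::size_t count) {
        // SafeReadWrite: the constructor reads `count` u32 values (FlushRegion is called when
        // the span aliases guest memory); the destructor writes a buffered copy back with
        // WriteBlock, or calls InvalidateRegion if the span pointed at guest memory directly.
        Core::Memory::CpuGuestMemory<u32, Core::Memory::GuestMemoryFlags::SafeReadWrite> words(
            memory, guest_addr, count);
        for (u32& w : words) {
            w *= 2;
        }
    } // write-back or invalidation happens here, when `words` is destroyed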
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 53735a225..0b08e877e 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include "common/scope_exit.h" | 5 | #include "common/scope_exit.h" |
| 6 | #include "common/settings.h" | 6 | #include "common/settings.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/gpu_dirty_memory_manager.h" | ||
| 8 | #include "core/hle/kernel/k_process.h" | 9 | #include "core/hle/kernel/k_process.h" |
| 9 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | 10 | #include "core/hle/kernel/k_scoped_resource_reservation.h" |
| 10 | #include "core/hle/kernel/k_shared_memory.h" | 11 | #include "core/hle/kernel/k_shared_memory.h" |
| @@ -320,7 +321,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa | |||
| 320 | 321 | ||
| 321 | // Ensure our memory is initialized. | 322 | // Ensure our memory is initialized. |
| 322 | m_memory.SetCurrentPageTable(*this); | 323 | m_memory.SetCurrentPageTable(*this); |
| 323 | m_memory.SetGPUDirtyManagers(m_dirty_memory_managers); | 324 | m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager()); |
| 324 | 325 | ||
| 325 | // Ensure we can insert the code region. | 326 | // Ensure we can insert the code region. |
| 326 | R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, | 327 | R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, |
| @@ -417,7 +418,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, | |||
| 417 | 418 | ||
| 418 | // Ensure our memory is initialized. | 419 | // Ensure our memory is initialized. |
| 419 | m_memory.SetCurrentPageTable(*this); | 420 | m_memory.SetCurrentPageTable(*this); |
| 420 | m_memory.SetGPUDirtyManagers(m_dirty_memory_managers); | 421 | m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager()); |
| 421 | 422 | ||
| 422 | // Ensure we can insert the code region. | 423 | // Ensure we can insert the code region. |
| 423 | R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code), | 424 | R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code), |
| @@ -1141,8 +1142,7 @@ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {} | |||
| 1141 | KProcess::KProcess(KernelCore& kernel) | 1142 | KProcess::KProcess(KernelCore& kernel) |
| 1142 | : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel}, | 1143 | : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel}, |
| 1143 | m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()}, | 1144 | m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()}, |
| 1144 | m_handle_table{kernel}, m_dirty_memory_managers{}, | 1145 | m_handle_table{kernel}, m_exclusive_monitor{}, m_memory{kernel.System()} {} |
| 1145 | m_exclusive_monitor{}, m_memory{kernel.System()} {} | ||
| 1146 | KProcess::~KProcess() = default; | 1146 | KProcess::~KProcess() = default; |
| 1147 | 1147 | ||
| 1148 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, | 1148 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, |
| @@ -1324,10 +1324,4 @@ bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointT | |||
| 1324 | return true; | 1324 | return true; |
| 1325 | } | 1325 | } |
| 1326 | 1326 | ||
| 1327 | void KProcess::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) { | ||
| 1328 | for (auto& manager : m_dirty_memory_managers) { | ||
| 1329 | manager.Gather(callback); | ||
| 1330 | } | ||
| 1331 | } | ||
| 1332 | |||
| 1333 | } // namespace Kernel | 1327 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index 53c0e3316..ab1358a12 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | 7 | ||
| 8 | #include "core/arm/arm_interface.h" | 8 | #include "core/arm/arm_interface.h" |
| 9 | #include "core/file_sys/program_metadata.h" | 9 | #include "core/file_sys/program_metadata.h" |
| 10 | #include "core/gpu_dirty_memory_manager.h" | ||
| 11 | #include "core/hle/kernel/code_set.h" | 10 | #include "core/hle/kernel/code_set.h" |
| 12 | #include "core/hle/kernel/k_address_arbiter.h" | 11 | #include "core/hle/kernel/k_address_arbiter.h" |
| 13 | #include "core/hle/kernel/k_capabilities.h" | 12 | #include "core/hle/kernel/k_capabilities.h" |
| @@ -128,7 +127,6 @@ private: | |||
| 128 | #ifdef HAS_NCE | 127 | #ifdef HAS_NCE |
| 129 | std::unordered_map<u64, u64> m_post_handlers{}; | 128 | std::unordered_map<u64, u64> m_post_handlers{}; |
| 130 | #endif | 129 | #endif |
| 131 | std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> m_dirty_memory_managers; | ||
| 132 | std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor; | 130 | std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor; |
| 133 | Core::Memory::Memory m_memory; | 131 | Core::Memory::Memory m_memory; |
| 134 | 132 | ||
| @@ -511,8 +509,6 @@ public: | |||
| 511 | return m_memory; | 509 | return m_memory; |
| 512 | } | 510 | } |
| 513 | 511 | ||
| 514 | void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback); | ||
| 515 | |||
| 516 | Core::ExclusiveMonitor& GetExclusiveMonitor() const { | 512 | Core::ExclusiveMonitor& GetExclusiveMonitor() const { |
| 517 | return *m_exclusive_monitor; | 513 | return *m_exclusive_monitor; |
| 518 | } | 514 | } |
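Net effect of the k_process changes: the per-core GPUDirtyMemoryManager array and GatherGPUDirtyMemory() leave KProcess, and process initialization instead wires m_memory to managers owned by the system, so dirty-page tracking is shared rather than gathered per process. A hedged sketch of the consumer side, reusing the per-manager Gather() call from the removed KProcess code; the span-of-managers parameter stands in for whatever view System::GetGPUDirtyMemoryManager() returns, and the callback signature mirrors the removed method:

    // Illustrative only: gather dirty ranges from the system-owned per-core managers
    // instead of asking the application process, as the removed KProcess method did.
    void GatherAllDirtyRanges(std::span<Core::GPUDirtyMemoryManager> managers,
                              std::function<void(VAddr, size_t)>& callback) {
        for (auto& manager : managers) {
            manager.Gather(callback); // same per-manager Gather() the removed loop used
        }
    }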
diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp index 683f44e27..29a10ad13 100644 --- a/src/core/hle/service/acc/profile_manager.cpp +++ b/src/core/hle/service/acc/profile_manager.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "common/fs/path_util.h" | 11 | #include "common/fs/path_util.h" |
| 12 | #include "common/polyfill_ranges.h" | 12 | #include "common/polyfill_ranges.h" |
| 13 | #include "common/settings.h" | 13 | #include "common/settings.h" |
| 14 | #include "common/string_util.h" | ||
| 14 | #include "core/hle/service/acc/profile_manager.h" | 15 | #include "core/hle/service/acc/profile_manager.h" |
| 15 | 16 | ||
| 16 | namespace Service::Account { | 17 | namespace Service::Account { |
| @@ -164,6 +165,22 @@ std::optional<std::size_t> ProfileManager::GetUserIndex(const ProfileInfo& user) | |||
| 164 | return GetUserIndex(user.user_uuid); | 165 | return GetUserIndex(user.user_uuid); |
| 165 | } | 166 | } |
| 166 | 167 | ||
| 168 | /// Returns the first user profile seen based on username (which does not enforce uniqueness) | ||
| 169 | std::optional<std::size_t> ProfileManager::GetUserIndex(const std::string& username) const { | ||
| 170 | const auto iter = | ||
| 171 | std::find_if(profiles.begin(), profiles.end(), [&username](const ProfileInfo& p) { | ||
| 172 | const std::string profile_username = Common::StringFromFixedZeroTerminatedBuffer( | ||
| 173 | reinterpret_cast<const char*>(p.username.data()), p.username.size()); | ||
| 174 | |||
| 175 | return username.compare(profile_username) == 0; | ||
| 176 | }); | ||
| 177 | if (iter == profiles.end()) { | ||
| 178 | return std::nullopt; | ||
| 179 | } | ||
| 180 | |||
| 181 | return static_cast<std::size_t>(std::distance(profiles.begin(), iter)); | ||
| 182 | } | ||
| 183 | |||
| 167 | /// Returns the data structure used by the switch when GetProfileBase is called on acc:* | 184 | /// Returns the data structure used by the switch when GetProfileBase is called on acc:* |
| 168 | bool ProfileManager::GetProfileBase(std::optional<std::size_t> index, ProfileBase& profile) const { | 185 | bool ProfileManager::GetProfileBase(std::optional<std::size_t> index, ProfileBase& profile) const { |
| 169 | if (!index || index >= MAX_USERS) { | 186 | if (!index || index >= MAX_USERS) { |
diff --git a/src/core/hle/service/acc/profile_manager.h b/src/core/hle/service/acc/profile_manager.h index e21863e64..f94157300 100644 --- a/src/core/hle/service/acc/profile_manager.h +++ b/src/core/hle/service/acc/profile_manager.h | |||
| @@ -70,6 +70,7 @@ public: | |||
| 70 | std::optional<Common::UUID> GetUser(std::size_t index) const; | 70 | std::optional<Common::UUID> GetUser(std::size_t index) const; |
| 71 | std::optional<std::size_t> GetUserIndex(const Common::UUID& uuid) const; | 71 | std::optional<std::size_t> GetUserIndex(const Common::UUID& uuid) const; |
| 72 | std::optional<std::size_t> GetUserIndex(const ProfileInfo& user) const; | 72 | std::optional<std::size_t> GetUserIndex(const ProfileInfo& user) const; |
| 73 | std::optional<std::size_t> GetUserIndex(const std::string& username) const; | ||
| 73 | bool GetProfileBase(std::optional<std::size_t> index, ProfileBase& profile) const; | 74 | bool GetProfileBase(std::optional<std::size_t> index, ProfileBase& profile) const; |
| 74 | bool GetProfileBase(Common::UUID uuid, ProfileBase& profile) const; | 75 | bool GetProfileBase(Common::UUID uuid, ProfileBase& profile) const; |
| 75 | bool GetProfileBase(const ProfileInfo& user, ProfileBase& profile) const; | 76 | bool GetProfileBase(const ProfileInfo& user, ProfileBase& profile) const; |
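The new GetUserIndex overload is a linear scan: each stored profile's fixed, zero-terminated username buffer is converted to a string and compared against the argument, and the index of the first match is returned (usernames are not unique, as the comment notes). A small lookup sketch, assuming an already populated ProfileManager; the helper name is invented, and GetUser(index) is the existing accessor declared just above the new overload:

    // Illustrative only: resolve a username to the UUID of the first matching profile.
    std::optional<Common::UUID> FindUserByName(const Service::Account::ProfileManager& manager,
                                               const std::string& username) {
        const auto index = manager.GetUserIndex(username);
        if (!index) {
            return std::nullopt; // no profile with that exact username
        }
        return manager.GetUser(*index); // GetUser already returns std::optional<Common::UUID>
    }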
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index 03ebdc137..595a3372e 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp | |||
| @@ -26,6 +26,7 @@ void LoopProcess(Core::System& system) { | |||
| 26 | resource_manager->Initialize(); | 26 | resource_manager->Initialize(); |
| 27 | resource_manager->RegisterAppletResourceUserId(system.ApplicationProcess()->GetProcessId(), | 27 | resource_manager->RegisterAppletResourceUserId(system.ApplicationProcess()->GetProcessId(), |
| 28 | true); | 28 | true); |
| 29 | resource_manager->SetAruidValidForVibration(system.ApplicationProcess()->GetProcessId(), true); | ||
| 29 | 30 | ||
| 30 | server_manager->RegisterNamedService( | 31 | server_manager->RegisterNamedService( |
| 31 | "hid", std::make_shared<IHidServer>(system, resource_manager, firmware_settings)); | 32 | "hid", std::make_shared<IHidServer>(system, resource_manager, firmware_settings)); |
diff --git a/src/core/hle/service/hid/hid_server.cpp b/src/core/hle/service/hid/hid_server.cpp index 1951da33b..30afed812 100644 --- a/src/core/hle/service/hid/hid_server.cpp +++ b/src/core/hle/service/hid/hid_server.cpp | |||
| @@ -22,12 +22,16 @@ | |||
| 22 | #include "hid_core/resources/mouse/mouse.h" | 22 | #include "hid_core/resources/mouse/mouse.h" |
| 23 | #include "hid_core/resources/npad/npad.h" | 23 | #include "hid_core/resources/npad/npad.h" |
| 24 | #include "hid_core/resources/npad/npad_types.h" | 24 | #include "hid_core/resources/npad/npad_types.h" |
| 25 | #include "hid_core/resources/npad/npad_vibration.h" | ||
| 25 | #include "hid_core/resources/palma/palma.h" | 26 | #include "hid_core/resources/palma/palma.h" |
| 26 | #include "hid_core/resources/six_axis/console_six_axis.h" | 27 | #include "hid_core/resources/six_axis/console_six_axis.h" |
| 27 | #include "hid_core/resources/six_axis/seven_six_axis.h" | 28 | #include "hid_core/resources/six_axis/seven_six_axis.h" |
| 28 | #include "hid_core/resources/six_axis/six_axis.h" | 29 | #include "hid_core/resources/six_axis/six_axis.h" |
| 29 | #include "hid_core/resources/touch_screen/gesture.h" | 30 | #include "hid_core/resources/touch_screen/gesture.h" |
| 30 | #include "hid_core/resources/touch_screen/touch_screen.h" | 31 | #include "hid_core/resources/touch_screen/touch_screen.h" |
| 32 | #include "hid_core/resources/vibration/gc_vibration_device.h" | ||
| 33 | #include "hid_core/resources/vibration/n64_vibration_device.h" | ||
| 34 | #include "hid_core/resources/vibration/vibration_device.h" | ||
| 31 | 35 | ||
| 32 | namespace Service::HID { | 36 | namespace Service::HID { |
| 33 | 37 | ||
| @@ -38,7 +42,7 @@ public: | |||
| 38 | : ServiceFramework{system_, "IActiveVibrationDeviceList"}, resource_manager(resource) { | 42 | : ServiceFramework{system_, "IActiveVibrationDeviceList"}, resource_manager(resource) { |
| 39 | // clang-format off | 43 | // clang-format off |
| 40 | static const FunctionInfo functions[] = { | 44 | static const FunctionInfo functions[] = { |
| 41 | {0, &IActiveVibrationDeviceList::InitializeVibrationDevice, "InitializeVibrationDevice"}, | 45 | {0, &IActiveVibrationDeviceList::ActivateVibrationDevice, "ActivateVibrationDevice"}, |
| 42 | }; | 46 | }; |
| 43 | // clang-format on | 47 | // clang-format on |
| 44 | 48 | ||
| @@ -46,22 +50,49 @@ public: | |||
| 46 | } | 50 | } |
| 47 | 51 | ||
| 48 | private: | 52 | private: |
| 49 | void InitializeVibrationDevice(HLERequestContext& ctx) { | 53 | void ActivateVibrationDevice(HLERequestContext& ctx) { |
| 50 | IPC::RequestParser rp{ctx}; | 54 | IPC::RequestParser rp{ctx}; |
| 51 | const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()}; | 55 | const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()}; |
| 52 | 56 | ||
| 53 | if (resource_manager != nullptr && resource_manager->GetNpad()) { | ||
| 54 | resource_manager->GetNpad()->InitializeVibrationDevice(vibration_device_handle); | ||
| 55 | } | ||
| 56 | |||
| 57 | LOG_DEBUG(Service_HID, "called, npad_type={}, npad_id={}, device_index={}", | 57 | LOG_DEBUG(Service_HID, "called, npad_type={}, npad_id={}, device_index={}", |
| 58 | vibration_device_handle.npad_type, vibration_device_handle.npad_id, | 58 | vibration_device_handle.npad_type, vibration_device_handle.npad_id, |
| 59 | vibration_device_handle.device_index); | 59 | vibration_device_handle.device_index); |
| 60 | 60 | ||
| 61 | const auto result = ActivateVibrationDeviceImpl(vibration_device_handle); | ||
| 62 | |||
| 61 | IPC::ResponseBuilder rb{ctx, 2}; | 63 | IPC::ResponseBuilder rb{ctx, 2}; |
| 62 | rb.Push(ResultSuccess); | 64 | rb.Push(result); |
| 63 | } | 65 | } |
| 64 | 66 | ||
| 67 | Result ActivateVibrationDeviceImpl(const Core::HID::VibrationDeviceHandle& handle) { | ||
| 68 | std::scoped_lock lock{mutex}; | ||
| 69 | |||
| 70 | const Result is_valid = IsVibrationHandleValid(handle); | ||
| 71 | if (is_valid.IsError()) { | ||
| 72 | return is_valid; | ||
| 73 | } | ||
| 74 | |||
| 75 | for (std::size_t i = 0; i < list_size; i++) { | ||
| 76 | if (handle.device_index == vibration_device_list[i].device_index && | ||
| 77 | handle.npad_id == vibration_device_list[i].npad_id && | ||
| 78 | handle.npad_type == vibration_device_list[i].npad_type) { | ||
| 79 | return ResultSuccess; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | if (list_size == vibration_device_list.size()) { | ||
| 83 | return ResultVibrationDeviceIndexOutOfRange; | ||
| 84 | } | ||
| 85 | const Result result = resource_manager->GetVibrationDevice(handle)->Activate(); | ||
| 86 | if (result.IsError()) { | ||
| 87 | return result; | ||
| 88 | } | ||
| 89 | vibration_device_list[list_size++] = handle; | ||
| 90 | return ResultSuccess; | ||
| 91 | } | ||
| 92 | |||
| 93 | mutable std::mutex mutex; | ||
| 94 | std::size_t list_size{}; | ||
| 95 | std::array<Core::HID::VibrationDeviceHandle, 0x100> vibration_device_list{}; | ||
| 65 | std::shared_ptr<ResourceManager> resource_manager; | 96 | std::shared_ptr<ResourceManager> resource_manager; |
| 66 | }; | 97 | }; |
| 67 | 98 | ||
| @@ -153,7 +184,7 @@ IHidServer::IHidServer(Core::System& system_, std::shared_ptr<ResourceManager> r | |||
| 153 | {209, &IHidServer::BeginPermitVibrationSession, "BeginPermitVibrationSession"}, | 184 | {209, &IHidServer::BeginPermitVibrationSession, "BeginPermitVibrationSession"}, |
| 154 | {210, &IHidServer::EndPermitVibrationSession, "EndPermitVibrationSession"}, | 185 | {210, &IHidServer::EndPermitVibrationSession, "EndPermitVibrationSession"}, |
| 155 | {211, &IHidServer::IsVibrationDeviceMounted, "IsVibrationDeviceMounted"}, | 186 | {211, &IHidServer::IsVibrationDeviceMounted, "IsVibrationDeviceMounted"}, |
| 156 | {212, nullptr, "SendVibrationValueInBool"}, | 187 | {212, &IHidServer::SendVibrationValueInBool, "SendVibrationValueInBool"}, |
| 157 | {300, &IHidServer::ActivateConsoleSixAxisSensor, "ActivateConsoleSixAxisSensor"}, | 188 | {300, &IHidServer::ActivateConsoleSixAxisSensor, "ActivateConsoleSixAxisSensor"}, |
| 158 | {301, &IHidServer::StartConsoleSixAxisSensor, "StartConsoleSixAxisSensor"}, | 189 | {301, &IHidServer::StartConsoleSixAxisSensor, "StartConsoleSixAxisSensor"}, |
| 159 | {302, &IHidServer::StopConsoleSixAxisSensor, "StopConsoleSixAxisSensor"}, | 190 | {302, &IHidServer::StopConsoleSixAxisSensor, "StopConsoleSixAxisSensor"}, |
| @@ -1492,59 +1523,13 @@ void IHidServer::ClearNpadCaptureButtonAssignment(HLERequestContext& ctx) { | |||
| 1492 | void IHidServer::GetVibrationDeviceInfo(HLERequestContext& ctx) { | 1523 | void IHidServer::GetVibrationDeviceInfo(HLERequestContext& ctx) { |
| 1493 | IPC::RequestParser rp{ctx}; | 1524 | IPC::RequestParser rp{ctx}; |
| 1494 | const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()}; | 1525 | const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()}; |
| 1495 | const auto controller = GetResourceManager()->GetNpad(); | ||
| 1496 | |||
| 1497 | Core::HID::VibrationDeviceInfo vibration_device_info; | ||
| 1498 | bool check_device_index = false; | ||
| 1499 | |||
| 1500 | switch (vibration_device_handle.npad_type) { | ||
| 1501 | case Core::HID::NpadStyleIndex::Fullkey: | ||
| 1502 | case Core::HID::NpadStyleIndex::Handheld: | ||
| 1503 | case Core::HID::NpadStyleIndex::JoyconDual: | ||
| 1504 | case Core::HID::NpadStyleIndex::JoyconLeft: | ||
| 1505 | case Core::HID::NpadStyleIndex::JoyconRight: | ||
| 1506 | vibration_device_info.type = Core::HID::VibrationDeviceType::LinearResonantActuator; | ||
| 1507 | check_device_index = true; | ||
| 1508 | break; | ||
| 1509 | case Core::HID::NpadStyleIndex::GameCube: | ||
| 1510 | vibration_device_info.type = Core::HID::VibrationDeviceType::GcErm; | ||
| 1511 | break; | ||
| 1512 | case Core::HID::NpadStyleIndex::N64: | ||
| 1513 | vibration_device_info.type = Core::HID::VibrationDeviceType::N64; | ||
| 1514 | break; | ||
| 1515 | default: | ||
| 1516 | vibration_device_info.type = Core::HID::VibrationDeviceType::Unknown; | ||
| 1517 | break; | ||
| 1518 | } | ||
| 1519 | |||
| 1520 | vibration_device_info.position = Core::HID::VibrationDevicePosition::None; | ||
| 1521 | if (check_device_index) { | ||
| 1522 | switch (vibration_device_handle.device_index) { | ||
| 1523 | case Core::HID::DeviceIndex::Left: | ||
| 1524 | vibration_device_info.position = Core::HID::VibrationDevicePosition::Left; | ||
| 1525 | break; | ||
| 1526 | case Core::HID::DeviceIndex::Right: | ||
| 1527 | vibration_device_info.position = Core::HID::VibrationDevicePosition::Right; | ||
| 1528 | break; | ||
| 1529 | case Core::HID::DeviceIndex::None: | ||
| 1530 | default: | ||
| 1531 | ASSERT_MSG(false, "DeviceIndex should never be None!"); | ||
| 1532 | break; | ||
| 1533 | } | ||
| 1534 | } | ||
| 1535 | 1526 | ||
| 1536 | LOG_DEBUG(Service_HID, "called, vibration_device_type={}, vibration_device_position={}", | 1527 | Core::HID::VibrationDeviceInfo vibration_device_info{}; |
| 1537 | vibration_device_info.type, vibration_device_info.position); | 1528 | const auto result = GetResourceManager()->GetVibrationDeviceInfo(vibration_device_info, |
| 1538 | 1529 | vibration_device_handle); | |
| 1539 | const auto result = IsVibrationHandleValid(vibration_device_handle); | ||
| 1540 | if (result.IsError()) { | ||
| 1541 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1542 | rb.Push(result); | ||
| 1543 | return; | ||
| 1544 | } | ||
| 1545 | 1530 | ||
| 1546 | IPC::ResponseBuilder rb{ctx, 4}; | 1531 | IPC::ResponseBuilder rb{ctx, 4}; |
| 1547 | rb.Push(ResultSuccess); | 1532 | rb.Push(result); |
| 1548 | rb.PushRaw(vibration_device_info); | 1533 | rb.PushRaw(vibration_device_info); |
| 1549 | } | 1534 | } |
| 1550 | 1535 | ||
| @@ -1560,16 +1545,16 @@ void IHidServer::SendVibrationValue(HLERequestContext& ctx) { | |||
| 1560 | 1545 | ||
| 1561 | const auto parameters{rp.PopRaw<Parameters>()}; | 1546 | const auto parameters{rp.PopRaw<Parameters>()}; |
| 1562 | 1547 | ||
| 1563 | GetResourceManager()->GetNpad()->VibrateController(parameters.applet_resource_user_id, | ||
| 1564 | parameters.vibration_device_handle, | ||
| 1565 | parameters.vibration_value); | ||
| 1566 | |||
| 1567 | LOG_DEBUG(Service_HID, | 1548 | LOG_DEBUG(Service_HID, |
| 1568 | "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}", | 1549 | "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}", |
| 1569 | parameters.vibration_device_handle.npad_type, | 1550 | parameters.vibration_device_handle.npad_type, |
| 1570 | parameters.vibration_device_handle.npad_id, | 1551 | parameters.vibration_device_handle.npad_id, |
| 1571 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); | 1552 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); |
| 1572 | 1553 | ||
| 1554 | GetResourceManager()->SendVibrationValue(parameters.applet_resource_user_id, | ||
| 1555 | parameters.vibration_device_handle, | ||
| 1556 | parameters.vibration_value); | ||
| 1557 | |||
| 1573 | IPC::ResponseBuilder rb{ctx, 2}; | 1558 | IPC::ResponseBuilder rb{ctx, 2}; |
| 1574 | rb.Push(ResultSuccess); | 1559 | rb.Push(ResultSuccess); |
| 1575 | } | 1560 | } |
| @@ -1591,10 +1576,28 @@ void IHidServer::GetActualVibrationValue(HLERequestContext& ctx) { | |||
| 1591 | parameters.vibration_device_handle.npad_id, | 1576 | parameters.vibration_device_handle.npad_id, |
| 1592 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); | 1577 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); |
| 1593 | 1578 | ||
| 1579 | bool has_active_aruid{}; | ||
| 1580 | NpadVibrationDevice* device{nullptr}; | ||
| 1581 | Core::HID::VibrationValue vibration_value{}; | ||
| 1582 | Result result = GetResourceManager()->IsVibrationAruidActive(parameters.applet_resource_user_id, | ||
| 1583 | has_active_aruid); | ||
| 1584 | |||
| 1585 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1586 | result = IsVibrationHandleValid(parameters.vibration_device_handle); | ||
| 1587 | } | ||
| 1588 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1589 | device = GetResourceManager()->GetNSVibrationDevice(parameters.vibration_device_handle); | ||
| 1590 | } | ||
| 1591 | if (device != nullptr) { | ||
| 1592 | result = device->GetActualVibrationValue(vibration_value); | ||
| 1593 | } | ||
| 1594 | if (result.IsError()) { | ||
| 1595 | vibration_value = Core::HID::DEFAULT_VIBRATION_VALUE; | ||
| 1596 | } | ||
| 1597 | |||
| 1594 | IPC::ResponseBuilder rb{ctx, 6}; | 1598 | IPC::ResponseBuilder rb{ctx, 6}; |
| 1595 | rb.Push(ResultSuccess); | 1599 | rb.Push(ResultSuccess); |
| 1596 | rb.PushRaw(GetResourceManager()->GetNpad()->GetLastVibration( | 1600 | rb.PushRaw(vibration_value); |
| 1597 | parameters.applet_resource_user_id, parameters.vibration_device_handle)); | ||
| 1598 | } | 1601 | } |
| 1599 | 1602 | ||
| 1600 | void IHidServer::CreateActiveVibrationDeviceList(HLERequestContext& ctx) { | 1603 | void IHidServer::CreateActiveVibrationDeviceList(HLERequestContext& ctx) { |
| @@ -1609,25 +1612,27 @@ void IHidServer::PermitVibration(HLERequestContext& ctx) { | |||
| 1609 | IPC::RequestParser rp{ctx}; | 1612 | IPC::RequestParser rp{ctx}; |
| 1610 | const auto can_vibrate{rp.Pop<bool>()}; | 1613 | const auto can_vibrate{rp.Pop<bool>()}; |
| 1611 | 1614 | ||
| 1612 | // nnSDK saves this value as a float. Since it can only be 1.0f or 0.0f we simplify this value | ||
| 1613 | // by converting it to a bool | ||
| 1614 | Settings::values.vibration_enabled.SetValue(can_vibrate); | ||
| 1615 | |||
| 1616 | LOG_DEBUG(Service_HID, "called, can_vibrate={}", can_vibrate); | 1615 | LOG_DEBUG(Service_HID, "called, can_vibrate={}", can_vibrate); |
| 1617 | 1616 | ||
| 1617 | const auto result = | ||
| 1618 | GetResourceManager()->GetNpad()->GetVibrationHandler()->SetVibrationMasterVolume( | ||
| 1619 | can_vibrate ? 1.0f : 0.0f); | ||
| 1620 | |||
| 1618 | IPC::ResponseBuilder rb{ctx, 2}; | 1621 | IPC::ResponseBuilder rb{ctx, 2}; |
| 1619 | rb.Push(ResultSuccess); | 1622 | rb.Push(result); |
| 1620 | } | 1623 | } |
| 1621 | 1624 | ||
| 1622 | void IHidServer::IsVibrationPermitted(HLERequestContext& ctx) { | 1625 | void IHidServer::IsVibrationPermitted(HLERequestContext& ctx) { |
| 1623 | LOG_DEBUG(Service_HID, "called"); | 1626 | LOG_DEBUG(Service_HID, "called"); |
| 1624 | 1627 | ||
| 1625 | // nnSDK checks if a float is greater than zero. We return the bool we stored earlier | 1628 | f32 master_volume{}; |
| 1626 | const auto is_enabled = Settings::values.vibration_enabled.GetValue(); | 1629 | const auto result = |
| 1630 | GetResourceManager()->GetNpad()->GetVibrationHandler()->GetVibrationMasterVolume( | ||
| 1631 | master_volume); | ||
| 1627 | 1632 | ||
| 1628 | IPC::ResponseBuilder rb{ctx, 3}; | 1633 | IPC::ResponseBuilder rb{ctx, 3}; |
| 1629 | rb.Push(ResultSuccess); | 1634 | rb.Push(result); |
| 1630 | rb.Push(is_enabled); | 1635 | rb.Push(master_volume > 0.0f); |
| 1631 | } | 1636 | } |
| 1632 | 1637 | ||
| 1633 | void IHidServer::SendVibrationValues(HLERequestContext& ctx) { | 1638 | void IHidServer::SendVibrationValues(HLERequestContext& ctx) { |
| @@ -1645,13 +1650,22 @@ void IHidServer::SendVibrationValues(HLERequestContext& ctx) { | |||
| 1645 | auto vibration_values = std::span( | 1650 | auto vibration_values = std::span( |
| 1646 | reinterpret_cast<const Core::HID::VibrationValue*>(vibration_data.data()), vibration_count); | 1651 | reinterpret_cast<const Core::HID::VibrationValue*>(vibration_data.data()), vibration_count); |
| 1647 | 1652 | ||
| 1648 | GetResourceManager()->GetNpad()->VibrateControllers(applet_resource_user_id, | ||
| 1649 | vibration_device_handles, vibration_values); | ||
| 1650 | |||
| 1651 | LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id); | 1653 | LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id); |
| 1652 | 1654 | ||
| 1655 | Result result = ResultSuccess; | ||
| 1656 | if (handle_count != vibration_count) { | ||
| 1657 | result = ResultVibrationArraySizeMismatch; | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | for (std::size_t i = 0; i < handle_count; i++) { | ||
| 1661 | if (result.IsSuccess()) { | ||
| 1662 | result = GetResourceManager()->SendVibrationValue( | ||
| 1663 | applet_resource_user_id, vibration_device_handles[i], vibration_values[i]); | ||
| 1664 | } | ||
| 1665 | } | ||
| 1666 | |||
| 1653 | IPC::ResponseBuilder rb{ctx, 2}; | 1667 | IPC::ResponseBuilder rb{ctx, 2}; |
| 1654 | rb.Push(ResultSuccess); | 1668 | rb.Push(result); |
| 1655 | } | 1669 | } |
| 1656 | 1670 | ||
| 1657 | void IHidServer::SendVibrationGcErmCommand(HLERequestContext& ctx) { | 1671 | void IHidServer::SendVibrationGcErmCommand(HLERequestContext& ctx) { |
| @@ -1666,43 +1680,6 @@ void IHidServer::SendVibrationGcErmCommand(HLERequestContext& ctx) { | |||
| 1666 | 1680 | ||
| 1667 | const auto parameters{rp.PopRaw<Parameters>()}; | 1681 | const auto parameters{rp.PopRaw<Parameters>()}; |
| 1668 | 1682 | ||
| 1669 | /** | ||
| 1670 | * Note: This uses yuzu-specific behavior such that the StopHard command produces | ||
| 1671 | * vibrations where freq_low == 0.0f and freq_high == 0.0f, as defined below, | ||
| 1672 | * in order to differentiate between Stop and StopHard commands. | ||
| 1673 | * This is done to reuse the controller vibration functions made for regular controllers. | ||
| 1674 | */ | ||
| 1675 | const auto vibration_value = [parameters] { | ||
| 1676 | switch (parameters.gc_erm_command) { | ||
| 1677 | case Core::HID::VibrationGcErmCommand::Stop: | ||
| 1678 | return Core::HID::VibrationValue{ | ||
| 1679 | .low_amplitude = 0.0f, | ||
| 1680 | .low_frequency = 160.0f, | ||
| 1681 | .high_amplitude = 0.0f, | ||
| 1682 | .high_frequency = 320.0f, | ||
| 1683 | }; | ||
| 1684 | case Core::HID::VibrationGcErmCommand::Start: | ||
| 1685 | return Core::HID::VibrationValue{ | ||
| 1686 | .low_amplitude = 1.0f, | ||
| 1687 | .low_frequency = 160.0f, | ||
| 1688 | .high_amplitude = 1.0f, | ||
| 1689 | .high_frequency = 320.0f, | ||
| 1690 | }; | ||
| 1691 | case Core::HID::VibrationGcErmCommand::StopHard: | ||
| 1692 | return Core::HID::VibrationValue{ | ||
| 1693 | .low_amplitude = 0.0f, | ||
| 1694 | .low_frequency = 0.0f, | ||
| 1695 | .high_amplitude = 0.0f, | ||
| 1696 | .high_frequency = 0.0f, | ||
| 1697 | }; | ||
| 1698 | default: | ||
| 1699 | return Core::HID::DEFAULT_VIBRATION_VALUE; | ||
| 1700 | } | ||
| 1701 | }(); | ||
| 1702 | |||
| 1703 | GetResourceManager()->GetNpad()->VibrateController( | ||
| 1704 | parameters.applet_resource_user_id, parameters.vibration_device_handle, vibration_value); | ||
| 1705 | |||
| 1706 | LOG_DEBUG(Service_HID, | 1683 | LOG_DEBUG(Service_HID, |
| 1707 | "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}, " | 1684 | "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}, " |
| 1708 | "gc_erm_command={}", | 1685 | "gc_erm_command={}", |
| @@ -1711,8 +1688,23 @@ void IHidServer::SendVibrationGcErmCommand(HLERequestContext& ctx) { | |||
| 1711 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id, | 1688 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id, |
| 1712 | parameters.gc_erm_command); | 1689 | parameters.gc_erm_command); |
| 1713 | 1690 | ||
| 1691 | bool has_active_aruid{}; | ||
| 1692 | NpadGcVibrationDevice* gc_device{nullptr}; | ||
| 1693 | Result result = GetResourceManager()->IsVibrationAruidActive(parameters.applet_resource_user_id, | ||
| 1694 | has_active_aruid); | ||
| 1695 | |||
| 1696 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1697 | result = IsVibrationHandleValid(parameters.vibration_device_handle); | ||
| 1698 | } | ||
| 1699 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1700 | gc_device = GetResourceManager()->GetGcVibrationDevice(parameters.vibration_device_handle); | ||
| 1701 | } | ||
| 1702 | if (gc_device != nullptr) { | ||
| 1703 | result = gc_device->SendVibrationGcErmCommand(parameters.gc_erm_command); | ||
| 1704 | } | ||
| 1705 | |||
| 1714 | IPC::ResponseBuilder rb{ctx, 2}; | 1706 | IPC::ResponseBuilder rb{ctx, 2}; |
| 1715 | rb.Push(ResultSuccess); | 1707 | rb.Push(result); |
| 1716 | } | 1708 | } |
| 1717 | 1709 | ||
| 1718 | void IHidServer::GetActualVibrationGcErmCommand(HLERequestContext& ctx) { | 1710 | void IHidServer::GetActualVibrationGcErmCommand(HLERequestContext& ctx) { |
| @@ -1725,33 +1717,31 @@ void IHidServer::GetActualVibrationGcErmCommand(HLERequestContext& ctx) { | |||
| 1725 | 1717 | ||
| 1726 | const auto parameters{rp.PopRaw<Parameters>()}; | 1718 | const auto parameters{rp.PopRaw<Parameters>()}; |
| 1727 | 1719 | ||
| 1728 | const auto last_vibration = GetResourceManager()->GetNpad()->GetLastVibration( | ||
| 1729 | parameters.applet_resource_user_id, parameters.vibration_device_handle); | ||
| 1730 | |||
| 1731 | const auto gc_erm_command = [last_vibration] { | ||
| 1732 | if (last_vibration.low_amplitude != 0.0f || last_vibration.high_amplitude != 0.0f) { | ||
| 1733 | return Core::HID::VibrationGcErmCommand::Start; | ||
| 1734 | } | ||
| 1735 | |||
| 1736 | /** | ||
| 1737 | * Note: This uses yuzu-specific behavior such that the StopHard command produces | ||
| 1738 | * vibrations where freq_low == 0.0f and freq_high == 0.0f, as defined in the HID function | ||
| 1739 | * SendVibrationGcErmCommand, in order to differentiate between Stop and StopHard commands. | ||
| 1740 | * This is done to reuse the controller vibration functions made for regular controllers. | ||
| 1741 | */ | ||
| 1742 | if (last_vibration.low_frequency == 0.0f && last_vibration.high_frequency == 0.0f) { | ||
| 1743 | return Core::HID::VibrationGcErmCommand::StopHard; | ||
| 1744 | } | ||
| 1745 | |||
| 1746 | return Core::HID::VibrationGcErmCommand::Stop; | ||
| 1747 | }(); | ||
| 1748 | |||
| 1749 | LOG_DEBUG(Service_HID, | 1720 | LOG_DEBUG(Service_HID, |
| 1750 | "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}", | 1721 | "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}", |
| 1751 | parameters.vibration_device_handle.npad_type, | 1722 | parameters.vibration_device_handle.npad_type, |
| 1752 | parameters.vibration_device_handle.npad_id, | 1723 | parameters.vibration_device_handle.npad_id, |
| 1753 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); | 1724 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); |
| 1754 | 1725 | ||
| 1726 | bool has_active_aruid{}; | ||
| 1727 | NpadGcVibrationDevice* gc_device{nullptr}; | ||
| 1728 | Core::HID::VibrationGcErmCommand gc_erm_command{}; | ||
| 1729 | Result result = GetResourceManager()->IsVibrationAruidActive(parameters.applet_resource_user_id, | ||
| 1730 | has_active_aruid); | ||
| 1731 | |||
| 1732 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1733 | result = IsVibrationHandleValid(parameters.vibration_device_handle); | ||
| 1734 | } | ||
| 1735 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1736 | gc_device = GetResourceManager()->GetGcVibrationDevice(parameters.vibration_device_handle); | ||
| 1737 | } | ||
| 1738 | if (gc_device != nullptr) { | ||
| 1739 | result = gc_device->GetActualVibrationGcErmCommand(gc_erm_command); | ||
| 1740 | } | ||
| 1741 | if (result.IsError()) { | ||
| 1742 | gc_erm_command = Core::HID::VibrationGcErmCommand::Stop; | ||
| 1743 | } | ||
| 1744 | |||
| 1755 | IPC::ResponseBuilder rb{ctx, 4}; | 1745 | IPC::ResponseBuilder rb{ctx, 4}; |
| 1756 | rb.Push(ResultSuccess); | 1746 | rb.Push(ResultSuccess); |
| 1757 | rb.PushEnum(gc_erm_command); | 1747 | rb.PushEnum(gc_erm_command); |
| @@ -1761,21 +1751,24 @@ void IHidServer::BeginPermitVibrationSession(HLERequestContext& ctx) { | |||
| 1761 | IPC::RequestParser rp{ctx}; | 1751 | IPC::RequestParser rp{ctx}; |
| 1762 | const auto applet_resource_user_id{rp.Pop<u64>()}; | 1752 | const auto applet_resource_user_id{rp.Pop<u64>()}; |
| 1763 | 1753 | ||
| 1764 | GetResourceManager()->GetNpad()->SetPermitVibrationSession(true); | ||
| 1765 | |||
| 1766 | LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id); | 1754 | LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id); |
| 1767 | 1755 | ||
| 1756 | const auto result = | ||
| 1757 | GetResourceManager()->GetNpad()->GetVibrationHandler()->BeginPermitVibrationSession( | ||
| 1758 | applet_resource_user_id); | ||
| 1759 | |||
| 1768 | IPC::ResponseBuilder rb{ctx, 2}; | 1760 | IPC::ResponseBuilder rb{ctx, 2}; |
| 1769 | rb.Push(ResultSuccess); | 1761 | rb.Push(result); |
| 1770 | } | 1762 | } |
| 1771 | 1763 | ||
| 1772 | void IHidServer::EndPermitVibrationSession(HLERequestContext& ctx) { | 1764 | void IHidServer::EndPermitVibrationSession(HLERequestContext& ctx) { |
| 1773 | GetResourceManager()->GetNpad()->SetPermitVibrationSession(false); | ||
| 1774 | |||
| 1775 | LOG_DEBUG(Service_HID, "called"); | 1765 | LOG_DEBUG(Service_HID, "called"); |
| 1776 | 1766 | ||
| 1767 | const auto result = | ||
| 1768 | GetResourceManager()->GetNpad()->GetVibrationHandler()->EndPermitVibrationSession(); | ||
| 1769 | |||
| 1777 | IPC::ResponseBuilder rb{ctx, 2}; | 1770 | IPC::ResponseBuilder rb{ctx, 2}; |
| 1778 | rb.Push(ResultSuccess); | 1771 | rb.Push(result); |
| 1779 | } | 1772 | } |
| 1780 | 1773 | ||
| 1781 | void IHidServer::IsVibrationDeviceMounted(HLERequestContext& ctx) { | 1774 | void IHidServer::IsVibrationDeviceMounted(HLERequestContext& ctx) { |
| @@ -1795,10 +1788,61 @@ void IHidServer::IsVibrationDeviceMounted(HLERequestContext& ctx) { | |||
| 1795 | parameters.vibration_device_handle.npad_id, | 1788 | parameters.vibration_device_handle.npad_id, |
| 1796 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); | 1789 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id); |
| 1797 | 1790 | ||
| 1791 | bool is_mounted{}; | ||
| 1792 | NpadVibrationBase* device{nullptr}; | ||
| 1793 | Result result = IsVibrationHandleValid(parameters.vibration_device_handle); | ||
| 1794 | |||
| 1795 | if (result.IsSuccess()) { | ||
| 1796 | device = GetResourceManager()->GetVibrationDevice(parameters.vibration_device_handle); | ||
| 1797 | } | ||
| 1798 | |||
| 1799 | if (device != nullptr) { | ||
| 1800 | is_mounted = device->IsVibrationMounted(); | ||
| 1801 | } | ||
| 1802 | |||
| 1798 | IPC::ResponseBuilder rb{ctx, 3}; | 1803 | IPC::ResponseBuilder rb{ctx, 3}; |
| 1799 | rb.Push(ResultSuccess); | 1804 | rb.Push(result); |
| 1800 | rb.Push(GetResourceManager()->GetNpad()->IsVibrationDeviceMounted( | 1805 | rb.Push(is_mounted); |
| 1801 | parameters.applet_resource_user_id, parameters.vibration_device_handle)); | 1806 | } |
| 1807 | |||
| 1808 | void IHidServer::SendVibrationValueInBool(HLERequestContext& ctx) { | ||
| 1809 | IPC::RequestParser rp{ctx}; | ||
| 1810 | struct Parameters { | ||
| 1811 | Core::HID::VibrationDeviceHandle vibration_device_handle; | ||
| 1812 | INSERT_PADDING_WORDS_NOINIT(1); | ||
| 1813 | u64 applet_resource_user_id; | ||
| 1814 | bool is_vibrating; | ||
| 1815 | }; | ||
| 1816 | static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size."); | ||
| 1817 | |||
| 1818 | const auto parameters{rp.PopRaw<Parameters>()}; | ||
| 1819 | |||
| 1820 | LOG_DEBUG(Service_HID, | ||
| 1821 | "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}, " | ||
| 1822 | "is_vibrating={}", | ||
| 1823 | parameters.vibration_device_handle.npad_type, | ||
| 1824 | parameters.vibration_device_handle.npad_id, | ||
| 1825 | parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id, | ||
| 1826 | parameters.is_vibrating); | ||
| 1827 | |||
| 1828 | bool has_active_aruid{}; | ||
| 1829 | NpadN64VibrationDevice* n64_device{nullptr}; | ||
| 1830 | Result result = GetResourceManager()->IsVibrationAruidActive(parameters.applet_resource_user_id, | ||
| 1831 | has_active_aruid); | ||
| 1832 | |||
| 1833 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1834 | result = IsVibrationHandleValid(parameters.vibration_device_handle); | ||
| 1835 | } | ||
| 1836 | if (result.IsSuccess() && has_active_aruid) { | ||
| 1837 | n64_device = | ||
| 1838 | GetResourceManager()->GetN64VibrationDevice(parameters.vibration_device_handle); | ||
| 1839 | } | ||
| 1840 | if (n64_device != nullptr) { | ||
| 1841 | result = n64_device->SendValueInBool(parameters.is_vibrating); | ||
| 1842 | } | ||
| 1843 | |||
| 1844 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1845 | rb.Push(result); | ||
| 1802 | } | 1846 | } |
| 1803 | 1847 | ||
| 1804 | void IHidServer::ActivateConsoleSixAxisSensor(HLERequestContext& ctx) { | 1848 | void IHidServer::ActivateConsoleSixAxisSensor(HLERequestContext& ctx) { |
diff --git a/src/core/hle/service/hid/hid_server.h b/src/core/hle/service/hid/hid_server.h index cc7c4ebdd..3a2e0a230 100644 --- a/src/core/hle/service/hid/hid_server.h +++ b/src/core/hle/service/hid/hid_server.h | |||
| @@ -97,6 +97,7 @@ private: | |||
| 97 | void BeginPermitVibrationSession(HLERequestContext& ctx); | 97 | void BeginPermitVibrationSession(HLERequestContext& ctx); |
| 98 | void EndPermitVibrationSession(HLERequestContext& ctx); | 98 | void EndPermitVibrationSession(HLERequestContext& ctx); |
| 99 | void IsVibrationDeviceMounted(HLERequestContext& ctx); | 99 | void IsVibrationDeviceMounted(HLERequestContext& ctx); |
| 100 | void SendVibrationValueInBool(HLERequestContext& ctx); | ||
| 100 | void ActivateConsoleSixAxisSensor(HLERequestContext& ctx); | 101 | void ActivateConsoleSixAxisSensor(HLERequestContext& ctx); |
| 101 | void StartConsoleSixAxisSensor(HLERequestContext& ctx); | 102 | void StartConsoleSixAxisSensor(HLERequestContext& ctx); |
| 102 | void StopConsoleSixAxisSensor(HLERequestContext& ctx); | 103 | void StopConsoleSixAxisSensor(HLERequestContext& ctx); |
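Most of the rewritten vibration handlers in hid_server.cpp now share one shape: check whether the applet resource user id currently has vibration rights, validate the device handle, fetch the concrete device object from the ResourceManager, and only then issue the command, pushing the accumulated Result instead of an unconditional ResultSuccess. A condensed sketch of that pattern; the helper name is invented, and the calls mirror the ones added in this diff for GetActualVibrationValue:

    // Sketch of the common flow, not a verbatim handler from the commit.
    Result QueryActualVibration(ResourceManager& resources, u64 aruid,
                                const Core::HID::VibrationDeviceHandle& handle,
                                Core::HID::VibrationValue& out_value) {
        bool has_active_aruid{};
        Result result = resources.IsVibrationAruidActive(aruid, has_active_aruid);
        if (result.IsSuccess() && has_active_aruid) {
            result = IsVibrationHandleValid(handle); // free helper used throughout the file
        }
        NpadVibrationDevice* device{nullptr};
        if (result.IsSuccess() && has_active_aruid) {
            device = resources.GetNSVibrationDevice(handle);
        }
        if (device != nullptr) {
            result = device->GetActualVibrationValue(out_value);
        }
        if (result.IsError()) {
            out_value = Core::HID::DEFAULT_VIBRATION_VALUE; // handlers fall back to a default value
        }
        return result;
    }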
diff --git a/src/core/hle/service/hid/hid_system_server.cpp b/src/core/hle/service/hid/hid_system_server.cpp index 3a0cb3cb1..bf27ddfbf 100644 --- a/src/core/hle/service/hid/hid_system_server.cpp +++ b/src/core/hle/service/hid/hid_system_server.cpp | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include "hid_core/resource_manager.h" | 7 | #include "hid_core/resource_manager.h" |
| 8 | #include "hid_core/resources/npad/npad.h" | 8 | #include "hid_core/resources/npad/npad.h" |
| 9 | #include "hid_core/resources/npad/npad_types.h" | 9 | #include "hid_core/resources/npad/npad_types.h" |
| 10 | #include "hid_core/resources/npad/npad_vibration.h" | ||
| 10 | #include "hid_core/resources/palma/palma.h" | 11 | #include "hid_core/resources/palma/palma.h" |
| 11 | #include "hid_core/resources/touch_screen/touch_screen.h" | 12 | #include "hid_core/resources/touch_screen/touch_screen.h" |
| 12 | 13 | ||
| @@ -67,14 +68,14 @@ IHidSystemServer::IHidSystemServer(Core::System& system_, std::shared_ptr<Resour | |||
| 67 | {501, &IHidSystemServer::RegisterAppletResourceUserId, "RegisterAppletResourceUserId"}, | 68 | {501, &IHidSystemServer::RegisterAppletResourceUserId, "RegisterAppletResourceUserId"}, |
| 68 | {502, &IHidSystemServer::UnregisterAppletResourceUserId, "UnregisterAppletResourceUserId"}, | 69 | {502, &IHidSystemServer::UnregisterAppletResourceUserId, "UnregisterAppletResourceUserId"}, |
| 69 | {503, &IHidSystemServer::EnableAppletToGetInput, "EnableAppletToGetInput"}, | 70 | {503, &IHidSystemServer::EnableAppletToGetInput, "EnableAppletToGetInput"}, |
| 70 | {504, nullptr, "SetAruidValidForVibration"}, | 71 | {504, &IHidSystemServer::SetAruidValidForVibration, "SetAruidValidForVibration"}, |
| 71 | {505, &IHidSystemServer::EnableAppletToGetSixAxisSensor, "EnableAppletToGetSixAxisSensor"}, | 72 | {505, &IHidSystemServer::EnableAppletToGetSixAxisSensor, "EnableAppletToGetSixAxisSensor"}, |
| 72 | {506, &IHidSystemServer::EnableAppletToGetPadInput, "EnableAppletToGetPadInput"}, | 73 | {506, &IHidSystemServer::EnableAppletToGetPadInput, "EnableAppletToGetPadInput"}, |
| 73 | {507, &IHidSystemServer::EnableAppletToGetTouchScreen, "EnableAppletToGetTouchScreen"}, | 74 | {507, &IHidSystemServer::EnableAppletToGetTouchScreen, "EnableAppletToGetTouchScreen"}, |
| 74 | {510, nullptr, "SetVibrationMasterVolume"}, | 75 | {510, &IHidSystemServer::SetVibrationMasterVolume, "SetVibrationMasterVolume"}, |
| 75 | {511, nullptr, "GetVibrationMasterVolume"}, | 76 | {511, &IHidSystemServer::GetVibrationMasterVolume, "GetVibrationMasterVolume"}, |
| 76 | {512, nullptr, "BeginPermitVibrationSession"}, | 77 | {512, &IHidSystemServer::BeginPermitVibrationSession, "BeginPermitVibrationSession"}, |
| 77 | {513, nullptr, "EndPermitVibrationSession"}, | 78 | {513, &IHidSystemServer::EndPermitVibrationSession, "EndPermitVibrationSession"}, |
| 78 | {514, nullptr, "Unknown514"}, | 79 | {514, nullptr, "Unknown514"}, |
| 79 | {520, nullptr, "EnableHandheldHids"}, | 80 | {520, nullptr, "EnableHandheldHids"}, |
| 80 | {521, nullptr, "DisableHandheldHids"}, | 81 | {521, nullptr, "DisableHandheldHids"}, |
| @@ -156,7 +157,7 @@ IHidSystemServer::IHidSystemServer(Core::System& system_, std::shared_ptr<Resour | |||
| 156 | {1152, nullptr, "SetTouchScreenDefaultConfiguration"}, | 157 | {1152, nullptr, "SetTouchScreenDefaultConfiguration"}, |
| 157 | {1153, &IHidSystemServer::GetTouchScreenDefaultConfiguration, "GetTouchScreenDefaultConfiguration"}, | 158 | {1153, &IHidSystemServer::GetTouchScreenDefaultConfiguration, "GetTouchScreenDefaultConfiguration"}, |
| 158 | {1154, nullptr, "IsFirmwareAvailableForNotification"}, | 159 | {1154, nullptr, "IsFirmwareAvailableForNotification"}, |
| 159 | {1155, nullptr, "SetForceHandheldStyleVibration"}, | 160 | {1155, &IHidSystemServer::SetForceHandheldStyleVibration, "SetForceHandheldStyleVibration"}, |
| 160 | {1156, nullptr, "SendConnectionTriggerWithoutTimeoutEvent"}, | 161 | {1156, nullptr, "SendConnectionTriggerWithoutTimeoutEvent"}, |
| 161 | {1157, nullptr, "CancelConnectionTrigger"}, | 162 | {1157, nullptr, "CancelConnectionTrigger"}, |
| 162 | {1200, nullptr, "IsButtonConfigSupported"}, | 163 | {1200, nullptr, "IsButtonConfigSupported"}, |
| @@ -532,7 +533,28 @@ void IHidSystemServer::EnableAppletToGetInput(HLERequestContext& ctx) { | |||
| 532 | parameters.is_enabled, parameters.applet_resource_user_id); | 533 | parameters.is_enabled, parameters.applet_resource_user_id); |
| 533 | 534 | ||
| 534 | GetResourceManager()->EnableInput(parameters.applet_resource_user_id, parameters.is_enabled); | 535 | GetResourceManager()->EnableInput(parameters.applet_resource_user_id, parameters.is_enabled); |
| 535 | // GetResourceManager()->GetNpad()->EnableInput(parameters.applet_resource_user_id); | 536 | GetResourceManager()->GetNpad()->EnableAppletToGetInput(parameters.applet_resource_user_id); |
| 537 | |||
| 538 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 539 | rb.Push(ResultSuccess); | ||
| 540 | } | ||
| 541 | |||
| 542 | void IHidSystemServer::SetAruidValidForVibration(HLERequestContext& ctx) { | ||
| 543 | IPC::RequestParser rp{ctx}; | ||
| 544 | struct Parameters { | ||
| 545 | bool is_enabled; | ||
| 546 | INSERT_PADDING_WORDS_NOINIT(1); | ||
| 547 | u64 applet_resource_user_id; | ||
| 548 | }; | ||
| 549 | static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size."); | ||
| 550 | |||
| 551 | const auto parameters{rp.PopRaw<Parameters>()}; | ||
| 552 | |||
| 553 | LOG_INFO(Service_HID, "called, is_enabled={}, applet_resource_user_id={}", | ||
| 554 | parameters.is_enabled, parameters.applet_resource_user_id); | ||
| 555 | |||
| 556 | GetResourceManager()->SetAruidValidForVibration(parameters.applet_resource_user_id, | ||
| 557 | parameters.is_enabled); | ||
| 536 | 558 | ||
| 537 | IPC::ResponseBuilder rb{ctx, 2}; | 559 | IPC::ResponseBuilder rb{ctx, 2}; |
| 538 | rb.Push(ResultSuccess); | 560 | rb.Push(ResultSuccess); |
| @@ -574,7 +596,7 @@ void IHidSystemServer::EnableAppletToGetPadInput(HLERequestContext& ctx) { | |||
| 574 | parameters.is_enabled, parameters.applet_resource_user_id); | 596 | parameters.is_enabled, parameters.applet_resource_user_id); |
| 575 | 597 | ||
| 576 | GetResourceManager()->EnablePadInput(parameters.applet_resource_user_id, parameters.is_enabled); | 598 | GetResourceManager()->EnablePadInput(parameters.applet_resource_user_id, parameters.is_enabled); |
| 577 | // GetResourceManager()->GetNpad()->EnableInput(parameters.applet_resource_user_id); | 599 | GetResourceManager()->GetNpad()->EnableAppletToGetInput(parameters.applet_resource_user_id); |
| 578 | 600 | ||
| 579 | IPC::ResponseBuilder rb{ctx, 2}; | 601 | IPC::ResponseBuilder rb{ctx, 2}; |
| 580 | rb.Push(ResultSuccess); | 602 | rb.Push(ResultSuccess); |
| @@ -601,6 +623,57 @@ void IHidSystemServer::EnableAppletToGetTouchScreen(HLERequestContext& ctx) { | |||
| 601 | rb.Push(ResultSuccess); | 623 | rb.Push(ResultSuccess); |
| 602 | } | 624 | } |
| 603 | 625 | ||
| 626 | void IHidSystemServer::SetVibrationMasterVolume(HLERequestContext& ctx) { | ||
| 627 | IPC::RequestParser rp{ctx}; | ||
| 628 | const auto master_volume{rp.Pop<f32>()}; | ||
| 629 | |||
| 630 | LOG_INFO(Service_HID, "called, volume={}", master_volume); | ||
| 631 | |||
| 632 | const auto result = | ||
| 633 | GetResourceManager()->GetNpad()->GetVibrationHandler()->SetVibrationMasterVolume( | ||
| 634 | master_volume); | ||
| 635 | |||
| 636 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 637 | rb.Push(result); | ||
| 638 | } | ||
| 639 | |||
| 640 | void IHidSystemServer::GetVibrationMasterVolume(HLERequestContext& ctx) { | ||
| 641 | f32 master_volume{}; | ||
| 642 | const auto result = | ||
| 643 | GetResourceManager()->GetNpad()->GetVibrationHandler()->GetVibrationMasterVolume( | ||
| 644 | master_volume); | ||
| 645 | |||
| 646 | LOG_INFO(Service_HID, "called, volume={}", master_volume); | ||
| 647 | |||
| 648 | IPC::ResponseBuilder rb{ctx, 3}; | ||
| 649 | rb.Push(result); | ||
| 650 | rb.Push(master_volume); | ||
| 651 | } | ||
| 652 | |||
| 653 | void IHidSystemServer::BeginPermitVibrationSession(HLERequestContext& ctx) { | ||
| 654 | IPC::RequestParser rp{ctx}; | ||
| 655 | const auto applet_resource_user_id{rp.Pop<u64>()}; | ||
| 656 | |||
| 657 | LOG_INFO(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id); | ||
| 658 | |||
| 659 | const auto result = | ||
| 660 | GetResourceManager()->GetNpad()->GetVibrationHandler()->BeginPermitVibrationSession( | ||
| 661 | applet_resource_user_id); | ||
| 662 | |||
| 663 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 664 | rb.Push(result); | ||
| 665 | } | ||
| 666 | |||
| 667 | void IHidSystemServer::EndPermitVibrationSession(HLERequestContext& ctx) { | ||
| 668 | LOG_INFO(Service_HID, "called"); | ||
| 669 | |||
| 670 | const auto result = | ||
| 671 | GetResourceManager()->GetNpad()->GetVibrationHandler()->EndPermitVibrationSession(); | ||
| 672 | |||
| 673 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 674 | rb.Push(result); | ||
| 675 | } | ||
| 676 | |||
| 604 | void IHidSystemServer::IsJoyConAttachedOnAllRail(HLERequestContext& ctx) { | 677 | void IHidSystemServer::IsJoyConAttachedOnAllRail(HLERequestContext& ctx) { |
| 605 | const bool is_attached = true; | 678 | const bool is_attached = true; |
| 606 | 679 | ||
| @@ -749,6 +822,19 @@ void IHidSystemServer::GetTouchScreenDefaultConfiguration(HLERequestContext& ctx | |||
| 749 | rb.PushRaw(touchscreen_config); | 822 | rb.PushRaw(touchscreen_config); |
| 750 | } | 823 | } |
| 751 | 824 | ||
| 825 | void IHidSystemServer::SetForceHandheldStyleVibration(HLERequestContext& ctx) { | ||
| 826 | IPC::RequestParser rp{ctx}; | ||
| 827 | const auto is_forced{rp.Pop<bool>()}; | ||
| 828 | |||
| 829 | LOG_INFO(Service_HID, "called, is_forced={}", is_forced); | ||
| 830 | |||
| 831 | GetResourceManager()->SetForceHandheldStyleVibration(is_forced); | ||
| 832 | GetResourceManager()->GetNpad()->UpdateHandheldAbstractState(); | ||
| 833 | |||
| 834 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 835 | rb.Push(ResultSuccess); | ||
| 836 | } | ||
| 837 | |||
| 752 | void IHidSystemServer::IsUsingCustomButtonConfig(HLERequestContext& ctx) { | 838 | void IHidSystemServer::IsUsingCustomButtonConfig(HLERequestContext& ctx) { |
| 753 | const bool is_enabled = false; | 839 | const bool is_enabled = false; |
| 754 | 840 | ||
diff --git a/src/core/hle/service/hid/hid_system_server.h b/src/core/hle/service/hid/hid_system_server.h index 0c2634e3f..90a719f02 100644 --- a/src/core/hle/service/hid/hid_system_server.h +++ b/src/core/hle/service/hid/hid_system_server.h | |||
| @@ -42,9 +42,14 @@ private: | |||
| 42 | void RegisterAppletResourceUserId(HLERequestContext& ctx); | 42 | void RegisterAppletResourceUserId(HLERequestContext& ctx); |
| 43 | void UnregisterAppletResourceUserId(HLERequestContext& ctx); | 43 | void UnregisterAppletResourceUserId(HLERequestContext& ctx); |
| 44 | void EnableAppletToGetInput(HLERequestContext& ctx); | 44 | void EnableAppletToGetInput(HLERequestContext& ctx); |
| 45 | void SetAruidValidForVibration(HLERequestContext& ctx); | ||
| 45 | void EnableAppletToGetSixAxisSensor(HLERequestContext& ctx); | 46 | void EnableAppletToGetSixAxisSensor(HLERequestContext& ctx); |
| 46 | void EnableAppletToGetPadInput(HLERequestContext& ctx); | 47 | void EnableAppletToGetPadInput(HLERequestContext& ctx); |
| 47 | void EnableAppletToGetTouchScreen(HLERequestContext& ctx); | 48 | void EnableAppletToGetTouchScreen(HLERequestContext& ctx); |
| 49 | void SetVibrationMasterVolume(HLERequestContext& ctx); | ||
| 50 | void GetVibrationMasterVolume(HLERequestContext& ctx); | ||
| 51 | void BeginPermitVibrationSession(HLERequestContext& ctx); | ||
| 52 | void EndPermitVibrationSession(HLERequestContext& ctx); | ||
| 48 | void IsJoyConAttachedOnAllRail(HLERequestContext& ctx); | 53 | void IsJoyConAttachedOnAllRail(HLERequestContext& ctx); |
| 49 | void AcquireConnectionTriggerTimeoutEvent(HLERequestContext& ctx); | 54 | void AcquireConnectionTriggerTimeoutEvent(HLERequestContext& ctx); |
| 50 | void AcquireDeviceRegisteredEventForControllerSupport(HLERequestContext& ctx); | 55 | void AcquireDeviceRegisteredEventForControllerSupport(HLERequestContext& ctx); |
| @@ -61,6 +66,7 @@ private: | |||
| 61 | void FinalizeUsbFirmwareUpdate(HLERequestContext& ctx); | 66 | void FinalizeUsbFirmwareUpdate(HLERequestContext& ctx); |
| 62 | void InitializeUsbFirmwareUpdateWithoutMemory(HLERequestContext& ctx); | 67 | void InitializeUsbFirmwareUpdateWithoutMemory(HLERequestContext& ctx); |
| 63 | void GetTouchScreenDefaultConfiguration(HLERequestContext& ctx); | 68 | void GetTouchScreenDefaultConfiguration(HLERequestContext& ctx); |
| 69 | void SetForceHandheldStyleVibration(HLERequestContext& ctx); | ||
| 64 | void IsUsingCustomButtonConfig(HLERequestContext& ctx); | 70 | void IsUsingCustomButtonConfig(HLERequestContext& ctx); |
| 65 | 71 | ||
| 66 | std::shared_ptr<ResourceManager> GetResourceManager(); | 72 | std::shared_ptr<ResourceManager> GetResourceManager(); |
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp index 3f38ceb03..e491dd260 100644 --- a/src/core/hle/service/hle_ipc.cpp +++ b/src/core/hle/service/hle_ipc.cpp | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include "common/common_types.h" | 12 | #include "common/common_types.h" |
| 13 | #include "common/logging/log.h" | 13 | #include "common/logging/log.h" |
| 14 | #include "common/scratch_buffer.h" | 14 | #include "common/scratch_buffer.h" |
| 15 | #include "core/guest_memory.h" | ||
| 15 | #include "core/hle/kernel/k_auto_object.h" | 16 | #include "core/hle/kernel/k_auto_object.h" |
| 16 | #include "core/hle/kernel/k_handle_table.h" | 17 | #include "core/hle/kernel/k_handle_table.h" |
| 17 | #include "core/hle/kernel/k_process.h" | 18 | #include "core/hle/kernel/k_process.h" |
| @@ -23,19 +24,6 @@ | |||
| 23 | #include "core/hle/service/ipc_helpers.h" | 24 | #include "core/hle/service/ipc_helpers.h" |
| 24 | #include "core/memory.h" | 25 | #include "core/memory.h" |
| 25 | 26 | ||
| 26 | namespace { | ||
| 27 | static thread_local std::array read_buffer_data_a{ | ||
| 28 | Common::ScratchBuffer<u8>(), | ||
| 29 | Common::ScratchBuffer<u8>(), | ||
| 30 | Common::ScratchBuffer<u8>(), | ||
| 31 | }; | ||
| 32 | static thread_local std::array read_buffer_data_x{ | ||
| 33 | Common::ScratchBuffer<u8>(), | ||
| 34 | Common::ScratchBuffer<u8>(), | ||
| 35 | Common::ScratchBuffer<u8>(), | ||
| 36 | }; | ||
| 37 | } // Anonymous namespace | ||
| 38 | |||
| 39 | namespace Service { | 27 | namespace Service { |
| 40 | 28 | ||
| 41 | SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_) | 29 | SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_) |
| @@ -343,48 +331,27 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons | |||
| 343 | } | 331 | } |
| 344 | 332 | ||
| 345 | std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const { | 333 | std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const { |
| 346 | static thread_local std::array read_buffer_a{ | 334 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0); |
| 347 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 348 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 349 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 350 | }; | ||
| 351 | 335 | ||
| 352 | ASSERT_OR_EXECUTE_MSG( | 336 | ASSERT_OR_EXECUTE_MSG( |
| 353 | BufferDescriptorA().size() > buffer_index, { return {}; }, | 337 | BufferDescriptorA().size() > buffer_index, { return {}; }, |
| 354 | "BufferDescriptorA invalid buffer_index {}", buffer_index); | 338 | "BufferDescriptorA invalid buffer_index {}", buffer_index); |
| 355 | auto& read_buffer = read_buffer_a[buffer_index]; | 339 | return gm.Read(BufferDescriptorA()[buffer_index].Address(), |
| 356 | return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(), | 340 | BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]); |
| 357 | BufferDescriptorA()[buffer_index].Size(), | ||
| 358 | &read_buffer_data_a[buffer_index]); | ||
| 359 | } | 341 | } |
| 360 | 342 | ||
| 361 | std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const { | 343 | std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const { |
| 362 | static thread_local std::array read_buffer_x{ | 344 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0); |
| 363 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 364 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 365 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 366 | }; | ||
| 367 | 345 | ||
| 368 | ASSERT_OR_EXECUTE_MSG( | 346 | ASSERT_OR_EXECUTE_MSG( |
| 369 | BufferDescriptorX().size() > buffer_index, { return {}; }, | 347 | BufferDescriptorX().size() > buffer_index, { return {}; }, |
| 370 | "BufferDescriptorX invalid buffer_index {}", buffer_index); | 348 | "BufferDescriptorX invalid buffer_index {}", buffer_index); |
| 371 | auto& read_buffer = read_buffer_x[buffer_index]; | 349 | return gm.Read(BufferDescriptorX()[buffer_index].Address(), |
| 372 | return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(), | 350 | BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]); |
| 373 | BufferDescriptorX()[buffer_index].Size(), | ||
| 374 | &read_buffer_data_x[buffer_index]); | ||
| 375 | } | 351 | } |
| 376 | 352 | ||
| 377 | std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const { | 353 | std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const { |
| 378 | static thread_local std::array read_buffer_a{ | 354 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0); |
| 379 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 380 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 381 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 382 | }; | ||
| 383 | static thread_local std::array read_buffer_x{ | ||
| 384 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 385 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 386 | Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0), | ||
| 387 | }; | ||
| 388 | 355 | ||
| 389 | const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && | 356 | const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && |
| 390 | BufferDescriptorA()[buffer_index].Size()}; | 357 | BufferDescriptorA()[buffer_index].Size()}; |
| @@ -401,18 +368,14 @@ std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) cons | |||
| 401 | ASSERT_OR_EXECUTE_MSG( | 368 | ASSERT_OR_EXECUTE_MSG( |
| 402 | BufferDescriptorA().size() > buffer_index, { return {}; }, | 369 | BufferDescriptorA().size() > buffer_index, { return {}; }, |
| 403 | "BufferDescriptorA invalid buffer_index {}", buffer_index); | 370 | "BufferDescriptorA invalid buffer_index {}", buffer_index); |
| 404 | auto& read_buffer = read_buffer_a[buffer_index]; | 371 | return gm.Read(BufferDescriptorA()[buffer_index].Address(), |
| 405 | return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(), | 372 | BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]); |
| 406 | BufferDescriptorA()[buffer_index].Size(), | ||
| 407 | &read_buffer_data_a[buffer_index]); | ||
| 408 | } else { | 373 | } else { |
| 409 | ASSERT_OR_EXECUTE_MSG( | 374 | ASSERT_OR_EXECUTE_MSG( |
| 410 | BufferDescriptorX().size() > buffer_index, { return {}; }, | 375 | BufferDescriptorX().size() > buffer_index, { return {}; }, |
| 411 | "BufferDescriptorX invalid buffer_index {}", buffer_index); | 376 | "BufferDescriptorX invalid buffer_index {}", buffer_index); |
| 412 | auto& read_buffer = read_buffer_x[buffer_index]; | 377 | return gm.Read(BufferDescriptorX()[buffer_index].Address(), |
| 413 | return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(), | 378 | BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]); |
| 414 | BufferDescriptorX()[buffer_index].Size(), | ||
| 415 | &read_buffer_data_x[buffer_index]); | ||
| 416 | } | 379 | } |
| 417 | } | 380 | } |
| 418 | 381 | ||
diff --git a/src/core/hle/service/hle_ipc.h b/src/core/hle/service/hle_ipc.h index 440737db5..8329d7265 100644 --- a/src/core/hle/service/hle_ipc.h +++ b/src/core/hle/service/hle_ipc.h | |||
| @@ -41,6 +41,8 @@ class KernelCore; | |||
| 41 | class KHandleTable; | 41 | class KHandleTable; |
| 42 | class KProcess; | 42 | class KProcess; |
| 43 | class KServerSession; | 43 | class KServerSession; |
| 44 | template <typename T> | ||
| 45 | class KScopedAutoObject; | ||
| 44 | class KThread; | 46 | class KThread; |
| 45 | } // namespace Kernel | 47 | } // namespace Kernel |
| 46 | 48 | ||
| @@ -424,6 +426,9 @@ private: | |||
| 424 | 426 | ||
| 425 | Kernel::KernelCore& kernel; | 427 | Kernel::KernelCore& kernel; |
| 426 | Core::Memory::Memory& memory; | 428 | Core::Memory::Memory& memory; |
| 429 | |||
| 430 | mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_a{}; | ||
| 431 | mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_x{}; | ||
| 427 | }; | 432 | }; |
| 428 | 433 | ||
| 429 | } // namespace Service | 434 | } // namespace Service |
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index 37ca24f5d..21ef57d27 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp | |||
| @@ -2,27 +2,135 @@ | |||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | 3 | // SPDX-License-Identifier: GPL-3.0-or-later |
| 4 | 4 | ||
| 5 | #include <atomic> | ||
| 6 | #include <deque> | ||
| 7 | #include <mutex> | ||
| 8 | |||
| 9 | #include "core/hle/kernel/k_process.h" | ||
| 5 | #include "core/hle/service/nvdrv/core/container.h" | 10 | #include "core/hle/service/nvdrv/core/container.h" |
| 11 | #include "core/hle/service/nvdrv/core/heap_mapper.h" | ||
| 6 | #include "core/hle/service/nvdrv/core/nvmap.h" | 12 | #include "core/hle/service/nvdrv/core/nvmap.h" |
| 7 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | 13 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" |
| 14 | #include "core/memory.h" | ||
| 8 | #include "video_core/host1x/host1x.h" | 15 | #include "video_core/host1x/host1x.h" |
| 9 | 16 | ||
| 10 | namespace Service::Nvidia::NvCore { | 17 | namespace Service::Nvidia::NvCore { |
| 11 | 18 | ||
| 19 | Session::Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_) | ||
| 20 | : id{id_}, process{process_}, asid{asid_}, has_preallocated_area{}, mapper{}, is_active{} {} | ||
| 21 | |||
| 22 | Session::~Session() = default; | ||
| 23 | |||
| 12 | struct ContainerImpl { | 24 | struct ContainerImpl { |
| 13 | explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) | 25 | explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_) |
| 14 | : file{host1x_}, manager{host1x_}, device_file_data{} {} | 26 | : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {} |
| 27 | Tegra::Host1x::Host1x& host1x; | ||
| 15 | NvMap file; | 28 | NvMap file; |
| 16 | SyncpointManager manager; | 29 | SyncpointManager manager; |
| 17 | Container::Host1xDeviceFileData device_file_data; | 30 | Container::Host1xDeviceFileData device_file_data; |
| 31 | std::deque<Session> sessions; | ||
| 32 | size_t new_ids{}; | ||
| 33 | std::deque<size_t> id_pool; | ||
| 34 | std::mutex session_guard; | ||
| 18 | }; | 35 | }; |
| 19 | 36 | ||
| 20 | Container::Container(Tegra::Host1x::Host1x& host1x_) { | 37 | Container::Container(Tegra::Host1x::Host1x& host1x_) { |
| 21 | impl = std::make_unique<ContainerImpl>(host1x_); | 38 | impl = std::make_unique<ContainerImpl>(*this, host1x_); |
| 22 | } | 39 | } |
| 23 | 40 | ||
| 24 | Container::~Container() = default; | 41 | Container::~Container() = default; |
| 25 | 42 | ||
| 43 | SessionId Container::OpenSession(Kernel::KProcess* process) { | ||
| 44 | using namespace Common::Literals; | ||
| 45 | |||
| 46 | std::scoped_lock lk(impl->session_guard); | ||
| 47 | for (auto& session : impl->sessions) { | ||
| 48 | if (!session.is_active) { | ||
| 49 | continue; | ||
| 50 | } | ||
| 51 | if (session.process == process) { | ||
| 52 | return session.id; | ||
| 53 | } | ||
| 54 | } | ||
| 55 | size_t new_id{}; | ||
| 56 | auto* memory_interface = &process->GetMemory(); | ||
| 57 | auto& smmu = impl->host1x.MemoryManager(); | ||
| 58 | auto asid = smmu.RegisterProcess(memory_interface); | ||
| 59 | if (!impl->id_pool.empty()) { | ||
| 60 | new_id = impl->id_pool.front(); | ||
| 61 | impl->id_pool.pop_front(); | ||
| 62 | impl->sessions[new_id] = Session{SessionId{new_id}, process, asid}; | ||
| 63 | } else { | ||
| 64 | new_id = impl->new_ids++; | ||
| 65 | impl->sessions.emplace_back(SessionId{new_id}, process, asid); | ||
| 66 | } | ||
| 67 | auto& session = impl->sessions[new_id]; | ||
| 68 | session.is_active = true; | ||
| 69 | // Optimization | ||
| 70 | if (process->IsApplication()) { | ||
| 71 | auto& page_table = process->GetPageTable().GetBasePageTable(); | ||
| 72 | auto heap_start = page_table.GetHeapRegionStart(); | ||
| 73 | |||
| 74 | Kernel::KProcessAddress cur_addr = heap_start; | ||
| 75 | size_t region_size = 0; | ||
| 76 | VAddr region_start = 0; | ||
| 77 | while (true) { | ||
| 78 | Kernel::KMemoryInfo mem_info{}; | ||
| 79 | Kernel::Svc::PageInfo page_info{}; | ||
| 80 | R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), | ||
| 81 | cur_addr)); | ||
| 82 | auto svc_mem_info = mem_info.GetSvcMemoryInfo(); | ||
| 83 | |||
| 84 | // Check if this memory block is heap. | ||
| 85 | if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) { | ||
| 86 | if (svc_mem_info.size > region_size) { | ||
| 87 | region_size = svc_mem_info.size; | ||
| 88 | region_start = svc_mem_info.base_address; | ||
| 89 | } | ||
| 90 | } | ||
| 91 | |||
| 92 | // Check if we're done. | ||
| 93 | const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size; | ||
| 94 | if (next_address <= GetInteger(cur_addr)) { | ||
| 95 | break; | ||
| 96 | } | ||
| 97 | |||
| 98 | cur_addr = next_address; | ||
| 99 | } | ||
| 100 | session.has_preallocated_area = false; | ||
| 101 | auto start_region = region_size >= 32_MiB ? smmu.Allocate(region_size) : 0; | ||
| 102 | if (start_region != 0) { | ||
| 103 | session.mapper = std::make_unique<HeapMapper>(region_start, start_region, region_size, | ||
| 104 | asid, impl->host1x); | ||
| 105 | smmu.TrackContinuity(start_region, region_start, region_size, asid); | ||
| 106 | session.has_preallocated_area = true; | ||
| 107 | LOG_DEBUG(Debug, "Preallocation created!"); | ||
| 108 | } | ||
| 109 | } | ||
| 110 | return SessionId{new_id}; | ||
| 111 | } | ||
| 112 | |||
| 113 | void Container::CloseSession(SessionId session_id) { | ||
| 114 | std::scoped_lock lk(impl->session_guard); | ||
| 115 | auto& session = impl->sessions[session_id.id]; | ||
| 116 | auto& smmu = impl->host1x.MemoryManager(); | ||
| 117 | if (session.has_preallocated_area) { | ||
| 118 | const DAddr region_start = session.mapper->GetRegionStart(); | ||
| 119 | const size_t region_size = session.mapper->GetRegionSize(); | ||
| 120 | session.mapper.reset(); | ||
| 121 | smmu.Free(region_start, region_size); | ||
| 122 | session.has_preallocated_area = false; | ||
| 123 | } | ||
| 124 | session.is_active = false; | ||
| 125 | smmu.UnregisterProcess(impl->sessions[session_id.id].asid); | ||
| 126 | impl->id_pool.emplace_front(session_id.id); | ||
| 127 | } | ||
| 128 | |||
| 129 | Session* Container::GetSession(SessionId session_id) { | ||
| 130 | std::atomic_thread_fence(std::memory_order_acquire); | ||
| 131 | return &impl->sessions[session_id.id]; | ||
| 132 | } | ||
| 133 | |||
| 26 | NvMap& Container::GetNvMapFile() { | 134 | NvMap& Container::GetNvMapFile() { |
| 27 | return impl->file; | 135 | return impl->file; |
| 28 | } | 136 | } |
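Annotation: for application processes, Container::OpenSession above walks the guest heap with QueryInfo, remembers the largest block in MemoryState::Normal, and only preallocates an SMMU window when that block is at least 32 MiB. A simplified model of that scan, with the QueryInfo walk abstracted into a pre-listed vector of blocks (Block/State and the example addresses are invented for illustration):

// Simplified model of the "find the largest heap block, preallocate if big
// enough" decision in OpenSession.
#include <cstdint>
#include <iostream>
#include <vector>

enum class State { Free, Normal, Io };

struct Block {
    std::uint64_t base;
    std::uint64_t size;
    State state;
};

struct Region {
    std::uint64_t start{};
    std::uint64_t size{};
};

Region LargestHeapRegion(const std::vector<Block>& blocks) {
    Region best{};
    for (const Block& b : blocks) {
        if (b.state == State::Normal && b.size > best.size) {
            best = {b.base, b.size};
        }
    }
    return best;
}

int main() {
    const std::vector<Block> blocks{
        {0x0800'0000, 0x0010'0000, State::Normal},
        {0x0810'0000, 0x0400'0000, State::Normal}, // 64 MiB heap block
        {0x0c10'0000, 0x0002'0000, State::Io},
    };
    constexpr std::uint64_t preallocation_threshold = 32ull << 20; // mirrors the 32_MiB check
    const Region heap = LargestHeapRegion(blocks);
    std::cout << std::hex << "largest heap block at 0x" << heap.start << ", size 0x" << heap.size
              << (heap.size >= preallocation_threshold ? " -> preallocate\n" : " -> skip\n");
}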
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index b4b63ac90..b4d3938a8 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h | |||
| @@ -8,24 +8,56 @@ | |||
| 8 | #include <memory> | 8 | #include <memory> |
| 9 | #include <unordered_map> | 9 | #include <unordered_map> |
| 10 | 10 | ||
| 11 | #include "core/device_memory_manager.h" | ||
| 11 | #include "core/hle/service/nvdrv/nvdata.h" | 12 | #include "core/hle/service/nvdrv/nvdata.h" |
| 12 | 13 | ||
| 14 | namespace Kernel { | ||
| 15 | class KProcess; | ||
| 16 | } | ||
| 17 | |||
| 13 | namespace Tegra::Host1x { | 18 | namespace Tegra::Host1x { |
| 14 | class Host1x; | 19 | class Host1x; |
| 15 | } // namespace Tegra::Host1x | 20 | } // namespace Tegra::Host1x |
| 16 | 21 | ||
| 17 | namespace Service::Nvidia::NvCore { | 22 | namespace Service::Nvidia::NvCore { |
| 18 | 23 | ||
| 24 | class HeapMapper; | ||
| 19 | class NvMap; | 25 | class NvMap; |
| 20 | class SyncpointManager; | 26 | class SyncpointManager; |
| 21 | 27 | ||
| 22 | struct ContainerImpl; | 28 | struct ContainerImpl; |
| 23 | 29 | ||
| 30 | struct SessionId { | ||
| 31 | size_t id; | ||
| 32 | }; | ||
| 33 | |||
| 34 | struct Session { | ||
| 35 | Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_); | ||
| 36 | ~Session(); | ||
| 37 | |||
| 38 | Session(const Session&) = delete; | ||
| 39 | Session& operator=(const Session&) = delete; | ||
| 40 | Session(Session&&) = default; | ||
| 41 | Session& operator=(Session&&) = default; | ||
| 42 | |||
| 43 | SessionId id; | ||
| 44 | Kernel::KProcess* process; | ||
| 45 | Core::Asid asid; | ||
| 46 | bool has_preallocated_area{}; | ||
| 47 | std::unique_ptr<HeapMapper> mapper{}; | ||
| 48 | bool is_active{}; | ||
| 49 | }; | ||
| 50 | |||
| 24 | class Container { | 51 | class Container { |
| 25 | public: | 52 | public: |
| 26 | explicit Container(Tegra::Host1x::Host1x& host1x); | 53 | explicit Container(Tegra::Host1x::Host1x& host1x); |
| 27 | ~Container(); | 54 | ~Container(); |
| 28 | 55 | ||
| 56 | SessionId OpenSession(Kernel::KProcess* process); | ||
| 57 | void CloseSession(SessionId id); | ||
| 58 | |||
| 59 | Session* GetSession(SessionId id); | ||
| 60 | |||
| 29 | NvMap& GetNvMapFile(); | 61 | NvMap& GetNvMapFile(); |
| 30 | 62 | ||
| 31 | const NvMap& GetNvMapFile() const; | 63 | const NvMap& GetNvMapFile() const; |
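Annotation: SessionId is just an index into the container's session table. CloseSession pushes the freed index onto id_pool and OpenSession reuses pooled indices before minting new ones, so slots from closed sessions are recycled. A toy model of that free-list (the real sessions deque, Session state and the guard mutex are omitted here):

// Toy model of SessionId recycling in Container::OpenSession/CloseSession.
#include <cassert>
#include <cstddef>
#include <deque>

struct ToySessionIds {
    std::size_t next_new_id{};
    std::deque<std::size_t> id_pool;

    std::size_t Open() {
        if (!id_pool.empty()) {
            const std::size_t id = id_pool.front();
            id_pool.pop_front();
            return id; // reuse a slot from a previously closed session
        }
        return next_new_id++; // otherwise grow the session table
    }

    void Close(std::size_t id) {
        id_pool.emplace_front(id);
    }
};

int main() {
    ToySessionIds ids;
    const auto a = ids.Open(); // 0
    const auto b = ids.Open(); // 1
    ids.Close(a);
    const auto c = ids.Open(); // reuses 0
    assert(a == 0 && b == 1 && c == 0);
}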
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp new file mode 100644 index 000000000..096dc5deb --- /dev/null +++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp | |||
| @@ -0,0 +1,175 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #include <mutex> | ||
| 5 | |||
| 6 | #include <boost/container/small_vector.hpp> | ||
| 7 | #define BOOST_NO_MT | ||
| 8 | #include <boost/pool/detail/mutex.hpp> | ||
| 9 | #undef BOOST_NO_MT | ||
| 10 | #include <boost/icl/interval.hpp> | ||
| 11 | #include <boost/icl/interval_base_set.hpp> | ||
| 12 | #include <boost/icl/interval_set.hpp> | ||
| 13 | #include <boost/icl/split_interval_map.hpp> | ||
| 14 | #include <boost/pool/pool.hpp> | ||
| 15 | #include <boost/pool/pool_alloc.hpp> | ||
| 16 | #include <boost/pool/poolfwd.hpp> | ||
| 17 | |||
| 18 | #include "core/hle/service/nvdrv/core/heap_mapper.h" | ||
| 19 | #include "video_core/host1x/host1x.h" | ||
| 20 | |||
| 21 | namespace boost { | ||
| 22 | template <typename T> | ||
| 23 | class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>; | ||
| 24 | } | ||
| 25 | |||
| 26 | namespace Service::Nvidia::NvCore { | ||
| 27 | |||
| 28 | using IntervalCompare = std::less<DAddr>; | ||
| 29 | using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>; | ||
| 30 | using IntervalAllocator = boost::fast_pool_allocator<DAddr>; | ||
| 31 | using IntervalSet = boost::icl::interval_set<DAddr>; | ||
| 32 | using IntervalType = typename IntervalSet::interval_type; | ||
| 33 | |||
| 34 | template <typename Type> | ||
| 35 | struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> { | ||
| 36 | // types | ||
| 37 | typedef counter_add_functor<Type> type; | ||
| 38 | typedef boost::icl::identity_based_inplace_combine<Type> base_type; | ||
| 39 | |||
| 40 | // public member functions | ||
| 41 | void operator()(Type& current, const Type& added) const { | ||
| 42 | current += added; | ||
| 43 | if (current < base_type::identity_element()) { | ||
| 44 | current = base_type::identity_element(); | ||
| 45 | } | ||
| 46 | } | ||
| 47 | |||
| 48 | // public static functions | ||
| 49 | static void version(Type&){}; | ||
| 50 | }; | ||
| 51 | |||
| 52 | using OverlapCombine = counter_add_functor<int>; | ||
| 53 | using OverlapSection = boost::icl::inter_section<int>; | ||
| 54 | using OverlapCounter = boost::icl::split_interval_map<DAddr, int>; | ||
| 55 | |||
| 56 | struct HeapMapper::HeapMapperInternal { | ||
| 57 | HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {} | ||
| 58 | ~HeapMapperInternal() = default; | ||
| 59 | |||
| 60 | template <typename Func> | ||
| 61 | void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size, | ||
| 62 | Func&& func) { | ||
| 63 | const DAddr start_address = cpu_addr; | ||
| 64 | const DAddr end_address = start_address + size; | ||
| 65 | const IntervalType search_interval{start_address, end_address}; | ||
| 66 | auto it = current_range.lower_bound(search_interval); | ||
| 67 | if (it == current_range.end()) { | ||
| 68 | return; | ||
| 69 | } | ||
| 70 | auto end_it = current_range.upper_bound(search_interval); | ||
| 71 | for (; it != end_it; it++) { | ||
| 72 | auto& inter = it->first; | ||
| 73 | DAddr inter_addr_end = inter.upper(); | ||
| 74 | DAddr inter_addr = inter.lower(); | ||
| 75 | if (inter_addr_end > end_address) { | ||
| 76 | inter_addr_end = end_address; | ||
| 77 | } | ||
| 78 | if (inter_addr < start_address) { | ||
| 79 | inter_addr = start_address; | ||
| 80 | } | ||
| 81 | func(inter_addr, inter_addr_end, it->second); | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | void RemoveEachInOverlapCounter(OverlapCounter& current_range, | ||
| 86 | const IntervalType search_interval, int subtract_value) { | ||
| 87 | bool any_removals = false; | ||
| 88 | current_range.add(std::make_pair(search_interval, subtract_value)); | ||
| 89 | do { | ||
| 90 | any_removals = false; | ||
| 91 | auto it = current_range.lower_bound(search_interval); | ||
| 92 | if (it == current_range.end()) { | ||
| 93 | return; | ||
| 94 | } | ||
| 95 | auto end_it = current_range.upper_bound(search_interval); | ||
| 96 | for (; it != end_it; it++) { | ||
| 97 | if (it->second <= 0) { | ||
| 98 | any_removals = true; | ||
| 99 | current_range.erase(it); | ||
| 100 | break; | ||
| 101 | } | ||
| 102 | } | ||
| 103 | } while (any_removals); | ||
| 104 | } | ||
| 105 | |||
| 106 | IntervalSet base_set; | ||
| 107 | OverlapCounter mapping_overlaps; | ||
| 108 | Tegra::MaxwellDeviceMemoryManager& device_memory; | ||
| 109 | std::mutex guard; | ||
| 110 | }; | ||
| 111 | |||
| 112 | HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid, | ||
| 113 | Tegra::Host1x::Host1x& host1x) | ||
| 114 | : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_asid{asid} { | ||
| 115 | m_internal = std::make_unique<HeapMapperInternal>(host1x); | ||
| 116 | } | ||
| 117 | |||
| 118 | HeapMapper::~HeapMapper() { | ||
| 119 | m_internal->device_memory.Unmap(m_daddress, m_size); | ||
| 120 | } | ||
| 121 | |||
| 122 | DAddr HeapMapper::Map(VAddr start, size_t size) { | ||
| 123 | std::scoped_lock lk(m_internal->guard); | ||
| 124 | m_internal->base_set.clear(); | ||
| 125 | const IntervalType interval{start, start + size}; | ||
| 126 | m_internal->base_set.insert(interval); | ||
| 127 | m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, | ||
| 128 | [this](VAddr start_addr, VAddr end_addr, int) { | ||
| 129 | const IntervalType other{start_addr, end_addr}; | ||
| 130 | m_internal->base_set.subtract(other); | ||
| 131 | }); | ||
| 132 | if (!m_internal->base_set.empty()) { | ||
| 133 | auto it = m_internal->base_set.begin(); | ||
| 134 | auto end_it = m_internal->base_set.end(); | ||
| 135 | for (; it != end_it; it++) { | ||
| 136 | const VAddr inter_addr_end = it->upper(); | ||
| 137 | const VAddr inter_addr = it->lower(); | ||
| 138 | const size_t offset = inter_addr - m_vaddress; | ||
| 139 | const size_t sub_size = inter_addr_end - inter_addr; | ||
| 140 | m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, | ||
| 141 | m_asid); | ||
| 142 | } | ||
| 143 | } | ||
| 144 | m_internal->mapping_overlaps += std::make_pair(interval, 1); | ||
| 145 | m_internal->base_set.clear(); | ||
| 146 | return m_daddress + (start - m_vaddress); | ||
| 147 | } | ||
| 148 | |||
| 149 | void HeapMapper::Unmap(VAddr start, size_t size) { | ||
| 150 | std::scoped_lock lk(m_internal->guard); | ||
| 151 | m_internal->base_set.clear(); | ||
| 152 | m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, | ||
| 153 | [this](VAddr start_addr, VAddr end_addr, int value) { | ||
| 154 | if (value <= 1) { | ||
| 155 | const IntervalType other{start_addr, end_addr}; | ||
| 156 | m_internal->base_set.insert(other); | ||
| 157 | } | ||
| 158 | }); | ||
| 159 | if (!m_internal->base_set.empty()) { | ||
| 160 | auto it = m_internal->base_set.begin(); | ||
| 161 | auto end_it = m_internal->base_set.end(); | ||
| 162 | for (; it != end_it; it++) { | ||
| 163 | const VAddr inter_addr_end = it->upper(); | ||
| 164 | const VAddr inter_addr = it->lower(); | ||
| 165 | const size_t offset = inter_addr - m_vaddress; | ||
| 166 | const size_t sub_size = inter_addr_end - inter_addr; | ||
| 167 | m_internal->device_memory.Unmap(m_daddress + offset, sub_size); | ||
| 168 | } | ||
| 169 | } | ||
| 170 | const IntervalType to_remove{start, start + size}; | ||
| 171 | m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1); | ||
| 172 | m_internal->base_set.clear(); | ||
| 173 | } | ||
| 174 | |||
| 175 | } // namespace Service::Nvidia::NvCore | ||
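Annotation: HeapMapper::Map and Unmap above keep an overlap counter over the preallocated window so a subrange is mapped into the device memory manager only the first time something references it, and unmapped only when the last reference goes away. A page-granular model of that reference counting, using a plain std::map instead of the boost::icl interval containers the real code uses (kPage and ToyHeapMapper are stand-ins invented here):

// Page-granular model of the refcounting behind HeapMapper::Map/Unmap: only
// pages whose count goes 0 -> 1 get mapped, only pages returning to 0 get unmapped.
#include <cstdint>
#include <iostream>
#include <map>

constexpr std::uint64_t kPage = 0x1000;

struct ToyHeapMapper {
    std::map<std::uint64_t, int> refcount; // page base -> number of live mappings

    void Map(std::uint64_t start, std::uint64_t size) {
        for (std::uint64_t page = start; page < start + size; page += kPage) {
            if (++refcount[page] == 1) {
                std::cout << std::hex << "map   page 0x" << page << '\n'; // first user maps it
            }
        }
    }

    void Unmap(std::uint64_t start, std::uint64_t size) {
        for (std::uint64_t page = start; page < start + size; page += kPage) {
            if (--refcount[page] == 0) {
                refcount.erase(page);
                std::cout << std::hex << "unmap page 0x" << page << '\n'; // last user unmaps it
            }
        }
    }
};

int main() {
    ToyHeapMapper mapper;
    mapper.Map(0x0000, 0x3000);   // maps pages 0x0000, 0x1000, 0x2000
    mapper.Map(0x2000, 0x2000);   // 0x2000 is already mapped, only 0x3000 is new
    mapper.Unmap(0x0000, 0x3000); // 0x2000 stays mapped, still used by the second range
    mapper.Unmap(0x2000, 0x2000); // now 0x2000 and 0x3000 go away
}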
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.h b/src/core/hle/service/nvdrv/core/heap_mapper.h new file mode 100644 index 000000000..491a12e4f --- /dev/null +++ b/src/core/hle/service/nvdrv/core/heap_mapper.h | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <memory> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | #include "core/device_memory_manager.h" | ||
| 10 | |||
| 11 | namespace Tegra::Host1x { | ||
| 12 | class Host1x; | ||
| 13 | } // namespace Tegra::Host1x | ||
| 14 | |||
| 15 | namespace Service::Nvidia::NvCore { | ||
| 16 | |||
| 17 | class HeapMapper { | ||
| 18 | public: | ||
| 19 | HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid, | ||
| 20 | Tegra::Host1x::Host1x& host1x); | ||
| 21 | ~HeapMapper(); | ||
| 22 | |||
| 23 | bool IsInBounds(VAddr start, size_t size) const { | ||
| 24 | VAddr end = start + size; | ||
| 25 | return start >= m_vaddress && end <= (m_vaddress + m_size); | ||
| 26 | } | ||
| 27 | |||
| 28 | DAddr Map(VAddr start, size_t size); | ||
| 29 | |||
| 30 | void Unmap(VAddr start, size_t size); | ||
| 31 | |||
| 32 | DAddr GetRegionStart() const { | ||
| 33 | return m_daddress; | ||
| 34 | } | ||
| 35 | |||
| 36 | size_t GetRegionSize() const { | ||
| 37 | return m_size; | ||
| 38 | } | ||
| 39 | |||
| 40 | private: | ||
| 41 | struct HeapMapperInternal; | ||
| 42 | VAddr m_vaddress; | ||
| 43 | DAddr m_daddress; | ||
| 44 | size_t m_size; | ||
| 45 | Core::Asid m_asid; | ||
| 46 | std::unique_ptr<HeapMapperInternal> m_internal; | ||
| 47 | }; | ||
| 48 | |||
| 49 | } // namespace Service::Nvidia::NvCore | ||
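Annotation: the mapper keeps a guest-virtual window (m_vaddress) and a device window (m_daddress) of the same size, so an address inside the window translates by keeping its offset, which is what Map returns as m_daddress + (start - m_vaddress). A tiny model of IsInBounds plus that translation, with made-up example addresses:

// Tiny model of HeapMapper's window bounds check and VAddr -> DAddr translation.
#include <cassert>
#include <cstdint>

struct ToyWindow {
    std::uint64_t vaddress; // guest VA of the window start
    std::uint64_t daddress; // device address of the window start
    std::uint64_t size;

    bool IsInBounds(std::uint64_t start, std::uint64_t len) const {
        return start >= vaddress && start + len <= vaddress + size;
    }

    std::uint64_t Translate(std::uint64_t start) const {
        return daddress + (start - vaddress); // same offset into the device window
    }
};

int main() {
    const ToyWindow window{0x0800'0000, 0x1'0000'0000, 64ull << 20};
    assert(window.IsInBounds(0x0810'0000, 0x1000));
    assert(window.Translate(0x0810'0000) == 0x1'0010'0000);
}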
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 0ca05257e..1b59c6b15 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp | |||
| @@ -2,14 +2,19 @@ | |||
| 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors | 2 | // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors |
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | 3 | // SPDX-License-Identifier: GPL-3.0-or-later |
| 4 | 4 | ||
| 5 | #include <functional> | ||
| 6 | |||
| 5 | #include "common/alignment.h" | 7 | #include "common/alignment.h" |
| 6 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 7 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 11 | #include "core/hle/service/nvdrv/core/heap_mapper.h" | ||
| 8 | #include "core/hle/service/nvdrv/core/nvmap.h" | 12 | #include "core/hle/service/nvdrv/core/nvmap.h" |
| 9 | #include "core/memory.h" | 13 | #include "core/memory.h" |
| 10 | #include "video_core/host1x/host1x.h" | 14 | #include "video_core/host1x/host1x.h" |
| 11 | 15 | ||
| 12 | using Core::Memory::YUZU_PAGESIZE; | 16 | using Core::Memory::YUZU_PAGESIZE; |
| 17 | constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16; | ||
| 13 | 18 | ||
| 14 | namespace Service::Nvidia::NvCore { | 19 | namespace Service::Nvidia::NvCore { |
| 15 | NvMap::Handle::Handle(u64 size_, Id id_) | 20 | NvMap::Handle::Handle(u64 size_, Id id_) |
| @@ -17,9 +22,9 @@ NvMap::Handle::Handle(u64 size_, Id id_) | |||
| 17 | flags.raw = 0; | 22 | flags.raw = 0; |
| 18 | } | 23 | } |
| 19 | 24 | ||
| 20 | NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { | 25 | NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, |
| 26 | NvCore::SessionId pSessionId) { | ||
| 21 | std::scoped_lock lock(mutex); | 27 | std::scoped_lock lock(mutex); |
| 22 | |||
| 23 | // Handles cannot be allocated twice | 28 | // Handles cannot be allocated twice |
| 24 | if (allocated) { | 29 | if (allocated) { |
| 25 | return NvResult::AccessDenied; | 30 | return NvResult::AccessDenied; |
| @@ -28,6 +33,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) | |||
| 28 | flags = pFlags; | 33 | flags = pFlags; |
| 29 | kind = pKind; | 34 | kind = pKind; |
| 30 | align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign; | 35 | align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign; |
| 36 | session_id = pSessionId; | ||
| 31 | 37 | ||
| 32 | // This flag is only applicable for handles with an address passed | 38 | // This flag is only applicable for handles with an address passed |
| 33 | if (pAddress) { | 39 | if (pAddress) { |
| @@ -63,7 +69,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) { | |||
| 63 | return NvResult::Success; | 69 | return NvResult::Success; |
| 64 | } | 70 | } |
| 65 | 71 | ||
| 66 | NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {} | 72 | NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, core{core_} {} |
| 67 | 73 | ||
| 68 | void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) { | 74 | void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) { |
| 69 | std::scoped_lock lock(handles_lock); | 75 | std::scoped_lock lock(handles_lock); |
| @@ -78,12 +84,30 @@ void NvMap::UnmapHandle(Handle& handle_description) { | |||
| 78 | handle_description.unmap_queue_entry.reset(); | 84 | handle_description.unmap_queue_entry.reset(); |
| 79 | } | 85 | } |
| 80 | 86 | ||
| 87 | // Free and unmap the handle from Host1x GMMU | ||
| 88 | if (handle_description.pin_virt_address) { | ||
| 89 | host1x.GMMU().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address), | ||
| 90 | handle_description.aligned_size); | ||
| 91 | host1x.Allocator().Free(handle_description.pin_virt_address, | ||
| 92 | static_cast<u32>(handle_description.aligned_size)); | ||
| 93 | handle_description.pin_virt_address = 0; | ||
| 94 | } | ||
| 95 | |||
| 81 | // Free and unmap the handle from the SMMU | 96 | // Free and unmap the handle from the SMMU |
| 82 | host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address), | 97 | const size_t map_size = handle_description.aligned_size; |
| 83 | handle_description.aligned_size); | 98 | if (!handle_description.in_heap) { |
| 84 | host1x.Allocator().Free(handle_description.pin_virt_address, | 99 | auto& smmu = host1x.MemoryManager(); |
| 85 | static_cast<u32>(handle_description.aligned_size)); | 100 | size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE); |
| 86 | handle_description.pin_virt_address = 0; | 101 | smmu.Unmap(handle_description.d_address, map_size); |
| 102 | smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up)); | ||
| 103 | handle_description.d_address = 0; | ||
| 104 | return; | ||
| 105 | } | ||
| 106 | const VAddr vaddress = handle_description.address; | ||
| 107 | auto* session = core.GetSession(handle_description.session_id); | ||
| 108 | session->mapper->Unmap(vaddress, map_size); | ||
| 109 | handle_description.d_address = 0; | ||
| 110 | handle_description.in_heap = false; | ||
| 87 | } | 111 | } |
| 88 | 112 | ||
| 89 | bool NvMap::TryRemoveHandle(const Handle& handle_description) { | 113 | bool NvMap::TryRemoveHandle(const Handle& handle_description) { |
| @@ -124,22 +148,33 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) { | |||
| 124 | } | 148 | } |
| 125 | } | 149 | } |
| 126 | 150 | ||
| 127 | VAddr NvMap::GetHandleAddress(Handle::Id handle) { | 151 | DAddr NvMap::GetHandleAddress(Handle::Id handle) { |
| 128 | std::scoped_lock lock(handles_lock); | 152 | std::scoped_lock lock(handles_lock); |
| 129 | try { | 153 | try { |
| 130 | return handles.at(handle)->address; | 154 | return handles.at(handle)->d_address; |
| 131 | } catch (std::out_of_range&) { | 155 | } catch (std::out_of_range&) { |
| 132 | return 0; | 156 | return 0; |
| 133 | } | 157 | } |
| 134 | } | 158 | } |
| 135 | 159 | ||
| 136 | u32 NvMap::PinHandle(NvMap::Handle::Id handle) { | 160 | DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) { |
| 137 | auto handle_description{GetHandle(handle)}; | 161 | auto handle_description{GetHandle(handle)}; |
| 138 | if (!handle_description) [[unlikely]] { | 162 | if (!handle_description) [[unlikely]] { |
| 139 | return 0; | 163 | return 0; |
| 140 | } | 164 | } |
| 141 | 165 | ||
| 142 | std::scoped_lock lock(handle_description->mutex); | 166 | std::scoped_lock lock(handle_description->mutex); |
| 167 | const auto map_low_area = [&] { | ||
| 168 | if (handle_description->pin_virt_address == 0) { | ||
| 169 | auto& gmmu_allocator = host1x.Allocator(); | ||
| 170 | auto& gmmu = host1x.GMMU(); | ||
| 171 | u32 address = | ||
| 172 | gmmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)); | ||
| 173 | gmmu.Map(static_cast<GPUVAddr>(address), handle_description->d_address, | ||
| 174 | handle_description->aligned_size); | ||
| 175 | handle_description->pin_virt_address = address; | ||
| 176 | } | ||
| 177 | }; | ||
| 143 | if (!handle_description->pins) { | 178 | if (!handle_description->pins) { |
| 144 | // If we're in the unmap queue we can just remove ourselves and return since we're already | 179 | // If we're in the unmap queue we can just remove ourselves and return since we're already |
| 145 | // mapped | 180 | // mapped |
| @@ -151,37 +186,58 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) { | |||
| 151 | unmap_queue.erase(*handle_description->unmap_queue_entry); | 186 | unmap_queue.erase(*handle_description->unmap_queue_entry); |
| 152 | handle_description->unmap_queue_entry.reset(); | 187 | handle_description->unmap_queue_entry.reset(); |
| 153 | 188 | ||
| 189 | if (low_area_pin) { | ||
| 190 | map_low_area(); | ||
| 191 | handle_description->pins++; | ||
| 192 | return static_cast<DAddr>(handle_description->pin_virt_address); | ||
| 193 | } | ||
| 194 | |||
| 154 | handle_description->pins++; | 195 | handle_description->pins++; |
| 155 | return handle_description->pin_virt_address; | 196 | return handle_description->d_address; |
| 156 | } | 197 | } |
| 157 | } | 198 | } |
| 158 | 199 | ||
| 200 | using namespace std::placeholders; | ||
| 159 | // If not then allocate some space and map it | 201 | // If not then allocate some space and map it |
| 160 | u32 address{}; | 202 | DAddr address{}; |
| 161 | auto& smmu_allocator = host1x.Allocator(); | 203 | auto& smmu = host1x.MemoryManager(); |
| 162 | auto& smmu_memory_manager = host1x.MemoryManager(); | 204 | auto* session = core.GetSession(handle_description->session_id); |
| 163 | while ((address = smmu_allocator.Allocate( | 205 | const VAddr vaddress = handle_description->address; |
| 164 | static_cast<u32>(handle_description->aligned_size))) == 0) { | 206 | const size_t map_size = handle_description->aligned_size; |
| 165 | // Free handles until the allocation succeeds | 207 | if (session->has_preallocated_area && session->mapper->IsInBounds(vaddress, map_size)) { |
| 166 | std::scoped_lock queueLock(unmap_queue_lock); | 208 | handle_description->d_address = session->mapper->Map(vaddress, map_size); |
| 167 | if (auto freeHandleDesc{unmap_queue.front()}) { | 209 | handle_description->in_heap = true; |
| 168 | // Handles in the unmap queue are guaranteed not to be pinned so don't bother | 210 | } else { |
| 169 | // checking if they are before unmapping | 211 | size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE); |
| 170 | std::scoped_lock freeLock(freeHandleDesc->mutex); | 212 | while ((address = smmu.Allocate(aligned_up)) == 0) { |
| 171 | if (handle_description->pin_virt_address) | 213 | // Free handles until the allocation succeeds |
| 172 | UnmapHandle(*freeHandleDesc); | 214 | std::scoped_lock queueLock(unmap_queue_lock); |
| 173 | } else { | 215 | if (auto freeHandleDesc{unmap_queue.front()}) { |
| 174 | LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); | 216 | // Handles in the unmap queue are guaranteed not to be pinned so don't bother |
| 217 | // checking if they are before unmapping | ||
| 218 | std::scoped_lock freeLock(freeHandleDesc->mutex); | ||
| 219 | if (handle_description->d_address) | ||
| 220 | UnmapHandle(*freeHandleDesc); | ||
| 221 | } else { | ||
| 222 | LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); | ||
| 223 | } | ||
| 175 | } | 224 | } |
| 225 | |||
| 226 | handle_description->d_address = address; | ||
| 227 | smmu.Map(address, vaddress, map_size, session->asid, true); | ||
| 228 | handle_description->in_heap = false; | ||
| 176 | } | 229 | } |
| 230 | } | ||
| 177 | 231 | ||
| 178 | smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address, | 232 | if (low_area_pin) { |
| 179 | handle_description->aligned_size); | 233 | map_low_area(); |
| 180 | handle_description->pin_virt_address = address; | ||
| 181 | } | 234 | } |
| 182 | 235 | ||
| 183 | handle_description->pins++; | 236 | handle_description->pins++; |
| 184 | return handle_description->pin_virt_address; | 237 | if (low_area_pin) { |
| 238 | return static_cast<DAddr>(handle_description->pin_virt_address); | ||
| 239 | } | ||
| 240 | return handle_description->d_address; | ||
| 185 | } | 241 | } |
| 186 | 242 | ||
| 187 | void NvMap::UnpinHandle(Handle::Id handle) { | 243 | void NvMap::UnpinHandle(Handle::Id handle) { |
| @@ -232,7 +288,7 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna | |||
| 232 | LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); | 288 | LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); |
| 233 | } else if (handle_description->dupes == 0) { | 289 | } else if (handle_description->dupes == 0) { |
| 234 | // Force unmap the handle | 290 | // Force unmap the handle |
| 235 | if (handle_description->pin_virt_address) { | 291 | if (handle_description->d_address) { |
| 236 | std::scoped_lock queueLock(unmap_queue_lock); | 292 | std::scoped_lock queueLock(unmap_queue_lock); |
| 237 | UnmapHandle(*handle_description); | 293 | UnmapHandle(*handle_description); |
| 238 | } | 294 | } |
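Annotation: PinHandle now resolves a handle to a device address through one of two paths. If the handle's guest address lies inside the session's preallocated heap window, it goes through the HeapMapper and is marked in_heap; otherwise a fresh SMMU allocation is made, rounded up to BIG_PAGE_SIZE (YUZU_PAGESIZE * 16, i.e. 64 KiB with 4 KiB pages). A sketch of that decision plus the align-up helper, with ToySession and the sample addresses invented here:

// Sketch of the two pinning paths in NvMap::PinHandle.
#include <cstdint>
#include <iostream>

constexpr std::uint64_t kPageSize = 0x1000;             // models YUZU_PAGESIZE
constexpr std::uint64_t kBigPageSize = kPageSize * 16;  // models BIG_PAGE_SIZE (64 KiB)

constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return (value + align - 1) / align * align;          // models Common::AlignUp
}

struct ToySession {
    bool has_preallocated_area;
    std::uint64_t heap_start;
    std::uint64_t heap_size;

    bool InHeapWindow(std::uint64_t addr, std::uint64_t size) const {
        return has_preallocated_area && addr >= heap_start &&
               addr + size <= heap_start + heap_size;
    }
};

int main() {
    const ToySession session{true, 0x0800'0000, 64ull << 20};
    const std::uint64_t vaddr = 0x0900'0000;
    const std::uint64_t map_size = 0x1'2345;

    if (session.InHeapWindow(vaddr, map_size)) {
        std::cout << "heap path: session->mapper->Map(vaddr, size), in_heap = true\n";
    } else {
        std::cout << "smmu path: allocate 0x" << std::hex << AlignUp(map_size, kBigPageSize)
                  << " bytes, then smmu.Map(...), in_heap = false\n";
    }
}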
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index a8e573890..d7f695845 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | 14 | ||
| 15 | #include "common/bit_field.h" | 15 | #include "common/bit_field.h" |
| 16 | #include "common/common_types.h" | 16 | #include "common/common_types.h" |
| 17 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 17 | #include "core/hle/service/nvdrv/nvdata.h" | 18 | #include "core/hle/service/nvdrv/nvdata.h" |
| 18 | 19 | ||
| 19 | namespace Tegra { | 20 | namespace Tegra { |
| @@ -25,6 +26,8 @@ class Host1x; | |||
| 25 | } // namespace Tegra | 26 | } // namespace Tegra |
| 26 | 27 | ||
| 27 | namespace Service::Nvidia::NvCore { | 28 | namespace Service::Nvidia::NvCore { |
| 29 | |||
| 30 | class Container; | ||
| 28 | /** | 31 | /** |
| 29 | * @brief The nvmap core class holds the global state for nvmap and provides methods to manage | 32 | * @brief The nvmap core class holds the global state for nvmap and provides methods to manage |
| 30 | * handles | 33 | * handles |
| @@ -48,7 +51,7 @@ public: | |||
| 48 | using Id = u32; | 51 | using Id = u32; |
| 49 | Id id; //!< A globally unique identifier for this handle | 52 | Id id; //!< A globally unique identifier for this handle |
| 50 | 53 | ||
| 51 | s32 pins{}; | 54 | s64 pins{}; |
| 52 | u32 pin_virt_address{}; | 55 | u32 pin_virt_address{}; |
| 53 | std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{}; | 56 | std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{}; |
| 54 | 57 | ||
| @@ -61,15 +64,18 @@ public: | |||
| 61 | } flags{}; | 64 | } flags{}; |
| 62 | static_assert(sizeof(Flags) == sizeof(u32)); | 65 | static_assert(sizeof(Flags) == sizeof(u32)); |
| 63 | 66 | ||
| 64 | u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to, | 67 | VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to, |
| 65 | //!< this can also be in the nvdrv tmem | 68 | //!< this can also be in the nvdrv tmem |
| 66 | bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC | 69 | bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC |
| 67 | //!< call | 70 | //!< call |
| 68 | 71 | ||
| 69 | u8 kind{}; //!< Used for memory compression | 72 | u8 kind{}; //!< Used for memory compression |
| 70 | bool allocated{}; //!< If the handle has been allocated with `Alloc` | 73 | bool allocated{}; //!< If the handle has been allocated with `Alloc` |
| 74 | bool in_heap{}; | ||
| 75 | NvCore::SessionId session_id{}; | ||
| 71 | 76 | ||
| 72 | u64 dma_map_addr{}; //! remove me after implementing pinning. | 77 | DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds |
| 78 | //!< to, this can also be in the nvdrv tmem | ||
| 73 | 79 | ||
| 74 | Handle(u64 size, Id id); | 80 | Handle(u64 size, Id id); |
| 75 | 81 | ||
| @@ -77,7 +83,8 @@ public: | |||
| 77 | * @brief Sets up the handle with the given memory config, can allocate memory from the tmem | 83 | * @brief Sets up the handle with the given memory config, can allocate memory from the tmem |
| 78 | * if a 0 address is passed | 84 | * if a 0 address is passed |
| 79 | */ | 85 | */ |
| 80 | [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress); | 86 | [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, |
| 87 | NvCore::SessionId pSessionId); | ||
| 81 | 88 | ||
| 82 | /** | 89 | /** |
| 83 | * @brief Increases the dupe counter of the handle for the given session | 90 | * @brief Increases the dupe counter of the handle for the given session |
| @@ -108,7 +115,7 @@ public: | |||
| 108 | bool can_unlock; //!< If the address region is ready to be unlocked | 115 | bool can_unlock; //!< If the address region is ready to be unlocked |
| 109 | }; | 116 | }; |
| 110 | 117 | ||
| 111 | explicit NvMap(Tegra::Host1x::Host1x& host1x); | 118 | explicit NvMap(Container& core, Tegra::Host1x::Host1x& host1x); |
| 112 | 119 | ||
| 113 | /** | 120 | /** |
| 114 | * @brief Creates an unallocated handle of the given size | 121 | * @brief Creates an unallocated handle of the given size |
| @@ -117,7 +124,7 @@ public: | |||
| 117 | 124 | ||
| 118 | std::shared_ptr<Handle> GetHandle(Handle::Id handle); | 125 | std::shared_ptr<Handle> GetHandle(Handle::Id handle); |
| 119 | 126 | ||
| 120 | VAddr GetHandleAddress(Handle::Id handle); | 127 | DAddr GetHandleAddress(Handle::Id handle); |
| 121 | 128 | ||
| 122 | /** | 129 | /** |
| 123 | * @brief Maps a handle into the SMMU address space | 130 | * @brief Maps a handle into the SMMU address space |
| @@ -125,7 +132,7 @@ public: | |||
| 125 | * number of calls to `UnpinHandle` | 132 | * number of calls to `UnpinHandle` |
| 126 | * @return The SMMU virtual address that the handle has been mapped to | 133 | * @return The SMMU virtual address that the handle has been mapped to |
| 127 | */ | 134 | */ |
| 128 | u32 PinHandle(Handle::Id handle); | 135 | DAddr PinHandle(Handle::Id handle, bool low_area_pin); |
| 129 | 136 | ||
| 130 | /** | 137 | /** |
| 131 | * @brief When this has been called an equal number of times to `PinHandle` for the supplied | 138 | * @brief When this has been called an equal number of times to `PinHandle` for the supplied |
| @@ -172,5 +179,7 @@ private: | |||
| 172 | * @return If the handle was removed from the map | 179 | * @return If the handle was removed from the map |
| 173 | */ | 180 | */ |
| 174 | bool TryRemoveHandle(const Handle& handle_description); | 181 | bool TryRemoveHandle(const Handle& handle_description); |
| 182 | |||
| 183 | Container& core; | ||
| 175 | }; | 184 | }; |
| 176 | } // namespace Service::Nvidia::NvCore | 185 | } // namespace Service::Nvidia::NvCore |
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index a04538d5d..8adaddc60 100644 --- a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <vector> | 7 | #include <vector> |
| 8 | 8 | ||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 10 | #include "core/hle/service/nvdrv/nvdata.h" | 11 | #include "core/hle/service/nvdrv/nvdata.h" |
| 11 | 12 | ||
| 12 | namespace Core { | 13 | namespace Core { |
| @@ -62,7 +63,7 @@ public: | |||
| 62 | * Called once a device is opened | 63 | * Called once a device is opened |
| 63 | * @param fd The device fd | 64 | * @param fd The device fd |
| 64 | */ | 65 | */ |
| 65 | virtual void OnOpen(DeviceFD fd) = 0; | 66 | virtual void OnOpen(NvCore::SessionId session_id, DeviceFD fd) = 0; |
| 66 | 67 | ||
| 67 | /** | 68 | /** |
| 68 | * Called once a device is closed | 69 | * Called once a device is closed |
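Annotation: OnOpen now receives the Container SessionId alongside the fd, so a device file can remember which nvdrv session (and therefore which process/ASID) opened it and use that for later map and pin operations. A toy illustration of a device stashing the id at open time (ToySessionId/ToyDevice are invented here; real devices derive from nvdevice and most of the ones in this commit simply ignore the parameter):

// Toy illustration of the new OnOpen(SessionId, fd) signature.
#include <cstddef>
#include <iostream>

struct ToySessionId {
    std::size_t id;
};

class ToyDevice {
public:
    void OnOpen(ToySessionId session_id, int fd) {
        session = session_id; // kept so later ioctls can resolve the owning session
        std::cout << "fd " << fd << " opened under session " << session.id << '\n';
    }

    void OnClose(int fd) {
        std::cout << "fd " << fd << " closed\n";
    }

private:
    ToySessionId session{};
};

int main() {
    ToyDevice dev;
    dev.OnOpen(ToySessionId{3}, 42);
    dev.OnClose(42);
}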
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 05a43d8dc..c1ebbd62d 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp | |||
| @@ -35,14 +35,14 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 35 | return NvResult::NotImplemented; | 35 | return NvResult::NotImplemented; |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | void nvdisp_disp0::OnOpen(DeviceFD fd) {} | 38 | void nvdisp_disp0::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} |
| 39 | void nvdisp_disp0::OnClose(DeviceFD fd) {} | 39 | void nvdisp_disp0::OnClose(DeviceFD fd) {} |
| 40 | 40 | ||
| 41 | void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, | 41 | void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, |
| 42 | u32 height, u32 stride, android::BufferTransformFlags transform, | 42 | u32 height, u32 stride, android::BufferTransformFlags transform, |
| 43 | const Common::Rectangle<int>& crop_rect, | 43 | const Common::Rectangle<int>& crop_rect, |
| 44 | std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) { | 44 | std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) { |
| 45 | const VAddr addr = nvmap.GetHandleAddress(buffer_handle); | 45 | const DAddr addr = nvmap.GetHandleAddress(buffer_handle); |
| 46 | LOG_TRACE(Service, | 46 | LOG_TRACE(Service, |
| 47 | "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", | 47 | "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", |
| 48 | addr, offset, width, height, stride, format); | 48 | addr, offset, width, height, stride, format); |
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index daee05fe8..5f13a50a2 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | |||
| @@ -32,7 +32,7 @@ public: | |||
| 32 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 32 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 33 | std::span<u8> inline_output) override; | 33 | std::span<u8> inline_output) override; |
| 34 | 34 | ||
| 35 | void OnOpen(DeviceFD fd) override; | 35 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 36 | void OnClose(DeviceFD fd) override; | 36 | void OnClose(DeviceFD fd) override; |
| 37 | 37 | ||
| 38 | /// Performs a screen flip, drawing the buffer pointed to by the handle. | 38 | /// Performs a screen flip, drawing the buffer pointed to by the handle. |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 6b3639008..e6646ba04 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | |||
| @@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i | |||
| 86 | return NvResult::NotImplemented; | 86 | return NvResult::NotImplemented; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | void nvhost_as_gpu::OnOpen(DeviceFD fd) {} | 89 | void nvhost_as_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} |
| 90 | void nvhost_as_gpu::OnClose(DeviceFD fd) {} | 90 | void nvhost_as_gpu::OnClose(DeviceFD fd) {} |
| 91 | 91 | ||
| 92 | NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) { | 92 | NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) { |
| @@ -206,6 +206,8 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) { | |||
| 206 | static_cast<u32>(aligned_size >> page_size_bits)); | 206 | static_cast<u32>(aligned_size >> page_size_bits)); |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | nvmap.UnpinHandle(mapping->handle); | ||
| 210 | |||
| 209 | // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state | 211 | // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state |
| 210 | // Only FreeSpace can unmap them fully | 212 | // Only FreeSpace can unmap them fully |
| 211 | if (mapping->sparse_alloc) { | 213 | if (mapping->sparse_alloc) { |
| @@ -293,12 +295,12 @@ NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) { | |||
| 293 | return NvResult::BadValue; | 295 | return NvResult::BadValue; |
| 294 | } | 296 | } |
| 295 | 297 | ||
| 296 | VAddr cpu_address{static_cast<VAddr>( | 298 | DAddr base = nvmap.PinHandle(entry.handle, false); |
| 297 | handle->address + | 299 | DAddr device_address{static_cast<DAddr>( |
| 298 | (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; | 300 | base + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; |
| 299 | 301 | ||
| 300 | gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind), | 302 | gmmu->Map(virtual_address, device_address, size, |
| 301 | use_big_pages); | 303 | static_cast<Tegra::PTEKind>(entry.kind), use_big_pages); |
| 302 | } | 304 | } |
| 303 | } | 305 | } |
| 304 | 306 | ||
| @@ -331,9 +333,9 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { | |||
| 331 | } | 333 | } |
| 332 | 334 | ||
| 333 | u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; | 335 | u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)}; |
| 334 | VAddr cpu_address{mapping->ptr + params.buffer_offset}; | 336 | VAddr device_address{mapping->ptr + params.buffer_offset}; |
| 335 | 337 | ||
| 336 | gmmu->Map(gpu_address, cpu_address, params.mapping_size, | 338 | gmmu->Map(gpu_address, device_address, params.mapping_size, |
| 337 | static_cast<Tegra::PTEKind>(params.kind), mapping->big_page); | 339 | static_cast<Tegra::PTEKind>(params.kind), mapping->big_page); |
| 338 | 340 | ||
| 339 | return NvResult::Success; | 341 | return NvResult::Success; |
| @@ -349,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { | |||
| 349 | return NvResult::BadValue; | 351 | return NvResult::BadValue; |
| 350 | } | 352 | } |
| 351 | 353 | ||
| 352 | VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)}; | 354 | DAddr device_address{ |
| 355 | static_cast<DAddr>(nvmap.PinHandle(params.handle, false) + params.buffer_offset)}; | ||
| 353 | u64 size{params.mapping_size ? params.mapping_size : handle->orig_size}; | 356 | u64 size{params.mapping_size ? params.mapping_size : handle->orig_size}; |
| 354 | 357 | ||
| 355 | bool big_page{[&]() { | 358 | bool big_page{[&]() { |
| @@ -373,15 +376,14 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { | |||
| 373 | } | 376 | } |
| 374 | 377 | ||
| 375 | const bool use_big_pages = alloc->second.big_pages && big_page; | 378 | const bool use_big_pages = alloc->second.big_pages && big_page; |
| 376 | gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind), | 379 | gmmu->Map(params.offset, device_address, size, static_cast<Tegra::PTEKind>(params.kind), |
| 377 | use_big_pages); | 380 | use_big_pages); |
| 378 | 381 | ||
| 379 | auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true, | 382 | auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size, |
| 380 | use_big_pages, alloc->second.sparse)}; | 383 | true, use_big_pages, alloc->second.sparse)}; |
| 381 | alloc->second.mappings.push_back(mapping); | 384 | alloc->second.mappings.push_back(mapping); |
| 382 | mapping_map[params.offset] = mapping; | 385 | mapping_map[params.offset] = mapping; |
| 383 | } else { | 386 | } else { |
| 384 | |||
| 385 | auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; | 387 | auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; |
| 386 | u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE}; | 388 | u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE}; |
| 387 | u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; | 389 | u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; |
| @@ -394,11 +396,11 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { | |||
| 394 | return NvResult::InsufficientMemory; | 396 | return NvResult::InsufficientMemory; |
| 395 | } | 397 | } |
| 396 | 398 | ||
| 397 | gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), | 399 | gmmu->Map(params.offset, device_address, Common::AlignUp(size, page_size), |
| 398 | static_cast<Tegra::PTEKind>(params.kind), big_page); | 400 | static_cast<Tegra::PTEKind>(params.kind), big_page); |
| 399 | 401 | ||
| 400 | auto mapping{ | 402 | auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size, |
| 401 | std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)}; | 403 | false, big_page, false)}; |
| 402 | mapping_map[params.offset] = mapping; | 404 | mapping_map[params.offset] = mapping; |
| 403 | } | 405 | } |
| 404 | 406 | ||
| @@ -433,6 +435,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) { | |||
| 433 | gmmu->Unmap(params.offset, mapping->size); | 435 | gmmu->Unmap(params.offset, mapping->size); |
| 434 | } | 436 | } |
| 435 | 437 | ||
| 438 | nvmap.UnpinHandle(mapping->handle); | ||
| 439 | |||
| 436 | mapping_map.erase(params.offset); | 440 | mapping_map.erase(params.offset); |
| 437 | } catch (const std::out_of_range&) { | 441 | } catch (const std::out_of_range&) { |
| 438 | LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset); | 442 | LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset); |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 79a21683d..7d0a99988 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | |||
| @@ -55,7 +55,7 @@ public: | |||
| 55 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 55 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 56 | std::span<u8> inline_output) override; | 56 | std::span<u8> inline_output) override; |
| 57 | 57 | ||
| 58 | void OnOpen(DeviceFD fd) override; | 58 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 59 | void OnClose(DeviceFD fd) override; | 59 | void OnClose(DeviceFD fd) override; |
| 60 | 60 | ||
| 61 | Kernel::KEvent* QueryEvent(u32 event_id) override; | 61 | Kernel::KEvent* QueryEvent(u32 event_id) override; |
| @@ -159,16 +159,18 @@ private: | |||
| 159 | NvCore::NvMap& nvmap; | 159 | NvCore::NvMap& nvmap; |
| 160 | 160 | ||
| 161 | struct Mapping { | 161 | struct Mapping { |
| 162 | VAddr ptr; | 162 | NvCore::NvMap::Handle::Id handle; |
| 163 | DAddr ptr; | ||
| 163 | u64 offset; | 164 | u64 offset; |
| 164 | u64 size; | 165 | u64 size; |
| 165 | bool fixed; | 166 | bool fixed; |
| 166 | bool big_page; // Only valid if fixed == false | 167 | bool big_page; // Only valid if fixed == false |
| 167 | bool sparse_alloc; | 168 | bool sparse_alloc; |
| 168 | 169 | ||
| 169 | Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_) | 170 | Mapping(NvCore::NvMap::Handle::Id handle_, DAddr ptr_, u64 offset_, u64 size_, bool fixed_, |
| 170 | : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_), | 171 | bool big_page_, bool sparse_alloc_) |
| 171 | sparse_alloc(sparse_alloc_) {} | 172 | : handle(handle_), ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), |
| 173 | big_page(big_page_), sparse_alloc(sparse_alloc_) {} | ||
| 172 | }; | 174 | }; |
| 173 | 175 | ||
| 174 | struct Allocation { | 176 | struct Allocation { |
| @@ -212,9 +214,6 @@ private: | |||
| 212 | bool initialised{}; | 214 | bool initialised{}; |
| 213 | } vm; | 215 | } vm; |
| 214 | std::shared_ptr<Tegra::MemoryManager> gmmu; | 216 | std::shared_ptr<Tegra::MemoryManager> gmmu; |
| 215 | |||
| 216 | // s32 channel{}; | ||
| 217 | // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE}; | ||
| 218 | }; | 217 | }; |
| 219 | 218 | ||
| 220 | } // namespace Service::Nvidia::Devices | 219 | } // namespace Service::Nvidia::Devices |
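The Mapping record above now carries the originating nvmap handle id and a device address (DAddr) instead of a guest CPU pointer, which is what lets the UnmapBuffer path release the pin taken at map time. Below is a minimal, self-contained sketch of that pin/unpin bookkeeping; SimpleNvMap, HandleId, and the fake address computation are illustrative stand-ins, not the emulator's real types.

```cpp
#include <cstdint>
#include <memory>
#include <unordered_map>

using DAddr = std::uint64_t;    // device address stand-in
using HandleId = std::uint32_t; // nvmap handle id stand-in

// Illustrative pin bookkeeping: each map pins the backing handle, and the
// stored handle id lets the unmap path release that pin later.
struct SimpleNvMap {
    std::unordered_map<HandleId, int> pin_counts;

    DAddr Pin(HandleId id) {
        ++pin_counts[id];
        return static_cast<DAddr>(id) << 16; // fabricated device address for the sketch
    }

    void Unpin(HandleId id) {
        if (auto it = pin_counts.find(id); it != pin_counts.end() && --it->second == 0) {
            pin_counts.erase(it);
        }
    }
};

struct Mapping {
    HandleId handle; // remembered so the unmap path knows what to unpin
    DAddr ptr;
    std::uint64_t offset;
    std::uint64_t size;
};

class AddressSpace {
public:
    explicit AddressSpace(SimpleNvMap& nvmap) : nvmap_{nvmap} {}

    void Map(HandleId handle, std::uint64_t offset, std::uint64_t size) {
        const DAddr device_address = nvmap_.Pin(handle);
        mappings_[offset] =
            std::make_shared<Mapping>(Mapping{handle, device_address, offset, size});
    }

    void Unmap(std::uint64_t offset) {
        if (auto it = mappings_.find(offset); it != mappings_.end()) {
            nvmap_.Unpin(it->second->handle); // release the pin taken in Map
            mappings_.erase(it);
        }
    }

private:
    SimpleNvMap& nvmap_;
    std::unordered_map<std::uint64_t, std::shared_ptr<Mapping>> mappings_;
};
```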
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index b8dd34e24..250d01de3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | |||
| @@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inp | |||
| 76 | return NvResult::NotImplemented; | 76 | return NvResult::NotImplemented; |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | void nvhost_ctrl::OnOpen(DeviceFD fd) {} | 79 | void nvhost_ctrl::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} |
| 80 | 80 | ||
| 81 | void nvhost_ctrl::OnClose(DeviceFD fd) {} | 81 | void nvhost_ctrl::OnClose(DeviceFD fd) {} |
| 82 | 82 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 992124b60..403f1a746 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | |||
| @@ -32,7 +32,7 @@ public: | |||
| 32 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 32 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 33 | std::span<u8> inline_output) override; | 33 | std::span<u8> inline_output) override; |
| 34 | 34 | ||
| 35 | void OnOpen(DeviceFD fd) override; | 35 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 36 | void OnClose(DeviceFD fd) override; | 36 | void OnClose(DeviceFD fd) override; |
| 37 | 37 | ||
| 38 | Kernel::KEvent* QueryEvent(u32 event_id) override; | 38 | Kernel::KEvent* QueryEvent(u32 event_id) override; |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index 3e0c96456..ddd85678b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | |||
| @@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> | |||
| 82 | return NvResult::NotImplemented; | 82 | return NvResult::NotImplemented; |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {} | 85 | void nvhost_ctrl_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} |
| 86 | void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {} | 86 | void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {} |
| 87 | 87 | ||
| 88 | NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) { | 88 | NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) { |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h index d170299bd..d2ab05b21 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | |||
| @@ -28,7 +28,7 @@ public: | |||
| 28 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 28 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 29 | std::span<u8> inline_output) override; | 29 | std::span<u8> inline_output) override; |
| 30 | 30 | ||
| 31 | void OnOpen(DeviceFD fd) override; | 31 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 32 | void OnClose(DeviceFD fd) override; | 32 | void OnClose(DeviceFD fd) override; |
| 33 | 33 | ||
| 34 | Kernel::KEvent* QueryEvent(u32 event_id) override; | 34 | Kernel::KEvent* QueryEvent(u32 event_id) override; |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index b0395c2f0..bf12d69a5 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | |||
| @@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 120 | return NvResult::NotImplemented; | 120 | return NvResult::NotImplemented; |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | void nvhost_gpu::OnOpen(DeviceFD fd) {} | 123 | void nvhost_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} |
| 124 | void nvhost_gpu::OnClose(DeviceFD fd) {} | 124 | void nvhost_gpu::OnClose(DeviceFD fd) {} |
| 125 | 125 | ||
| 126 | NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) { | 126 | NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) { |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 88fd228ff..e34a978db 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h | |||
| @@ -47,7 +47,7 @@ public: | |||
| 47 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 47 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 48 | std::span<u8> inline_output) override; | 48 | std::span<u8> inline_output) override; |
| 49 | 49 | ||
| 50 | void OnOpen(DeviceFD fd) override; | 50 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 51 | void OnClose(DeviceFD fd) override; | 51 | void OnClose(DeviceFD fd) override; |
| 52 | 52 | ||
| 53 | Kernel::KEvent* QueryEvent(u32 event_id) override; | 53 | Kernel::KEvent* QueryEvent(u32 event_id) override; |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index f43914e1b..2c0ac2a46 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp | |||
| @@ -35,7 +35,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 35 | case 0x7: | 35 | case 0x7: |
| 36 | return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output); | 36 | return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output); |
| 37 | case 0x9: | 37 | case 0x9: |
| 38 | return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output); | 38 | return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output, fd); |
| 39 | case 0xa: | 39 | case 0xa: |
| 40 | return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output); | 40 | return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output); |
| 41 | default: | 41 | default: |
| @@ -68,9 +68,10 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 68 | return NvResult::NotImplemented; | 68 | return NvResult::NotImplemented; |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | void nvhost_nvdec::OnOpen(DeviceFD fd) { | 71 | void nvhost_nvdec::OnOpen(NvCore::SessionId session_id, DeviceFD fd) { |
| 72 | LOG_INFO(Service_NVDRV, "NVDEC video stream started"); | 72 | LOG_INFO(Service_NVDRV, "NVDEC video stream started"); |
| 73 | system.SetNVDECActive(true); | 73 | system.SetNVDECActive(true); |
| 74 | sessions[fd] = session_id; | ||
| 74 | } | 75 | } |
| 75 | 76 | ||
| 76 | void nvhost_nvdec::OnClose(DeviceFD fd) { | 77 | void nvhost_nvdec::OnClose(DeviceFD fd) { |
| @@ -81,6 +82,10 @@ void nvhost_nvdec::OnClose(DeviceFD fd) { | |||
| 81 | system.GPU().ClearCdmaInstance(iter->second); | 82 | system.GPU().ClearCdmaInstance(iter->second); |
| 82 | } | 83 | } |
| 83 | system.SetNVDECActive(false); | 84 | system.SetNVDECActive(false); |
| 85 | auto it = sessions.find(fd); | ||
| 86 | if (it != sessions.end()) { | ||
| 87 | sessions.erase(it); | ||
| 88 | } | ||
| 84 | } | 89 | } |
| 85 | 90 | ||
| 86 | } // namespace Service::Nvidia::Devices | 91 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index ad2233c49..627686757 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h | |||
| @@ -20,7 +20,7 @@ public: | |||
| 20 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 20 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 21 | std::span<u8> inline_output) override; | 21 | std::span<u8> inline_output) override; |
| 22 | 22 | ||
| 23 | void OnOpen(DeviceFD fd) override; | 23 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 24 | void OnClose(DeviceFD fd) override; | 24 | void OnClose(DeviceFD fd) override; |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 74c701b95..a0a7bfa40 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/hle/kernel/k_process.h" | ||
| 11 | #include "core/hle/service/nvdrv/core/container.h" | 12 | #include "core/hle/service/nvdrv/core/container.h" |
| 12 | #include "core/hle/service/nvdrv/core/nvmap.h" | 13 | #include "core/hle/service/nvdrv/core/nvmap.h" |
| 13 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | 14 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" |
| @@ -95,6 +96,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De | |||
| 95 | offset += SliceVectors(data, fence_thresholds, params.fence_count, offset); | 96 | offset += SliceVectors(data, fence_thresholds, params.fence_count, offset); |
| 96 | 97 | ||
| 97 | auto& gpu = system.GPU(); | 98 | auto& gpu = system.GPU(); |
| 99 | auto* session = core.GetSession(sessions[fd]); | ||
| 100 | |||
| 98 | if (gpu.UseNvdec()) { | 101 | if (gpu.UseNvdec()) { |
| 99 | for (std::size_t i = 0; i < syncpt_increments.size(); i++) { | 102 | for (std::size_t i = 0; i < syncpt_increments.size(); i++) { |
| 100 | const SyncptIncr& syncpt_incr = syncpt_increments[i]; | 103 | const SyncptIncr& syncpt_incr = syncpt_increments[i]; |
| @@ -106,8 +109,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De | |||
| 106 | const auto object = nvmap.GetHandle(cmd_buffer.memory_id); | 109 | const auto object = nvmap.GetHandle(cmd_buffer.memory_id); |
| 107 | ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); | 110 | ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); |
| 108 | Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); | 111 | Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); |
| 109 | system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), | 112 | session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), |
| 110 | cmdlist.size() * sizeof(u32)); | 113 | cmdlist.size() * sizeof(u32)); |
| 111 | gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); | 114 | gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); |
| 112 | } | 115 | } |
| 113 | // Some games expect command_buffers to be written back | 116 | // Some games expect command_buffers to be written back |
| @@ -133,10 +136,12 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) { | |||
| 133 | return NvResult::Success; | 136 | return NvResult::Success; |
| 134 | } | 137 | } |
| 135 | 138 | ||
| 136 | NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) { | 139 | NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, |
| 140 | DeviceFD fd) { | ||
| 137 | const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size())); | 141 | const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size())); |
| 138 | for (size_t i = 0; i < num_entries; i++) { | 142 | for (size_t i = 0; i < num_entries; i++) { |
| 139 | entries[i].map_address = nvmap.PinHandle(entries[i].map_handle); | 143 | DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, true); |
| 144 | entries[i].map_address = static_cast<u32>(pin_address); | ||
| 140 | } | 145 | } |
| 141 | 146 | ||
| 142 | return NvResult::Success; | 147 | return NvResult::Success; |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 7ce748e18..900db81d2 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h | |||
| @@ -4,7 +4,9 @@ | |||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <deque> | 6 | #include <deque> |
| 7 | #include <unordered_map> | ||
| 7 | #include <vector> | 8 | #include <vector> |
| 9 | |||
| 8 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 9 | #include "common/swap.h" | 11 | #include "common/swap.h" |
| 10 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | 12 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" |
| @@ -111,7 +113,7 @@ protected: | |||
| 111 | NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd); | 113 | NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd); |
| 112 | NvResult GetSyncpoint(IoctlGetSyncpoint& params); | 114 | NvResult GetSyncpoint(IoctlGetSyncpoint& params); |
| 113 | NvResult GetWaitbase(IoctlGetWaitbase& params); | 115 | NvResult GetWaitbase(IoctlGetWaitbase& params); |
| 114 | NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries); | 116 | NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd); |
| 115 | NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries); | 117 | NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries); |
| 116 | NvResult SetSubmitTimeout(u32 timeout); | 118 | NvResult SetSubmitTimeout(u32 timeout); |
| 117 | 119 | ||
| @@ -125,6 +127,7 @@ protected: | |||
| 125 | NvCore::NvMap& nvmap; | 127 | NvCore::NvMap& nvmap; |
| 126 | NvCore::ChannelType channel_type; | 128 | NvCore::ChannelType channel_type; |
| 127 | std::array<u32, MaxSyncPoints> device_syncpoints{}; | 129 | std::array<u32, MaxSyncPoints> device_syncpoints{}; |
| 130 | std::unordered_map<DeviceFD, NvCore::SessionId> sessions; | ||
| 128 | }; | 131 | }; |
| 129 | }; // namespace Devices | 132 | }; // namespace Devices |
| 130 | } // namespace Service::Nvidia | 133 | } // namespace Service::Nvidia |
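nvhost_nvdec_common (and nvhost_vic and nvmap in the nearby hunks) now remembers which NvCore::SessionId opened each DeviceFD, so submit-time work can read from the memory of the process that opened the fd rather than a single global application process. A rough sketch of that per-fd association, using plain stand-in aliases for the session id and device fd types:

```cpp
#include <cstdint>
#include <optional>
#include <unordered_map>

using DeviceFD = std::int32_t;   // stand-in for the real DeviceFD
using SessionId = std::uint32_t; // stand-in for NvCore::SessionId

class Device {
public:
    void OnOpen(SessionId session_id, DeviceFD fd) {
        sessions_[fd] = session_id; // remember which session opened this fd
    }

    void OnClose(DeviceFD fd) {
        sessions_.erase(fd); // drop the association when the fd goes away
    }

    // Submit-style path: resolve the session that owns this fd before
    // touching guest memory on its behalf.
    std::optional<SessionId> SessionFor(DeviceFD fd) const {
        if (auto it = sessions_.find(fd); it != sessions_.end()) {
            return it->second;
        }
        return std::nullopt;
    }

private:
    std::unordered_map<DeviceFD, SessionId> sessions_;
};
```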
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp index 9e6b86458..f87d53f12 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp | |||
| @@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 44 | return NvResult::NotImplemented; | 44 | return NvResult::NotImplemented; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | void nvhost_nvjpg::OnOpen(DeviceFD fd) {} | 47 | void nvhost_nvjpg::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} |
| 48 | void nvhost_nvjpg::OnClose(DeviceFD fd) {} | 48 | void nvhost_nvjpg::OnClose(DeviceFD fd) {} |
| 49 | 49 | ||
| 50 | NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) { | 50 | NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) { |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h index 790c97f6a..def9c254d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h | |||
| @@ -22,7 +22,7 @@ public: | |||
| 22 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 22 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 23 | std::span<u8> inline_output) override; | 23 | std::span<u8> inline_output) override; |
| 24 | 24 | ||
| 25 | void OnOpen(DeviceFD fd) override; | 25 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 26 | void OnClose(DeviceFD fd) override; | 26 | void OnClose(DeviceFD fd) override; |
| 27 | 27 | ||
| 28 | private: | 28 | private: |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index 87f8d7c22..bf090f5eb 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | |||
| @@ -33,7 +33,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 33 | case 0x3: | 33 | case 0x3: |
| 34 | return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output); | 34 | return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output); |
| 35 | case 0x9: | 35 | case 0x9: |
| 36 | return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output); | 36 | return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output, fd); |
| 37 | case 0xa: | 37 | case 0xa: |
| 38 | return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output); | 38 | return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output); |
| 39 | default: | 39 | default: |
| @@ -68,7 +68,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 68 | return NvResult::NotImplemented; | 68 | return NvResult::NotImplemented; |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | void nvhost_vic::OnOpen(DeviceFD fd) {} | 71 | void nvhost_vic::OnOpen(NvCore::SessionId session_id, DeviceFD fd) { |
| 72 | sessions[fd] = session_id; | ||
| 73 | } | ||
| 72 | 74 | ||
| 73 | void nvhost_vic::OnClose(DeviceFD fd) { | 75 | void nvhost_vic::OnClose(DeviceFD fd) { |
| 74 | auto& host1x_file = core.Host1xDeviceFile(); | 76 | auto& host1x_file = core.Host1xDeviceFile(); |
| @@ -76,6 +78,7 @@ void nvhost_vic::OnClose(DeviceFD fd) { | |||
| 76 | if (iter != host1x_file.fd_to_id.end()) { | 78 | if (iter != host1x_file.fd_to_id.end()) { |
| 77 | system.GPU().ClearCdmaInstance(iter->second); | 79 | system.GPU().ClearCdmaInstance(iter->second); |
| 78 | } | 80 | } |
| 81 | sessions.erase(fd); | ||
| 79 | } | 82 | } |
| 80 | 83 | ||
| 81 | } // namespace Service::Nvidia::Devices | 84 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index cadbcb0a5..0cc04354a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h | |||
| @@ -19,7 +19,7 @@ public: | |||
| 19 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 19 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 20 | std::span<u8> inline_output) override; | 20 | std::span<u8> inline_output) override; |
| 21 | 21 | ||
| 22 | void OnOpen(DeviceFD fd) override; | 22 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 23 | void OnClose(DeviceFD fd) override; | 23 | void OnClose(DeviceFD fd) override; |
| 24 | }; | 24 | }; |
| 25 | } // namespace Service::Nvidia::Devices | 25 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 71b2e62ec..da61a3bfe 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -36,9 +36,9 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, | |||
| 36 | case 0x3: | 36 | case 0x3: |
| 37 | return WrapFixed(this, &nvmap::IocFromId, input, output); | 37 | return WrapFixed(this, &nvmap::IocFromId, input, output); |
| 38 | case 0x4: | 38 | case 0x4: |
| 39 | return WrapFixed(this, &nvmap::IocAlloc, input, output); | 39 | return WrapFixed(this, &nvmap::IocAlloc, input, output, fd); |
| 40 | case 0x5: | 40 | case 0x5: |
| 41 | return WrapFixed(this, &nvmap::IocFree, input, output); | 41 | return WrapFixed(this, &nvmap::IocFree, input, output, fd); |
| 42 | case 0x9: | 42 | case 0x9: |
| 43 | return WrapFixed(this, &nvmap::IocParam, input, output); | 43 | return WrapFixed(this, &nvmap::IocParam, input, output); |
| 44 | case 0xe: | 44 | case 0xe: |
| @@ -67,8 +67,15 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st | |||
| 67 | return NvResult::NotImplemented; | 67 | return NvResult::NotImplemented; |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | void nvmap::OnOpen(DeviceFD fd) {} | 70 | void nvmap::OnOpen(NvCore::SessionId session_id, DeviceFD fd) { |
| 71 | void nvmap::OnClose(DeviceFD fd) {} | 71 | sessions[fd] = session_id; |
| 72 | } | ||
| 73 | void nvmap::OnClose(DeviceFD fd) { | ||
| 74 | auto it = sessions.find(fd); | ||
| 75 | if (it != sessions.end()) { | ||
| 76 | sessions.erase(it); | ||
| 77 | } | ||
| 78 | } | ||
| 72 | 79 | ||
| 73 | NvResult nvmap::IocCreate(IocCreateParams& params) { | 80 | NvResult nvmap::IocCreate(IocCreateParams& params) { |
| 74 | LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); | 81 | LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); |
| @@ -87,7 +94,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) { | |||
| 87 | return NvResult::Success; | 94 | return NvResult::Success; |
| 88 | } | 95 | } |
| 89 | 96 | ||
| 90 | NvResult nvmap::IocAlloc(IocAllocParams& params) { | 97 | NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) { |
| 91 | LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); | 98 | LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); |
| 92 | 99 | ||
| 93 | if (!params.handle) { | 100 | if (!params.handle) { |
| @@ -116,15 +123,15 @@ NvResult nvmap::IocAlloc(IocAllocParams& params) { | |||
| 116 | return NvResult::InsufficientMemory; | 123 | return NvResult::InsufficientMemory; |
| 117 | } | 124 | } |
| 118 | 125 | ||
| 119 | const auto result = | 126 | const auto result = handle_description->Alloc(params.flags, params.align, params.kind, |
| 120 | handle_description->Alloc(params.flags, params.align, params.kind, params.address); | 127 | params.address, sessions[fd]); |
| 121 | if (result != NvResult::Success) { | 128 | if (result != NvResult::Success) { |
| 122 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); | 129 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); |
| 123 | return result; | 130 | return result; |
| 124 | } | 131 | } |
| 125 | bool is_out_io{}; | 132 | bool is_out_io{}; |
| 126 | ASSERT(system.ApplicationProcess() | 133 | auto process = container.GetSession(sessions[fd])->process; |
| 127 | ->GetPageTable() | 134 | ASSERT(process->GetPageTable() |
| 128 | .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, | 135 | .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, |
| 129 | handle_description->size, | 136 | handle_description->size, |
| 130 | Kernel::KMemoryPermission::None, true, false) | 137 | Kernel::KMemoryPermission::None, true, false) |
| @@ -224,7 +231,7 @@ NvResult nvmap::IocParam(IocParamParams& params) { | |||
| 224 | return NvResult::Success; | 231 | return NvResult::Success; |
| 225 | } | 232 | } |
| 226 | 233 | ||
| 227 | NvResult nvmap::IocFree(IocFreeParams& params) { | 234 | NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) { |
| 228 | LOG_DEBUG(Service_NVDRV, "called"); | 235 | LOG_DEBUG(Service_NVDRV, "called"); |
| 229 | 236 | ||
| 230 | if (!params.handle) { | 237 | if (!params.handle) { |
| @@ -233,9 +240,9 @@ NvResult nvmap::IocFree(IocFreeParams& params) { | |||
| 233 | } | 240 | } |
| 234 | 241 | ||
| 235 | if (auto freeInfo{file.FreeHandle(params.handle, false)}) { | 242 | if (auto freeInfo{file.FreeHandle(params.handle, false)}) { |
| 243 | auto process = container.GetSession(sessions[fd])->process; | ||
| 236 | if (freeInfo->can_unlock) { | 244 | if (freeInfo->can_unlock) { |
| 237 | ASSERT(system.ApplicationProcess() | 245 | ASSERT(process->GetPageTable() |
| 238 | ->GetPageTable() | ||
| 239 | .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size) | 246 | .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size) |
| 240 | .IsSuccess()); | 247 | .IsSuccess()); |
| 241 | } | 248 | } |
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index 049c11028..d07d85f88 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h | |||
| @@ -33,7 +33,7 @@ public: | |||
| 33 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, | 33 | NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output, |
| 34 | std::span<u8> inline_output) override; | 34 | std::span<u8> inline_output) override; |
| 35 | 35 | ||
| 36 | void OnOpen(DeviceFD fd) override; | 36 | void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; |
| 37 | void OnClose(DeviceFD fd) override; | 37 | void OnClose(DeviceFD fd) override; |
| 38 | 38 | ||
| 39 | enum class HandleParameterType : u32_le { | 39 | enum class HandleParameterType : u32_le { |
| @@ -100,11 +100,11 @@ public: | |||
| 100 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); | 100 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); |
| 101 | 101 | ||
| 102 | NvResult IocCreate(IocCreateParams& params); | 102 | NvResult IocCreate(IocCreateParams& params); |
| 103 | NvResult IocAlloc(IocAllocParams& params); | 103 | NvResult IocAlloc(IocAllocParams& params, DeviceFD fd); |
| 104 | NvResult IocGetId(IocGetIdParams& params); | 104 | NvResult IocGetId(IocGetIdParams& params); |
| 105 | NvResult IocFromId(IocFromIdParams& params); | 105 | NvResult IocFromId(IocFromIdParams& params); |
| 106 | NvResult IocParam(IocParamParams& params); | 106 | NvResult IocParam(IocParamParams& params); |
| 107 | NvResult IocFree(IocFreeParams& params); | 107 | NvResult IocFree(IocFreeParams& params, DeviceFD fd); |
| 108 | 108 | ||
| 109 | private: | 109 | private: |
| 110 | /// Id to use for the next handle that is created. | 110 | /// Id to use for the next handle that is created. |
| @@ -115,6 +115,7 @@ private: | |||
| 115 | 115 | ||
| 116 | NvCore::Container& container; | 116 | NvCore::Container& container; |
| 117 | NvCore::NvMap& file; | 117 | NvCore::NvMap& file; |
| 118 | std::unordered_map<DeviceFD, NvCore::SessionId> sessions; | ||
| 118 | }; | 119 | }; |
| 119 | 120 | ||
| 120 | } // namespace Service::Nvidia::Devices | 121 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 9e46ee8dd..cb256e5b4 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp | |||
| @@ -45,13 +45,22 @@ void EventInterface::FreeEvent(Kernel::KEvent* event) { | |||
| 45 | void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) { | 45 | void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) { |
| 46 | auto server_manager = std::make_unique<ServerManager>(system); | 46 | auto server_manager = std::make_unique<ServerManager>(system); |
| 47 | auto module = std::make_shared<Module>(system); | 47 | auto module = std::make_shared<Module>(system); |
| 48 | server_manager->RegisterNamedService("nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv")); | 48 | const auto NvdrvInterfaceFactoryForApplication = [&, module] { |
| 49 | server_manager->RegisterNamedService("nvdrv:a", | 49 | return std::make_shared<NVDRV>(system, module, "nvdrv"); |
| 50 | std::make_shared<NVDRV>(system, module, "nvdrv:a")); | 50 | }; |
| 51 | server_manager->RegisterNamedService("nvdrv:s", | 51 | const auto NvdrvInterfaceFactoryForApplets = [&, module] { |
| 52 | std::make_shared<NVDRV>(system, module, "nvdrv:s")); | 52 | return std::make_shared<NVDRV>(system, module, "nvdrv:a"); |
| 53 | server_manager->RegisterNamedService("nvdrv:t", | 53 | }; |
| 54 | std::make_shared<NVDRV>(system, module, "nvdrv:t")); | 54 | const auto NvdrvInterfaceFactoryForSysmodules = [&, module] { |
| 55 | return std::make_shared<NVDRV>(system, module, "nvdrv:s"); | ||
| 56 | }; | ||
| 57 | const auto NvdrvInterfaceFactoryForTesting = [&, module] { | ||
| 58 | return std::make_shared<NVDRV>(system, module, "nvdrv:t"); | ||
| 59 | }; | ||
| 60 | server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication); | ||
| 61 | server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets); | ||
| 62 | server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules); | ||
| 63 | server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactoryForTesting); | ||
| 55 | server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system)); | 64 | server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system)); |
| 56 | nvnflinger.SetNVDrvInstance(module); | 65 | nvnflinger.SetNVDrvInstance(module); |
| 57 | ServerManager::RunServer(std::move(server_manager)); | 66 | ServerManager::RunServer(std::move(server_manager)); |
| @@ -113,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD fd) const { | |||
| 113 | return NvResult::Success; | 122 | return NvResult::Success; |
| 114 | } | 123 | } |
| 115 | 124 | ||
| 116 | DeviceFD Module::Open(const std::string& device_name) { | 125 | DeviceFD Module::Open(const std::string& device_name, NvCore::SessionId session_id) { |
| 117 | auto it = builders.find(device_name); | 126 | auto it = builders.find(device_name); |
| 118 | if (it == builders.end()) { | 127 | if (it == builders.end()) { |
| 119 | LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name); | 128 | LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name); |
| @@ -124,7 +133,7 @@ DeviceFD Module::Open(const std::string& device_name) { | |||
| 124 | auto& builder = it->second; | 133 | auto& builder = it->second; |
| 125 | auto device = builder(fd)->second; | 134 | auto device = builder(fd)->second; |
| 126 | 135 | ||
| 127 | device->OnOpen(fd); | 136 | device->OnOpen(session_id, fd); |
| 128 | 137 | ||
| 129 | return fd; | 138 | return fd; |
| 130 | } | 139 | } |
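LoopProcess now hands RegisterNamedService a factory lambda per service name, each capturing the shared Module, presumably so every client connection can be given a fresh NVDRV interface (and with it its own session) instead of all names sharing one pre-built instance. The general shape of that pattern, sketched with generic stand-in types rather than yuzu's ServerManager:

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Stand-ins for the real Module / NVDRV interface types.
struct Module {};
struct Interface {
    Interface(std::shared_ptr<Module> module, std::string name)
        : module_{std::move(module)}, name_{std::move(name)} {}
    std::shared_ptr<Module> module_;
    std::string name_;
};

class ServiceRegistry {
public:
    using Factory = std::function<std::shared_ptr<Interface>()>;

    void RegisterNamedService(const std::string& name, Factory factory) {
        factories_[name] = std::move(factory);
    }

    // Each incoming connection gets its own interface object.
    std::shared_ptr<Interface> Connect(const std::string& name) {
        return factories_.at(name)();
    }

private:
    std::unordered_map<std::string, Factory> factories_;
};

int main() {
    auto module = std::make_shared<Module>();
    ServiceRegistry registry;
    // One factory per service name, all sharing the same module.
    for (const std::string name : {"nvdrv", "nvdrv:a", "nvdrv:s", "nvdrv:t"}) {
        registry.RegisterNamedService(name, [module, name] {
            return std::make_shared<Interface>(module, name);
        });
    }
    auto a = registry.Connect("nvdrv");
    auto b = registry.Connect("nvdrv");
    std::cout << std::boolalpha << (a != b) << '\n'; // true: separate interface per connection
    return 0;
}
```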
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index d8622b3ca..c594f0e5e 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h | |||
| @@ -77,7 +77,7 @@ public: | |||
| 77 | NvResult VerifyFD(DeviceFD fd) const; | 77 | NvResult VerifyFD(DeviceFD fd) const; |
| 78 | 78 | ||
| 79 | /// Opens a device node and returns a file descriptor to it. | 79 | /// Opens a device node and returns a file descriptor to it. |
| 80 | DeviceFD Open(const std::string& device_name); | 80 | DeviceFD Open(const std::string& device_name, NvCore::SessionId session_id); |
| 81 | 81 | ||
| 82 | /// Sends an ioctl command to the specified file descriptor. | 82 | /// Sends an ioctl command to the specified file descriptor. |
| 83 | NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output); | 83 | NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output); |
| @@ -93,6 +93,10 @@ public: | |||
| 93 | 93 | ||
| 94 | NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event); | 94 | NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event); |
| 95 | 95 | ||
| 96 | NvCore::Container& GetContainer() { | ||
| 97 | return container; | ||
| 98 | } | ||
| 99 | |||
| 96 | private: | 100 | private: |
| 97 | friend class EventInterface; | 101 | friend class EventInterface; |
| 98 | friend class Service::Nvnflinger::Nvnflinger; | 102 | friend class Service::Nvnflinger::Nvnflinger; |
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index c8a880e84..ffe72f281 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp | |||
| @@ -3,8 +3,11 @@ | |||
| 3 | // SPDX-License-Identifier: GPL-3.0-or-later | 3 | // SPDX-License-Identifier: GPL-3.0-or-later |
| 4 | 4 | ||
| 5 | #include "common/logging/log.h" | 5 | #include "common/logging/log.h" |
| 6 | #include "common/scope_exit.h" | ||
| 7 | #include "common/string_util.h" | ||
| 6 | #include "core/core.h" | 8 | #include "core/core.h" |
| 7 | #include "core/hle/kernel/k_event.h" | 9 | #include "core/hle/kernel/k_event.h" |
| 10 | #include "core/hle/kernel/k_process.h" | ||
| 8 | #include "core/hle/kernel/k_readable_event.h" | 11 | #include "core/hle/kernel/k_readable_event.h" |
| 9 | #include "core/hle/service/ipc_helpers.h" | 12 | #include "core/hle/service/ipc_helpers.h" |
| 10 | #include "core/hle/service/nvdrv/nvdata.h" | 13 | #include "core/hle/service/nvdrv/nvdata.h" |
| @@ -27,7 +30,7 @@ void NVDRV::Open(HLERequestContext& ctx) { | |||
| 27 | } | 30 | } |
| 28 | 31 | ||
| 29 | const auto& buffer = ctx.ReadBuffer(); | 32 | const auto& buffer = ctx.ReadBuffer(); |
| 30 | const std::string device_name(buffer.begin(), buffer.end()); | 33 | const std::string device_name(Common::StringFromBuffer(buffer)); |
| 31 | 34 | ||
| 32 | if (device_name == "/dev/nvhost-prof-gpu") { | 35 | if (device_name == "/dev/nvhost-prof-gpu") { |
| 33 | rb.Push<DeviceFD>(0); | 36 | rb.Push<DeviceFD>(0); |
| @@ -37,7 +40,7 @@ void NVDRV::Open(HLERequestContext& ctx) { | |||
| 37 | return; | 40 | return; |
| 38 | } | 41 | } |
| 39 | 42 | ||
| 40 | DeviceFD fd = nvdrv->Open(device_name); | 43 | DeviceFD fd = nvdrv->Open(device_name, session_id); |
| 41 | 44 | ||
| 42 | rb.Push<DeviceFD>(fd); | 45 | rb.Push<DeviceFD>(fd); |
| 43 | rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed); | 46 | rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed); |
| @@ -150,12 +153,29 @@ void NVDRV::Close(HLERequestContext& ctx) { | |||
| 150 | 153 | ||
| 151 | void NVDRV::Initialize(HLERequestContext& ctx) { | 154 | void NVDRV::Initialize(HLERequestContext& ctx) { |
| 152 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); | 155 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); |
| 156 | IPC::ResponseBuilder rb{ctx, 3}; | ||
| 157 | SCOPE_EXIT({ | ||
| 158 | rb.Push(ResultSuccess); | ||
| 159 | rb.PushEnum(NvResult::Success); | ||
| 160 | }); | ||
| 153 | 161 | ||
| 154 | is_initialized = true; | 162 | if (is_initialized) { |
| 163 | // No need to initialize again | ||
| 164 | return; | ||
| 165 | } | ||
| 155 | 166 | ||
| 156 | IPC::ResponseBuilder rb{ctx, 3}; | 167 | IPC::RequestParser rp{ctx}; |
| 157 | rb.Push(ResultSuccess); | 168 | const auto process_handle{ctx.GetCopyHandle(0)}; |
| 158 | rb.PushEnum(NvResult::Success); | 169 | // The transfer memory is lent to nvdrv as a work buffer since nvdrv is |
| 170 | // unable to allocate as much memory on its own. For HLE it's unnecessary to handle it | ||
| 171 | [[maybe_unused]] const auto transfer_memory_handle{ctx.GetCopyHandle(1)}; | ||
| 172 | [[maybe_unused]] const auto transfer_memory_size = rp.Pop<u32>(); | ||
| 173 | |||
| 174 | auto& container = nvdrv->GetContainer(); | ||
| 175 | auto process = ctx.GetObjectFromHandle<Kernel::KProcess>(process_handle); | ||
| 176 | session_id = container.OpenSession(process.GetPointerUnsafe()); | ||
| 177 | |||
| 178 | is_initialized = true; | ||
| 159 | } | 179 | } |
| 160 | 180 | ||
| 161 | void NVDRV::QueryEvent(HLERequestContext& ctx) { | 181 | void NVDRV::QueryEvent(HLERequestContext& ctx) { |
| @@ -242,6 +262,9 @@ NVDRV::NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* | |||
| 242 | RegisterHandlers(functions); | 262 | RegisterHandlers(functions); |
| 243 | } | 263 | } |
| 244 | 264 | ||
| 245 | NVDRV::~NVDRV() = default; | 265 | NVDRV::~NVDRV() { |
| 266 | auto& container = nvdrv->GetContainer(); | ||
| 267 | container.CloseSession(session_id); | ||
| 268 | } | ||
| 246 | 269 | ||
| 247 | } // namespace Service::Nvidia | 270 | } // namespace Service::Nvidia |
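NVDRV::Initialize now builds the response writer up front and relies on a scope-exit helper so both the early-return (already initialized) path and the full path push the same success reply. A generic version of that scope-exit idiom, written as a plain RAII guard rather than yuzu's SCOPE_EXIT macro, looks like this:

```cpp
#include <iostream>
#include <utility>

// Minimal scope-exit guard: runs the stored callable when it goes out of scope.
template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F f) : f_{std::move(f)} {}
    ~ScopeExit() { f_(); }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F f_;
};

bool g_initialized = false;

void Initialize() {
    // The reply is pushed no matter which return path is taken below.
    ScopeExit push_reply{[] { std::cout << "reply: success\n"; }};

    if (g_initialized) {
        return; // already initialized; the guard still pushes the reply
    }

    // ... parse handles, open a session, etc. ...
    g_initialized = true;
}

int main() {
    Initialize();
    Initialize(); // second call takes the early-return path, still replies
    return 0;
}
```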
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h index 6e98115dc..f2195ae1e 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.h +++ b/src/core/hle/service/nvdrv/nvdrv_interface.h | |||
| @@ -35,6 +35,7 @@ private: | |||
| 35 | 35 | ||
| 36 | u64 pid{}; | 36 | u64 pid{}; |
| 37 | bool is_initialized{}; | 37 | bool is_initialized{}; |
| 38 | NvCore::SessionId session_id{}; | ||
| 38 | Common::ScratchBuffer<u8> output_buffer; | 39 | Common::ScratchBuffer<u8> output_buffer; |
| 39 | Common::ScratchBuffer<u8> inline_output_buffer; | 40 | Common::ScratchBuffer<u8> inline_output_buffer; |
| 40 | }; | 41 | }; |
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp index 2fef6cc1a..86e272b41 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp | |||
| @@ -87,19 +87,20 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, | |||
| 87 | R_SUCCEED(); | 87 | R_SUCCEED(); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) { | 90 | Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::DeviceFD nvmap_fd) { |
| 91 | // Free the handle. | 91 | // Free the handle. |
| 92 | Nvidia::Devices::nvmap::IocFreeParams free_params{ | 92 | Nvidia::Devices::nvmap::IocFreeParams free_params{ |
| 93 | .handle = handle, | 93 | .handle = handle, |
| 94 | }; | 94 | }; |
| 95 | R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed); | 95 | R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, |
| 96 | VI::ResultOperationFailed); | ||
| 96 | 97 | ||
| 97 | // We succeeded. | 98 | // We succeeded. |
| 98 | R_SUCCEED(); | 99 | R_SUCCEED(); |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 101 | Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer, | 102 | Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer, |
| 102 | u32 size) { | 103 | u32 size, Nvidia::DeviceFD nvmap_fd) { |
| 103 | // Assign the allocated memory to the handle. | 104 | // Assign the allocated memory to the handle. |
| 104 | Nvidia::Devices::nvmap::IocAllocParams alloc_params{ | 105 | Nvidia::Devices::nvmap::IocAllocParams alloc_params{ |
| 105 | .handle = handle, | 106 | .handle = handle, |
| @@ -109,16 +110,16 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce | |||
| 109 | .kind = 0, | 110 | .kind = 0, |
| 110 | .address = GetInteger(buffer), | 111 | .address = GetInteger(buffer), |
| 111 | }; | 112 | }; |
| 112 | R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed); | 113 | R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, |
| 114 | VI::ResultOperationFailed); | ||
| 113 | 115 | ||
| 114 | // We succeeded. | 116 | // We succeeded. |
| 115 | R_SUCCEED(); | 117 | R_SUCCEED(); |
| 116 | } | 118 | } |
| 117 | 119 | ||
| 118 | Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, | 120 | Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd, |
| 119 | Common::ProcessAddress buffer, u32 size) { | 121 | Common::ProcessAddress buffer, u32 size) { |
| 120 | // Get the nvmap device. | 122 | // Get the nvmap device. |
| 121 | auto nvmap_fd = nvdrv.Open("/dev/nvmap"); | ||
| 122 | auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd); | 123 | auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd); |
| 123 | ASSERT(nvmap != nullptr); | 124 | ASSERT(nvmap != nullptr); |
| 124 | 125 | ||
| @@ -127,11 +128,11 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, | |||
| 127 | 128 | ||
| 128 | // Ensure we maintain a clean state on failure. | 129 | // Ensure we maintain a clean state on failure. |
| 129 | ON_RESULT_FAILURE { | 130 | ON_RESULT_FAILURE { |
| 130 | ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle))); | 131 | ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle, nvmap_fd))); |
| 131 | }; | 132 | }; |
| 132 | 133 | ||
| 133 | // Assign the allocated memory to the handle. | 134 | // Assign the allocated memory to the handle. |
| 134 | R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size)); | 135 | R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd)); |
| 135 | } | 136 | } |
| 136 | 137 | ||
| 137 | constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888; | 138 | constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888; |
| @@ -197,9 +198,13 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u | |||
| 197 | std::addressof(m_buffer_page_group), m_system, | 198 | std::addressof(m_buffer_page_group), m_system, |
| 198 | SharedBufferSize)); | 199 | SharedBufferSize)); |
| 199 | 200 | ||
| 201 | auto& container = m_nvdrv->GetContainer(); | ||
| 202 | m_session_id = container.OpenSession(m_system.ApplicationProcess()); | ||
| 203 | m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id); | ||
| 204 | |||
| 200 | // Create an nvmap handle for the buffer and assign the memory to it. | 205 | // Create an nvmap handle for the buffer and assign the memory to it. |
| 201 | R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, map_address, | 206 | R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, |
| 202 | SharedBufferSize)); | 207 | map_address, SharedBufferSize)); |
| 203 | 208 | ||
| 204 | // Record the display id. | 209 | // Record the display id. |
| 205 | m_display_id = display_id; | 210 | m_display_id = display_id; |
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h index c809c01b4..033bf4bbe 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h | |||
| @@ -4,6 +4,8 @@ | |||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include "common/math_util.h" | 6 | #include "common/math_util.h" |
| 7 | #include "core/hle/service/nvdrv/core/container.h" | ||
| 8 | #include "core/hle/service/nvdrv/nvdata.h" | ||
| 7 | #include "core/hle/service/nvnflinger/nvnflinger.h" | 9 | #include "core/hle/service/nvnflinger/nvnflinger.h" |
| 8 | #include "core/hle/service/nvnflinger/ui/fence.h" | 10 | #include "core/hle/service/nvnflinger/ui/fence.h" |
| 9 | 11 | ||
| @@ -53,7 +55,8 @@ private: | |||
| 53 | u64 m_layer_id = 0; | 55 | u64 m_layer_id = 0; |
| 54 | u32 m_buffer_nvmap_handle = 0; | 56 | u32 m_buffer_nvmap_handle = 0; |
| 55 | SharedMemoryPoolLayout m_pool_layout = {}; | 57 | SharedMemoryPoolLayout m_pool_layout = {}; |
| 56 | 58 | Nvidia::DeviceFD m_nvmap_fd = {}; | |
| 59 | Nvidia::NvCore::SessionId m_session_id = {}; | ||
| 57 | std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group; | 60 | std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group; |
| 58 | 61 | ||
| 59 | std::mutex m_guard; | 62 | std::mutex m_guard; |
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp index 0469110e8..71d6fdb0c 100644 --- a/src/core/hle/service/nvnflinger/nvnflinger.cpp +++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp | |||
| @@ -112,9 +112,7 @@ void Nvnflinger::ShutdownLayers() { | |||
| 112 | { | 112 | { |
| 113 | const auto lock_guard = Lock(); | 113 | const auto lock_guard = Lock(); |
| 114 | for (auto& display : displays) { | 114 | for (auto& display : displays) { |
| 115 | for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) { | 115 | display.Abandon(); |
| 116 | display.GetLayer(layer).GetConsumer().Abandon(); | ||
| 117 | } | ||
| 118 | } | 116 | } |
| 119 | 117 | ||
| 120 | is_abandoned = true; | 118 | is_abandoned = true; |
| @@ -126,7 +124,7 @@ void Nvnflinger::ShutdownLayers() { | |||
| 126 | 124 | ||
| 127 | void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { | 125 | void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { |
| 128 | nvdrv = std::move(instance); | 126 | nvdrv = std::move(instance); |
| 129 | disp_fd = nvdrv->Open("/dev/nvdisp_disp0"); | 127 | disp_fd = nvdrv->Open("/dev/nvdisp_disp0", {}); |
| 130 | } | 128 | } |
| 131 | 129 | ||
| 132 | std::optional<u64> Nvnflinger::OpenDisplay(std::string_view name) { | 130 | std::optional<u64> Nvnflinger::OpenDisplay(std::string_view name) { |
| @@ -176,24 +174,28 @@ void Nvnflinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { | |||
| 176 | display.CreateLayer(layer_id, buffer_id, nvdrv->container); | 174 | display.CreateLayer(layer_id, buffer_id, nvdrv->container); |
| 177 | } | 175 | } |
| 178 | 176 | ||
| 179 | void Nvnflinger::OpenLayer(u64 layer_id) { | 177 | bool Nvnflinger::OpenLayer(u64 layer_id) { |
| 180 | const auto lock_guard = Lock(); | 178 | const auto lock_guard = Lock(); |
| 181 | 179 | ||
| 182 | for (auto& display : displays) { | 180 | for (auto& display : displays) { |
| 183 | if (auto* layer = display.FindLayer(layer_id); layer) { | 181 | if (auto* layer = display.FindLayer(layer_id); layer) { |
| 184 | layer->Open(); | 182 | return layer->Open(); |
| 185 | } | 183 | } |
| 186 | } | 184 | } |
| 185 | |||
| 186 | return false; | ||
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | void Nvnflinger::CloseLayer(u64 layer_id) { | 189 | bool Nvnflinger::CloseLayer(u64 layer_id) { |
| 190 | const auto lock_guard = Lock(); | 190 | const auto lock_guard = Lock(); |
| 191 | 191 | ||
| 192 | for (auto& display : displays) { | 192 | for (auto& display : displays) { |
| 193 | if (auto* layer = display.FindLayer(layer_id); layer) { | 193 | if (auto* layer = display.FindLayer(layer_id); layer) { |
| 194 | layer->Close(); | 194 | return layer->Close(); |
| 195 | } | 195 | } |
| 196 | } | 196 | } |
| 197 | |||
| 198 | return false; | ||
| 197 | } | 199 | } |
| 198 | 200 | ||
| 199 | void Nvnflinger::DestroyLayer(u64 layer_id) { | 201 | void Nvnflinger::DestroyLayer(u64 layer_id) { |
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.h b/src/core/hle/service/nvnflinger/nvnflinger.h index 871285764..a60e0ae6b 100644 --- a/src/core/hle/service/nvnflinger/nvnflinger.h +++ b/src/core/hle/service/nvnflinger/nvnflinger.h | |||
| @@ -74,10 +74,10 @@ public: | |||
| 74 | [[nodiscard]] std::optional<u64> CreateLayer(u64 display_id); | 74 | [[nodiscard]] std::optional<u64> CreateLayer(u64 display_id); |
| 75 | 75 | ||
| 76 | /// Opens a layer on all displays for the given layer ID. | 76 | /// Opens a layer on all displays for the given layer ID. |
| 77 | void OpenLayer(u64 layer_id); | 77 | bool OpenLayer(u64 layer_id); |
| 78 | 78 | ||
| 79 | /// Closes a layer on all displays for the given layer ID. | 79 | /// Closes a layer on all displays for the given layer ID. |
| 80 | void CloseLayer(u64 layer_id); | 80 | bool CloseLayer(u64 layer_id); |
| 81 | 81 | ||
| 82 | /// Destroys the given layer ID. | 82 | /// Destroys the given layer ID. |
| 83 | void DestroyLayer(u64 layer_id); | 83 | void DestroyLayer(u64 layer_id); |
diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp index ce70946ec..ede2a1193 100644 --- a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp +++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp | |||
| @@ -22,11 +22,13 @@ GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap, | |||
| 22 | : NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) { | 22 | : NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) { |
| 23 | if (this->BufferId() > 0) { | 23 | if (this->BufferId() > 0) { |
| 24 | m_nvmap->DuplicateHandle(this->BufferId(), true); | 24 | m_nvmap->DuplicateHandle(this->BufferId(), true); |
| 25 | m_nvmap->PinHandle(this->BufferId(), false); | ||
| 25 | } | 26 | } |
| 26 | } | 27 | } |
| 27 | 28 | ||
| 28 | GraphicBuffer::~GraphicBuffer() { | 29 | GraphicBuffer::~GraphicBuffer() { |
| 29 | if (m_nvmap != nullptr && this->BufferId() > 0) { | 30 | if (m_nvmap != nullptr && this->BufferId() > 0) { |
| 31 | m_nvmap->UnpinHandle(this->BufferId()); | ||
| 30 | m_nvmap->FreeHandle(this->BufferId(), true); | 32 | m_nvmap->FreeHandle(this->BufferId(), true); |
| 31 | } | 33 | } |
| 32 | } | 34 | } |
diff --git a/src/core/hle/service/set/system_settings_server.cpp b/src/core/hle/service/set/system_settings_server.cpp index 688c54b58..f40a1c8f3 100644 --- a/src/core/hle/service/set/system_settings_server.cpp +++ b/src/core/hle/service/set/system_settings_server.cpp | |||
| @@ -709,12 +709,12 @@ void ISystemSettingsServer::GetSettingsItemValueSize(HLERequestContext& ctx) { | |||
| 709 | // The category of the setting. This corresponds to the top-level keys of | 709 | // The category of the setting. This corresponds to the top-level keys of |
| 710 | // system_settings.ini. | 710 | // system_settings.ini. |
| 711 | const auto setting_category_buf{ctx.ReadBuffer(0)}; | 711 | const auto setting_category_buf{ctx.ReadBuffer(0)}; |
| 712 | const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()}; | 712 | const std::string setting_category{Common::StringFromBuffer(setting_category_buf)}; |
| 713 | 713 | ||
| 714 | // The name of the setting. This corresponds to the second-level keys of | 714 | // The name of the setting. This corresponds to the second-level keys of |
| 715 | // system_settings.ini. | 715 | // system_settings.ini. |
| 716 | const auto setting_name_buf{ctx.ReadBuffer(1)}; | 716 | const auto setting_name_buf{ctx.ReadBuffer(1)}; |
| 717 | const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()}; | 717 | const std::string setting_name{Common::StringFromBuffer(setting_name_buf)}; |
| 718 | 718 | ||
| 719 | auto settings{GetSettings()}; | 719 | auto settings{GetSettings()}; |
| 720 | u64 response_size{0}; | 720 | u64 response_size{0}; |
| @@ -732,12 +732,12 @@ void ISystemSettingsServer::GetSettingsItemValue(HLERequestContext& ctx) { | |||
| 732 | // The category of the setting. This corresponds to the top-level keys of | 732 | // The category of the setting. This corresponds to the top-level keys of |
| 733 | // system_settings.ini. | 733 | // system_settings.ini. |
| 734 | const auto setting_category_buf{ctx.ReadBuffer(0)}; | 734 | const auto setting_category_buf{ctx.ReadBuffer(0)}; |
| 735 | const std::string setting_category{setting_category_buf.begin(), setting_category_buf.end()}; | 735 | const std::string setting_category{Common::StringFromBuffer(setting_category_buf)}; |
| 736 | 736 | ||
| 737 | // The name of the setting. This corresponds to the second-level keys of | 737 | // The name of the setting. This corresponds to the second-level keys of |
| 738 | // system_settings.ini. | 738 | // system_settings.ini. |
| 739 | const auto setting_name_buf{ctx.ReadBuffer(1)}; | 739 | const auto setting_name_buf{ctx.ReadBuffer(1)}; |
| 740 | const std::string setting_name{setting_name_buf.begin(), setting_name_buf.end()}; | 740 | const std::string setting_name{Common::StringFromBuffer(setting_name_buf)}; |
| 741 | 741 | ||
| 742 | std::vector<u8> value; | 742 | std::vector<u8> value; |
| 743 | auto response = GetSettingsItemValue(value, setting_category, setting_name); | 743 | auto response = GetSettingsItemValue(value, setting_category, setting_name); |
| @@ -1036,6 +1036,11 @@ void ISystemSettingsServer::SetBluetoothEnableFlag(HLERequestContext& ctx) { | |||
| 1036 | } | 1036 | } |
| 1037 | 1037 | ||
| 1038 | void ISystemSettingsServer::GetMiiAuthorId(HLERequestContext& ctx) { | 1038 | void ISystemSettingsServer::GetMiiAuthorId(HLERequestContext& ctx) { |
| 1039 | if (m_system_settings.mii_author_id.IsInvalid()) { | ||
| 1040 | m_system_settings.mii_author_id = Common::UUID::MakeDefault(); | ||
| 1041 | SetSaveNeeded(); | ||
| 1042 | } | ||
| 1043 | |||
| 1039 | LOG_INFO(Service_SET, "called, author_id={}", | 1044 | LOG_INFO(Service_SET, "called, author_id={}", |
| 1040 | m_system_settings.mii_author_id.FormattedString()); | 1045 | m_system_settings.mii_author_id.FormattedString()); |
| 1041 | 1046 | ||
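Several IPC string reads in this patch (the nvdrv device name earlier, and the settings category/name here) switch from copying the whole request buffer into a std::string to Common::StringFromBuffer. The intent is presumably to stop at the first NUL so zero-padded fixed-size buffers don't drag padding into the string; the helper below is an assumed stand-in for that behaviour, not yuzu's actual implementation.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <span>
#include <string>

// Assumed behaviour: take bytes up to (not including) the first NUL,
// or the whole buffer if no NUL is present.
std::string StringFromBuffer(std::span<const std::uint8_t> buffer) {
    const auto first_nul = std::find(buffer.begin(), buffer.end(), std::uint8_t{0});
    return std::string(buffer.begin(), first_nul);
}

int main() {
    const std::uint8_t raw[16] = {'/', 'd', 'e', 'v', '/', 'n', 'v', 'm', 'a', 'p'};
    assert(StringFromBuffer(raw) == "/dev/nvmap"); // trailing zero padding is dropped
    return 0;
}
```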
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp index e2d9cd98a..725311c53 100644 --- a/src/core/hle/service/vi/display/vi_display.cpp +++ b/src/core/hle/service/vi/display/vi_display.cpp | |||
| @@ -91,6 +91,10 @@ void Display::CreateLayer(u64 layer_id, u32 binder_id, | |||
| 91 | layers.emplace_back(std::make_unique<Layer>(layer_id, binder_id, *core, *producer, | 91 | layers.emplace_back(std::make_unique<Layer>(layer_id, binder_id, *core, *producer, |
| 92 | std::move(buffer_item_consumer))); | 92 | std::move(buffer_item_consumer))); |
| 93 | 93 | ||
| 94 | if (is_abandoned) { | ||
| 95 | this->FindLayer(layer_id)->GetConsumer().Abandon(); | ||
| 96 | } | ||
| 97 | |||
| 94 | hos_binder_driver_server.RegisterProducer(std::move(producer)); | 98 | hos_binder_driver_server.RegisterProducer(std::move(producer)); |
| 95 | } | 99 | } |
| 96 | 100 | ||
| @@ -103,6 +107,13 @@ void Display::DestroyLayer(u64 layer_id) { | |||
| 103 | [layer_id](const auto& layer) { return layer->GetLayerId() == layer_id; }); | 107 | [layer_id](const auto& layer) { return layer->GetLayerId() == layer_id; }); |
| 104 | } | 108 | } |
| 105 | 109 | ||
| 110 | void Display::Abandon() { | ||
| 111 | for (auto& layer : layers) { | ||
| 112 | layer->GetConsumer().Abandon(); | ||
| 113 | } | ||
| 114 | is_abandoned = true; | ||
| 115 | } | ||
| 116 | |||
| 106 | Layer* Display::FindLayer(u64 layer_id) { | 117 | Layer* Display::FindLayer(u64 layer_id) { |
| 107 | const auto itr = | 118 | const auto itr = |
| 108 | std::find_if(layers.begin(), layers.end(), [layer_id](const std::unique_ptr<Layer>& layer) { | 119 | std::find_if(layers.begin(), layers.end(), [layer_id](const std::unique_ptr<Layer>& layer) { |
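Display::Abandon abandons the consumers of all existing layers and records the state, and the is_abandoned check added to CreateLayer above makes layers created afterwards start out abandoned as well. A compact sketch of that propagate-to-existing-and-future pattern, using placeholder Layer/Consumer types rather than the real VI classes:

#include <memory>
#include <vector>

struct Consumer {
    bool abandoned{};
    void Abandon() { abandoned = true; }
};

struct Layer {
    Consumer consumer;
    Consumer& GetConsumer() { return consumer; }
};

class Display {
public:
    void Abandon() {
        for (auto& layer : layers) {
            layer->GetConsumer().Abandon();
        }
        is_abandoned = true; // layers created from now on must also be abandoned
    }

    Layer& CreateLayer() {
        auto& layer = *layers.emplace_back(std::make_unique<Layer>());
        if (is_abandoned) {
            layer.GetConsumer().Abandon();
        }
        return layer;
    }

private:
    std::vector<std::unique_ptr<Layer>> layers;
    bool is_abandoned{};
};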
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index 7e68ee79b..8eb8a5155 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ b/src/core/hle/service/vi/display/vi_display.h | |||
| @@ -98,6 +98,8 @@ public: | |||
| 98 | layers.clear(); | 98 | layers.clear(); |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | void Abandon(); | ||
| 102 | |||
| 101 | /// Attempts to find a layer with the given ID. | 103 | /// Attempts to find a layer with the given ID. |
| 102 | /// | 104 | /// |
| 103 | /// @param layer_id The layer ID. | 105 | /// @param layer_id The layer ID. |
| @@ -124,6 +126,7 @@ private: | |||
| 124 | 126 | ||
| 125 | std::vector<std::unique_ptr<Layer>> layers; | 127 | std::vector<std::unique_ptr<Layer>> layers; |
| 126 | Kernel::KEvent* vsync_event{}; | 128 | Kernel::KEvent* vsync_event{}; |
| 129 | bool is_abandoned{}; | ||
| 127 | }; | 130 | }; |
| 128 | 131 | ||
| 129 | } // namespace Service::VI | 132 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/layer/vi_layer.h b/src/core/hle/service/vi/layer/vi_layer.h index 295005e23..f95e2dc71 100644 --- a/src/core/hle/service/vi/layer/vi_layer.h +++ b/src/core/hle/service/vi/layer/vi_layer.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <memory> | 6 | #include <memory> |
| 7 | #include <utility> | ||
| 7 | 8 | ||
| 8 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 9 | 10 | ||
| @@ -75,12 +76,12 @@ public: | |||
| 75 | return open; | 76 | return open; |
| 76 | } | 77 | } |
| 77 | 78 | ||
| 78 | void Close() { | 79 | bool Close() { |
| 79 | open = false; | 80 | return std::exchange(open, false); |
| 80 | } | 81 | } |
| 81 | 82 | ||
| 82 | void Open() { | 83 | bool Open() { |
| 83 | open = true; | 84 | return !std::exchange(open, true); |
| 84 | } | 85 | } |
| 85 | 86 | ||
| 86 | private: | 87 | private: |
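Open and Close now report whether they actually changed the layer's state: std::exchange stores the new value and returns the previous one, so Close() is true only on an open-to-closed transition and Open() only on a closed-to-open transition. That return value is what the VI handlers below check. A standalone illustration of the idiom:

#include <cassert>
#include <utility>

struct LayerState {
    bool open{};

    bool Open() {
        // true only on a closed -> open transition
        return !std::exchange(open, true);
    }

    bool Close() {
        // true only on an open -> closed transition
        return std::exchange(open, false);
    }
};

int main() {
    LayerState layer;
    assert(layer.Open());   // first open succeeds
    assert(!layer.Open());  // double open is reported to the caller
    assert(layer.Close());  // first close succeeds
    assert(!layer.Close()); // double close is reported as well
}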
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index 39d5be90d..1f3d82c57 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include "common/logging/log.h" | 15 | #include "common/logging/log.h" |
| 16 | #include "common/math_util.h" | 16 | #include "common/math_util.h" |
| 17 | #include "common/settings.h" | 17 | #include "common/settings.h" |
| 18 | #include "common/string_util.h" | ||
| 18 | #include "common/swap.h" | 19 | #include "common/swap.h" |
| 19 | #include "core/core_timing.h" | 20 | #include "core/core_timing.h" |
| 20 | #include "core/hle/kernel/k_readable_event.h" | 21 | #include "core/hle/kernel/k_readable_event.h" |
| @@ -694,9 +695,7 @@ private: | |||
| 694 | void OpenLayer(HLERequestContext& ctx) { | 695 | void OpenLayer(HLERequestContext& ctx) { |
| 695 | IPC::RequestParser rp{ctx}; | 696 | IPC::RequestParser rp{ctx}; |
| 696 | const auto name_buf = rp.PopRaw<std::array<u8, 0x40>>(); | 697 | const auto name_buf = rp.PopRaw<std::array<u8, 0x40>>(); |
| 697 | const auto end = std::find(name_buf.begin(), name_buf.end(), '\0'); | 698 | const std::string display_name(Common::StringFromBuffer(name_buf)); |
| 698 | |||
| 699 | const std::string display_name(name_buf.begin(), end); | ||
| 700 | 699 | ||
| 701 | const u64 layer_id = rp.Pop<u64>(); | 700 | const u64 layer_id = rp.Pop<u64>(); |
| 702 | const u64 aruid = rp.Pop<u64>(); | 701 | const u64 aruid = rp.Pop<u64>(); |
| @@ -719,7 +718,12 @@ private: | |||
| 719 | return; | 718 | return; |
| 720 | } | 719 | } |
| 721 | 720 | ||
| 722 | nvnflinger.OpenLayer(layer_id); | 721 | if (!nvnflinger.OpenLayer(layer_id)) { |
| 722 | LOG_WARNING(Service_VI, "Tried to open layer which was already open"); | ||
| 723 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 724 | rb.Push(ResultOperationFailed); | ||
| 725 | return; | ||
| 726 | } | ||
| 723 | 727 | ||
| 724 | android::OutputParcel parcel; | 728 | android::OutputParcel parcel; |
| 725 | parcel.WriteInterface(NativeWindow{*buffer_queue_id}); | 729 | parcel.WriteInterface(NativeWindow{*buffer_queue_id}); |
| @@ -737,7 +741,12 @@ private: | |||
| 737 | 741 | ||
| 738 | LOG_DEBUG(Service_VI, "called. layer_id=0x{:016X}", layer_id); | 742 | LOG_DEBUG(Service_VI, "called. layer_id=0x{:016X}", layer_id); |
| 739 | 743 | ||
| 740 | nvnflinger.CloseLayer(layer_id); | 744 | if (!nvnflinger.CloseLayer(layer_id)) { |
| 745 | LOG_WARNING(Service_VI, "Tried to close layer which was not open"); | ||
| 746 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 747 | rb.Push(ResultOperationFailed); | ||
| 748 | return; | ||
| 749 | } | ||
| 741 | 750 | ||
| 742 | IPC::ResponseBuilder rb{ctx, 2}; | 751 | IPC::ResponseBuilder rb{ctx, 2}; |
| 743 | rb.Push(ResultSuccess); | 752 | rb.Push(ResultSuccess); |
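With the boolean returns in place, OpenLayer and CloseLayer can reject a double open or a close of a layer that was never opened instead of silently succeeding. The shape of that guard, reduced to its essentials; the result type and nvnflinger stand-in below are simplified placeholders, not the real service types:

#include <cstdint>
#include <set>

enum class Result { Success, OperationFailed };

// Placeholder tracking the set of currently open layers, mirroring the
// bool-returning Open/Close interface the handlers now rely on.
struct LayerTracker {
    std::set<std::uint64_t> open_layers;
    bool OpenLayer(std::uint64_t id) { return open_layers.insert(id).second; }
    bool CloseLayer(std::uint64_t id) { return open_layers.erase(id) != 0; }
};

Result HandleCloseLayer(LayerTracker& tracker, std::uint64_t layer_id) {
    // A failed close is surfaced as an error result rather than ignored.
    return tracker.CloseLayer(layer_id) ? Result::Success : Result::OperationFailed;
}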
diff --git a/src/core/loader/nsp.cpp b/src/core/loader/nsp.cpp index 28116ff3a..3016d5f25 100644 --- a/src/core/loader/nsp.cpp +++ b/src/core/loader/nsp.cpp | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "core/file_sys/nca_metadata.h" | 10 | #include "core/file_sys/nca_metadata.h" |
| 11 | #include "core/file_sys/patch_manager.h" | 11 | #include "core/file_sys/patch_manager.h" |
| 12 | #include "core/file_sys/registered_cache.h" | 12 | #include "core/file_sys/registered_cache.h" |
| 13 | #include "core/file_sys/romfs_factory.h" | ||
| 13 | #include "core/file_sys/submission_package.h" | 14 | #include "core/file_sys/submission_package.h" |
| 14 | #include "core/hle/kernel/k_process.h" | 15 | #include "core/hle/kernel/k_process.h" |
| 15 | #include "core/hle/service/filesystem/filesystem.h" | 16 | #include "core/hle/service/filesystem/filesystem.h" |
| @@ -109,6 +110,13 @@ AppLoader_NSP::LoadResult AppLoader_NSP::Load(Kernel::KProcess& process, Core::S | |||
| 109 | return result; | 110 | return result; |
| 110 | } | 111 | } |
| 111 | 112 | ||
| 113 | if (nsp->IsExtractedType()) { | ||
| 114 | system.GetFileSystemController().RegisterProcess( | ||
| 115 | process.GetProcessId(), {}, | ||
| 116 | std::make_shared<FileSys::RomFSFactory>(*this, system.GetContentProvider(), | ||
| 117 | system.GetFileSystemController())); | ||
| 118 | } | ||
| 119 | |||
| 112 | FileSys::VirtualFile update_raw; | 120 | FileSys::VirtualFile update_raw; |
| 113 | if (ReadUpdateRaw(update_raw) == ResultStatus::Success && update_raw != nullptr) { | 121 | if (ReadUpdateRaw(update_raw) == ResultStatus::Success && update_raw != nullptr) { |
| 114 | system.GetFileSystemController().SetPackedUpdate(process.GetProcessId(), | 122 | system.GetFileSystemController().SetPackedUpdate(process.GetProcessId(), |
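For extracted-type NSPs, the loader now registers a RomFSFactory with the filesystem controller, keyed by the process ID, so later filesystem requests from that process can be routed to it. The underlying idea, reduced to a per-process provider registry; the types here are placeholders, not the actual FileSystemController interface:

#include <cstdint>
#include <map>
#include <memory>

// Placeholder provider standing in for FileSys::RomFSFactory.
struct RomFsProvider {
    virtual ~RomFsProvider() = default;
};

// Sketch of a per-process registry: the loader registers a provider once, and
// subsequent RomFS lookups for that process find it here.
class ProviderRegistry {
public:
    void RegisterProcess(std::uint64_t process_id, std::shared_ptr<RomFsProvider> provider) {
        providers[process_id] = std::move(provider);
    }

    RomFsProvider* Find(std::uint64_t process_id) const {
        const auto it = providers.find(process_id);
        return it != providers.end() ? it->second.get() : nullptr;
    }

private:
    std::map<std::uint64_t, std::shared_ptr<RomFsProvider>> providers;
};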
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 8176a41be..1c218566f 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -24,6 +24,8 @@ | |||
| 24 | #include "core/hle/kernel/k_process.h" | 24 | #include "core/hle/kernel/k_process.h" |
| 25 | #include "core/memory.h" | 25 | #include "core/memory.h" |
| 26 | #include "video_core/gpu.h" | 26 | #include "video_core/gpu.h" |
| 27 | #include "video_core/host1x/gpu_device_memory_manager.h" | ||
| 28 | #include "video_core/host1x/host1x.h" | ||
| 27 | #include "video_core/rasterizer_download_area.h" | 29 | #include "video_core/rasterizer_download_area.h" |
| 28 | 30 | ||
| 29 | namespace Core::Memory { | 31 | namespace Core::Memory { |
| @@ -637,17 +639,6 @@ struct Memory::Impl { | |||
| 637 | LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target), | 639 | LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target), |
| 638 | base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE); | 640 | base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE); |
| 639 | 641 | ||
| 640 | // During boot, current_page_table might not be set yet, in which case we need not flush | ||
| 641 | if (system.IsPoweredOn()) { | ||
| 642 | auto& gpu = system.GPU(); | ||
| 643 | for (u64 i = 0; i < size; i++) { | ||
| 644 | const auto page = base + i; | ||
| 645 | if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) { | ||
| 646 | gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE); | ||
| 647 | } | ||
| 648 | } | ||
| 649 | } | ||
| 650 | |||
| 651 | const auto end = base + size; | 642 | const auto end = base + size; |
| 652 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", | 643 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", |
| 653 | base + page_table.pointers.size()); | 644 | base + page_table.pointers.size()); |
| @@ -811,21 +802,33 @@ struct Memory::Impl { | |||
| 811 | return true; | 802 | return true; |
| 812 | } | 803 | } |
| 813 | 804 | ||
| 814 | void HandleRasterizerDownload(VAddr address, size_t size) { | 805 | void HandleRasterizerDownload(VAddr v_address, size_t size) { |
| 806 | const auto* p = GetPointerImpl( | ||
| 807 | v_address, []() {}, []() {}); | ||
| 808 | if (!gpu_device_memory) [[unlikely]] { | ||
| 809 | gpu_device_memory = &system.Host1x().MemoryManager(); | ||
| 810 | } | ||
| 815 | const size_t core = system.GetCurrentHostThreadID(); | 811 | const size_t core = system.GetCurrentHostThreadID(); |
| 816 | auto& current_area = rasterizer_read_areas[core]; | 812 | auto& current_area = rasterizer_read_areas[core]; |
| 817 | const VAddr end_address = address + size; | 813 | gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) { |
| 818 | if (current_area.start_address <= address && end_address <= current_area.end_address) | 814 | const DAddr end_address = address + size; |
| 819 | [[likely]] { | 815 | if (current_area.start_address <= address && end_address <= current_area.end_address) |
| 820 | return; | 816 | [[likely]] { |
| 821 | } | 817 | return; |
| 822 | current_area = system.GPU().OnCPURead(address, size); | 818 | } |
| 819 | current_area = system.GPU().OnCPURead(address, size); | ||
| 820 | }); | ||
| 823 | } | 821 | } |
| 824 | 822 | ||
| 825 | void HandleRasterizerWrite(VAddr address, size_t size) { | 823 | void HandleRasterizerWrite(VAddr v_address, size_t size) { |
| 824 | const auto* p = GetPointerImpl( | ||
| 825 | v_address, []() {}, []() {}); | ||
| 826 | constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1; | 826 | constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1; |
| 827 | const size_t core = std::min(system.GetCurrentHostThreadID(), | 827 | const size_t core = std::min(system.GetCurrentHostThreadID(), |
| 828 | sys_core); // any other calls threads go to syscore. | 828 | sys_core); // any other calls threads go to syscore. |
| 829 | if (!gpu_device_memory) [[unlikely]] { | ||
| 830 | gpu_device_memory = &system.Host1x().MemoryManager(); | ||
| 831 | } | ||
| 829 | // Guard on sys_core; | 832 | // Guard on sys_core; |
| 830 | if (core == sys_core) [[unlikely]] { | 833 | if (core == sys_core) [[unlikely]] { |
| 831 | sys_core_guard.lock(); | 834 | sys_core_guard.lock(); |
| @@ -835,36 +838,53 @@ struct Memory::Impl { | |||
| 835 | sys_core_guard.unlock(); | 838 | sys_core_guard.unlock(); |
| 836 | } | 839 | } |
| 837 | }); | 840 | }); |
| 838 | auto& current_area = rasterizer_write_areas[core]; | 841 | gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) { |
| 839 | VAddr subaddress = address >> YUZU_PAGEBITS; | 842 | auto& current_area = rasterizer_write_areas[core]; |
| 840 | bool do_collection = current_area.last_address == subaddress; | 843 | PAddr subaddress = address >> YUZU_PAGEBITS; |
| 841 | if (!do_collection) [[unlikely]] { | 844 | bool do_collection = current_area.last_address == subaddress; |
| 842 | do_collection = system.GPU().OnCPUWrite(address, size); | 845 | if (!do_collection) [[unlikely]] { |
| 843 | if (!do_collection) { | 846 | do_collection = system.GPU().OnCPUWrite(address, size); |
| 844 | return; | 847 | if (!do_collection) { |
| 848 | return; | ||
| 849 | } | ||
| 850 | current_area.last_address = subaddress; | ||
| 845 | } | 851 | } |
| 846 | current_area.last_address = subaddress; | 852 | gpu_dirty_managers[core].Collect(address, size); |
| 847 | } | 853 | }); |
| 848 | gpu_dirty_managers[core].Collect(address, size); | ||
| 849 | } | 854 | } |
| 850 | 855 | ||
| 851 | struct GPUDirtyState { | 856 | struct GPUDirtyState { |
| 852 | VAddr last_address; | 857 | PAddr last_address; |
| 853 | }; | 858 | }; |
| 854 | 859 | ||
| 855 | void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) { | 860 | void InvalidateGPUMemory(u8* p, size_t size) { |
| 856 | system.GPU().InvalidateRegion(GetInteger(dest_addr), size); | 861 | constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1; |
| 857 | } | 862 | const size_t core = std::min(system.GetCurrentHostThreadID(), |
| 858 | 863 | sys_core); // any other calls threads go to syscore. | |
| 859 | void FlushRegion(Common::ProcessAddress dest_addr, size_t size) { | 864 | if (!gpu_device_memory) [[unlikely]] { |
| 860 | system.GPU().FlushRegion(GetInteger(dest_addr), size); | 865 | gpu_device_memory = &system.Host1x().MemoryManager(); |
| 866 | } | ||
| 867 | // Guard on sys_core; | ||
| 868 | if (core == sys_core) [[unlikely]] { | ||
| 869 | sys_core_guard.lock(); | ||
| 870 | } | ||
| 871 | SCOPE_EXIT({ | ||
| 872 | if (core == sys_core) [[unlikely]] { | ||
| 873 | sys_core_guard.unlock(); | ||
| 874 | } | ||
| 875 | }); | ||
| 876 | auto& gpu = system.GPU(); | ||
| 877 | gpu_device_memory->ApplyOpOnPointer( | ||
| 878 | p, scratch_buffers[core], [&](DAddr address) { gpu.InvalidateRegion(address, size); }); | ||
| 861 | } | 879 | } |
| 862 | 880 | ||
| 863 | Core::System& system; | 881 | Core::System& system; |
| 882 | Tegra::MaxwellDeviceMemoryManager* gpu_device_memory{}; | ||
| 864 | Common::PageTable* current_page_table = nullptr; | 883 | Common::PageTable* current_page_table = nullptr; |
| 865 | std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES> | 884 | std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES> |
| 866 | rasterizer_read_areas{}; | 885 | rasterizer_read_areas{}; |
| 867 | std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{}; | 886 | std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{}; |
| 887 | std::array<Common::ScratchBuffer<u32>, Core::Hardware::NUM_CPU_CORES> scratch_buffers{}; | ||
| 868 | std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers; | 888 | std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers; |
| 869 | std::mutex sys_core_guard; | 889 | std::mutex sys_core_guard; |
| 870 | 890 | ||
| @@ -1059,14 +1079,6 @@ void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug) | |||
| 1059 | impl->MarkRegionDebug(GetInteger(vaddr), size, debug); | 1079 | impl->MarkRegionDebug(GetInteger(vaddr), size, debug); |
| 1060 | } | 1080 | } |
| 1061 | 1081 | ||
| 1062 | void Memory::InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) { | ||
| 1063 | impl->InvalidateRegion(dest_addr, size); | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) { | ||
| 1067 | impl->FlushRegion(dest_addr, size); | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { | 1082 | bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { |
| 1071 | [[maybe_unused]] bool mapped = true; | 1083 | [[maybe_unused]] bool mapped = true; |
| 1072 | [[maybe_unused]] bool rasterizer = false; | 1084 | [[maybe_unused]] bool rasterizer = false; |
| @@ -1078,10 +1090,10 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { | |||
| 1078 | GetInteger(vaddr)); | 1090 | GetInteger(vaddr)); |
| 1079 | mapped = false; | 1091 | mapped = false; |
| 1080 | }, | 1092 | }, |
| 1081 | [&] { | 1093 | [&] { rasterizer = true; }); |
| 1082 | impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); | 1094 | if (rasterizer) { |
| 1083 | rasterizer = true; | 1095 | impl->InvalidateGPUMemory(ptr, size); |
| 1084 | }); | 1096 | } |
| 1085 | 1097 | ||
| 1086 | #ifdef __linux__ | 1098 | #ifdef __linux__ |
| 1087 | if (!rasterizer && mapped) { | 1099 | if (!rasterizer && mapped) { |
diff --git a/src/core/memory.h b/src/core/memory.h index dddfaf4a4..f7e6b297f 100644 --- a/src/core/memory.h +++ b/src/core/memory.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include "common/scratch_buffer.h" | 13 | #include "common/scratch_buffer.h" |
| 14 | #include "common/typed_address.h" | 14 | #include "common/typed_address.h" |
| 15 | #include "core/guest_memory.h" | ||
| 15 | #include "core/hle/result.h" | 16 | #include "core/hle/result.h" |
| 16 | 17 | ||
| 17 | namespace Common { | 18 | namespace Common { |
| @@ -486,10 +487,10 @@ public: | |||
| 486 | void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug); | 487 | void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug); |
| 487 | 488 | ||
| 488 | void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers); | 489 | void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers); |
| 489 | void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size); | 490 | |
| 490 | bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); | 491 | bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); |
| 492 | |||
| 491 | bool InvalidateSeparateHeap(void* fault_address); | 493 | bool InvalidateSeparateHeap(void* fault_address); |
| 492 | void FlushRegion(Common::ProcessAddress dest_addr, size_t size); | ||
| 493 | 494 | ||
| 494 | private: | 495 | private: |
| 495 | Core::System& system; | 496 | Core::System& system; |
| @@ -498,209 +499,9 @@ private: | |||
| 498 | std::unique_ptr<Impl> impl; | 499 | std::unique_ptr<Impl> impl; |
| 499 | }; | 500 | }; |
| 500 | 501 | ||
| 501 | enum GuestMemoryFlags : u32 { | ||
| 502 | Read = 1 << 0, | ||
| 503 | Write = 1 << 1, | ||
| 504 | Safe = 1 << 2, | ||
| 505 | Cached = 1 << 3, | ||
| 506 | |||
| 507 | SafeRead = Read | Safe, | ||
| 508 | SafeWrite = Write | Safe, | ||
| 509 | SafeReadWrite = SafeRead | SafeWrite, | ||
| 510 | SafeReadCachedWrite = SafeReadWrite | Cached, | ||
| 511 | |||
| 512 | UnsafeRead = Read, | ||
| 513 | UnsafeWrite = Write, | ||
| 514 | UnsafeReadWrite = UnsafeRead | UnsafeWrite, | ||
| 515 | UnsafeReadCachedWrite = UnsafeReadWrite | Cached, | ||
| 516 | }; | ||
| 517 | |||
| 518 | namespace { | ||
| 519 | template <typename M, typename T, GuestMemoryFlags FLAGS> | ||
| 520 | class GuestMemory { | ||
| 521 | using iterator = T*; | ||
| 522 | using const_iterator = const T*; | ||
| 523 | using value_type = T; | ||
| 524 | using element_type = T; | ||
| 525 | using iterator_category = std::contiguous_iterator_tag; | ||
| 526 | |||
| 527 | public: | ||
| 528 | GuestMemory() = delete; | ||
| 529 | explicit GuestMemory(M& memory, u64 addr, std::size_t size, | ||
| 530 | Common::ScratchBuffer<T>* backup = nullptr) | ||
| 531 | : m_memory{memory}, m_addr{addr}, m_size{size} { | ||
| 532 | static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write); | ||
| 533 | if constexpr (FLAGS & GuestMemoryFlags::Read) { | ||
| 534 | Read(addr, size, backup); | ||
| 535 | } | ||
| 536 | } | ||
| 537 | |||
| 538 | ~GuestMemory() = default; | ||
| 539 | |||
| 540 | T* data() noexcept { | ||
| 541 | return m_data_span.data(); | ||
| 542 | } | ||
| 543 | |||
| 544 | const T* data() const noexcept { | ||
| 545 | return m_data_span.data(); | ||
| 546 | } | ||
| 547 | |||
| 548 | size_t size() const noexcept { | ||
| 549 | return m_size; | ||
| 550 | } | ||
| 551 | |||
| 552 | size_t size_bytes() const noexcept { | ||
| 553 | return this->size() * sizeof(T); | ||
| 554 | } | ||
| 555 | |||
| 556 | [[nodiscard]] T* begin() noexcept { | ||
| 557 | return this->data(); | ||
| 558 | } | ||
| 559 | |||
| 560 | [[nodiscard]] const T* begin() const noexcept { | ||
| 561 | return this->data(); | ||
| 562 | } | ||
| 563 | |||
| 564 | [[nodiscard]] T* end() noexcept { | ||
| 565 | return this->data() + this->size(); | ||
| 566 | } | ||
| 567 | |||
| 568 | [[nodiscard]] const T* end() const noexcept { | ||
| 569 | return this->data() + this->size(); | ||
| 570 | } | ||
| 571 | |||
| 572 | T& operator[](size_t index) noexcept { | ||
| 573 | return m_data_span[index]; | ||
| 574 | } | ||
| 575 | |||
| 576 | const T& operator[](size_t index) const noexcept { | ||
| 577 | return m_data_span[index]; | ||
| 578 | } | ||
| 579 | |||
| 580 | void SetAddressAndSize(u64 addr, std::size_t size) noexcept { | ||
| 581 | m_addr = addr; | ||
| 582 | m_size = size; | ||
| 583 | m_addr_changed = true; | ||
| 584 | } | ||
| 585 | |||
| 586 | std::span<T> Read(u64 addr, std::size_t size, | ||
| 587 | Common::ScratchBuffer<T>* backup = nullptr) noexcept { | ||
| 588 | m_addr = addr; | ||
| 589 | m_size = size; | ||
| 590 | if (m_size == 0) { | ||
| 591 | m_is_data_copy = true; | ||
| 592 | return {}; | ||
| 593 | } | ||
| 594 | |||
| 595 | if (this->TrySetSpan()) { | ||
| 596 | if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 597 | m_memory.FlushRegion(m_addr, this->size_bytes()); | ||
| 598 | } | ||
| 599 | } else { | ||
| 600 | if (backup) { | ||
| 601 | backup->resize_destructive(this->size()); | ||
| 602 | m_data_span = *backup; | ||
| 603 | } else { | ||
| 604 | m_data_copy.resize(this->size()); | ||
| 605 | m_data_span = std::span(m_data_copy); | ||
| 606 | } | ||
| 607 | m_is_data_copy = true; | ||
| 608 | m_span_valid = true; | ||
| 609 | if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 610 | m_memory.ReadBlock(m_addr, this->data(), this->size_bytes()); | ||
| 611 | } else { | ||
| 612 | m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes()); | ||
| 613 | } | ||
| 614 | } | ||
| 615 | return m_data_span; | ||
| 616 | } | ||
| 617 | |||
| 618 | void Write(std::span<T> write_data) noexcept { | ||
| 619 | if constexpr (FLAGS & GuestMemoryFlags::Cached) { | ||
| 620 | m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes()); | ||
| 621 | } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 622 | m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes()); | ||
| 623 | } else { | ||
| 624 | m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes()); | ||
| 625 | } | ||
| 626 | } | ||
| 627 | |||
| 628 | bool TrySetSpan() noexcept { | ||
| 629 | if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) { | ||
| 630 | m_data_span = {reinterpret_cast<T*>(ptr), this->size()}; | ||
| 631 | m_span_valid = true; | ||
| 632 | return true; | ||
| 633 | } | ||
| 634 | return false; | ||
| 635 | } | ||
| 636 | |||
| 637 | protected: | ||
| 638 | bool IsDataCopy() const noexcept { | ||
| 639 | return m_is_data_copy; | ||
| 640 | } | ||
| 641 | |||
| 642 | bool AddressChanged() const noexcept { | ||
| 643 | return m_addr_changed; | ||
| 644 | } | ||
| 645 | |||
| 646 | M& m_memory; | ||
| 647 | u64 m_addr{}; | ||
| 648 | size_t m_size{}; | ||
| 649 | std::span<T> m_data_span{}; | ||
| 650 | std::vector<T> m_data_copy{}; | ||
| 651 | bool m_span_valid{false}; | ||
| 652 | bool m_is_data_copy{false}; | ||
| 653 | bool m_addr_changed{false}; | ||
| 654 | }; | ||
| 655 | |||
| 656 | template <typename M, typename T, GuestMemoryFlags FLAGS> | ||
| 657 | class GuestMemoryScoped : public GuestMemory<M, T, FLAGS> { | ||
| 658 | public: | ||
| 659 | GuestMemoryScoped() = delete; | ||
| 660 | explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size, | ||
| 661 | Common::ScratchBuffer<T>* backup = nullptr) | ||
| 662 | : GuestMemory<M, T, FLAGS>(memory, addr, size, backup) { | ||
| 663 | if constexpr (!(FLAGS & GuestMemoryFlags::Read)) { | ||
| 664 | if (!this->TrySetSpan()) { | ||
| 665 | if (backup) { | ||
| 666 | this->m_data_span = *backup; | ||
| 667 | this->m_span_valid = true; | ||
| 668 | this->m_is_data_copy = true; | ||
| 669 | } | ||
| 670 | } | ||
| 671 | } | ||
| 672 | } | ||
| 673 | |||
| 674 | ~GuestMemoryScoped() { | ||
| 675 | if constexpr (FLAGS & GuestMemoryFlags::Write) { | ||
| 676 | if (this->size() == 0) [[unlikely]] { | ||
| 677 | return; | ||
| 678 | } | ||
| 679 | |||
| 680 | if (this->AddressChanged() || this->IsDataCopy()) { | ||
| 681 | ASSERT(this->m_span_valid); | ||
| 682 | if constexpr (FLAGS & GuestMemoryFlags::Cached) { | ||
| 683 | this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes()); | ||
| 684 | } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { | ||
| 685 | this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes()); | ||
| 686 | } else { | ||
| 687 | this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes()); | ||
| 688 | } | ||
| 689 | } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || | ||
| 690 | (FLAGS & GuestMemoryFlags::Cached)) { | ||
| 691 | this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes()); | ||
| 692 | } | ||
| 693 | } | ||
| 694 | } | ||
| 695 | }; | ||
| 696 | } // namespace | ||
| 697 | |||
| 698 | template <typename T, GuestMemoryFlags FLAGS> | 502 | template <typename T, GuestMemoryFlags FLAGS> |
| 699 | using CpuGuestMemory = GuestMemory<Memory, T, FLAGS>; | 503 | using CpuGuestMemory = GuestMemory<Core::Memory::Memory, T, FLAGS>; |
| 700 | template <typename T, GuestMemoryFlags FLAGS> | 504 | template <typename T, GuestMemoryFlags FLAGS> |
| 701 | using CpuGuestMemoryScoped = GuestMemoryScoped<Memory, T, FLAGS>; | 505 | using CpuGuestMemoryScoped = GuestMemoryScoped<Core::Memory::Memory, T, FLAGS>; |
| 702 | template <typename T, GuestMemoryFlags FLAGS> | 506 | |
| 703 | using GpuGuestMemory = GuestMemory<Tegra::MemoryManager, T, FLAGS>; | ||
| 704 | template <typename T, GuestMemoryFlags FLAGS> | ||
| 705 | using GpuGuestMemoryScoped = GuestMemoryScoped<Tegra::MemoryManager, T, FLAGS>; | ||
| 706 | } // namespace Core::Memory | 507 | } // namespace Core::Memory |
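The GuestMemory/GuestMemoryScoped templates removed here now come from core/guest_memory.h, and the remaining aliases keep call sites unchanged. As the removed definitions above show, they implement a scoped read-modify-write pattern: construct with an address and element count, mutate the span, and let the destructor of the Scoped variant write the data back (when it had to be copied) or invalidate the region. A hedged usage sketch, assuming the aliases behave as those definitions describe:

// Assumes the yuzu headers are available; nothing below is new API beyond the
// aliases declared above.
#include "core/memory.h"

#include <cstddef>
#include <cstdint>

void ScaleVertices(Core::Memory::Memory& memory, std::uint64_t addr, std::size_t count) {
    // SafeRead flushes the GPU and reads `count` floats on construction.
    Core::Memory::CpuGuestMemoryScoped<float, Core::Memory::GuestMemoryFlags::SafeReadWrite>
        vertices(memory, addr, count);
    for (float& v : vertices) {
        v *= 2.0f;
    }
} // destructor writes back if the data was copied, otherwise invalidates the region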