Diffstat (limited to 'src')
-rw-r--r--  src/audio_core/device/device_session.cpp | 4
-rw-r--r--  src/common/CMakeLists.txt | 2
-rw-r--r--  src/common/heap_tracker.cpp | 281
-rw-r--r--  src/common/heap_tracker.h | 98
-rw-r--r--  src/common/host_memory.cpp | 10
-rw-r--r--  src/common/host_memory.h | 11
-rw-r--r--  src/core/CMakeLists.txt | 3
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.cpp | 49
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.h | 20
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp | 5
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 5
-rw-r--r--  src/core/core_timing.cpp | 48
-rw-r--r--  src/core/core_timing.h | 27
-rw-r--r--  src/core/file_sys/ips_layer.cpp | 7
-rw-r--r--  src/core/hle/kernel/k_hardware_timer.cpp | 19
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp | 26
-rw-r--r--  src/core/hle/kernel/k_page_table_base.h | 3
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 6
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 2
-rw-r--r--  src/core/hle/service/hid/hidbus.cpp | 8
-rw-r--r--  src/core/hle/service/hid/hidbus.h | 2
-rw-r--r--  src/core/hle/service/hid/resource_manager.cpp | 42
-rw-r--r--  src/core/hle/service/hid/resource_manager.h | 8
-rw-r--r--  src/core/hle/service/jit/jit.cpp | 59
-rw-r--r--  src/core/hle/service/jit/jit_code_memory.cpp | 54
-rw-r--r--  src/core/hle/service/jit/jit_code_memory.h | 49
-rw-r--r--  src/core/hle/service/nvnflinger/nvnflinger.cpp | 8
-rw-r--r--  src/core/memory.cpp | 86
-rw-r--r--  src/core/memory.h | 7
-rw-r--r--  src/core/memory/cheat_engine.cpp | 8
-rw-r--r--  src/core/memory/cheat_engine.h | 2
-rw-r--r--  src/core/tools/freezer.cpp | 17
-rw-r--r--  src/core/tools/freezer.h | 2
-rw-r--r--  src/tests/common/host_memory.cpp | 99
-rw-r--r--  src/tests/core/core_timing.cpp | 14
35 files changed, 870 insertions, 221 deletions
diff --git a/src/audio_core/device/device_session.cpp b/src/audio_core/device/device_session.cpp
index c41d9d1ea..ee42ae529 100644
--- a/src/audio_core/device/device_session.cpp
+++ b/src/audio_core/device/device_session.cpp
@@ -18,9 +18,7 @@ constexpr auto INCREMENT_TIME{5ms};
 DeviceSession::DeviceSession(Core::System& system_)
     : system{system_}, thread_event{Core::Timing::CreateEvent(
           "AudioOutSampleTick",
-          [this](std::uintptr_t, s64 time, std::chrono::nanoseconds) {
-              return ThreadFunc();
-          })} {}
+          [this](s64 time, std::chrono::nanoseconds) { return ThreadFunc(); })} {}
 
 DeviceSession::~DeviceSession() {
     Finalize();
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -64,6 +64,8 @@ add_library(common STATIC
     fs/path_util.cpp
     fs/path_util.h
     hash.h
+    heap_tracker.cpp
+    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..683208795
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,281 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <fstream>
+#include <vector>
+
+#include "common/heap_tracker.h"
+#include "common/logging/log.h"
+
+namespace Common {
+
+namespace {
+
+s64 GetMaxPermissibleResidentMapCount() {
+    // Default value.
+    s64 value = 65530;
+
+    // Try to read how many mappings we can make.
+    std::ifstream s("/proc/sys/vm/max_map_count");
+    s >> value;
+
+    // Print, for debug.
+    LOG_INFO(HW_Memory, "Current maximum map count: {}", value);
+
+    // Allow 20000 maps for other code and to account for split inaccuracy.
+    return std::max<s64>(value - 20000, 0);
+}
+
+} // namespace
+
+HeapTracker::HeapTracker(Common::HostMemory& buffer)
+    : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
+HeapTracker::~HeapTracker() = default;
+
+void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
+                      MemoryPermission perm, bool is_separate_heap) {
+    // When mapping other memory, map pages immediately.
+    if (!is_separate_heap) {
+        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
+        return;
+    }
+
+    {
+        // We are mapping part of a separate heap.
+        std::scoped_lock lk{m_lock};
+
+        auto* const map = new SeparateHeapMap{
+            .vaddr = virtual_offset,
+            .paddr = host_offset,
+            .size = length,
+            .tick = m_tick++,
+            .perm = perm,
+            .is_resident = false,
+        };
+
+        // Insert into mappings.
+        m_map_count++;
+        m_mappings.insert(*map);
+    }
+
+    // Finally, map.
+    this->DeferredMapSeparateHeap(virtual_offset);
+}
+
+void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
+    // If this is a separate heap...
+    if (is_separate_heap) {
+        std::scoped_lock lk{m_lock};
+
+        const SeparateHeapMap key{
+            .vaddr = virtual_offset,
+        };
+
+        // Split at the boundaries of the region we are removing.
+        this->SplitHeapMapLocked(virtual_offset);
+        this->SplitHeapMapLocked(virtual_offset + size);
+
+        // Erase all mappings in range.
+        auto it = m_mappings.find(key);
+        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
+            // Get underlying item.
+            auto* const item = std::addressof(*it);
+
+            // If resident, erase from resident map.
+            if (item->is_resident) {
+                ASSERT(--m_resident_map_count >= 0);
+                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
+            }
+
+            // Erase from map.
+            ASSERT(--m_map_count >= 0);
+            it = m_mappings.erase(it);
+
+            // Free the item.
+            delete item;
+        }
+    }
+
+    // Unmap pages.
+    m_buffer.Unmap(virtual_offset, size, false);
+}
+
+void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
+    // Ensure no rebuild occurs while reprotecting.
+    std::shared_lock lk{m_rebuild_lock};
+
+    // Split at the boundaries of the region we are reprotecting.
+    this->SplitHeapMap(virtual_offset, size);
+
+    // Declare tracking variables.
+    const VAddr end = virtual_offset + size;
+    VAddr cur = virtual_offset;
+
+    while (cur < end) {
+        VAddr next = cur;
+        bool should_protect = false;
+
+        {
+            std::scoped_lock lk2{m_lock};
+
+            const SeparateHeapMap key{
+                .vaddr = next,
+            };
+
+            // Try to get the next mapping corresponding to this address.
+            const auto it = m_mappings.nfind(key);
+
+            if (it == m_mappings.end()) {
+                // There are no separate heap mappings remaining.
+                next = end;
+                should_protect = true;
+            } else if (it->vaddr == cur) {
+                // We are in range.
+                // Update permission bits.
+                it->perm = perm;
+
+                // Determine next address and whether we should protect.
+                next = cur + it->size;
+                should_protect = it->is_resident;
+            } else /* if (it->vaddr > cur) */ {
+                // We weren't in range, but there is a block coming up that will be.
+                next = it->vaddr;
+                should_protect = true;
+            }
+        }
+
+        // Clamp to end.
+        next = std::min(next, end);
+
+        // Reprotect, if we need to.
+        if (should_protect) {
+            m_buffer.Protect(cur, next - cur, perm);
+        }
+
+        // Advance.
+        cur = next;
+    }
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
+    if (m_buffer.IsInVirtualRange(fault_address)) {
+        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
+    }
+
+    return false;
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
+    bool rebuild_required = false;
+
+    {
+        std::scoped_lock lk{m_lock};
+
+        // Check to ensure this was a non-resident separate heap mapping.
+        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
+        if (it == m_mappings.end() || it->is_resident) {
+            return false;
+        }
+
+        // Update tick before possible rebuild.
+        it->tick = m_tick++;
+
+        // Check if we need to rebuild.
+        if (m_resident_map_count > m_max_resident_map_count) {
+            rebuild_required = true;
+        }
+
+        // Map the area.
+        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
+
+        // This map is now resident.
+        it->is_resident = true;
+        m_resident_map_count++;
+        m_resident_mappings.insert(*it);
+    }
+
+    if (rebuild_required) {
+        // A rebuild was required, so perform it now.
+        this->RebuildSeparateHeapAddressSpace();
+    }
+
+    return true;
+}
+
+void HeapTracker::RebuildSeparateHeapAddressSpace() {
+    std::scoped_lock lk{m_rebuild_lock, m_lock};
+
+    ASSERT(!m_resident_mappings.empty());
+
+    // Dump half of the mappings.
+    //
+    // Despite being worse in theory, this has proven to be better in practice than more
+    // regularly dumping a smaller amount, because it significantly reduces average case
+    // lock contention.
+    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
+    const size_t evict_count = m_resident_map_count - desired_count;
+    auto it = m_resident_mappings.begin();
+
+    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
+        // Unmark and unmap.
+        it->is_resident = false;
+        m_buffer.Unmap(it->vaddr, it->size, false);
+
+        // Advance.
+        ASSERT(--m_resident_map_count >= 0);
+        it = m_resident_mappings.erase(it);
+    }
+}
+
+void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
+    std::scoped_lock lk{m_lock};
+
+    this->SplitHeapMapLocked(offset);
+    this->SplitHeapMapLocked(offset + size);
+}
+
+void HeapTracker::SplitHeapMapLocked(VAddr offset) {
+    const auto it = this->GetNearestHeapMapLocked(offset);
+    if (it == m_mappings.end() || it->vaddr == offset) {
+        // Not contained or no split required.
+        return;
+    }
+
+    // Cache the original values.
+    auto* const left = std::addressof(*it);
+    const size_t orig_size = left->size;
+
+    // Adjust the left map.
+    const size_t left_size = offset - left->vaddr;
+    left->size = left_size;
+
+    // Create the new right map.
+    auto* const right = new SeparateHeapMap{
+        .vaddr = left->vaddr + left_size,
+        .paddr = left->paddr + left_size,
+        .size = orig_size - left_size,
+        .tick = left->tick,
+        .perm = left->perm,
+        .is_resident = left->is_resident,
+    };
+
+    // Insert the new right map.
+    m_map_count++;
+    m_mappings.insert(*right);
+
+    // If resident, also insert into resident map.
+    if (right->is_resident) {
+        m_resident_map_count++;
+        m_resident_mappings.insert(*right);
+    }
+}
+
+HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
+    const SeparateHeapMap key{
+        .vaddr = offset,
+    };
+
+    return m_mappings.find(key);
+}
+
+} // namespace Common
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..ee5b0bf43
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <mutex>
+#include <set>
+#include <shared_mutex>
+
+#include "common/host_memory.h"
+#include "common/intrusive_red_black_tree.h"
+
+namespace Common {
+
+struct SeparateHeapMap {
+    Common::IntrusiveRedBlackTreeNode addr_node{};
+    Common::IntrusiveRedBlackTreeNode tick_node{};
+    VAddr vaddr{};
+    PAddr paddr{};
+    size_t size{};
+    size_t tick{};
+    MemoryPermission perm{};
+    bool is_resident{};
+};
+
+struct SeparateHeapMapAddrComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.vaddr < rhs.vaddr) {
+            return -1;
+        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
+            return 0;
+        } else {
+            return 1;
+        }
+    }
+};
+
+struct SeparateHeapMapTickComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.tick < rhs.tick) {
+            return -1;
+        } else if (lhs.tick > rhs.tick) {
+            return 1;
+        } else {
+            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
+        }
+    }
+};
+
+class HeapTracker {
+public:
+    explicit HeapTracker(Common::HostMemory& buffer);
+    ~HeapTracker();
+
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
+             bool is_separate_heap);
+    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
+    u8* VirtualBasePointer() {
+        return m_buffer.VirtualBasePointer();
+    }
+
+    bool DeferredMapSeparateHeap(u8* fault_address);
+    bool DeferredMapSeparateHeap(size_t virtual_offset);
+
+private:
+    using AddrTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
+    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
+
+    using TickTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
+    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
+
+    AddrTree m_mappings{};
+    TickTree m_resident_mappings{};
+
+private:
+    void SplitHeapMap(VAddr offset, size_t size);
+    void SplitHeapMapLocked(VAddr offset);
+
+    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
+
+    void RebuildSeparateHeapAddressSpace();
+
+private:
+    Common::HostMemory& m_buffer;
+    const s64 m_max_resident_map_count;
+
+    std::shared_mutex m_rebuild_lock{};
+    std::mutex m_lock{};
+    s64 m_map_count{};
+    s64 m_resident_map_count{};
+    size_t m_tick{};
+};
+
+} // namespace Common
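For orientation, here is a minimal sketch of how a caller might drive the new tracker; the wrapper function names are hypothetical, and only the HeapTracker calls and MemoryPermission flags come from this change:

    #include "common/heap_tracker.h"

    // Record a separate-heap mapping. The host mapping itself is created lazily,
    // on first access, by DeferredMapSeparateHeap().
    void MapGuestHeap(Common::HeapTracker& tracker, size_t virtual_offset, size_t host_offset,
                      size_t length) {
        tracker.Map(virtual_offset, host_offset, length,
                    Common::MemoryPermission::Read | Common::MemoryPermission::Write,
                    /*is_separate_heap=*/true);
    }

    // A host fault handler forwards the faulting address; the tracker maps the
    // page and returns true if this was a deferred separate-heap access.
    bool TryResolveFault(Common::HeapTracker& tracker, void* fault_address) {
        return tracker.DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
    }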
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
 HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
 
 void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
-                     MemoryPermission perms) {
+                     MemoryPermission perms, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(host_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }
 
-void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }
 
-void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
-                         bool execute) {
+void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
         return;
     }
+    const bool read = True(perm & MemoryPermission::Read);
+    const bool write = True(perm & MemoryPermission::Write);
+    const bool execute = True(perm & MemoryPermission::Execute);
     impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
 }
 
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,11 +40,12 @@ public:
     HostMemory(HostMemory&& other) noexcept;
     HostMemory& operator=(HostMemory&& other) noexcept;
 
-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
+             bool separate_heap);
 
-    void Unmap(size_t virtual_offset, size_t length);
+    void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
 
-    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
 
     void EnableDirectMappedAddress();
 
@@ -64,6 +65,10 @@ public:
         return virtual_base;
     }
 
+    bool IsInVirtualRange(void* address) const noexcept {
+        return address >= virtual_base && address < virtual_base + virtual_size;
+    }
+
 private:
     size_t backing_size{};
     size_t virtual_size{};
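As a quick illustration of the reworked Protect() signature (sketch only; the offsets are placeholders), permissions are now expressed as Common::MemoryPermission flags instead of three booleans:

    // Previously: buffer.Protect(offset, size, true, true);
    void MakeRegionReadWrite(Common::HostMemory& buffer, size_t offset, size_t size) {
        buffer.Protect(offset, size,
                       Common::MemoryPermission::Read | Common::MemoryPermission::Write);
    }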
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 96ab39cb8..367d01dc7 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -978,6 +978,7 @@ endif()
 
 if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_sources(core PRIVATE
+        arm/dynarmic/arm_dynarmic.cpp
         arm/dynarmic/arm_dynarmic.h
         arm/dynarmic/arm_dynarmic_64.cpp
         arm/dynarmic/arm_dynarmic_64.h
@@ -987,6 +988,8 @@ if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
         arm/dynarmic/dynarmic_cp15.h
         arm/dynarmic/dynarmic_exclusive_monitor.cpp
         arm/dynarmic/dynarmic_exclusive_monitor.h
+        hle/service/jit/jit_code_memory.cpp
+        hle/service/jit/jit_code_memory.h
         hle/service/jit/jit_context.cpp
         hle/service/jit/jit_context.h
         hle/service/jit/jit.cpp
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 000000000..e6e9fc45b
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef __linux__
+
+#include "common/signal_chain.h"
+
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/memory.h"
+
+namespace Core {
+
+namespace {
+
+thread_local Core::Memory::Memory* g_current_memory{};
+std::once_flag g_registered{};
+struct sigaction g_old_segv {};
+
+void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
+    if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
+        return;
+    }
+
+    return g_old_segv.sa_sigaction(sig, info, ctx);
+}
+
+} // namespace
+
+ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
+    g_current_memory = std::addressof(process->GetMemory());
+}
+
+ScopedJitExecution::~ScopedJitExecution() {
+    g_current_memory = nullptr;
+}
+
+void ScopedJitExecution::RegisterHandler() {
+    std::call_once(g_registered, [] {
+        struct sigaction sa {};
+        sa.sa_sigaction = &HandleSigSegv;
+        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+        Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
+    });
+}
+
+} // namespace Core
+
+#endif
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index eef7c3116..53dd18815 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
     return static_cast<HaltReason>(hr);
 }
 
+#ifdef __linux__
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process);
+    ~ScopedJitExecution();
+    static void RegisterHandler();
+};
+
+#else
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process) {}
+    ~ScopedJitExecution() {}
+    static void RegisterHandler() {}
+};
+
+#endif
+
 } // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index c78cfd528..36478f722 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
 }
 
 HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
       m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
     auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
     m_jit = MakeJit(&page_table_impl);
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f351b13d9..c811c8ad5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
 }
 
 HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
     auto& page_table = process->GetPageTable().GetBasePageTable();
     auto& page_table_impl = page_table.GetImpl();
     m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index d6b5abc68..fc536413b 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -29,7 +29,6 @@ std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callbac
 struct CoreTiming::Event {
     s64 time;
     u64 fifo_order;
-    std::uintptr_t user_data;
     std::weak_ptr<EventType> type;
     s64 reschedule_time;
     heap_t::handle_type handle{};
@@ -67,17 +66,15 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     event_fifo_id = 0;
     shutting_down = false;
     cpu_ticks = 0;
-    const auto empty_timed_callback = [](std::uintptr_t, u64, std::chrono::nanoseconds)
-        -> std::optional<std::chrono::nanoseconds> { return std::nullopt; };
-    ev_lost = CreateEvent("_lost_event", empty_timed_callback);
     if (is_multicore) {
         timer_thread = std::make_unique<std::jthread>(ThreadEntry, std::ref(*this));
     }
 }
 
 void CoreTiming::ClearPendingEvents() {
-    std::scoped_lock lock{basic_lock};
+    std::scoped_lock lock{advance_lock, basic_lock};
     event_queue.clear();
+    event.Set();
 }
 
 void CoreTiming::Pause(bool is_paused) {
@@ -119,14 +116,12 @@ bool CoreTiming::HasPendingEvents() const {
 }
 
 void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
-                               const std::shared_ptr<EventType>& event_type,
-                               std::uintptr_t user_data, bool absolute_time) {
+                               const std::shared_ptr<EventType>& event_type, bool absolute_time) {
     {
         std::scoped_lock scope{basic_lock};
         const auto next_time{absolute_time ? ns_into_future : GetGlobalTimeNs() + ns_into_future};
 
-        auto h{event_queue.emplace(
-            Event{next_time.count(), event_fifo_id++, user_data, event_type, 0})};
+        auto h{event_queue.emplace(Event{next_time.count(), event_fifo_id++, event_type, 0})};
         (*h).handle = h;
     }
 
@@ -136,13 +131,13 @@ void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
 void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
                                       std::chrono::nanoseconds resched_time,
                                       const std::shared_ptr<EventType>& event_type,
-                                      std::uintptr_t user_data, bool absolute_time) {
+                                      bool absolute_time) {
     {
         std::scoped_lock scope{basic_lock};
         const auto next_time{absolute_time ? start_time : GetGlobalTimeNs() + start_time};
 
-        auto h{event_queue.emplace(Event{next_time.count(), event_fifo_id++, user_data, event_type,
-                                         resched_time.count()})};
+        auto h{event_queue.emplace(
+            Event{next_time.count(), event_fifo_id++, event_type, resched_time.count()})};
         (*h).handle = h;
     }
 
@@ -150,14 +145,14 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
 }
 
 void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
-                                 std::uintptr_t user_data, bool wait) {
+                                 UnscheduleEventType type) {
     {
         std::scoped_lock lk{basic_lock};
 
         std::vector<heap_t::handle_type> to_remove;
        for (auto itr = event_queue.begin(); itr != event_queue.end(); itr++) {
            const Event& e = *itr;
-            if (e.type.lock().get() == event_type.get() && e.user_data == user_data) {
+            if (e.type.lock().get() == event_type.get()) {
                to_remove.push_back(itr->handle);
            }
        }
@@ -165,10 +160,12 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
         for (auto h : to_remove) {
             event_queue.erase(h);
         }
+
+        event_type->sequence_number++;
     }
 
     // Force any in-progress events to finish
-    if (wait) {
+    if (type == UnscheduleEventType::Wait) {
         std::scoped_lock lk{advance_lock};
     }
 }
@@ -208,28 +205,31 @@ std::optional<s64> CoreTiming::Advance() {
         const Event& evt = event_queue.top();
 
         if (const auto event_type{evt.type.lock()}) {
-            if (evt.reschedule_time == 0) {
-                const auto evt_user_data = evt.user_data;
-                const auto evt_time = evt.time;
+            const auto evt_time = evt.time;
+            const auto evt_sequence_num = event_type->sequence_number;
 
+            if (evt.reschedule_time == 0) {
                 event_queue.pop();
 
                 basic_lock.unlock();
 
                 event_type->callback(
-                    evt_user_data, evt_time,
-                    std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
+                    evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
 
                 basic_lock.lock();
             } else {
                 basic_lock.unlock();
 
                 const auto new_schedule_time{event_type->callback(
-                    evt.user_data, evt.time,
-                    std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt.time})};
+                    evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time})};
 
                 basic_lock.lock();
 
+                if (evt_sequence_num != event_type->sequence_number) {
+                    // Heap handle is invalidated after external modification.
+                    continue;
+                }
+
                 const auto next_schedule_time{new_schedule_time.has_value()
                                                   ? new_schedule_time.value().count()
                                                   : evt.reschedule_time};
@@ -241,8 +241,8 @@ std::optional<s64> CoreTiming::Advance() {
                     next_time = pause_end_time + next_schedule_time;
                 }
 
-                event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.user_data,
-                                                     evt.type, next_schedule_time, evt.handle});
+                event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.type,
+                                                     next_schedule_time, evt.handle});
             }
         }
 
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 21548f0a9..7e4dff7f3 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -22,17 +22,25 @@ namespace Core::Timing {
 
 /// A callback that may be scheduled for a particular core timing event.
 using TimedCallback = std::function<std::optional<std::chrono::nanoseconds>(
-    std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)>;
+    s64 time, std::chrono::nanoseconds ns_late)>;
 
 /// Contains the characteristics of a particular event.
 struct EventType {
     explicit EventType(TimedCallback&& callback_, std::string&& name_)
-        : callback{std::move(callback_)}, name{std::move(name_)} {}
+        : callback{std::move(callback_)}, name{std::move(name_)}, sequence_number{0} {}
 
     /// The event's callback function.
     TimedCallback callback;
     /// A pointer to the name of the event.
     const std::string name;
+    /// A monotonic sequence number, incremented when this event is
+    /// changed externally.
+    size_t sequence_number;
+};
+
+enum class UnscheduleEventType {
+    Wait,
+    NoWait,
 };
 
 /**
@@ -89,23 +97,17 @@ public:
 
     /// Schedules an event in core timing
     void ScheduleEvent(std::chrono::nanoseconds ns_into_future,
-                       const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data = 0,
-                       bool absolute_time = false);
+                       const std::shared_ptr<EventType>& event_type, bool absolute_time = false);
 
     /// Schedules an event which will automatically re-schedule itself with the given time, until
     /// unscheduled
     void ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
                               std::chrono::nanoseconds resched_time,
                               const std::shared_ptr<EventType>& event_type,
-                              std::uintptr_t user_data = 0, bool absolute_time = false);
+                              bool absolute_time = false);
 
-    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data,
-                         bool wait = true);
-
-    void UnscheduleEventWithoutWait(const std::shared_ptr<EventType>& event_type,
-                                    std::uintptr_t user_data) {
-        UnscheduleEvent(event_type, user_data, false);
-    }
+    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
+                         UnscheduleEventType type = UnscheduleEventType::Wait);
 
     void AddTicks(u64 ticks_to_add);
 
@@ -158,7 +160,6 @@ private:
     heap_t event_queue;
     u64 event_fifo_id = 0;
 
-    std::shared_ptr<EventType> ev_lost;
     Common::Event event{};
     Common::Event pause_event{};
     mutable std::mutex basic_lock;
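A short sketch of the updated scheduling API after the user_data removal (illustrative caller only; `core_timing` stands in for the system's CoreTiming instance):

    // Callbacks now receive only the scheduled time and the lateness.
    auto event = Core::Timing::CreateEvent(
        "ExampleEvent",
        [](s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
            // Returning std::nullopt keeps the interval given to ScheduleLoopingEvent.
            return std::nullopt;
        });

    core_timing.ScheduleLoopingEvent(std::chrono::milliseconds{5}, std::chrono::milliseconds{5},
                                     event);

    // Unscheduling now takes an UnscheduleEventType instead of a wait flag.
    core_timing.UnscheduleEvent(event, Core::Timing::UnscheduleEventType::NoWait);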
diff --git a/src/core/file_sys/ips_layer.cpp b/src/core/file_sys/ips_layer.cpp
index 7be1322cc..31033634c 100644
--- a/src/core/file_sys/ips_layer.cpp
+++ b/src/core/file_sys/ips_layer.cpp
@@ -73,6 +73,9 @@ VirtualFile PatchIPS(const VirtualFile& in, const VirtualFile& ips) {
         return nullptr;
 
     auto in_data = in->ReadAllBytes();
+    if (in_data.size() == 0) {
+        return nullptr;
+    }
 
     std::vector<u8> temp(type == IPSFileType::IPS ? 3 : 4);
     u64 offset = 5; // After header
@@ -88,6 +91,10 @@ VirtualFile PatchIPS(const VirtualFile& in, const VirtualFile& ips) {
         else
             real_offset = (temp[0] << 16) | (temp[1] << 8) | temp[2];
 
+        if (real_offset > in_data.size()) {
+            return nullptr;
+        }
+
         u16 data_size{};
         if (ips->ReadObject(&data_size, offset) != sizeof(u16))
             return nullptr;
diff --git a/src/core/hle/kernel/k_hardware_timer.cpp b/src/core/hle/kernel/k_hardware_timer.cpp
index 8e2e40307..4e947dd6b 100644
--- a/src/core/hle/kernel/k_hardware_timer.cpp
+++ b/src/core/hle/kernel/k_hardware_timer.cpp
@@ -10,15 +10,15 @@ namespace Kernel {
 
 void KHardwareTimer::Initialize() {
     // Create the timing callback to register with CoreTiming.
-    m_event_type = Core::Timing::CreateEvent(
-        "KHardwareTimer::Callback", [](std::uintptr_t timer_handle, s64, std::chrono::nanoseconds) {
-            reinterpret_cast<KHardwareTimer*>(timer_handle)->DoTask();
-            return std::nullopt;
-        });
+    m_event_type = Core::Timing::CreateEvent("KHardwareTimer::Callback",
+                                             [this](s64, std::chrono::nanoseconds) {
+                                                 this->DoTask();
+                                                 return std::nullopt;
+                                             });
 }
 
 void KHardwareTimer::Finalize() {
-    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, reinterpret_cast<uintptr_t>(this));
+    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type);
     m_wakeup_time = std::numeric_limits<s64>::max();
     m_event_type.reset();
 }
@@ -57,13 +57,12 @@ void KHardwareTimer::EnableInterrupt(s64 wakeup_time) {
 
     m_wakeup_time = wakeup_time;
     m_kernel.System().CoreTiming().ScheduleEvent(std::chrono::nanoseconds{m_wakeup_time},
-                                                 m_event_type, reinterpret_cast<uintptr_t>(this),
-                                                 true);
+                                                 m_event_type, true);
 }
 
 void KHardwareTimer::DisableInterrupt() {
-    m_kernel.System().CoreTiming().UnscheduleEventWithoutWait(m_event_type,
-                                                              reinterpret_cast<uintptr_t>(this));
+    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type,
+                                                   Core::Timing::UnscheduleEventType::NoWait);
     m_wakeup_time = std::numeric_limits<s64>::max();
 }
 
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 423289145..8c1549559 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
 void KPageTableBase::Finalize() {
     auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
         if (Settings::IsFastmemEnabled()) {
-            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
         }
     };
 
@@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                 // Unmap.
                 R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
                                        cur_pages, 0, false, unmap_properties,
-                                       OperationType::Unmap, true));
+                                       OperationType::UnmapPhysical, true));
             }
 
             // Check if we're done.
@@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                 // Map the papges.
                 R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
                                     cur_pg, map_properties,
-                                    OperationType::MapFirstGroup, false));
+                                    OperationType::MapFirstGroupPhysical, false));
             }
         }
 
@@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
 
             // Unmap.
             R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
-                                   unmap_properties, OperationType::Unmap, false));
+                                   unmap_properties, OperationType::UnmapPhysical, false));
         }
 
         // Check if we're done.
@@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
     // or free them to the page list, and so it goes unused (along with page properties).
 
     switch (operation) {
-    case OperationType::Unmap: {
+    case OperationType::Unmap:
+    case OperationType::UnmapPhysical: {
+        const bool separate_heap = operation == OperationType::UnmapPhysical;
+
         // Ensure that any pages we track are closed on exit.
         KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
         SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
@@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
         this->MakePageGroup(pages_to_close, virt_addr, num_pages);
 
         // Unmap.
-        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
 
         R_SUCCEED();
     }
@@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
         ASSERT(virt_addr != 0);
         ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
         m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
-                                  ConvertToMemoryPermission(properties.perm));
+                                  ConvertToMemoryPermission(properties.perm), false);
 
         // Open references to pages, if we should.
         if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
 
     switch (operation) {
     case OperationType::MapGroup:
-    case OperationType::MapFirstGroup: {
+    case OperationType::MapFirstGroup:
+    case OperationType::MapFirstGroupPhysical: {
+        const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
+
         // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+        KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
 
         for (const auto& node : page_group) {
             const size_t size{node.GetNumPages() * PageSize};
 
             // Map the pages.
             m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
-                                      ConvertToMemoryPermission(properties.perm));
+                                      ConvertToMemoryPermission(properties.perm), separate_heap);
 
             virt_addr += size;
         }
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
index 556d230b3..077cafc96 100644
--- a/src/core/hle/kernel/k_page_table_base.h
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -104,6 +104,9 @@ protected:
         ChangePermissionsAndRefresh = 5,
         ChangePermissionsAndRefreshAndFlush = 6,
         Separate = 7,
+
+        MapFirstGroupPhysical = 65000,
+        UnmapPhysical = 65001,
     };
 
     static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d6869c228..068e71dff 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
         auto& buffer = m_kernel.System().DeviceMemory().buffer;
         const auto& code = code_set.CodeSegment();
         const auto& patch = code_set.PatchSegment();
-        buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
-        buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
+        buffer.Protect(GetInteger(base_addr + code.addr), code.size,
+                       Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
+        buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
+                       Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
         ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
     }
 #endif
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index c14d2d2f3..1030f0c12 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -238,7 +238,7 @@ struct KernelCore::Impl {
     void InitializePreemption(KernelCore& kernel) {
         preemption_event = Core::Timing::CreateEvent(
             "PreemptionCallback",
-            [this, &kernel](std::uintptr_t, s64 time,
+            [this, &kernel](s64 time,
                             std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
                 {
                     KScopedSchedulerLock lock(kernel);
diff --git a/src/core/hle/service/hid/hidbus.cpp b/src/core/hle/service/hid/hidbus.cpp
index d12f9beb0..ffa7e144d 100644
--- a/src/core/hle/service/hid/hidbus.cpp
+++ b/src/core/hle/service/hid/hidbus.cpp
@@ -49,10 +49,10 @@ HidBus::HidBus(Core::System& system_)
     // Register update callbacks
     hidbus_update_event = Core::Timing::CreateEvent(
         "Hidbus::UpdateCallback",
-        [this](std::uintptr_t user_data, s64 time,
+        [this](s64 time,
                std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
             const auto guard = LockService();
-            UpdateHidbus(user_data, ns_late);
+            UpdateHidbus(ns_late);
             return std::nullopt;
         });
 
@@ -61,10 +61,10 @@ HidBus::HidBus(Core::System& system_)
 }
 
 HidBus::~HidBus() {
-    system.CoreTiming().UnscheduleEvent(hidbus_update_event, 0);
+    system.CoreTiming().UnscheduleEvent(hidbus_update_event);
 }
 
-void HidBus::UpdateHidbus(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+void HidBus::UpdateHidbus(std::chrono::nanoseconds ns_late) {
     if (is_hidbus_enabled) {
         for (std::size_t i = 0; i < devices.size(); ++i) {
             if (!devices[i].is_device_initializated) {
diff --git a/src/core/hle/service/hid/hidbus.h b/src/core/hle/service/hid/hidbus.h
index c29b5e882..85a1df133 100644
--- a/src/core/hle/service/hid/hidbus.h
+++ b/src/core/hle/service/hid/hidbus.h
@@ -108,7 +108,7 @@ private:
     void DisableJoyPollingReceiveMode(HLERequestContext& ctx);
     void SetStatusManagerType(HLERequestContext& ctx);
 
-    void UpdateHidbus(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
+    void UpdateHidbus(std::chrono::nanoseconds ns_late);
     std::optional<std::size_t> GetDeviceIndexFromHandle(BusHandle handle) const;
 
     template <typename T>
diff --git a/src/core/hle/service/hid/resource_manager.cpp b/src/core/hle/service/hid/resource_manager.cpp
index 6c6cbd802..afc61f70d 100644
--- a/src/core/hle/service/hid/resource_manager.cpp
+++ b/src/core/hle/service/hid/resource_manager.cpp
@@ -227,8 +227,7 @@ void ResourceManager::EnableTouchScreen(u64 aruid, bool is_enabled) {
     applet_resource->EnableTouchScreen(aruid, is_enabled);
 }
 
-void ResourceManager::UpdateControllers(std::uintptr_t user_data,
-                                        std::chrono::nanoseconds ns_late) {
+void ResourceManager::UpdateControllers(std::chrono::nanoseconds ns_late) {
     auto& core_timing = system.CoreTiming();
     debug_pad->OnUpdate(core_timing);
     digitizer->OnUpdate(core_timing);
@@ -241,20 +240,19 @@ void ResourceManager::UpdateControllers(std::uintptr_t user_data,
     capture_button->OnUpdate(core_timing);
 }
 
-void ResourceManager::UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+void ResourceManager::UpdateNpad(std::chrono::nanoseconds ns_late) {
     auto& core_timing = system.CoreTiming();
     npad->OnUpdate(core_timing);
 }
 
-void ResourceManager::UpdateMouseKeyboard(std::uintptr_t user_data,
-                                          std::chrono::nanoseconds ns_late) {
+void ResourceManager::UpdateMouseKeyboard(std::chrono::nanoseconds ns_late) {
     auto& core_timing = system.CoreTiming();
     mouse->OnUpdate(core_timing);
     debug_mouse->OnUpdate(core_timing);
     keyboard->OnUpdate(core_timing);
 }
 
-void ResourceManager::UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+void ResourceManager::UpdateMotion(std::chrono::nanoseconds ns_late) {
     auto& core_timing = system.CoreTiming();
     six_axis->OnUpdate(core_timing);
     seven_six_axis->OnUpdate(core_timing);
@@ -273,34 +271,34 @@ IAppletResource::IAppletResource(Core::System& system_, std::shared_ptr<Resource
     // Register update callbacks
     npad_update_event = Core::Timing::CreateEvent(
         "HID::UpdatePadCallback",
-        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-            -> std::optional<std::chrono::nanoseconds> {
+        [this, resource](
+            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
             const auto guard = LockService();
-            resource->UpdateNpad(user_data, ns_late);
+            resource->UpdateNpad(ns_late);
             return std::nullopt;
         });
     default_update_event = Core::Timing::CreateEvent(
         "HID::UpdateDefaultCallback",
-        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-            -> std::optional<std::chrono::nanoseconds> {
+        [this, resource](
+            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
             const auto guard = LockService();
-            resource->UpdateControllers(user_data, ns_late);
+            resource->UpdateControllers(ns_late);
             return std::nullopt;
         });
     mouse_keyboard_update_event = Core::Timing::CreateEvent(
         "HID::UpdateMouseKeyboardCallback",
-        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-            -> std::optional<std::chrono::nanoseconds> {
+        [this, resource](
+            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
            const auto guard = LockService();
-            resource->UpdateMouseKeyboard(user_data, ns_late);
+            resource->UpdateMouseKeyboard(ns_late);
            return std::nullopt;
        });
     motion_update_event = Core::Timing::CreateEvent(
         "HID::UpdateMotionCallback",
-        [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
-            -> std::optional<std::chrono::nanoseconds> {
+        [this, resource](
+            s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
             const auto guard = LockService();
-            resource->UpdateMotion(user_data, ns_late);
+            resource->UpdateMotion(ns_late);
             return std::nullopt;
         });
 
@@ -314,10 +312,10 @@ IAppletResource::IAppletResource(Core::System& system_, std::shared_ptr<Resource
 }
 
 IAppletResource::~IAppletResource() {
-    system.CoreTiming().UnscheduleEvent(npad_update_event, 0);
-    system.CoreTiming().UnscheduleEvent(default_update_event, 0);
-    system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event, 0);
-    system.CoreTiming().UnscheduleEvent(motion_update_event, 0);
+    system.CoreTiming().UnscheduleEvent(npad_update_event);
+    system.CoreTiming().UnscheduleEvent(default_update_event);
+    system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event);
+    system.CoreTiming().UnscheduleEvent(motion_update_event);
     resource_manager->FreeAppletResourceId(aruid);
 }
 
diff --git a/src/core/hle/service/hid/resource_manager.h b/src/core/hle/service/hid/resource_manager.h
index 5ad7cb564..5a6596099 100644
--- a/src/core/hle/service/hid/resource_manager.h
+++ b/src/core/hle/service/hid/resource_manager.h
@@ -81,10 +81,10 @@ public:
     void EnablePadInput(u64 aruid, bool is_enabled);
     void EnableTouchScreen(u64 aruid, bool is_enabled);
 
-    void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
-    void UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
-    void UpdateMouseKeyboard(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
-    void UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
+    void UpdateControllers(std::chrono::nanoseconds ns_late);
+    void UpdateNpad(std::chrono::nanoseconds ns_late);
+    void UpdateMouseKeyboard(std::chrono::nanoseconds ns_late);
+    void UpdateMotion(std::chrono::nanoseconds ns_late);
 
 private:
     Result CreateAppletResourceImpl(u64 aruid);
diff --git a/src/core/hle/service/jit/jit.cpp b/src/core/hle/service/jit/jit.cpp
index a94d05e19..77aa6d7d1 100644
--- a/src/core/hle/service/jit/jit.cpp
+++ b/src/core/hle/service/jit/jit.cpp
@@ -4,11 +4,11 @@
4#include "core/arm/debug.h" 4#include "core/arm/debug.h"
5#include "core/arm/symbols.h" 5#include "core/arm/symbols.h"
6#include "core/core.h" 6#include "core/core.h"
7#include "core/hle/kernel/k_code_memory.h"
8#include "core/hle/kernel/k_transfer_memory.h" 7#include "core/hle/kernel/k_transfer_memory.h"
9#include "core/hle/result.h" 8#include "core/hle/result.h"
10#include "core/hle/service/ipc_helpers.h" 9#include "core/hle/service/ipc_helpers.h"
11#include "core/hle/service/jit/jit.h" 10#include "core/hle/service/jit/jit.h"
11#include "core/hle/service/jit/jit_code_memory.h"
12#include "core/hle/service/jit/jit_context.h" 12#include "core/hle/service/jit/jit_context.h"
13#include "core/hle/service/server_manager.h" 13#include "core/hle/service/server_manager.h"
14#include "core/hle/service/service.h" 14#include "core/hle/service/service.h"
@@ -23,10 +23,12 @@ struct CodeRange {
23 23
24class IJitEnvironment final : public ServiceFramework<IJitEnvironment> { 24class IJitEnvironment final : public ServiceFramework<IJitEnvironment> {
25public: 25public:
26 explicit IJitEnvironment(Core::System& system_, Kernel::KProcess& process_, CodeRange user_rx, 26 explicit IJitEnvironment(Core::System& system_,
27 CodeRange user_ro) 27 Kernel::KScopedAutoObject<Kernel::KProcess>&& process_,
28 : ServiceFramework{system_, "IJitEnvironment"}, process{&process_}, 28 CodeMemory&& user_rx_, CodeMemory&& user_ro_)
29 context{process->GetMemory()} { 29 : ServiceFramework{system_, "IJitEnvironment"}, process{std::move(process_)},
30 user_rx{std::move(user_rx_)}, user_ro{std::move(user_ro_)},
31 context{system_.ApplicationMemory()} {
30 // clang-format off 32 // clang-format off
31 static const FunctionInfo functions[] = { 33 static const FunctionInfo functions[] = {
32 {0, &IJitEnvironment::GenerateCode, "GenerateCode"}, 34 {0, &IJitEnvironment::GenerateCode, "GenerateCode"},
@@ -39,10 +41,13 @@ public:
39 RegisterHandlers(functions); 41 RegisterHandlers(functions);
40 42
41 // Identity map user code range into sysmodule context 43 // Identity map user code range into sysmodule context
42 configuration.user_ro_memory = user_ro; 44 configuration.user_rx_memory.size = user_rx.GetSize();
43 configuration.user_rx_memory = user_rx; 45 configuration.user_rx_memory.offset = user_rx.GetAddress();
44 configuration.sys_ro_memory = user_ro; 46 configuration.user_ro_memory.size = user_ro.GetSize();
45 configuration.sys_rx_memory = user_rx; 47 configuration.user_ro_memory.offset = user_ro.GetAddress();
48
49 configuration.sys_rx_memory = configuration.user_rx_memory;
50 configuration.sys_ro_memory = configuration.user_ro_memory;
46 } 51 }
47 52
48 void GenerateCode(HLERequestContext& ctx) { 53 void GenerateCode(HLERequestContext& ctx) {
@@ -318,6 +323,8 @@ private:
318 } 323 }
319 324
320 Kernel::KScopedAutoObject<Kernel::KProcess> process; 325 Kernel::KScopedAutoObject<Kernel::KProcess> process;
326 CodeMemory user_rx;
327 CodeMemory user_ro;
321 GuestCallbacks callbacks; 328 GuestCallbacks callbacks;
322 JITConfiguration configuration; 329 JITConfiguration configuration;
323 JITContext context; 330 JITContext context;
@@ -335,6 +342,7 @@ public:
335 RegisterHandlers(functions); 342 RegisterHandlers(functions);
336 } 343 }
337 344
345private:
338 void CreateJitEnvironment(HLERequestContext& ctx) { 346 void CreateJitEnvironment(HLERequestContext& ctx) {
339 LOG_DEBUG(Service_JIT, "called"); 347 LOG_DEBUG(Service_JIT, "called");
340 348
@@ -380,20 +388,35 @@ public:
380 return; 388 return;
381 } 389 }
382 390
383 const CodeRange user_rx{ 391 CodeMemory rx, ro;
384 .offset = GetInteger(rx_mem->GetSourceAddress()), 392 Result res;
385 .size = parameters.rx_size,
386 };
387 393
388 const CodeRange user_ro{ 394 res = rx.Initialize(*process, *rx_mem, parameters.rx_size,
389 .offset = GetInteger(ro_mem->GetSourceAddress()), 395 Kernel::Svc::MemoryPermission::ReadExecute, generate_random);
390 .size = parameters.ro_size, 396 if (R_FAILED(res)) {
391 }; 397 LOG_ERROR(Service_JIT, "rx_mem could not be mapped for handle=0x{:08X}", rx_mem_handle);
398 IPC::ResponseBuilder rb{ctx, 2};
399 rb.Push(res);
400 return;
401 }
402
403 res = ro.Initialize(*process, *ro_mem, parameters.ro_size,
404 Kernel::Svc::MemoryPermission::Read, generate_random);
405 if (R_FAILED(res)) {
406 LOG_ERROR(Service_JIT, "ro_mem could not be mapped for handle=0x{:08X}", ro_mem_handle);
407 IPC::ResponseBuilder rb{ctx, 2};
408 rb.Push(res);
409 return;
410 }
392 411
393 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 412 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
394 rb.Push(ResultSuccess); 413 rb.Push(ResultSuccess);
395 rb.PushIpcInterface<IJitEnvironment>(system, *process, user_rx, user_ro); 414 rb.PushIpcInterface<IJitEnvironment>(system, std::move(process), std::move(rx),
415 std::move(ro));
396 } 416 }
417
418private:
419 std::mt19937_64 generate_random{};
397}; 420};
398 421
399void LoopProcess(Core::System& system) { 422void LoopProcess(Core::System& system) {
diff --git a/src/core/hle/service/jit/jit_code_memory.cpp b/src/core/hle/service/jit/jit_code_memory.cpp
new file mode 100644
index 000000000..2b480488a
--- /dev/null
+++ b/src/core/hle/service/jit/jit_code_memory.cpp
@@ -0,0 +1,54 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/hle/service/jit/jit_code_memory.h"
5
6namespace Service::JIT {
7
8Result CodeMemory::Initialize(Kernel::KProcess& process, Kernel::KCodeMemory& code_memory,
9 size_t size, Kernel::Svc::MemoryPermission perm,
10 std::mt19937_64& generate_random) {
11 auto& page_table = process.GetPageTable();
12 const u64 alias_code_start =
13 GetInteger(page_table.GetAliasCodeRegionStart()) / Kernel::PageSize;
14 const u64 alias_code_size = page_table.GetAliasCodeRegionSize() / Kernel::PageSize;
15
16 // NOTE: This will retry indefinitely until mapping the code memory succeeds.
17 while (true) {
18 // Generate a new trial address.
19 const u64 mapped_address =
20 (alias_code_start + (generate_random() % alias_code_size)) * Kernel::PageSize;
21
22 // Try to map the address
23 R_TRY_CATCH(code_memory.MapToOwner(mapped_address, size, perm)) {
24 R_CATCH(Kernel::ResultInvalidMemoryRegion) {
25 // If we could not map here, retry.
26 continue;
27 }
28 }
29 R_END_TRY_CATCH;
30
31 // Set members.
32 m_code_memory = std::addressof(code_memory);
33 m_size = size;
34 m_address = mapped_address;
35 m_perm = perm;
36
37 // Open a new reference to the code memory.
38 m_code_memory->Open();
39
40 // We succeeded.
41 R_SUCCEED();
42 }
43}
44
45void CodeMemory::Finalize() {
46 if (m_code_memory) {
47 R_ASSERT(m_code_memory->UnmapFromOwner(m_address, m_size));
48 m_code_memory->Close();
49 }
50
51 m_code_memory = nullptr;
52}
53
54} // namespace Service::JIT
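Note: CodeMemory::Initialize keeps drawing random page-aligned addresses inside the alias code region and retries only on ResultInvalidMemoryRegion; once MapToOwner succeeds it records the mapping and opens an extra reference that Finalize releases. A hedged usage sketch, in which process, code_memory, size and rng stand in for the objects the JIT service already holds (see CreateJitEnvironment above) and the enclosing function is assumed to return Result:

    // Hedged usage sketch; process, code_memory, size and rng are placeholders.
    Service::JIT::CodeMemory rx;
    const Result res = rx.Initialize(process, code_memory, size,
                                     Kernel::Svc::MemoryPermission::ReadExecute, rng);
    if (R_FAILED(res)) {
        // Initialize never set m_code_memory, so destroying rx here is a no-op.
        return res;
    }
    // On success, rx.GetAddress()/rx.GetSize() describe the owner-side mapping,
    // and the destructor unmaps it and drops the extra reference.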
diff --git a/src/core/hle/service/jit/jit_code_memory.h b/src/core/hle/service/jit/jit_code_memory.h
new file mode 100644
index 000000000..6376d4c4e
--- /dev/null
+++ b/src/core/hle/service/jit/jit_code_memory.h
@@ -0,0 +1,49 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <random>
7
8#include "core/hle/kernel/k_code_memory.h"
9
10namespace Service::JIT {
11
12class CodeMemory {
13public:
14 YUZU_NON_COPYABLE(CodeMemory);
15
16 explicit CodeMemory() = default;
17
18 CodeMemory(CodeMemory&& rhs) {
19 std::swap(m_code_memory, rhs.m_code_memory);
20 std::swap(m_size, rhs.m_size);
21 std::swap(m_address, rhs.m_address);
22 std::swap(m_perm, rhs.m_perm);
23 }
24
25 ~CodeMemory() {
26 this->Finalize();
27 }
28
29public:
30 Result Initialize(Kernel::KProcess& process, Kernel::KCodeMemory& code_memory, size_t size,
31 Kernel::Svc::MemoryPermission perm, std::mt19937_64& generate_random);
32 void Finalize();
33
34 size_t GetSize() const {
35 return m_size;
36 }
37
38 u64 GetAddress() const {
39 return m_address;
40 }
41
42private:
43 Kernel::KCodeMemory* m_code_memory{};
44 size_t m_size{};
45 u64 m_address{};
46 Kernel::Svc::MemoryPermission m_perm{};
47};
48
49} // namespace Service::JIT
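Note: CodeMemory is move-only (YUZU_NON_COPYABLE plus the swap-based move constructor), which is what lets CreateJitEnvironment hand ownership of the owner-side mapping to IJitEnvironment without the temporary unmapping it on destruction. A small illustration of that behaviour, not taken from the patch:

    // After the move, only dest's destructor calls Finalize(); source was
    // default-constructed and is left empty by the member swaps.
    Service::JIT::CodeMemory source;
    // ... assume source.Initialize(...) succeeded here ...
    Service::JIT::CodeMemory dest{std::move(source)};
    // source's internal code-memory pointer is now null, so ~CodeMemory() on
    // source does not unmap anything.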
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp
index 6352b09a9..aa8aaa2d9 100644
--- a/src/core/hle/service/nvnflinger/nvnflinger.cpp
+++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp
@@ -67,7 +67,7 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
67 // Schedule the screen composition events 67 // Schedule the screen composition events
68 multi_composition_event = Core::Timing::CreateEvent( 68 multi_composition_event = Core::Timing::CreateEvent(
69 "ScreenComposition", 69 "ScreenComposition",
70 [this](std::uintptr_t, s64 time, 70 [this](s64 time,
71 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> { 71 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
72 vsync_signal.Set(); 72 vsync_signal.Set();
73 return std::chrono::nanoseconds(GetNextTicks()); 73 return std::chrono::nanoseconds(GetNextTicks());
@@ -75,7 +75,7 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
75 75
76 single_composition_event = Core::Timing::CreateEvent( 76 single_composition_event = Core::Timing::CreateEvent(
77 "ScreenComposition", 77 "ScreenComposition",
78 [this](std::uintptr_t, s64 time, 78 [this](s64 time,
79 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> { 79 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
80 const auto lock_guard = Lock(); 80 const auto lock_guard = Lock();
81 Compose(); 81 Compose();
@@ -93,11 +93,11 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
93 93
94Nvnflinger::~Nvnflinger() { 94Nvnflinger::~Nvnflinger() {
95 if (system.IsMulticore()) { 95 if (system.IsMulticore()) {
96 system.CoreTiming().UnscheduleEvent(multi_composition_event, {}); 96 system.CoreTiming().UnscheduleEvent(multi_composition_event);
97 vsync_thread.request_stop(); 97 vsync_thread.request_stop();
98 vsync_signal.Set(); 98 vsync_signal.Set();
99 } else { 99 } else {
100 system.CoreTiming().UnscheduleEvent(single_composition_event, {}); 100 system.CoreTiming().UnscheduleEvent(single_composition_event);
101 } 101 }
102 102
103 ShutdownLayers(); 103 ShutdownLayers();
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/atomic_ops.h" 11#include "common/atomic_ops.h"
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "common/heap_tracker.h"
13#include "common/logging/log.h" 14#include "common/logging/log.h"
14#include "common/page_table.h" 15#include "common/page_table.h"
15#include "common/scope_exit.h" 16#include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
52 } else { 53 } else {
53 current_page_table->fastmem_arena = nullptr; 54 current_page_table->fastmem_arena = nullptr;
54 } 55 }
56
57#ifdef __linux__
58 heap_tracker.emplace(system.DeviceMemory().buffer);
59 buffer = std::addressof(*heap_tracker);
60#else
61 buffer = std::addressof(system.DeviceMemory().buffer);
62#endif
55 } 63 }
56 64
57 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, 65 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
58 Common::PhysicalAddress target, Common::MemoryPermission perms) { 66 Common::PhysicalAddress target, Common::MemoryPermission perms,
67 bool separate_heap) {
59 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); 68 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
60 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); 69 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
61 ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", 70 ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
64 Common::PageType::Memory); 73 Common::PageType::Memory);
65 74
66 if (current_page_table->fastmem_arena) { 75 if (current_page_table->fastmem_arena) {
67 system.DeviceMemory().buffer.Map(GetInteger(base), 76 buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
68 GetInteger(target) - DramMemoryMap::Base, size, perms); 77 separate_heap);
69 } 78 }
70 } 79 }
71 80
72 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) { 81 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
82 bool separate_heap) {
73 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); 83 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
74 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); 84 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
75 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, 85 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
76 Common::PageType::Unmapped); 86 Common::PageType::Unmapped);
77 87
78 if (current_page_table->fastmem_arena) { 88 if (current_page_table->fastmem_arena) {
79 system.DeviceMemory().buffer.Unmap(GetInteger(base), size); 89 buffer->Unmap(GetInteger(base), size, separate_heap);
80 } 90 }
81 } 91 }
82 92
@@ -89,11 +99,6 @@ struct Memory::Impl {
89 return; 99 return;
90 } 100 }
91 101
92 const bool is_r = True(perms & Common::MemoryPermission::Read);
93 const bool is_w = True(perms & Common::MemoryPermission::Write);
94 const bool is_x =
95 True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
96
97 u64 protect_bytes{}; 102 u64 protect_bytes{};
98 u64 protect_begin{}; 103 u64 protect_begin{};
99 for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) { 104 for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
102 switch (page_type) { 107 switch (page_type) {
103 case Common::PageType::RasterizerCachedMemory: 108 case Common::PageType::RasterizerCachedMemory:
104 if (protect_bytes > 0) { 109 if (protect_bytes > 0) {
105 system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, 110 buffer->Protect(protect_begin, protect_bytes, perms);
106 is_x);
107 protect_bytes = 0; 111 protect_bytes = 0;
108 } 112 }
109 break; 113 break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
116 } 120 }
117 121
118 if (protect_bytes > 0) { 122 if (protect_bytes > 0) {
119 system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x); 123 buffer->Protect(protect_begin, protect_bytes, perms);
120 } 124 }
121 } 125 }
122 126
@@ -486,7 +490,9 @@ struct Memory::Impl {
486 } 490 }
487 491
488 if (current_page_table->fastmem_arena) { 492 if (current_page_table->fastmem_arena) {
489 system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug); 493 const auto perm{debug ? Common::MemoryPermission{}
494 : Common::MemoryPermission::ReadWrite};
495 buffer->Protect(vaddr, size, perm);
490 } 496 }
491 497
492 // Iterate over a contiguous CPU address space, marking/unmarking the region. 498 // Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
543 } 549 }
544 550
545 if (current_page_table->fastmem_arena) { 551 if (current_page_table->fastmem_arena) {
546 const bool is_read_enable = 552 Common::MemoryPermission perm{};
547 !Settings::values.use_reactive_flushing.GetValue() || !cached; 553 if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
548 system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached); 554 perm |= Common::MemoryPermission::Read;
555 }
556 if (!cached) {
557 perm |= Common::MemoryPermission::Write;
558 }
559 buffer->Protect(vaddr, size, perm);
549 } 560 }
550 561
551 // Iterate over a contiguous CPU address space, which corresponds to the specified GPU 562 // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -856,6 +867,13 @@ struct Memory::Impl {
856 std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{}; 867 std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
857 std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers; 868 std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
858 std::mutex sys_core_guard; 869 std::mutex sys_core_guard;
870
871 std::optional<Common::HeapTracker> heap_tracker;
872#ifdef __linux__
873 Common::HeapTracker* buffer{};
874#else
875 Common::HostMemory* buffer{};
876#endif
859}; 877};
860 878
861Memory::Memory(Core::System& system_) : system{system_} { 879Memory::Memory(Core::System& system_) : system{system_} {
@@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
873} 891}
874 892
875void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, 893void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
876 Common::PhysicalAddress target, Common::MemoryPermission perms) { 894 Common::PhysicalAddress target, Common::MemoryPermission perms,
877 impl->MapMemoryRegion(page_table, base, size, target, perms); 895 bool separate_heap) {
896 impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
878} 897}
879 898
880void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) { 899void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
881 impl->UnmapRegion(page_table, base, size); 900 bool separate_heap) {
901 impl->UnmapRegion(page_table, base, size, separate_heap);
882} 902}
883 903
884void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size, 904void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
1048} 1068}
1049 1069
1050bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { 1070bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
1051 bool mapped = true; 1071 [[maybe_unused]] bool mapped = true;
1072 [[maybe_unused]] bool rasterizer = false;
1073
1052 u8* const ptr = impl->GetPointerImpl( 1074 u8* const ptr = impl->GetPointerImpl(
1053 GetInteger(vaddr), 1075 GetInteger(vaddr),
1054 [&] { 1076 [&] {
@@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
1056 GetInteger(vaddr)); 1078 GetInteger(vaddr));
1057 mapped = false; 1079 mapped = false;
1058 }, 1080 },
1059 [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); }); 1081 [&] {
1082 impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
1083 rasterizer = true;
1084 });
1085
1086#ifdef __linux__
1087 if (!rasterizer && mapped) {
1088 impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
1089 }
1090#endif
1091
1060 return mapped && ptr != nullptr; 1092 return mapped && ptr != nullptr;
1061} 1093}
1062 1094
1095bool Memory::InvalidateSeparateHeap(void* fault_address) {
1096#ifdef __linux__
1097 return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
1098#else
1099 return false;
1100#endif
1101}
1102
1063} // namespace Core::Memory 1103} // namespace Core::Memory
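Note: on Linux the Memory::Impl now routes fastmem Map/Unmap/Protect through a Common::HeapTracker wrapping the device buffer, and exposes InvalidateSeparateHeap so separate-heap pages can be materialized lazily on first fault. A hedged sketch of how a host fault handler could use the new entry point; HandleSeparateHeapFault is a hypothetical helper, not part of this patch:

    // Hedged sketch (hypothetical helper): try to resolve a host fault by
    // deferred-mapping the separate heap before treating it as a real crash.
    bool HandleSeparateHeapFault(Core::Memory::Memory& memory, void* fault_address) {
        // Returns true when the page was mapped and the faulting access can be retried.
        return memory.InvalidateSeparateHeap(fault_address);
    }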
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -86,7 +86,8 @@ public:
86 * @param perms The permissions to map the memory with. 86 * @param perms The permissions to map the memory with.
87 */ 87 */
88 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, 88 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
89 Common::PhysicalAddress target, Common::MemoryPermission perms); 89 Common::PhysicalAddress target, Common::MemoryPermission perms,
90 bool separate_heap);
90 91
91 /** 92 /**
92 * Unmaps a region of the emulated process address space. 93 * Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
95 * @param base The address to begin unmapping at. 96 * @param base The address to begin unmapping at.
96 * @param size The amount of bytes to unmap. 97 * @param size The amount of bytes to unmap.
97 */ 98 */
98 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size); 99 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
100 bool separate_heap);
99 101
100 /** 102 /**
101 * Protects a region of the emulated process address space with the new permissions. 103 * Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
486 void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers); 488 void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
487 void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size); 489 void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
488 bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); 490 bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
491 bool InvalidateSeparateHeap(void* fault_address);
489 void FlushRegion(Common::ProcessAddress dest_addr, size_t size); 492 void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
490 493
491private: 494private:
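Note: callers of Memory::MapMemoryRegion and Memory::UnmapRegion must now say whether the range belongs to the separate heap. A hedged call-site example; base, size, target and page_table are whatever the kernel page table code is currently mapping:

    // Hedged call-site example; the final argument marks heap memory.
    memory.MapMemoryRegion(page_table, base, size, target,
                           Common::MemoryPermission::ReadWrite,
                           /*separate_heap=*/true);
    // ... later ...
    memory.UnmapRegion(page_table, base, size, /*separate_heap=*/true);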
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index 3fc4024dc..7bc5b5ae5 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -190,15 +190,15 @@ CheatEngine::CheatEngine(System& system_, std::vector<CheatEntry> cheats_,
190} 190}
191 191
192CheatEngine::~CheatEngine() { 192CheatEngine::~CheatEngine() {
193 core_timing.UnscheduleEvent(event, 0); 193 core_timing.UnscheduleEvent(event);
194} 194}
195 195
196void CheatEngine::Initialize() { 196void CheatEngine::Initialize() {
197 event = Core::Timing::CreateEvent( 197 event = Core::Timing::CreateEvent(
198 "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id), 198 "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
199 [this](std::uintptr_t user_data, s64 time, 199 [this](s64 time,
200 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> { 200 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
201 FrameCallback(user_data, ns_late); 201 FrameCallback(ns_late);
202 return std::nullopt; 202 return std::nullopt;
203 }); 203 });
204 core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event); 204 core_timing.ScheduleLoopingEvent(CHEAT_ENGINE_NS, CHEAT_ENGINE_NS, event);
@@ -239,7 +239,7 @@ void CheatEngine::Reload(std::vector<CheatEntry> reload_cheats) {
239 239
240MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70)); 240MICROPROFILE_DEFINE(Cheat_Engine, "Add-Ons", "Cheat Engine", MP_RGB(70, 200, 70));
241 241
242void CheatEngine::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) { 242void CheatEngine::FrameCallback(std::chrono::nanoseconds ns_late) {
243 if (is_pending_reload.exchange(false)) { 243 if (is_pending_reload.exchange(false)) {
244 vm.LoadProgram(cheats); 244 vm.LoadProgram(cheats);
245 } 245 }
diff --git a/src/core/memory/cheat_engine.h b/src/core/memory/cheat_engine.h
index 284abdd28..ced2168d1 100644
--- a/src/core/memory/cheat_engine.h
+++ b/src/core/memory/cheat_engine.h
@@ -70,7 +70,7 @@ public:
70 void Reload(std::vector<CheatEntry> reload_cheats); 70 void Reload(std::vector<CheatEntry> reload_cheats);
71 71
72private: 72private:
73 void FrameCallback(std::uintptr_t user_data, std::chrono::nanoseconds ns_late); 73 void FrameCallback(std::chrono::nanoseconds ns_late);
74 74
75 DmntCheatVm vm; 75 DmntCheatVm vm;
76 CheatProcessMetadata metadata; 76 CheatProcessMetadata metadata;
diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp
index 98ebbbf32..9d42c726e 100644
--- a/src/core/tools/freezer.cpp
+++ b/src/core/tools/freezer.cpp
@@ -51,18 +51,17 @@ void MemoryWriteWidth(Core::Memory::Memory& memory, u32 width, VAddr addr, u64 v
51 51
52Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_) 52Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_)
53 : core_timing{core_timing_}, memory{memory_} { 53 : core_timing{core_timing_}, memory{memory_} {
54 event = Core::Timing::CreateEvent( 54 event = Core::Timing::CreateEvent("MemoryFreezer::FrameCallback",
55 "MemoryFreezer::FrameCallback", 55 [this](s64 time, std::chrono::nanoseconds ns_late)
56 [this](std::uintptr_t user_data, s64 time, 56 -> std::optional<std::chrono::nanoseconds> {
57 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> { 57 FrameCallback(ns_late);
58 FrameCallback(user_data, ns_late); 58 return std::nullopt;
59 return std::nullopt; 59 });
60 });
61 core_timing.ScheduleEvent(memory_freezer_ns, event); 60 core_timing.ScheduleEvent(memory_freezer_ns, event);
62} 61}
63 62
64Freezer::~Freezer() { 63Freezer::~Freezer() {
65 core_timing.UnscheduleEvent(event, 0); 64 core_timing.UnscheduleEvent(event);
66} 65}
67 66
68void Freezer::SetActive(bool is_active) { 67void Freezer::SetActive(bool is_active) {
@@ -159,7 +158,7 @@ Freezer::Entries::const_iterator Freezer::FindEntry(VAddr address) const {
159 [address](const Entry& entry) { return entry.address == address; }); 158 [address](const Entry& entry) { return entry.address == address; });
160} 159}
161 160
162void Freezer::FrameCallback(std::uintptr_t, std::chrono::nanoseconds ns_late) { 161void Freezer::FrameCallback(std::chrono::nanoseconds ns_late) {
163 if (!IsActive()) { 162 if (!IsActive()) {
164 LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events."); 163 LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
165 return; 164 return;
diff --git a/src/core/tools/freezer.h b/src/core/tools/freezer.h
index 0d6df5217..2efbc11f3 100644
--- a/src/core/tools/freezer.h
+++ b/src/core/tools/freezer.h
@@ -77,7 +77,7 @@ private:
77 Entries::iterator FindEntry(VAddr address); 77 Entries::iterator FindEntry(VAddr address);
78 Entries::const_iterator FindEntry(VAddr address) const; 78 Entries::const_iterator FindEntry(VAddr address) const;
79 79
80 void FrameCallback(std::uintptr_t user_data, std::chrono::nanoseconds ns_late); 80 void FrameCallback(std::chrono::nanoseconds ns_late);
81 void FillEntryReads(); 81 void FillEntryReads();
82 82
83 std::atomic_bool active{false}; 83 std::atomic_bool active{false};
diff --git a/src/tests/common/host_memory.cpp b/src/tests/common/host_memory.cpp
index 1a28e862b..cb040c942 100644
--- a/src/tests/common/host_memory.cpp
+++ b/src/tests/common/host_memory.cpp
@@ -12,6 +12,7 @@ using namespace Common::Literals;
12static constexpr size_t VIRTUAL_SIZE = 1ULL << 39; 12static constexpr size_t VIRTUAL_SIZE = 1ULL << 39;
13static constexpr size_t BACKING_SIZE = 4_GiB; 13static constexpr size_t BACKING_SIZE = 4_GiB;
14static constexpr auto PERMS = Common::MemoryPermission::ReadWrite; 14static constexpr auto PERMS = Common::MemoryPermission::ReadWrite;
15static constexpr auto HEAP = false;
15 16
16TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") { 17TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
17 { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); } 18 { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
@@ -20,7 +21,7 @@ TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
20 21
21TEST_CASE("HostMemory: Simple map", "[common]") { 22TEST_CASE("HostMemory: Simple map", "[common]") {
22 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 23 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
23 mem.Map(0x5000, 0x8000, 0x1000, PERMS); 24 mem.Map(0x5000, 0x8000, 0x1000, PERMS, HEAP);
24 25
25 volatile u8* const data = mem.VirtualBasePointer() + 0x5000; 26 volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
26 data[0] = 50; 27 data[0] = 50;
@@ -29,8 +30,8 @@ TEST_CASE("HostMemory: Simple map", "[common]") {
29 30
30TEST_CASE("HostMemory: Simple mirror map", "[common]") { 31TEST_CASE("HostMemory: Simple mirror map", "[common]") {
31 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 32 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
32 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 33 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
33 mem.Map(0x8000, 0x4000, 0x1000, PERMS); 34 mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP);
34 35
35 volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000; 36 volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;
36 volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000; 37 volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000;
@@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") {
40 41
41TEST_CASE("HostMemory: Simple unmap", "[common]") { 42TEST_CASE("HostMemory: Simple unmap", "[common]") {
42 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 43 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
43 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 44 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
44 45
45 volatile u8* const data = mem.VirtualBasePointer() + 0x5000; 46 volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
46 data[75] = 50; 47 data[75] = 50;
47 REQUIRE(data[75] == 50); 48 REQUIRE(data[75] == 50);
48 49
49 mem.Unmap(0x5000, 0x2000); 50 mem.Unmap(0x5000, 0x2000, HEAP);
50} 51}
51 52
52TEST_CASE("HostMemory: Simple unmap and remap", "[common]") { 53TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {
53 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 54 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
54 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 55 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
55 56
56 volatile u8* const data = mem.VirtualBasePointer() + 0x5000; 57 volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
57 data[0] = 50; 58 data[0] = 50;
58 REQUIRE(data[0] == 50); 59 REQUIRE(data[0] == 50);
59 60
60 mem.Unmap(0x5000, 0x2000); 61 mem.Unmap(0x5000, 0x2000, HEAP);
61 62
62 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 63 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
63 REQUIRE(data[0] == 50); 64 REQUIRE(data[0] == 50);
64 65
65 mem.Map(0x7000, 0x2000, 0x5000, PERMS); 66 mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP);
66 REQUIRE(data[0x3000] == 50); 67 REQUIRE(data[0x3000] == 50);
67} 68}
68 69
69TEST_CASE("HostMemory: Nieche allocation", "[common]") { 70TEST_CASE("HostMemory: Nieche allocation", "[common]") {
70 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 71 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
71 mem.Map(0x0000, 0, 0x20000, PERMS); 72 mem.Map(0x0000, 0, 0x20000, PERMS, HEAP);
72 mem.Unmap(0x0000, 0x4000); 73 mem.Unmap(0x0000, 0x4000, HEAP);
73 mem.Map(0x1000, 0, 0x2000, PERMS); 74 mem.Map(0x1000, 0, 0x2000, PERMS, HEAP);
74 mem.Map(0x3000, 0, 0x1000, PERMS); 75 mem.Map(0x3000, 0, 0x1000, PERMS, HEAP);
75 mem.Map(0, 0, 0x1000, PERMS); 76 mem.Map(0, 0, 0x1000, PERMS, HEAP);
76} 77}
77 78
78TEST_CASE("HostMemory: Full unmap", "[common]") { 79TEST_CASE("HostMemory: Full unmap", "[common]") {
79 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 80 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
80 mem.Map(0x8000, 0, 0x4000, PERMS); 81 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
81 mem.Unmap(0x8000, 0x4000); 82 mem.Unmap(0x8000, 0x4000, HEAP);
82 mem.Map(0x6000, 0, 0x16000, PERMS); 83 mem.Map(0x6000, 0, 0x16000, PERMS, HEAP);
83} 84}
84 85
85TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") { 86TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {
86 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 87 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
87 mem.Map(0x0000, 0, 0x4000, PERMS); 88 mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
88 mem.Unmap(0x2000, 0x4000); 89 mem.Unmap(0x2000, 0x4000, HEAP);
89 mem.Map(0x2000, 0x80000, 0x4000, PERMS); 90 mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP);
90} 91}
91 92
92TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") { 93TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {
93 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 94 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
94 mem.Map(0x8000, 0, 0x4000, PERMS); 95 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
95 mem.Unmap(0x6000, 0x4000); 96 mem.Unmap(0x6000, 0x4000, HEAP);
96 mem.Map(0x8000, 0, 0x2000, PERMS); 97 mem.Map(0x8000, 0, 0x2000, PERMS, HEAP);
97} 98}
98 99
99TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") { 100TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {
100 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 101 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
101 mem.Map(0x0000, 0, 0x4000, PERMS); 102 mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
102 mem.Map(0x4000, 0, 0x1b000, PERMS); 103 mem.Map(0x4000, 0, 0x1b000, PERMS, HEAP);
103 mem.Unmap(0x3000, 0x1c000); 104 mem.Unmap(0x3000, 0x1c000, HEAP);
104 mem.Map(0x3000, 0, 0x20000, PERMS); 105 mem.Map(0x3000, 0, 0x20000, PERMS, HEAP);
105} 106}
106 107
107TEST_CASE("HostMemory: Unmap between placeholders", "[common]") { 108TEST_CASE("HostMemory: Unmap between placeholders", "[common]") {
108 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 109 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
109 mem.Map(0x0000, 0, 0x4000, PERMS); 110 mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
110 mem.Map(0x4000, 0, 0x4000, PERMS); 111 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
111 mem.Unmap(0x2000, 0x4000); 112 mem.Unmap(0x2000, 0x4000, HEAP);
112 mem.Map(0x2000, 0, 0x4000, PERMS); 113 mem.Map(0x2000, 0, 0x4000, PERMS, HEAP);
113} 114}
114 115
115TEST_CASE("HostMemory: Unmap to origin", "[common]") { 116TEST_CASE("HostMemory: Unmap to origin", "[common]") {
116 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 117 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
117 mem.Map(0x4000, 0, 0x4000, PERMS); 118 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
118 mem.Map(0x8000, 0, 0x4000, PERMS); 119 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
119 mem.Unmap(0x4000, 0x4000); 120 mem.Unmap(0x4000, 0x4000, HEAP);
120 mem.Map(0, 0, 0x4000, PERMS); 121 mem.Map(0, 0, 0x4000, PERMS, HEAP);
121 mem.Map(0x4000, 0, 0x4000, PERMS); 122 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
122} 123}
123 124
124TEST_CASE("HostMemory: Unmap to right", "[common]") { 125TEST_CASE("HostMemory: Unmap to right", "[common]") {
125 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 126 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
126 mem.Map(0x4000, 0, 0x4000, PERMS); 127 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
127 mem.Map(0x8000, 0, 0x4000, PERMS); 128 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
128 mem.Unmap(0x8000, 0x4000); 129 mem.Unmap(0x8000, 0x4000, HEAP);
129 mem.Map(0x8000, 0, 0x4000, PERMS); 130 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
130} 131}
131 132
132TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") { 133TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") {
133 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 134 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
134 mem.Map(0x4000, 0x10000, 0x4000, PERMS); 135 mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
135 136
136 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 137 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
137 ptr[0x1000] = 17; 138 ptr[0x1000] = 17;
138 139
139 mem.Unmap(0x6000, 0x2000); 140 mem.Unmap(0x6000, 0x2000, HEAP);
140 141
141 REQUIRE(ptr[0x1000] == 17); 142 REQUIRE(ptr[0x1000] == 17);
142} 143}
143 144
144TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") { 145TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
145 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 146 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
146 mem.Map(0x4000, 0x10000, 0x4000, PERMS); 147 mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
147 148
148 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 149 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
149 ptr[0x3000] = 19; 150 ptr[0x3000] = 19;
150 ptr[0x3fff] = 12; 151 ptr[0x3fff] = 12;
151 152
152 mem.Unmap(0x4000, 0x2000); 153 mem.Unmap(0x4000, 0x2000, HEAP);
153 154
154 REQUIRE(ptr[0x3000] == 19); 155 REQUIRE(ptr[0x3000] == 19);
155 REQUIRE(ptr[0x3fff] == 12); 156 REQUIRE(ptr[0x3fff] == 12);
@@ -157,13 +158,13 @@ TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
157 158
158TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") { 159TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
159 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 160 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
160 mem.Map(0x4000, 0x10000, 0x4000, PERMS); 161 mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
161 162
162 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 163 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
163 ptr[0x0000] = 19; 164 ptr[0x0000] = 19;
164 ptr[0x3fff] = 12; 165 ptr[0x3fff] = 12;
165 166
166 mem.Unmap(0x1000, 0x2000); 167 mem.Unmap(0x1000, 0x2000, HEAP);
167 168
168 REQUIRE(ptr[0x0000] == 19); 169 REQUIRE(ptr[0x0000] == 19);
169 REQUIRE(ptr[0x3fff] == 12); 170 REQUIRE(ptr[0x3fff] == 12);
@@ -171,14 +172,14 @@ TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
171 172
172TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") { 173TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") {
173 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 174 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
174 mem.Map(0x4000, 0x10000, 0x2000, PERMS); 175 mem.Map(0x4000, 0x10000, 0x2000, PERMS, HEAP);
175 mem.Map(0x6000, 0x20000, 0x2000, PERMS); 176 mem.Map(0x6000, 0x20000, 0x2000, PERMS, HEAP);
176 177
177 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 178 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
178 ptr[0x0000] = 19; 179 ptr[0x0000] = 19;
179 ptr[0x3fff] = 12; 180 ptr[0x3fff] = 12;
180 181
181 mem.Unmap(0x5000, 0x2000); 182 mem.Unmap(0x5000, 0x2000, HEAP);
182 183
183 REQUIRE(ptr[0x0000] == 19); 184 REQUIRE(ptr[0x0000] == 19);
184 REQUIRE(ptr[0x3fff] == 12); 185 REQUIRE(ptr[0x3fff] == 12);
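Note: every HostMemory::Map/Unmap call in these tests gains the new separate_heap argument, pinned to false through the HEAP constant. A hedged example of exercising the flag directly, mirroring the "Simple map" test; it is not part of this change, and the flag's effect presumably depends on the platform backend:

    // Hedged example test (not in this change); addresses mirror "Simple map".
    TEST_CASE("HostMemory: Simple separate-heap map", "[common]") {
        HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
        mem.Map(0x5000, 0x8000, 0x1000, PERMS, true);

        volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
        data[0] = 50;
        REQUIRE(data[0] == 50);

        mem.Unmap(0x5000, 0x1000, true);
    }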
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index f08afbf9a..81898a1d3 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -16,20 +16,16 @@
16 16
17namespace { 17namespace {
18// Numbers are chosen randomly to make sure the correct one is given. 18// Numbers are chosen randomly to make sure the correct one is given.
19constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
20constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}}; 19constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}};
21std::array<s64, 5> delays{}; 20std::array<s64, 5> delays{};
22 21std::bitset<5> callbacks_ran_flags;
23std::bitset<CB_IDS.size()> callbacks_ran_flags;
24u64 expected_callback = 0; 22u64 expected_callback = 0;
25 23
26template <unsigned int IDX> 24template <unsigned int IDX>
27std::optional<std::chrono::nanoseconds> HostCallbackTemplate(std::uintptr_t user_data, s64 time, 25std::optional<std::chrono::nanoseconds> HostCallbackTemplate(s64 time,
28 std::chrono::nanoseconds ns_late) { 26 std::chrono::nanoseconds ns_late) {
29 static_assert(IDX < CB_IDS.size(), "IDX out of range"); 27 static_assert(IDX < callbacks_ran_flags.size(), "IDX out of range");
30 callbacks_ran_flags.set(IDX); 28 callbacks_ran_flags.set(IDX);
31 REQUIRE(CB_IDS[IDX] == user_data);
32 REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]);
33 delays[IDX] = ns_late.count(); 29 delays[IDX] = ns_late.count();
34 ++expected_callback; 30 ++expected_callback;
35 return std::nullopt; 31 return std::nullopt;
@@ -76,7 +72,7 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
76 const u64 order = calls_order[i]; 72 const u64 order = calls_order[i];
77 const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)}; 73 const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
78 74
79 core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]); 75 core_timing.ScheduleEvent(future_ns, events[order]);
80 } 76 }
81 /// test pause 77 /// test pause
82 REQUIRE(callbacks_ran_flags.none()); 78 REQUIRE(callbacks_ran_flags.none());
@@ -118,7 +114,7 @@ TEST_CASE("CoreTiming[BasicOrderNoPausing]", "[core]") {
118 for (std::size_t i = 0; i < events.size(); i++) { 114 for (std::size_t i = 0; i < events.size(); i++) {
119 const u64 order = calls_order[i]; 115 const u64 order = calls_order[i];
120 const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)}; 116 const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
121 core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]); 117 core_timing.ScheduleEvent(future_ns, events[order]);
122 } 118 }
123 119
124 const u64 end = core_timing.GetGlobalTimeNs().count(); 120 const u64 end = core_timing.GetGlobalTimeNs().count();
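Note: with user_data gone, the test can no longer echo CB_IDS back through the callback, so those checks are removed and only callbacks_ran_flags and the delays are recorded. The firing order could still be asserted inside HostCallbackTemplate, before expected_callback is incremented, with a check equivalent to the one that was removed; a sketch, not part of this patch:

    // Equivalent ordering check without user_data: the IDX-th callback should
    // fire in the slot calls_order assigns to it.
    REQUIRE(IDX == calls_order[expected_callback]);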