Diffstat (limited to 'src')
-rw-r--r--  src/core/arm/arm_interface.h                  3
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp    21
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.h       7
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp    21
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.h       7
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp           1
-rw-r--r--  src/core/hle/kernel/k_slab_heap.h           199
-rw-r--r--  src/core/hle/kernel/k_thread.cpp             10
-rw-r--r--  src/core/hle/kernel/kernel.cpp                5
-rw-r--r--  src/core/hle/service/hid/hid.cpp             11
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h    4
11 files changed, 251 insertions, 38 deletions
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 77094b48f..689e3ceb5 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -65,9 +65,6 @@ public:
     /// Step CPU by one instruction
     virtual void Step() = 0;
 
-    /// Exits execution from a callback, the callback must rewind the stack
-    virtual void ExceptionalExit() = 0;
-
     /// Clear all instruction cache
     virtual void ClearInstructionCache() = 0;
 
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index e401fa825..50dc82382 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -78,7 +78,9 @@ public:
     }
 
     void CallSVC(u32 swi) override {
-        Kernel::Svc::Call(parent.system, swi);
+        parent.svc_called = true;
+        parent.svc_swi = swi;
+        parent.jit->HaltExecution();
     }
 
     void AddTicks(u64 ticks) override {
@@ -187,11 +189,17 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
 }
 
 void ARM_Dynarmic_32::Run() {
-    jit->Run();
-}
-
-void ARM_Dynarmic_32::ExceptionalExit() {
-    jit->ExceptionalExit();
+    while (true) {
+        jit->Run();
+        if (!svc_called) {
+            break;
+        }
+        svc_called = false;
+        Kernel::Svc::Call(system, svc_swi);
+        if (shutdown) {
+            break;
+        }
+    }
 }
 
 void ARM_Dynarmic_32::Step() {
@@ -275,6 +283,7 @@ void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
 
 void ARM_Dynarmic_32::PrepareReschedule() {
     jit->HaltExecution();
+    shutdown = true;
 }
 
 void ARM_Dynarmic_32::ClearInstructionCache() {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index b882b0c59..fa6f4f430 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -42,7 +42,6 @@ public:
     u32 GetPSTATE() const override;
     void SetPSTATE(u32 pstate) override;
     void Run() override;
-    void ExceptionalExit() override;
     void Step() override;
     VAddr GetTlsAddress() const override;
     void SetTlsAddress(VAddr address) override;
@@ -82,6 +81,12 @@ private:
     std::size_t core_index;
     DynarmicExclusiveMonitor& exclusive_monitor;
     std::shared_ptr<Dynarmic::A32::Jit> jit;
+
+    // SVC callback
+    u32 svc_swi{};
+    bool svc_called{};
+
+    bool shutdown{};
 };
 
 } // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 157051d69..4f5a58b38 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -102,7 +102,9 @@ public:
     }
 
     void CallSVC(u32 swi) override {
-        Kernel::Svc::Call(parent.system, swi);
+        parent.svc_called = true;
+        parent.svc_swi = swi;
+        parent.jit->HaltExecution();
     }
 
     void AddTicks(u64 ticks) override {
@@ -227,11 +229,17 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
 }
 
 void ARM_Dynarmic_64::Run() {
-    jit->Run();
-}
-
-void ARM_Dynarmic_64::ExceptionalExit() {
-    jit->ExceptionalExit();
+    while (true) {
+        jit->Run();
+        if (!svc_called) {
+            break;
+        }
+        svc_called = false;
+        Kernel::Svc::Call(system, svc_swi);
+        if (shutdown) {
+            break;
+        }
+    }
 }
 
 void ARM_Dynarmic_64::Step() {
@@ -320,6 +328,7 @@ void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
 
 void ARM_Dynarmic_64::PrepareReschedule() {
     jit->HaltExecution();
+    shutdown = true;
 }
 
 void ARM_Dynarmic_64::ClearInstructionCache() {
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 92f715f19..5214a8147 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -40,7 +40,6 @@ public:
     void SetPSTATE(u32 pstate) override;
     void Run() override;
     void Step() override;
-    void ExceptionalExit() override;
     VAddr GetTlsAddress() const override;
     void SetTlsAddress(VAddr address) override;
     void SetTPIDR_EL0(u64 value) override;
@@ -75,6 +74,12 @@ private:
     DynarmicExclusiveMonitor& exclusive_monitor;
 
     std::shared_ptr<Dynarmic::A64::Jit> jit;
+
+    // SVC callback
+    u32 svc_swi{};
+    bool svc_called{};
+
+    bool shutdown{};
 };
 
 } // namespace Core
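
Both dynarmic backends above make the same change: CallSVC no longer services the supervisor call from inside the JIT callback. It records the SVC number, halts the JIT, and Run() dispatches the call from the host loop, so kernel code never executes on the JIT's guest stack and the old ExceptionalExit() stack-rewind escape hatch becomes unnecessary. A minimal sketch of that control flow, where FakeJit and HandleSvc are stand-ins for the real dynarmic and Kernel::Svc interfaces:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for Dynarmic::A32/A64::Jit: Run() executes guest code until
    // HaltExecution() is called (here it halts immediately for brevity).
    struct FakeJit {
        void Run() { halted = true; }
        void HaltExecution() { halted = true; }
        bool halted{};
    };

    struct Core {
        // Invoked from inside jit.Run(): record the request, then halt.
        void CallSVC(std::uint32_t swi) {
            svc_called = true;
            svc_swi = swi;
            jit.HaltExecution();
        }

        // Mirrors ARM_Dynarmic_32/64::Run() in the diff above.
        void Run() {
            while (true) {
                jit.Run();
                if (!svc_called) {
                    break; // halted for another reason (e.g. a reschedule)
                }
                svc_called = false;
                HandleSvc(svc_swi); // runs on the host stack, outside the JIT
                if (shutdown) {
                    break; // PrepareReschedule() set this during the SVC
                }
            }
        }

        void HandleSvc(std::uint32_t swi) { std::printf("svc 0x%x\n", swi); }

        FakeJit jit;
        std::uint32_t svc_swi{};
        bool svc_called{};
        bool shutdown{};
    };
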
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 2f82fbcd6..6a7d80d03 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -659,7 +659,6 @@ void KScheduler::Unload(KThread* thread) {
 
     if (thread) {
         if (thread->IsCallingSvc()) {
-            system.ArmInterface(core_id).ExceptionalExit();
             thread->ClearIsCallingSvc();
         }
         if (!thread->IsTerminationRequested()) {
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 81d472a3e..0ad74b0a0 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -4,34 +4,213 @@
 
 #pragma once
 
+#include <atomic>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+
 namespace Kernel {
 
 class KernelCore;
 
-/// This is a placeholder class to manage slab heaps for kernel objects. For now, we just allocate
-/// these with new/delete, but this can be re-implemented later to allocate these in emulated
-/// memory.
+namespace impl {
+
+class KSlabHeapImpl final : NonCopyable {
+public:
+    struct Node {
+        Node* next{};
+    };
+
+    constexpr KSlabHeapImpl() = default;
+
+    void Initialize(std::size_t size) {
+        ASSERT(head == nullptr);
+        obj_size = size;
+    }
+
+    constexpr std::size_t GetObjectSize() const {
+        return obj_size;
+    }
+
+    Node* GetHead() const {
+        return head;
+    }
+
+    void* Allocate() {
+        Node* ret = head.load();
+
+        do {
+            if (ret == nullptr) {
+                break;
+            }
+        } while (!head.compare_exchange_weak(ret, ret->next));
+
+        return ret;
+    }
+
+    void Free(void* obj) {
+        Node* node = static_cast<Node*>(obj);
+
+        Node* cur_head = head.load();
+        do {
+            node->next = cur_head;
+        } while (!head.compare_exchange_weak(cur_head, node));
+    }
+
+private:
+    std::atomic<Node*> head{};
+    std::size_t obj_size{};
+};
+
+} // namespace impl
+
+class KSlabHeapBase : NonCopyable {
+public:
+    constexpr KSlabHeapBase() = default;
+
+    constexpr bool Contains(uintptr_t addr) const {
+        return start <= addr && addr < end;
+    }
+
+    constexpr std::size_t GetSlabHeapSize() const {
+        return (end - start) / GetObjectSize();
+    }
+
+    constexpr std::size_t GetObjectSize() const {
+        return impl.GetObjectSize();
+    }
+
+    constexpr uintptr_t GetSlabHeapAddress() const {
+        return start;
+    }
+
+    std::size_t GetObjectIndexImpl(const void* obj) const {
+        return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
+    }
+
+    std::size_t GetPeakIndex() const {
+        return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
+    }
+
+    void* AllocateImpl() {
+        return impl.Allocate();
+    }
+
+    void FreeImpl(void* obj) {
+        // Don't allow freeing an object that wasn't allocated from this heap
+        ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
+
+        impl.Free(obj);
+    }
+
+    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
+        // Ensure we don't initialize a slab using null memory
+        ASSERT(memory != nullptr);
+
+        // Initialize the base allocator
+        impl.Initialize(obj_size);
+
+        // Set our tracking variables
+        const std::size_t num_obj = (memory_size / obj_size);
+        start = reinterpret_cast<uintptr_t>(memory);
+        end = start + num_obj * obj_size;
+        peak = start;
+
+        // Free the objects
+        u8* cur = reinterpret_cast<u8*>(end);
+
+        for (std::size_t i{}; i < num_obj; i++) {
+            cur -= obj_size;
+            impl.Free(cur);
+        }
+    }
+
+private:
+    using Impl = impl::KSlabHeapImpl;
+
+    Impl impl;
+    uintptr_t peak{};
+    uintptr_t start{};
+    uintptr_t end{};
+};
 
 template <typename T>
-class KSlabHeap final : NonCopyable {
+class KSlabHeap final : public KSlabHeapBase {
 public:
-    KSlabHeap() = default;
+    enum class AllocationType {
+        Host,
+        Guest,
+    };
 
-    void Initialize([[maybe_unused]] void* memory, [[maybe_unused]] std::size_t memory_size) {
-        // Placeholder that should initialize the backing slab heap implementation.
+    explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host)
+        : KSlabHeapBase(), allocation_type{allocation_type_} {}
+
+    void Initialize(void* memory, std::size_t memory_size) {
+        if (allocation_type == AllocationType::Guest) {
+            InitializeImpl(sizeof(T), memory, memory_size);
+        }
     }
 
     T* Allocate() {
-        return new T();
+        switch (allocation_type) {
+        case AllocationType::Host:
+            // Fallback for cases where we do not yet support allocating guest memory from the slab
+            // heap, such as for kernel memory regions.
+            return new T;
+
+        case AllocationType::Guest:
+            T* obj = static_cast<T*>(AllocateImpl());
+            if (obj != nullptr) {
+                new (obj) T();
+            }
+            return obj;
+        }
+
+        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
+        return nullptr;
     }
 
     T* AllocateWithKernel(KernelCore& kernel) {
-        return new T(kernel);
+        switch (allocation_type) {
+        case AllocationType::Host:
+            // Fallback for cases where we do not yet support allocating guest memory from the slab
+            // heap, such as for kernel memory regions.
+            return new T(kernel);
+
+        case AllocationType::Guest:
+            T* obj = static_cast<T*>(AllocateImpl());
+            if (obj != nullptr) {
+                new (obj) T(kernel);
+            }
+            return obj;
+        }
+
+        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
+        return nullptr;
     }
 
     void Free(T* obj) {
-        delete obj;
+        switch (allocation_type) {
+        case AllocationType::Host:
+            // Fallback for cases where we do not yet support allocating guest memory from the slab
+            // heap, such as for kernel memory regions.
+            delete obj;
+            return;
+
+        case AllocationType::Guest:
+            FreeImpl(obj);
+            return;
+        }
+
+        UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
     }
+
+    constexpr std::size_t GetObjectIndex(const T* obj) const {
+        return GetObjectIndexImpl(obj);
+    }
+
+private:
+    const AllocationType allocation_type;
 };
 
 } // namespace Kernel
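
The new KSlabHeapImpl is an intrusive lock-free free list (a Treiber stack): each free slot doubles as a Node whose next pointer threads it onto an atomic list head, and Allocate/Free pop and push with compare_exchange_weak retries. A self-contained sketch of the same structure, with illustrative names (FreeList is not the yuzu class):

    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <new>

    // Illustrative lock-free free list over a caller-provided buffer;
    // the shape matches impl::KSlabHeapImpl above.
    class FreeList {
    public:
        struct Node {
            Node* next{};
        };

        // Carve `buffer` into `count` slots of `obj_size` bytes and push them all.
        void Initialize(void* buffer, std::size_t obj_size, std::size_t count) {
            assert(obj_size >= sizeof(Node));
            auto* cur = static_cast<unsigned char*>(buffer);
            for (std::size_t i = 0; i < count; ++i, cur += obj_size) {
                Free(cur);
            }
        }

        // Pop: retry until head swings from `ret` to `ret->next`.
        void* Allocate() {
            Node* ret = head.load();
            while (ret != nullptr && !head.compare_exchange_weak(ret, ret->next)) {
            }
            return ret;
        }

        // Push: retry until head swings from `cur_head` to `node`.
        void Free(void* obj) {
            Node* node = new (obj) Node;
            Node* cur_head = head.load();
            do {
                node->next = cur_head;
            } while (!head.compare_exchange_weak(cur_head, node));
        }

    private:
        std::atomic<Node*> head{};
    };

The caller then placement-constructs the real object into the returned slot, exactly as KSlabHeap<T>::Allocate does in the Guest path.
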
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index e3f08f256..3cf43d290 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -168,13 +168,13 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
                 sizeof(StackParameters));
 
-    // Setup the TLS, if needed.
-    if (type == ThreadType::User) {
-        tls_address = owner->CreateTLSRegion();
-    }
-
     // Set parent, if relevant.
     if (owner != nullptr) {
+        // Setup the TLS, if needed.
+        if (type == ThreadType::User) {
+            tls_address = owner->CreateTLSRegion();
+        }
+
         parent = owner;
         parent->Open();
         parent->IncrementThreadCount();
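
The k_thread.cpp hunk is a null-safety reordering: tls_address was assigned via owner->CreateTLSRegion() before the owner != nullptr check, so a user thread created without an owner would dereference null. Moving the TLS setup inside the guarded block makes the dereference conditional on a valid owner. The hazard in miniature (Process here is a hypothetical stand-in):

    // Hypothetical stand-in for the owning process.
    struct Process {
        void CreateTLSRegion() {}
    };

    void InitializeBefore(Process* owner, bool is_user_thread) {
        if (is_user_thread) {
            owner->CreateTLSRegion(); // UB when owner == nullptr
        }
        if (owner != nullptr) {
            // ... parent bookkeeping ...
        }
    }

    void InitializeAfter(Process* owner, bool is_user_thread) {
        if (owner != nullptr) {
            if (is_user_thread) {
                owner->CreateTLSRegion(); // only reached with a valid owner
            }
            // ... parent bookkeeping ...
        }
    }
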
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 8b55df82e..0ffb78d51 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -258,7 +258,7 @@ struct KernelCore::Impl {
             KAutoObject::Create(thread.get());
             ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
             thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
-            return std::move(thread);
+            return thread;
         };
 
         thread_local auto thread = make_thread();
@@ -620,7 +620,8 @@ struct KernelCore::Impl {
 
     void InitializePageSlab() {
         // Allocate slab heaps
-        user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
+        user_slab_heap_pages =
+            std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest);
 
         // TODO(ameerj): This should be derived, not hardcoded within the kernel
         constexpr u64 user_slab_heap_size{0x3de000};
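
The first kernel.cpp hunk drops a pessimizing move: returning a named local by value already moves (and is eligible for copy elision), while return std::move(thread) suppresses elision and trips Clang's -Wpessimizing-move. The second hunk simply opts the page slab into the new Guest allocation path. A sketch of the return-value rule:

    #include <memory>

    std::unique_ptr<int> MakeBoxed() {
        auto p = std::make_unique<int>(42);
        // return std::move(p); // still moves, but blocks copy elision and
        //                      // triggers Clang's -Wpessimizing-move
        return p; // a returned local is treated as an rvalue: moved at worst,
                  // elided at best
    }
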
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 49c17fd14..df0fe1c8e 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -1770,7 +1770,7 @@ public:
             {232, nullptr, "GetIrSensorState"},
             {233, nullptr, "GetXcdHandleForNpadWithIrSensor"},
             {301, nullptr, "ActivateNpadSystem"},
-            {303, nullptr, "ApplyNpadSystemCommonPolicy"},
+            {303, &HidSys::ApplyNpadSystemCommonPolicy, "ApplyNpadSystemCommonPolicy"},
             {304, nullptr, "EnableAssigningSingleOnSlSrPress"},
             {305, nullptr, "DisableAssigningSingleOnSlSrPress"},
             {306, nullptr, "GetLastActiveNpad"},
@@ -1949,6 +1949,15 @@ public:
 
         RegisterHandlers(functions);
     }
+
+private:
+    void ApplyNpadSystemCommonPolicy(Kernel::HLERequestContext& ctx) {
+        // We already do this for homebrew so we can just stub it out
+        LOG_WARNING(Service_HID, "called");
+
+        IPC::ResponseBuilder rb{ctx, 2};
+        rb.Push(RESULT_SUCCESS);
+    }
 };
 
 class HidTmp final : public ServiceFramework<HidTmp> {
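
ApplyNpadSystemCommonPolicy follows yuzu's usual stub shape: log the call, then answer with a two-word response carrying only RESULT_SUCCESS and no payload. A rough sketch of that shape with stand-in IPC types (the real HLERequestContext and IPC::ResponseBuilder are richer):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Stand-ins for the yuzu IPC types used by the handler above.
    using ResultCode = std::uint32_t;
    constexpr ResultCode RESULT_SUCCESS = 0;

    struct HLERequestContext {
        std::vector<std::uint32_t> response_words;
    };

    struct ResponseBuilder {
        ResponseBuilder(HLERequestContext& ctx_, std::size_t num_words) : ctx{ctx_} {
            ctx.response_words.reserve(num_words);
        }
        void Push(ResultCode code) { ctx.response_words.push_back(code); }
        HLERequestContext& ctx;
    };

    // The stub shape: log, then reply with a success code and nothing else.
    void ApplyNpadSystemCommonPolicy(HLERequestContext& ctx) {
        std::printf("Service_HID: called (stubbed)\n");
        ResponseBuilder rb{ctx, 2};
        rb.Push(RESULT_SUCCESS);
    }
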
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index de971041f..9e6b87960 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -596,7 +596,7 @@ void BufferCache<P>::PopAsyncFlushes() {
             runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies);
         }
         runtime.Finish();
-        for (const auto [copy, buffer_id] : downloads) {
+        for (const auto& [copy, buffer_id] : downloads) {
             const Buffer& buffer = slot_buffers[buffer_id];
             const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
             // Undo the modified offset
@@ -606,7 +606,7 @@ void BufferCache<P>::PopAsyncFlushes() {
         }
     } else {
         const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
-        for (const auto [copy, buffer_id] : downloads) {
+        for (const auto& [copy, buffer_id] : downloads) {
             Buffer& buffer = slot_buffers[buffer_id];
             buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
             const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
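
The buffer_cache change binds each download by reference instead of by value: for (const auto [copy, buffer_id] : downloads) materializes a copy of the whole element before destructuring it, while const auto& binds the names into the existing element. A small demonstration:

    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct Copy {
        std::size_t src_offset{};
        Copy() = default;
        Copy(const Copy& other) : src_offset{other.src_offset} {
            std::puts("element copied"); // fires per iteration when bound by value
        }
    };

    int main() {
        std::vector<std::pair<Copy, int>> downloads(3);

        for (const auto [copy, buffer_id] : downloads) { // copies every pair
            (void)copy;
            (void)buffer_id;
        }
        for (const auto& [copy, buffer_id] : downloads) { // binds in place
            (void)copy;
            (void)buffer_id;
        }
    }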