path: root/src/core/hle/kernel
author     bunnei     2020-10-20 19:07:39 -0700
committer  GitHub     2020-10-20 19:07:39 -0700
commit     3d592972dc3fd61cc88771b889eff237e4e03e0f (patch)
tree       0dbc65ac86e609ae22087c7be9d4759ac6b73004 /src/core/hle/kernel
parent     kernel: Fix build with recent compiler flag changes (diff)
download   yuzu-3d592972dc3fd61cc88771b889eff237e4e03e0f.tar.gz
           yuzu-3d592972dc3fd61cc88771b889eff237e4e03e0f.tar.xz
           yuzu-3d592972dc3fd61cc88771b889eff237e4e03e0f.zip
Revert "core: Fix clang build"
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp             4
-rw-r--r--  src/core/hle/kernel/handle_table.cpp                2
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp                     2
-rw-r--r--  src/core/hle/kernel/kernel.cpp                      2
-rw-r--r--  src/core/hle/kernel/memory/address_space_info.cpp   2
-rw-r--r--  src/core/hle/kernel/memory/memory_manager.cpp       4
-rw-r--r--  src/core/hle/kernel/memory/page_heap.cpp            17
-rw-r--r--  src/core/hle/kernel/memory/page_heap.h              18
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp           6
-rw-r--r--  src/core/hle/kernel/physical_core.h                 2
-rw-r--r--  src/core/hle/kernel/process.cpp                     17
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp              4
-rw-r--r--  src/core/hle/kernel/scheduler.cpp                   72
-rw-r--r--  src/core/hle/kernel/svc.cpp                         7
-rw-r--r--  src/core/hle/kernel/svc_wrap.h                      17
-rw-r--r--  src/core/hle/kernel/synchronization.cpp             4
-rw-r--r--  src/core/hle/kernel/thread.cpp                      2
-rw-r--r--  src/core/hle/kernel/thread.h                        6
18 files changed, 74 insertions, 114 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index b6ebc5329..b882eaa0f 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -108,7 +108,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
     auto& monitor = system.Monitor();
     s32 updated_value;
     do {
-        updated_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+        updated_value = monitor.ExclusiveRead32(current_core, address);
 
         if (updated_value != value) {
             return ERR_INVALID_STATE;
@@ -129,7 +129,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
                 updated_value = value;
             }
         }
-    } while (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(updated_value)));
+    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
 
     WakeThreads(waiting_threads, num_to_wake);
     return RESULT_SUCCESS;
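The loop above has the classic exclusive-monitor shape: read the address exclusively, bail out if it no longer holds the expected value, and retry the write until no other core intervened. A compact analogue using std::atomic (illustrative only; the kernel's monitor models ARM exclusive accesses, not C++ atomics):

    #include <atomic>

    // Returns false if 'word' did not hold 'expected' (mirroring the
    // ERR_INVALID_STATE early-out above); otherwise retries until the write lands.
    bool ModifyIfEqual(std::atomic<int>& word, int expected, int new_value) {
        int observed = word.load();
        do {
            if (observed != expected) {
                return false;
            }
        } while (!word.compare_exchange_weak(observed, new_value));
        return true;
    }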
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index fe4988f84..3e745c18b 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -68,7 +68,7 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
     generations[slot] = generation;
     objects[slot] = std::move(obj);
 
-    const auto handle = static_cast<Handle>(generation | static_cast<u16>(slot << 15));
+    Handle handle = generation | (slot << 15);
     return MakeResult<Handle>(handle);
 }
 
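For context, the changed line packs a 15-bit generation counter and a table slot into a single handle word. A sketch of the layout it implies (field widths are inferred from the shift in this hunk, not from a documented constant):

    #include <cstdint>

    using Handle = std::uint32_t;

    // Low 15 bits: generation counter; remaining bits: table slot.
    constexpr Handle MakeHandle(std::uint16_t generation, std::uint16_t slot) {
        return static_cast<Handle>(generation) | (static_cast<Handle>(slot) << 15);
    }
    constexpr std::uint16_t SlotOf(Handle h) {
        return static_cast<std::uint16_t>(h >> 15);
    }
    constexpr std::uint16_t GenerationOf(Handle h) {
        return static_cast<std::uint16_t>(h & 0x7FFF);
    }

    static_assert(SlotOf(MakeHandle(5, 3)) == 3);
    static_assert(GenerationOf(MakeHandle(5, 3)) == 5);

The casts mattered to the reverted fix because slot << 15 promotes to int, so assigning the result to a Handle becomes an implicit signed-to-unsigned conversion once the static_casts are gone.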
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 0a2de4270..81f85643b 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -58,7 +58,7 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
 
     {
         Handle event_handle = InvalidHandle;
-        SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), static_cast<s64>(timeout));
+        SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
         thread->SetHLECallback(
             [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
                 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 56e14da6b..b2b5b8adf 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -171,7 +171,7 @@ struct KernelCore::Impl {
         const auto type =
             static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
         auto thread_res =
-            Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<s32>(i), 0,
+            Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
                            nullptr, std::move(init_func), init_func_parameter);
 
         suspend_threads[i] = std::move(thread_res).Unwrap();
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index 6cf43ba24..e4288cab4 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,7 +96,6 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
     }
     UNREACHABLE();
-    return 0;
 }
 
 std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -113,7 +112,6 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
     }
     UNREACHABLE();
-    return 0;
 }
 
 } // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index a96157c37..acf13585c 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
     }
 
     // If we allocated more than we need, free some
-    const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast<u32>(heap_index))};
+    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
     if (allocated_pages > num_pages) {
         chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
     }
@@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
 
     // Keep allocating until we've allocated all our pages
     for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
-        const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast<u32>(index))};
+        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
 
         while (num_pages >= pages_per_alloc) {
             // Allocate a block
diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp
index 7890b8c1a..0ab1f7205 100644
--- a/src/core/hle/kernel/memory/page_heap.cpp
+++ b/src/core/hle/kernel/memory/page_heap.cpp
@@ -33,12 +33,11 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
 }
 
 VAddr PageHeap::AllocateBlock(s32 index) {
-    const auto u_index = static_cast<std::size_t>(index);
-    const auto needed_size{blocks[u_index].GetSize()};
+    const std::size_t needed_size{blocks[index].GetSize()};
 
-    for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) {
-        if (const VAddr addr = blocks[i].PopBlock(); addr != 0) {
-            if (const std::size_t allocated_size = blocks[i].GetSize();
+    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
+        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
+            if (const std::size_t allocated_size{blocks[i].GetSize()};
                 allocated_size > needed_size) {
                 Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
             }
@@ -51,7 +50,7 @@ VAddr PageHeap::AllocateBlock(s32 index) {
 
 void PageHeap::FreeBlock(VAddr block, s32 index) {
     do {
-        block = blocks[static_cast<std::size_t>(index++)].PushBlock(block);
+        block = blocks[index++].PushBlock(block);
     } while (block != 0);
 }
 
@@ -70,7 +69,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
     VAddr after_start{end};
     VAddr after_end{end};
     while (big_index >= 0) {
-        const std::size_t block_size{blocks[static_cast<std::size_t>(big_index)].GetSize()};
+        const std::size_t block_size{blocks[big_index].GetSize()};
         const VAddr big_start{Common::AlignUp((start), block_size)};
         const VAddr big_end{Common::AlignDown((end), block_size)};
         if (big_start < big_end) {
@@ -88,7 +87,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space before the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
+        const std::size_t block_size{blocks[i].GetSize()};
         while (before_start + block_size <= before_end) {
             before_end -= block_size;
             FreeBlock(before_end, i);
@@ -97,7 +96,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space after the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
+        const std::size_t block_size{blocks[i].GetSize()};
         while (after_start + block_size <= after_end) {
             FreeBlock(after_start, i);
             after_start += block_size;
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
index 92a2bce04..22b0de860 100644
--- a/src/core/hle/kernel/memory/page_heap.h
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -34,9 +34,7 @@ public:
 
     static constexpr s32 GetBlockIndex(std::size_t num_pages) {
         for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-            const auto shift_index = static_cast<std::size_t>(i);
-            if (num_pages >=
-                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
+            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return i;
             }
         }
@@ -88,7 +86,7 @@ private:
 
         // Set the bitmap pointers
         for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
-            bit_storages[static_cast<std::size_t>(depth)] = storage;
+            bit_storages[depth] = storage;
             size = Common::AlignUp(size, 64) / 64;
             storage += size;
         }
@@ -101,7 +99,7 @@ private:
         s32 depth{};
 
         do {
-            const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
+            const u64 v{bit_storages[depth][offset]};
             if (v == 0) {
                 // Non-zero depth indicates that a previous level had a free block
                 ASSERT(depth == 0);
@@ -127,7 +125,7 @@ private:
     constexpr bool ClearRange(std::size_t offset, std::size_t count) {
         const s32 depth{GetHighestDepthIndex()};
         const auto bit_ind{offset / 64};
-        u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
+        u64* bits{bit_storages[depth]};
         if (count < 64) {
             const auto shift{offset % 64};
             ASSERT(shift + count <= 64);
@@ -179,11 +177,11 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
+            u64* bit{std::addressof(bit_storages[depth][ind])};
             const u64 v{*bit};
             ASSERT((v & mask) == 0);
             *bit = v | mask;
-            if (v != 0) {
+            if (v) {
                 break;
             }
             offset = ind;
@@ -197,12 +195,12 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
+            u64* bit{std::addressof(bit_storages[depth][ind])};
             u64 v{*bit};
             ASSERT((v & mask) != 0);
             v &= ~mask;
             *bit = v;
-            if (v != 0) {
+            if (v) {
                 break;
             }
             offset = ind;
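All of the page_heap.h hunks above index into bit_storages, the per-depth levels of a hierarchical free-page bitmap: each bit at one depth summarizes a 64-bit word at the depth below, so a free page is located with a few trailing-zero scans instead of a linear sweep, and the "if (v) break" early-outs stop propagation once a level already carries the information. A simplified two-level rendition of the idea (a sketch only, using C++20 <bit>; the real Bitmap supports more depths and range operations):

    #include <array>
    #include <bit>
    #include <cstdint>

    struct TwoLevelBitmap {
        std::uint64_t top = 0;                  // bit i set => bottom[i] has a set bit
        std::array<std::uint64_t, 64> bottom{}; // one word per top-level bit

        void Set(std::size_t index) {
            bottom[index / 64] |= 1ULL << (index % 64);
            top |= 1ULL << (index / 64); // propagate "non-empty" upward
        }
        void Clear(std::size_t index) {
            bottom[index / 64] &= ~(1ULL << (index % 64));
            if (bottom[index / 64] == 0) {
                top &= ~(1ULL << (index / 64)); // propagate "empty" upward
            }
        }
        // Index of some set bit, or -1 if the map is empty.
        int FindSet() const {
            if (top == 0) {
                return -1;
            }
            const int word = std::countr_zero(top);
            return word * 64 + std::countr_zero(bottom[static_cast<std::size_t>(word)]);
        }
    };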
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index 4f759d078..a3fadb533 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -414,8 +414,7 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     const std::size_t remaining_pages{remaining_size / PageSize};
 
     if (process->GetResourceLimit() &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
-                                              static_cast<s64>(remaining_size))) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
@@ -779,8 +778,7 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
 
     auto process{system.Kernel().CurrentProcess()};
     if (process->GetResourceLimit() && delta != 0 &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
-                                              static_cast<s64>(delta))) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 6cb59d0fc..d7a7a951c 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -34,7 +34,7 @@ public:
     PhysicalCore& operator=(const PhysicalCore&) = delete;
 
     PhysicalCore(PhysicalCore&&) = default;
-    PhysicalCore& operator=(PhysicalCore&&) = delete;
+    PhysicalCore& operator=(PhysicalCore&&) = default;
 
     void Idle();
     /// Interrupt this physical core.
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 0b39f2955..ff9d9248b 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -137,10 +137,9 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
 }
 
 u64 Process::GetTotalPhysicalMemoryAvailable() const {
-    const u64 capacity{
-        static_cast<u64>(resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory)) +
-        page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
-        main_thread_stack_size};
+    const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
+                       page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
+                       main_thread_stack_size};
 
     if (capacity < memory_usage_capacity) {
         return capacity;
@@ -280,12 +279,12 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
     // Set initial resource limits
     resource_limit->SetLimitValue(
         ResourceType::PhysicalMemory,
-        static_cast<s64>(kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)));
+        kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
     resource_limit->SetLimitValue(ResourceType::Threads, 608);
     resource_limit->SetLimitValue(ResourceType::Events, 700);
     resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
     resource_limit->SetLimitValue(ResourceType::Sessions, 894);
-    ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(code_size)));
+    ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));
 
     // Create TLS region
     tls_region_address = CreateTLSRegion();
@@ -301,9 +300,9 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
 
     ChangeStatus(ProcessStatus::Running);
 
-    SetupMainThread(system, *this, static_cast<u32>(main_thread_priority), main_thread_stack_top);
+    SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
     resource_limit->Reserve(ResourceType::Threads, 1);
-    resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(main_thread_stack_size));
+    resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
 }
 
 void Process::PrepareForTermination() {
@@ -364,7 +363,7 @@ VAddr Process::CreateTLSRegion() {
             ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
                                    Memory::MemoryState::ThreadLocal,
                                    Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
-            .ValueOr(0U)};
+            .ValueOr(0)};
 
     ASSERT(tls_page_addr);
 
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index e94093f24..212e442f4 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -43,8 +43,8 @@ void ResourceLimit::Release(ResourceType resource, u64 amount) {
 void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
     const std::size_t index{ResourceTypeToIndex(resource)};
 
-    current[index] -= static_cast<s64>(used_amount);
-    available[index] -= static_cast<s64>(available_amount);
+    current[index] -= used_amount;
+    available[index] -= available_amount;
 }
 
 std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 4a9a762f3..6b7db5372 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -89,11 +89,9 @@ u32 GlobalScheduler::SelectThreads() {
         while (iter != suggested_queue[core_id].end()) {
             suggested = *iter;
             iter++;
-            const s32 suggested_core_id = suggested->GetProcessorID();
-            Thread* top_thread = suggested_core_id >= 0
-                                     ? top_threads[static_cast<u32>(suggested_core_id)]
-                                     : nullptr;
-
+            s32 suggested_core_id = suggested->GetProcessorID();
+            Thread* top_thread =
+                suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
             if (top_thread != suggested) {
                 if (top_thread != nullptr &&
                     top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
@@ -104,19 +102,16 @@ u32 GlobalScheduler::SelectThreads() {
                     TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
                     break;
                 }
-
                 suggested = nullptr;
                 migration_candidates[num_candidates++] = suggested_core_id;
             }
-
             // Step 3: Select a suggested thread from another core
             if (suggested == nullptr) {
                 for (std::size_t i = 0; i < num_candidates; i++) {
-                    const auto candidate_core = static_cast<u32>(migration_candidates[i]);
+                    s32 candidate_core = migration_candidates[i];
                     suggested = top_threads[candidate_core];
                     auto it = scheduled_queue[candidate_core].begin();
-                    ++it;
-
+                    it++;
                     Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
                     if (next != nullptr) {
                         TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
@@ -133,8 +128,7 @@ u32 GlobalScheduler::SelectThreads() {
 
         idle_cores &= ~(1U << core_id);
     }
-
-    u32 cores_needing_context_switch = 0;
+    u32 cores_needing_context_switch{};
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         Scheduler& sched = kernel.Scheduler(core);
         ASSERT(top_threads[core] == nullptr ||
@@ -192,16 +186,13 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         for (auto& thread : suggested_queue[core_id]) {
             const s32 source_core = thread->GetProcessorID();
             if (source_core >= 0) {
-                const auto sanitized_source_core = static_cast<u32>(source_core);
-
-                if (current_threads[sanitized_source_core] != nullptr) {
-                    if (thread == current_threads[sanitized_source_core] ||
-                        current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) {
+                if (current_threads[source_core] != nullptr) {
+                    if (thread == current_threads[source_core] ||
+                        current_threads[source_core]->GetPriority() < min_regular_priority) {
                         continue;
                     }
                 }
             }
-
             if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
                 next_thread->GetPriority() < thread->GetPriority()) {
                 if (thread->GetPriority() <= priority) {
@@ -249,25 +240,17 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         for (std::size_t i = 0; i < current_threads.size(); i++) {
             current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
         }
-
         for (auto& thread : suggested_queue[core_id]) {
             const s32 source_core = thread->GetProcessorID();
-            if (source_core < 0) {
-                continue;
-            }
-
-            const auto sanitized_source_core = static_cast<u32>(source_core);
-            if (thread == current_threads[sanitized_source_core]) {
+            if (source_core < 0 || thread == current_threads[source_core]) {
                 continue;
             }
-
-            if (current_threads[sanitized_source_core] == nullptr ||
-                current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) {
+            if (current_threads[source_core] == nullptr ||
+                current_threads[source_core]->GetPriority() >= min_regular_priority) {
                 winner = thread;
             }
             break;
         }
-
         if (winner != nullptr) {
             if (winner != yielding_thread) {
                 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
@@ -309,22 +292,17 @@ void GlobalScheduler::PreemptThreads() {
             if (thread->GetPriority() != priority) {
                 continue;
             }
-
             if (source_core >= 0) {
-                const auto sanitized_source_core = static_cast<u32>(source_core);
-                Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
+                Thread* next_thread = scheduled_queue[source_core].empty()
                                           ? nullptr
-                                          : scheduled_queue[sanitized_source_core].front();
-
+                                          : scheduled_queue[source_core].front();
                 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                     break;
                 }
-
                 if (next_thread == thread) {
                     continue;
                 }
             }
-
             if (current_thread != nullptr &&
                 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                 winner = thread;
@@ -344,22 +322,17 @@ void GlobalScheduler::PreemptThreads() {
             if (thread->GetPriority() < priority) {
                 continue;
             }
-
             if (source_core >= 0) {
-                const auto sanitized_source_core = static_cast<u32>(source_core);
-                Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
+                Thread* next_thread = scheduled_queue[source_core].empty()
                                           ? nullptr
-                                          : scheduled_queue[sanitized_source_core].front();
-
+                                          : scheduled_queue[source_core].front();
                 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                     break;
                 }
-
                 if (next_thread == thread) {
                     continue;
                 }
             }
-
             if (current_thread != nullptr &&
                 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                 winner = thread;
@@ -379,11 +352,11 @@ void GlobalScheduler::PreemptThreads() {
 
 void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                                  Core::EmuThreadHandle global_thread) {
-    const u32 current_core = global_thread.host_handle;
+    u32 current_core = global_thread.host_handle;
     bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
                                (current_core < Core::Hardware::NUM_CPU_CORES);
     while (cores_pending_reschedule != 0) {
-        const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
         ASSERT(core < Core::Hardware::NUM_CPU_CORES);
         if (!must_context_switch || core != current_core) {
             auto& phys_core = kernel.PhysicalCore(core);
@@ -393,7 +366,6 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         }
         cores_pending_reschedule &= ~(1U << core);
     }
-
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
         kernel.ExitSVCProfile();
@@ -831,11 +803,9 @@ void Scheduler::Initialize() {
     std::string name = "Idle Thread Id:" + std::to_string(core_id);
     std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    const auto type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res =
-        Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast<s32>(core_id), 0,
-                       nullptr, std::move(init_func), init_func_parameter);
-
+    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+                                     nullptr, std::move(init_func), init_func_parameter);
     idle_thread = std::move(thread_res).Unwrap();
 }
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index b8623e831..bafd1ced7 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -482,8 +482,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
 static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
                                         s32 handle_count, u32 timeout_high, Handle* index) {
     const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
-    return WaitSynchronization(system, index, handles_address, static_cast<u32>(handle_count),
-                               nano_seconds);
+    return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
 }
 
 /// Resumes a thread waiting on WaitSynchronization
@@ -2003,7 +2002,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
         return ERR_INVALID_HANDLE;
     }
 
-    *core = static_cast<u32>(thread->GetIdealCore());
+    *core = thread->GetIdealCore();
     *mask = thread->GetAffinityMask();
 
     return RESULT_SUCCESS;
@@ -2071,7 +2070,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
         return ERR_INVALID_HANDLE;
     }
 
-    return thread->SetCoreAndAffinityMask(static_cast<s32>(core), affinity_mask);
+    return thread->SetCoreAndAffinityMask(core, affinity_mask);
 }
 
 static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 9284a4c84..0b6dd9df0 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -11,11 +11,11 @@
 
 namespace Kernel {
 
-static inline u64 Param(const Core::System& system, std::size_t n) {
+static inline u64 Param(const Core::System& system, int n) {
     return system.CurrentArmInterface().GetReg(n);
 }
 
-static inline u32 Param32(const Core::System& system, std::size_t n) {
+static inline u32 Param32(const Core::System& system, int n) {
     return static_cast<u32>(system.CurrentArmInterface().GetReg(n));
 }
 
@@ -29,7 +29,7 @@ static inline void FuncReturn(Core::System& system, u64 result) {
 }
 
 static inline void FuncReturn32(Core::System& system, u32 result) {
-    system.CurrentArmInterface().SetReg(0, static_cast<u64>(result));
+    system.CurrentArmInterface().SetReg(0, (u64)result);
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -386,10 +386,9 @@ template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
 void SvcWrap32(Core::System& system) {
     Handle param_1 = 0;
 
-    const u32 retval =
-        func(system, &param_1, Param32(system, 0), Param32(system, 1), Param32(system, 2),
-             Param32(system, 3), static_cast<s32>(Param32(system, 4)))
-            .raw;
+    const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
+                            Param32(system, 2), Param32(system, 3), Param32(system, 4))
+                           .raw;
 
     system.CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(system, retval);
@@ -543,8 +542,8 @@ void SvcWrap32(Core::System& system) {
 template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
 void SvcWrap32(Core::System& system) {
     u32 param_1 = 0;
-    const u32 retval = func(system, Param32(system, 0), Param32(system, 1),
-                            static_cast<s32>(Param32(system, 2)), Param32(system, 3), &param_1)
+    const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
+                            Param32(system, 3), &param_1)
                            .raw;
     system.CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(system, retval);
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 653f722b3..8b875d853 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -51,7 +51,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
             // We found a ready object, acquire it and set the result value
             SynchronizationObject* object = itr->get();
             object->Acquire(thread);
-            const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
+            const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
             lock.CancelSleep();
             return {RESULT_SUCCESS, index};
         }
@@ -105,7 +105,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
         });
         ASSERT(itr != sync_objects.end());
         signaling_object->Acquire(thread);
-        const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
+        const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
         return {signaling_result, index};
     }
     return {signaling_result, -1};
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 323e740e9..d132aba34 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -525,7 +525,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     if (old_affinity_mask != new_affinity_mask) {
         const s32 old_core = processor_id;
         if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
-            if (ideal_core < 0) {
+            if (static_cast<s32>(ideal_core) < 0) {
                 processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
             } else {
                 processor_id = ideal_core;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 21b22ca45..8daf79fac 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -470,7 +470,7 @@ public:
 
     bool InvokeHLECallback(std::shared_ptr<Thread> thread);
 
-    s32 GetIdealCore() const {
+    u32 GetIdealCore() const {
         return ideal_core;
     }
 
@@ -654,8 +654,8 @@ private:
 
     Scheduler* scheduler = nullptr;
 
-    s32 ideal_core = -1;
-    u64 affinity_mask = 1;
+    u32 ideal_core{0xFFFFFFFF};
+    u64 affinity_mask{0x1};
 
     s32 ideal_core_override = -1;
     u64 affinity_mask_override = 0x1;