path: root/src/core/hle/kernel
author     Lioncash 2020-10-15 14:49:45 -0400
committer  Lioncash 2020-10-17 19:50:39 -0400
commit     be1954e04cb5a0c3a526f78ed5490a5e65310280 (patch)
tree       267db7ae4be88dbbc288fa605e35d4a2a13839f6 /src/core/hle/kernel
parent     Merge pull request #4787 from lioncash/conversion (diff)
core: Fix clang build
Recent build system changes that flag more warnings as errors broke the build with clang. Fixes #4795
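
Most of the changes below follow a single mechanical pattern: an implicit integer conversion that clang now rejects under warnings-as-errors is made explicit with a static_cast. A minimal sketch of the pattern, assuming a warning set along the lines of -Wsign-conversion with -Werror (the exact flags are not named in this commit), with ReadU32 as a hypothetical stand-in for APIs such as ExclusiveRead32:

    // sign_conversion_sketch.cpp -- hypothetical example, not part of the commit
    #include <cstdint>

    std::uint32_t ReadU32() {
        return 0xFFFFFFFFU; // an unsigned value whose top bit is set
    }

    std::int32_t GetValue() {
        // return ReadU32();                          // rejected: implicit sign conversion
        return static_cast<std::int32_t>(ReadU32());  // accepted: conversion is explicit
    }

The casts do not change behavior; they only mark the sign or width change as intentional, which is why the diff is large but mechanical.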
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp            |  4
-rw-r--r--  src/core/hle/kernel/handle_table.cpp               |  2
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp                    |  2
-rw-r--r--  src/core/hle/kernel/kernel.cpp                     |  2
-rw-r--r--  src/core/hle/kernel/memory/address_space_info.cpp  |  2
-rw-r--r--  src/core/hle/kernel/memory/memory_manager.cpp      |  4
-rw-r--r--  src/core/hle/kernel/memory/page_heap.cpp           | 17
-rw-r--r--  src/core/hle/kernel/memory/page_heap.h             | 18
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp          |  6
-rw-r--r--  src/core/hle/kernel/physical_core.h                |  2
-rw-r--r--  src/core/hle/kernel/process.cpp                    | 17
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp             |  4
-rw-r--r--  src/core/hle/kernel/scheduler.cpp                  | 72
-rw-r--r--  src/core/hle/kernel/svc.cpp                        |  7
-rw-r--r--  src/core/hle/kernel/svc_wrap.h                     | 17
-rw-r--r--  src/core/hle/kernel/synchronization.cpp            |  4
-rw-r--r--  src/core/hle/kernel/thread.cpp                     |  2
-rw-r--r--  src/core/hle/kernel/thread.h                       |  6
18 files changed, 114 insertions(+), 74 deletions(-)
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index b882eaa0f..b6ebc5329 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -108,7 +108,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
     auto& monitor = system.Monitor();
     s32 updated_value;
     do {
-        updated_value = monitor.ExclusiveRead32(current_core, address);
+        updated_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
 
         if (updated_value != value) {
             return ERR_INVALID_STATE;
@@ -129,7 +129,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
                 updated_value = value;
             }
         }
-    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
+    } while (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(updated_value)));
 
     WakeThreads(waiting_threads, num_to_wake);
     return RESULT_SUCCESS;
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3e745c18b..fe4988f84 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -68,7 +68,7 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
     generations[slot] = generation;
     objects[slot] = std::move(obj);
 
-    Handle handle = generation | (slot << 15);
+    const auto handle = static_cast<Handle>(generation | static_cast<u16>(slot << 15));
     return MakeResult<Handle>(handle);
 }
 
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 81f85643b..0a2de4270 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -58,7 +58,7 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
 
     {
         Handle event_handle = InvalidHandle;
-        SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
+        SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), static_cast<s64>(timeout));
         thread->SetHLECallback(
             [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
                 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index f2b0fe2fd..c04b23eff 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -168,7 +168,7 @@ struct KernelCore::Impl {
             const auto type =
                 static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
             auto thread_res =
-                Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
+                Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<s32>(i), 0,
                                nullptr, std::move(init_func), init_func_parameter);
 
             suspend_threads[i] = std::move(thread_res).Unwrap();
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index e4288cab4..6cf43ba24 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
     }
     UNREACHABLE();
+    return 0;
 }
 
 std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
         return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
     }
     UNREACHABLE();
+    return 0;
 }
 
 } // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
index acf13585c..a96157c37 100644
--- a/src/core/hle/kernel/memory/memory_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
     }
 
     // If we allocated more than we need, free some
-    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
+    const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast<u32>(heap_index))};
     if (allocated_pages > num_pages) {
         chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
     }
@@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
 
     // Keep allocating until we've allocated all our pages
     for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
-        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
+        const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast<u32>(index))};
 
         while (num_pages >= pages_per_alloc) {
             // Allocate a block
diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp
index 0ab1f7205..7890b8c1a 100644
--- a/src/core/hle/kernel/memory/page_heap.cpp
+++ b/src/core/hle/kernel/memory/page_heap.cpp
@@ -33,11 +33,12 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
 }
 
 VAddr PageHeap::AllocateBlock(s32 index) {
-    const std::size_t needed_size{blocks[index].GetSize()};
+    const auto u_index = static_cast<std::size_t>(index);
+    const auto needed_size{blocks[u_index].GetSize()};
 
-    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
-        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
-            if (const std::size_t allocated_size{blocks[i].GetSize()};
+    for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) {
+        if (const VAddr addr = blocks[i].PopBlock(); addr != 0) {
+            if (const std::size_t allocated_size = blocks[i].GetSize();
                 allocated_size > needed_size) {
                 Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
             }
@@ -50,7 +51,7 @@ VAddr PageHeap::AllocateBlock(s32 index) {
 
 void PageHeap::FreeBlock(VAddr block, s32 index) {
     do {
-        block = blocks[index++].PushBlock(block);
+        block = blocks[static_cast<std::size_t>(index++)].PushBlock(block);
     } while (block != 0);
 }
 
@@ -69,7 +70,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
     VAddr after_start{end};
     VAddr after_end{end};
     while (big_index >= 0) {
-        const std::size_t block_size{blocks[big_index].GetSize()};
+        const std::size_t block_size{blocks[static_cast<std::size_t>(big_index)].GetSize()};
         const VAddr big_start{Common::AlignUp((start), block_size)};
         const VAddr big_end{Common::AlignDown((end), block_size)};
         if (big_start < big_end) {
@@ -87,7 +88,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space before the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+        const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
         while (before_start + block_size <= before_end) {
             before_end -= block_size;
             FreeBlock(before_end, i);
@@ -96,7 +97,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
 
     // Free space after the big blocks
     for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+        const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
         while (after_start + block_size <= after_end) {
             FreeBlock(after_start, i);
             after_start += block_size;
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
index 22b0de860..92a2bce04 100644
--- a/src/core/hle/kernel/memory/page_heap.h
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -34,7 +34,9 @@ public:
 
     static constexpr s32 GetBlockIndex(std::size_t num_pages) {
         for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+            const auto shift_index = static_cast<std::size_t>(i);
+            if (num_pages >=
+                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
                 return i;
             }
         }
@@ -86,7 +88,7 @@ private:
 
         // Set the bitmap pointers
         for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
-            bit_storages[depth] = storage;
+            bit_storages[static_cast<std::size_t>(depth)] = storage;
             size = Common::AlignUp(size, 64) / 64;
             storage += size;
         }
@@ -99,7 +101,7 @@ private:
         s32 depth{};
 
         do {
-            const u64 v{bit_storages[depth][offset]};
+            const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
             if (v == 0) {
                 // Non-zero depth indicates that a previous level had a free block
                 ASSERT(depth == 0);
@@ -125,7 +127,7 @@ private:
     constexpr bool ClearRange(std::size_t offset, std::size_t count) {
         const s32 depth{GetHighestDepthIndex()};
         const auto bit_ind{offset / 64};
-        u64* bits{bit_storages[depth]};
+        u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
         if (count < 64) {
             const auto shift{offset % 64};
             ASSERT(shift + count <= 64);
@@ -177,11 +179,11 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[depth][ind])};
+            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
             const u64 v{*bit};
             ASSERT((v & mask) == 0);
             *bit = v | mask;
-            if (v) {
+            if (v != 0) {
                 break;
             }
             offset = ind;
@@ -195,12 +197,12 @@ private:
             const auto which{offset % 64};
             const u64 mask{1ULL << which};
 
-            u64* bit{std::addressof(bit_storages[depth][ind])};
+            u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
             u64 v{*bit};
             ASSERT((v & mask) != 0);
             v &= ~mask;
             *bit = v;
-            if (v) {
+            if (v != 0) {
                 break;
             }
             offset = ind;
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index a3fadb533..4f759d078 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -414,7 +414,8 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     const std::size_t remaining_pages{remaining_size / PageSize};
 
     if (process->GetResourceLimit() &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+                                              static_cast<s64>(remaining_size))) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
@@ -778,7 +779,8 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
 
     auto process{system.Kernel().CurrentProcess()};
     if (process->GetResourceLimit() && delta != 0 &&
-        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
+        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
+                                              static_cast<s64>(delta))) {
         return ERR_RESOURCE_LIMIT_EXCEEDED;
     }
 
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index d7a7a951c..6cb59d0fc 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -34,7 +34,7 @@ public:
     PhysicalCore& operator=(const PhysicalCore&) = delete;
 
     PhysicalCore(PhysicalCore&&) = default;
-    PhysicalCore& operator=(PhysicalCore&&) = default;
+    PhysicalCore& operator=(PhysicalCore&&) = delete;
 
     void Idle();
     /// Interrupt this physical core.
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index ff9d9248b..0b39f2955 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -137,9 +137,10 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
 }
 
 u64 Process::GetTotalPhysicalMemoryAvailable() const {
-    const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
-                       page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
-                       main_thread_stack_size};
+    const u64 capacity{
+        static_cast<u64>(resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory)) +
+        page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
+        main_thread_stack_size};
 
     if (capacity < memory_usage_capacity) {
         return capacity;
@@ -279,12 +280,12 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
     // Set initial resource limits
     resource_limit->SetLimitValue(
         ResourceType::PhysicalMemory,
-        kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
+        static_cast<s64>(kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)));
     resource_limit->SetLimitValue(ResourceType::Threads, 608);
     resource_limit->SetLimitValue(ResourceType::Events, 700);
     resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
     resource_limit->SetLimitValue(ResourceType::Sessions, 894);
-    ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));
+    ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(code_size)));
 
     // Create TLS region
     tls_region_address = CreateTLSRegion();
@@ -300,9 +301,9 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
 
     ChangeStatus(ProcessStatus::Running);
 
-    SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
+    SetupMainThread(system, *this, static_cast<u32>(main_thread_priority), main_thread_stack_top);
     resource_limit->Reserve(ResourceType::Threads, 1);
-    resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
+    resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(main_thread_stack_size));
 }
 
 void Process::PrepareForTermination() {
@@ -363,7 +364,7 @@ VAddr Process::CreateTLSRegion() {
             ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
                                    Memory::MemoryState::ThreadLocal,
                                    Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
-            .ValueOr(0)};
+            .ValueOr(0U)};
 
     ASSERT(tls_page_addr);
 
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 212e442f4..e94093f24 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -43,8 +43,8 @@ void ResourceLimit::Release(ResourceType resource, u64 amount) {
 void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
     const std::size_t index{ResourceTypeToIndex(resource)};
 
-    current[index] -= used_amount;
-    available[index] -= available_amount;
+    current[index] -= static_cast<s64>(used_amount);
+    available[index] -= static_cast<s64>(available_amount);
 }
 
 std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 6b7db5372..4a9a762f3 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -89,9 +89,11 @@ u32 GlobalScheduler::SelectThreads() {
         while (iter != suggested_queue[core_id].end()) {
             suggested = *iter;
             iter++;
-            s32 suggested_core_id = suggested->GetProcessorID();
-            Thread* top_thread =
-                suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
+            const s32 suggested_core_id = suggested->GetProcessorID();
+            Thread* top_thread = suggested_core_id >= 0
+                                     ? top_threads[static_cast<u32>(suggested_core_id)]
+                                     : nullptr;
+
             if (top_thread != suggested) {
                 if (top_thread != nullptr &&
                     top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
@@ -102,16 +104,19 @@ u32 GlobalScheduler::SelectThreads() {
                     TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
                     break;
                 }
+
                 suggested = nullptr;
                 migration_candidates[num_candidates++] = suggested_core_id;
             }
+
             // Step 3: Select a suggested thread from another core
             if (suggested == nullptr) {
                 for (std::size_t i = 0; i < num_candidates; i++) {
-                    s32 candidate_core = migration_candidates[i];
+                    const auto candidate_core = static_cast<u32>(migration_candidates[i]);
                     suggested = top_threads[candidate_core];
                     auto it = scheduled_queue[candidate_core].begin();
-                    it++;
+                    ++it;
+
                     Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
                     if (next != nullptr) {
                         TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
@@ -128,7 +133,8 @@ u32 GlobalScheduler::SelectThreads() {
 
         idle_cores &= ~(1U << core_id);
     }
-    u32 cores_needing_context_switch{};
+
+    u32 cores_needing_context_switch = 0;
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         Scheduler& sched = kernel.Scheduler(core);
         ASSERT(top_threads[core] == nullptr ||
@@ -186,13 +192,16 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     for (auto& thread : suggested_queue[core_id]) {
         const s32 source_core = thread->GetProcessorID();
         if (source_core >= 0) {
-            if (current_threads[source_core] != nullptr) {
-                if (thread == current_threads[source_core] ||
-                    current_threads[source_core]->GetPriority() < min_regular_priority) {
+            const auto sanitized_source_core = static_cast<u32>(source_core);
+
+            if (current_threads[sanitized_source_core] != nullptr) {
+                if (thread == current_threads[sanitized_source_core] ||
+                    current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) {
                     continue;
                 }
             }
         }
+
         if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
             next_thread->GetPriority() < thread->GetPriority()) {
             if (thread->GetPriority() <= priority) {
@@ -240,17 +249,25 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
     for (std::size_t i = 0; i < current_threads.size(); i++) {
         current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
     }
+
     for (auto& thread : suggested_queue[core_id]) {
         const s32 source_core = thread->GetProcessorID();
-        if (source_core < 0 || thread == current_threads[source_core]) {
+        if (source_core < 0) {
+            continue;
+        }
+
+        const auto sanitized_source_core = static_cast<u32>(source_core);
+        if (thread == current_threads[sanitized_source_core]) {
             continue;
         }
-        if (current_threads[source_core] == nullptr ||
-            current_threads[source_core]->GetPriority() >= min_regular_priority) {
+
+        if (current_threads[sanitized_source_core] == nullptr ||
+            current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) {
             winner = thread;
         }
         break;
     }
+
     if (winner != nullptr) {
         if (winner != yielding_thread) {
             TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
@@ -292,17 +309,22 @@ void GlobalScheduler::PreemptThreads() {
             if (thread->GetPriority() != priority) {
                 continue;
             }
+
             if (source_core >= 0) {
-                Thread* next_thread = scheduled_queue[source_core].empty()
+                const auto sanitized_source_core = static_cast<u32>(source_core);
+                Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
                                           ? nullptr
-                                          : scheduled_queue[source_core].front();
+                                          : scheduled_queue[sanitized_source_core].front();
+
                 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                     break;
                 }
+
                 if (next_thread == thread) {
                     continue;
                 }
             }
+
             if (current_thread != nullptr &&
                 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                 winner = thread;
@@ -322,17 +344,22 @@ void GlobalScheduler::PreemptThreads() {
             if (thread->GetPriority() < priority) {
                 continue;
             }
+
             if (source_core >= 0) {
-                Thread* next_thread = scheduled_queue[source_core].empty()
+                const auto sanitized_source_core = static_cast<u32>(source_core);
+                Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
                                           ? nullptr
-                                          : scheduled_queue[source_core].front();
+                                          : scheduled_queue[sanitized_source_core].front();
+
                 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                     break;
                 }
+
                 if (next_thread == thread) {
                     continue;
                 }
             }
+
             if (current_thread != nullptr &&
                 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                 winner = thread;
@@ -352,11 +379,11 @@ void GlobalScheduler::PreemptThreads() {
 
 void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                                  Core::EmuThreadHandle global_thread) {
-    u32 current_core = global_thread.host_handle;
+    const u32 current_core = global_thread.host_handle;
     bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
                                (current_core < Core::Hardware::NUM_CPU_CORES);
     while (cores_pending_reschedule != 0) {
-        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+        const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
         ASSERT(core < Core::Hardware::NUM_CPU_CORES);
         if (!must_context_switch || core != current_core) {
             auto& phys_core = kernel.PhysicalCore(core);
@@ -366,6 +393,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         }
         cores_pending_reschedule &= ~(1U << core);
     }
+
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
         kernel.ExitSVCProfile();
@@ -803,9 +831,11 @@ void Scheduler::Initialize() {
     std::string name = "Idle Thread Id:" + std::to_string(core_id);
     std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
-                                     nullptr, std::move(init_func), init_func_parameter);
+    const auto type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+    auto thread_res =
+        Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast<s32>(core_id), 0,
+                       nullptr, std::move(init_func), init_func_parameter);
+
     idle_thread = std::move(thread_res).Unwrap();
 }
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index bafd1ced7..b8623e831 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -482,7 +482,8 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
 static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
                                         s32 handle_count, u32 timeout_high, Handle* index) {
     const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
-    return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
+    return WaitSynchronization(system, index, handles_address, static_cast<u32>(handle_count),
+                               nano_seconds);
 }
 
 /// Resumes a thread waiting on WaitSynchronization
@@ -2002,7 +2003,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
         return ERR_INVALID_HANDLE;
     }
 
-    *core = thread->GetIdealCore();
+    *core = static_cast<u32>(thread->GetIdealCore());
     *mask = thread->GetAffinityMask();
 
     return RESULT_SUCCESS;
@@ -2070,7 +2071,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
         return ERR_INVALID_HANDLE;
     }
 
-    return thread->SetCoreAndAffinityMask(core, affinity_mask);
+    return thread->SetCoreAndAffinityMask(static_cast<s32>(core), affinity_mask);
 }
 
 static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 0b6dd9df0..9284a4c84 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -11,11 +11,11 @@
 
 namespace Kernel {
 
-static inline u64 Param(const Core::System& system, int n) {
+static inline u64 Param(const Core::System& system, std::size_t n) {
     return system.CurrentArmInterface().GetReg(n);
 }
 
-static inline u32 Param32(const Core::System& system, int n) {
+static inline u32 Param32(const Core::System& system, std::size_t n) {
     return static_cast<u32>(system.CurrentArmInterface().GetReg(n));
 }
 
@@ -29,7 +29,7 @@ static inline void FuncReturn(Core::System& system, u64 result) {
 }
 
 static inline void FuncReturn32(Core::System& system, u32 result) {
-    system.CurrentArmInterface().SetReg(0, (u64)result);
+    system.CurrentArmInterface().SetReg(0, static_cast<u64>(result));
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -386,9 +386,10 @@ template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
 void SvcWrap32(Core::System& system) {
     Handle param_1 = 0;
 
-    const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
-                            Param32(system, 2), Param32(system, 3), Param32(system, 4))
-                           .raw;
+    const u32 retval =
+        func(system, &param_1, Param32(system, 0), Param32(system, 1), Param32(system, 2),
+             Param32(system, 3), static_cast<s32>(Param32(system, 4)))
+            .raw;
 
     system.CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(system, retval);
@@ -542,8 +543,8 @@ void SvcWrap32(Core::System& system) {
 template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
 void SvcWrap32(Core::System& system) {
     u32 param_1 = 0;
-    const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
-                            Param32(system, 3), &param_1)
+    const u32 retval = func(system, Param32(system, 0), Param32(system, 1),
+                            static_cast<s32>(Param32(system, 2)), Param32(system, 3), &param_1)
                            .raw;
     system.CurrentArmInterface().SetReg(1, param_1);
     FuncReturn(system, retval);
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 8b875d853..653f722b3 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -51,7 +51,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
             // We found a ready object, acquire it and set the result value
             SynchronizationObject* object = itr->get();
             object->Acquire(thread);
-            const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
+            const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
             lock.CancelSleep();
             return {RESULT_SUCCESS, index};
         }
@@ -105,7 +105,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
         });
         ASSERT(itr != sync_objects.end());
         signaling_object->Acquire(thread);
-        const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
+        const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
         return {signaling_result, index};
     }
     return {signaling_result, -1};
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index d132aba34..323e740e9 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -525,7 +525,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     if (old_affinity_mask != new_affinity_mask) {
         const s32 old_core = processor_id;
         if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
-            if (static_cast<s32>(ideal_core) < 0) {
+            if (ideal_core < 0) {
                 processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
             } else {
                 processor_id = ideal_core;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 8daf79fac..21b22ca45 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -470,7 +470,7 @@ public:
 
     bool InvokeHLECallback(std::shared_ptr<Thread> thread);
 
-    u32 GetIdealCore() const {
+    s32 GetIdealCore() const {
         return ideal_core;
     }
 
@@ -654,8 +654,8 @@ private:
 
     Scheduler* scheduler = nullptr;
 
-    u32 ideal_core{0xFFFFFFFF};
-    u64 affinity_mask{0x1};
+    s32 ideal_core = -1;
+    u64 affinity_mask = 1;
 
     s32 ideal_core_override = -1;
     u64 affinity_mask_override = 0x1;