| author | 2018-09-15 15:21:06 +0200 |
|---|---|
| committer | 2018-09-15 15:21:06 +0200 |
| commit | 63c2e32e207d31ecadd9022e1d7cd705c9febac8 (patch) |
| tree | 8a90e8ef2804f147dff7225a543a8740ecf7160c /src/core/hle/kernel |
| parent | Merge pull request #1310 from lioncash/kernel-ns (diff) |
| download | yuzu-63c2e32e207d31ecadd9022e1d7cd705c9febac8.tar.gz, yuzu-63c2e32e207d31ecadd9022e1d7cd705c9febac8.tar.xz, yuzu-63c2e32e207d31ecadd9022e1d7cd705c9febac8.zip |
Port #4182 from Citra: "Prefix all size_t with std::"
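
Rationale for the rename (not stated in the commit itself, but standard C++ behavior): `<cstddef>` is required to declare `std::size_t`, while it is unspecified whether the unqualified global `size_t` is also made visible by the `<c...>` headers, so the qualified spelling is the portable one. Below is a minimal sketch illustrating the qualified style; the function name and contents are illustrative only and do not come from the yuzu sources:

```cpp
// Portable: <cstddef> guarantees std::size_t; the plain global-namespace
// size_t is only guaranteed when including the C header <stddef.h>.
#include <cstddef>
#include <vector>

// Hypothetical helper, similar in shape to the loops touched by this commit.
std::size_t CountMatches(const std::vector<int>& values, int target) {
    std::size_t count = 0;
    for (std::size_t i = 0; i < values.size(); ++i) {
        if (values[i] == target)
            ++count;
    }
    return count;
}
```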
Diffstat (limited to 'src/core/hle/kernel')
| -rw-r--r-- | src/core/hle/kernel/address_arbiter.cpp | 25 |
| -rw-r--r-- | src/core/hle/kernel/handle_table.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/handle_table.h | 2 |
| -rw-r--r-- | src/core/hle/kernel/hle_ipc.cpp | 27 |
| -rw-r--r-- | src/core/hle/kernel/hle_ipc.h | 20 |
| -rw-r--r-- | src/core/hle/kernel/process.cpp | 6 |
| -rw-r--r-- | src/core/hle/kernel/process.h | 4 |
| -rw-r--r-- | src/core/hle/kernel/shared_memory.h | 2 |
| -rw-r--r-- | src/core/hle/kernel/svc.cpp | 27 |
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/thread.h | 2 |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.h | 4 |
| -rw-r--r-- | src/core/hle/kernel/wait_object.cpp | 2 |
14 files changed, 65 insertions, 62 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 6657accd5..93577591f 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) {
 
 // Gets the threads waiting on an address.
 static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) {
-    const auto RetrieveWaitingThreads =
-        [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr arb_addr) {
-            const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
-            auto& thread_list = scheduler->GetThreadList();
-
-            for (auto& thread : thread_list) {
-                if (thread->arb_wait_address == arb_addr)
-                    waiting_threads.push_back(thread);
-            }
-        };
+    const auto RetrieveWaitingThreads = [](std::size_t core_index,
+                                           std::vector<SharedPtr<Thread>>& waiting_threads,
+                                           VAddr arb_addr) {
+        const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+        auto& thread_list = scheduler->GetThreadList();
+
+        for (auto& thread : thread_list) {
+            if (thread->arb_wait_address == arb_addr)
+                waiting_threads.push_back(thread);
+        }
+    };
 
     // Retrieve all threads that are waiting for this address.
     std::vector<SharedPtr<Thread>> threads;
@@ -66,12 +67,12 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
 static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
     // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
     // them all.
-    size_t last = waiting_threads.size();
+    std::size_t last = waiting_threads.size();
     if (num_to_wake > 0)
         last = num_to_wake;
 
     // Signal the waiting threads.
-    for (size_t i = 0; i < last; i++) {
+    for (std::size_t i = 0; i < last; i++) {
         ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
         waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
         waiting_threads[i]->arb_wait_address = 0;
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3a079b9a9..5ee5c05e3 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) {
 }
 
 bool HandleTable::IsValid(Handle handle) const {
-    size_t slot = GetSlot(handle);
+    std::size_t slot = GetSlot(handle);
     u16 generation = GetGeneration(handle);
 
     return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index cac928adb..9e2f33e8a 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -93,7 +93,7 @@ private:
      * This is the maximum limit of handles allowed per process in CTR-OS. It can be further
      * reduced by ExHeader values, but this is not emulated here.
      */
-    static const size_t MAX_COUNT = 4096;
+    static const std::size_t MAX_COUNT = 4096;
 
     static u16 GetSlot(Handle handle) {
         return handle >> 15;
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 7264be906..72fb9d250 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -42,9 +42,9 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
                                                       Kernel::SharedPtr<Kernel::Event> event) {
 
     // Put the client thread to sleep until the wait event is signaled or the timeout expires.
-    thread->wakeup_callback =
-        [context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread,
-                                    SharedPtr<WaitObject> object, size_t index) mutable -> bool {
+    thread->wakeup_callback = [context = *this, callback](
+                                  ThreadWakeupReason reason, SharedPtr<Thread> thread,
+                                  SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
         ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
         callback(thread, context, reason);
         context.WriteToOutgoingCommandBuffer(*thread);
@@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
     }
 
     // The data_size already includes the payload header, the padding and the domain header.
-    size_t size = data_payload_offset + command_header->data_size -
-                  sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+    std::size_t size = data_payload_offset + command_header->data_size -
+                       sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
     if (domain_message_header)
         size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
     std::copy_n(src_cmdbuf, size, cmd_buf.begin());
@@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
     ParseCommandBuffer(cmd_buf.data(), false);
 
     // The data_size already includes the payload header, the padding and the domain header.
-    size_t size = data_payload_offset + command_header->data_size -
-                  sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+    std::size_t size = data_payload_offset + command_header->data_size -
+                       sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
     if (domain_message_header)
         size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
 
@@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
                    "Handle descriptor bit set but no handles to translate");
         // We write the translated handles at a specific offset in the command buffer, this space
         // was already reserved when writing the header.
-        size_t current_offset =
+        std::size_t current_offset =
             (sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32);
         ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented");
 
@@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
         ASSERT(domain_message_header->num_objects == domain_objects.size());
         // Write the domain objects to the command buffer, these go after the raw untranslated data.
         // TODO(Subv): This completely ignores C buffers.
-        size_t domain_offset = size - domain_message_header->num_objects;
+        std::size_t domain_offset = size - domain_message_header->num_objects;
         auto& request_handlers = server_session->domain_request_handlers;
 
         for (auto& object : domain_objects) {
@@ -291,14 +291,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
     return buffer;
 }
 
-size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const {
+std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
+                                           int buffer_index) const {
     if (size == 0) {
         LOG_WARNING(Core, "skip empty buffer write");
         return 0;
     }
 
     const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
-    const size_t buffer_size{GetWriteBufferSize(buffer_index)};
+    const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
     if (size > buffer_size) {
         LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
                      buffer_size);
@@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe
     return size;
 }
 
-size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
     const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()};
     return is_buffer_a ? BufferDescriptorA()[buffer_index].Size()
                        : BufferDescriptorX()[buffer_index].Size();
 }
 
-size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
     const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
     return is_buffer_b ? BufferDescriptorB()[buffer_index].Size()
                        : BufferDescriptorC()[buffer_index].Size();
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f0d07f1b6..894479ee0 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -170,7 +170,7 @@ public:
     std::vector<u8> ReadBuffer(int buffer_index = 0) const;
 
     /// Helper function to write a buffer using the appropriate buffer descriptor
-    size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const;
+    std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const;
 
     /* Helper function to write a buffer using the appropriate buffer descriptor
      *
@@ -182,7 +182,7 @@ public:
      */
     template <typename ContiguousContainer,
               typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
-    size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
+    std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
         using ContiguousType = typename ContiguousContainer::value_type;
 
         static_assert(std::is_trivially_copyable_v<ContiguousType>,
@@ -193,19 +193,19 @@ public:
     }
 
     /// Helper function to get the size of the input buffer
-    size_t GetReadBufferSize(int buffer_index = 0) const;
+    std::size_t GetReadBufferSize(int buffer_index = 0) const;
 
     /// Helper function to get the size of the output buffer
-    size_t GetWriteBufferSize(int buffer_index = 0) const;
+    std::size_t GetWriteBufferSize(int buffer_index = 0) const;
 
     template <typename T>
-    SharedPtr<T> GetCopyObject(size_t index) {
+    SharedPtr<T> GetCopyObject(std::size_t index) {
         ASSERT(index < copy_objects.size());
         return DynamicObjectCast<T>(copy_objects[index]);
     }
 
     template <typename T>
-    SharedPtr<T> GetMoveObject(size_t index) {
+    SharedPtr<T> GetMoveObject(std::size_t index) {
         ASSERT(index < move_objects.size());
         return DynamicObjectCast<T>(move_objects[index]);
     }
@@ -223,7 +223,7 @@ public:
     }
 
     template <typename T>
-    std::shared_ptr<T> GetDomainRequestHandler(size_t index) const {
+    std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
         return std::static_pointer_cast<T>(domain_request_handlers[index]);
     }
 
@@ -240,15 +240,15 @@ public:
         domain_objects.clear();
     }
 
-    size_t NumMoveObjects() const {
+    std::size_t NumMoveObjects() const {
         return move_objects.size();
     }
 
-    size_t NumCopyObjects() const {
+    std::size_t NumCopyObjects() const {
         return copy_objects.size();
    }
 
-    size_t NumDomainObjects() const {
+    std::size_t NumDomainObjects() const {
         return domain_objects.size();
     }
 
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b025e323f..7a272d031 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -40,8 +40,8 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
     return process;
 }
 
-void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
-    for (size_t i = 0; i < len; ++i) {
+void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
+    for (std::size_t i = 0; i < len; ++i) {
         u32 descriptor = kernel_caps[i];
         u32 type = descriptor >> 20;
 
@@ -211,7 +211,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
                "Shared memory exceeds bounds of mapped block");
 
     const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
-    size_t backing_block_offset = vma->second.offset + vma_offset;
+    std::size_t backing_block_offset = vma->second.offset + vma_offset;
 
     CASCADE_RESULT(auto new_vma,
                    vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 1587d40c1..81538f70c 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -59,7 +59,7 @@ class ResourceLimit;
 
 struct CodeSet final : public Object {
     struct Segment {
-        size_t offset = 0;
+        std::size_t offset = 0;
         VAddr addr = 0;
         u32 size = 0;
     };
@@ -164,7 +164,7 @@ public:
      * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
      * to this process.
      */
-    void ParseKernelCaps(const u32* kernel_caps, size_t len);
+    void ParseKernelCaps(const u32* kernel_caps, std::size_t len);
 
     /**
      * Applies address space changes and launches the process main thread.
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 2c729afe3..2c06bb7ce 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -119,7 +119,7 @@ public:
     /// Backing memory for this shared memory block.
     std::shared_ptr<std::vector<u8>> backing_block;
     /// Offset into the backing block for this shared memory.
-    size_t backing_block_offset;
+    std::size_t backing_block_offset;
     /// Size of the memory block. Page-aligned.
     u64 size;
     /// Permission restrictions applied to the process which created the block.
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f500fd2e7..a5aaa089d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -146,7 +146,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
 
 /// Default thread wakeup callback for WaitSynchronization
 static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
-                                        SharedPtr<WaitObject> object, size_t index) {
+                                        SharedPtr<WaitObject> object, std::size_t index) {
     ASSERT(thread->status == ThreadStatus::WaitSynchAny);
 
     if (reason == ThreadWakeupReason::Timeout) {
@@ -647,16 +647,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
               condition_variable_addr, target);
 
-    auto RetrieveWaitingThreads =
-        [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr condvar_addr) {
-            const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
-            auto& thread_list = scheduler->GetThreadList();
+    auto RetrieveWaitingThreads = [](std::size_t core_index,
+                                     std::vector<SharedPtr<Thread>>& waiting_threads,
+                                     VAddr condvar_addr) {
+        const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+        auto& thread_list = scheduler->GetThreadList();
 
-            for (auto& thread : thread_list) {
-                if (thread->condvar_wait_address == condvar_addr)
-                    waiting_threads.push_back(thread);
-            }
-        };
+        for (auto& thread : thread_list) {
+            if (thread->condvar_wait_address == condvar_addr)
+                waiting_threads.push_back(thread);
+        }
+    };
 
     // Retrieve a list of all threads that are waiting for this condition variable.
     std::vector<SharedPtr<Thread>> waiting_threads;
@@ -672,7 +673,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
 
     // Only process up to 'target' threads, unless 'target' is -1, in which case process
     // them all.
-    size_t last = waiting_threads.size();
+    std::size_t last = waiting_threads.size();
     if (target != -1)
         last = target;
 
@@ -680,12 +681,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     if (last > waiting_threads.size())
         return RESULT_SUCCESS;
 
-    for (size_t index = 0; index < last; ++index) {
+    for (std::size_t index = 0; index < last; ++index) {
         auto& thread = waiting_threads[index];
 
         ASSERT(thread->condvar_wait_address == condition_variable_addr);
 
-        size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
+        std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
 
         auto& monitor = Core::System::GetInstance().Monitor();
 
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3f12a84dc..89cd5f401 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -275,7 +275,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
         available_slot = 0; // Use the first slot in the new page
 
         // Allocate some memory from the end of the linear heap for this region.
-        const size_t offset = thread->tls_memory->size();
+        const std::size_t offset = thread->tls_memory->size();
         thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0);
 
         auto& vm_manager = owner_process->vm_manager;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index cb57ee78a..df4748942 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -254,7 +254,7 @@ public:
     Handle callback_handle;
 
     using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
-                                SharedPtr<WaitObject> object, size_t index);
+                                SharedPtr<WaitObject> object, std::size_t index);
     // Callback that will be invoked when the thread is resumed from a waiting state. If the thread
     // was waiting via WaitSynchronizationN then the object will be the last object that became
     // available. In case of a timeout, the object will be nullptr.
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 479cacb62..608cbd57b 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -86,7 +86,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
 
 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
                                                           std::shared_ptr<std::vector<u8>> block,
-                                                          size_t offset, u64 size,
+                                                          std::size_t offset, u64 size,
                                                           MemoryState state) {
     ASSERT(block != nullptr);
     ASSERT(offset + size <= block->size());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 98bd04bea..de75036c0 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -81,7 +81,7 @@ struct VirtualMemoryArea {
     /// Memory block backing this VMA.
     std::shared_ptr<std::vector<u8>> backing_block = nullptr;
     /// Offset into the backing_memory the mapping starts from.
-    size_t offset = 0;
+    std::size_t offset = 0;
 
     // Settings for type = BackingMemory
     /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
@@ -147,7 +147,7 @@ public:
      * @param state MemoryState tag to attach to the VMA.
      */
     ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
-                                        size_t offset, u64 size, MemoryState state);
+                                        std::size_t offset, u64 size, MemoryState state);
 
     /**
      * Maps an unmanaged host memory pointer at a given address.
diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp
index eef00b729..b190ceb98 100644
--- a/src/core/hle/kernel/wait_object.cpp
+++ b/src/core/hle/kernel/wait_object.cpp
@@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
         }
     }
 
-    size_t index = thread->GetWaitObjectIndex(this);
+    std::size_t index = thread->GetWaitObjectIndex(this);
 
     for (auto& object : thread->wait_objects)
         object->RemoveWaitingThread(thread.get());