path: root/src/core/hle/kernel
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp              8
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp            40
-rw-r--r--  src/core/hle/kernel/k_process.cpp                    16
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp             36
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp           4
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp             4
-rw-r--r--  src/core/hle/kernel/kernel.cpp                        12
-rw-r--r--  src/core/hle/kernel/svc/svc_code_memory.cpp           4
-rw-r--r--  src/core/hle/kernel/svc/svc_device_address_space.cpp  4
-rw-r--r--  src/core/hle/kernel/svc/svc_event.cpp                 4
-rw-r--r--  src/core/hle/kernel/svc/svc_ipc.cpp                   8
-rw-r--r--  src/core/hle/kernel/svc/svc_port.cpp                  8
-rw-r--r--  src/core/hle/kernel/svc/svc_resource_limit.cpp        4
-rw-r--r--  src/core/hle/kernel/svc/svc_session.cpp               4
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp       4
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp                4
-rw-r--r--  src/core/hle/kernel/svc/svc_transfer_memory.cpp       4
17 files changed, 109 insertions, 59 deletions
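
Every hunk below makes the same mechanical change: scope-exit cleanup is switched from the old macro-argument form, SCOPE_EXIT({ ... }); and SCOPE_GUARD({ ... });, to a trailing-brace statement form, SCOPE_EXIT { ... }; and SCOPE_GUARD { ... };, so the cleanup body reads like an ordinary compound statement instead of a braced macro argument. The real macros live in the project's common scope-exit header, which is not part of this path-limited diff; purely as a hedged sketch of how a macro of that shape can work, with every name below invented for illustration, the macro can end in a lambda introducer so that the user's braces become the lambda body:

    // Illustrative sketch only; not the project's actual implementation.
    #include <utility>

    namespace example {

    template <typename F>
    class ScopeExitGuard {
    public:
        explicit ScopeExitGuard(F&& f) : m_f(std::forward<F>(f)) {}
        ~ScopeExitGuard() {
            m_f(); // Run the cleanup body when the enclosing scope ends.
        }

    private:
        F m_f;
    };

    struct ScopeExitTag {
        template <typename F>
        ScopeExitGuard<F> operator->*(F&& f) const {
            return ScopeExitGuard<F>(std::forward<F>(f));
        }
    };

    } // namespace example

    #define EXAMPLE_CONCAT_IMPL(a, b) a##b
    #define EXAMPLE_CONCAT(a, b) EXAMPLE_CONCAT_IMPL(a, b)
    // The macro ends with a lambda introducer; the braces written after SCOPE_EXIT
    // become the lambda body, and the caller's trailing ';' completes the declaration.
    #define SCOPE_EXIT \
        auto EXAMPLE_CONCAT(scope_exit_guard_, __LINE__) = example::ScopeExitTag{}->*[&]()

Under such a definition, SCOPE_EXIT { request->Close(); }; declares a guard object whose destructor runs request->Close() when the scope unwinds, which is exactly how the rewritten call sites below read.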
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 472e8571c..3e01e3b67 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -24,7 +24,9 @@ Result KClientSession::SendSyncRequest(uintptr_t address, size_t size) {
     // Create a session request.
     KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Initialize the request.
     request->Initialize(nullptr, address, size);
@@ -37,7 +39,9 @@ Result KClientSession::SendAsyncRequest(KEvent* event, uintptr_t address, size_t
     // Create a session request.
     KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Initialize the request.
     request->Initialize(event, address, size);
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 1dd86fb3c..19cdf4f3a 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -1305,11 +1305,11 @@ Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddr
 
     // Ensure that we maintain the instruction cache.
     bool reprotected_pages = false;
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (reprotected_pages && any_code_pages) {
             InvalidateInstructionCache(m_kernel, this, dst_address, size);
         }
-    });
+    };
 
     // Unmap.
     {
@@ -1397,7 +1397,9 @@ Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
     // Close the opened pages when we're done with them.
     // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
     // automatically.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg) {
@@ -1603,7 +1605,9 @@ Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProce
         m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
 
     // Ensure that the page group is closed when we're done working with it.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all pages.
     for (const auto& it : pg) {
@@ -2191,7 +2195,9 @@ Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
     // Close the opened pages when we're done with them.
     // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
     // automatically.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg) {
@@ -2592,7 +2598,9 @@ Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddre
         // Temporarily unlock ourselves, so that other operations can occur while we flush the
         // region.
         m_general_lock.Unlock();
-        SCOPE_EXIT({ m_general_lock.Lock(); });
+        SCOPE_EXIT {
+            m_general_lock.Lock();
+        };
 
         // Flush the region.
         R_ASSERT(FlushDataCache(dst_address, size));
@@ -3311,10 +3319,10 @@ Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddre
     // Ensure we unmap the io memory when we're done with it.
     const KPageProperties unmap_properties =
         KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
                                unmap_properties, OperationType::Unmap, true));
-    });
+    };
 
     // Read the memory.
     const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
@@ -3347,10 +3355,10 @@ Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAdd
     // Ensure we unmap the io memory when we're done with it.
     const KPageProperties unmap_properties =
         KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
                                unmap_properties, OperationType::Unmap, true));
-    });
+    };
 
     // Write the memory.
     const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
@@ -4491,14 +4499,14 @@ Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
 
     // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
     // free on scope exit.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (start_partial_page != 0) {
             m_kernel.MemoryManager().Close(start_partial_page, 1);
         }
         if (end_partial_page != 0) {
             m_kernel.MemoryManager().Close(end_partial_page, 1);
         }
-    });
+    };
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {
@@ -5166,10 +5174,10 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                     GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
 
                 // If we fail in the next bit (or retry), we need to cleanup the pages.
-                auto pg_guard = SCOPE_GUARD({
+                auto pg_guard = SCOPE_GUARD {
                     pg.OpenFirst();
                     pg.Close();
-                });
+                };
 
                 // Map the memory.
                 {
@@ -5694,7 +5702,9 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
 
         // Ensure that any pages we track are closed on exit.
         KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
-        SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+        SCOPE_EXIT {
+            pages_to_close.CloseAndReset();
+        };
 
         // Make a page group representing the region to unmap.
         this->MakePageGroup(pages_to_close, virt_addr, num_pages);
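
A few of the sites in this file (MapPhysicalMemory above) and in k_thread_local_page.cpp and k_transfer_memory.cpp further down use SCOPE_GUARD rather than SCOPE_EXIT and bind the result to a named variable such as pg_guard or page_buf_guard. The named form points at a guard that can be dismissed once the guarded operation has succeeded; the dismissal call itself is outside the context shown in these hunks, so treat the following as an assumption. A minimal sketch of such a cancellable guard, with the Cancel() name and the class name invented for illustration:

    // Hypothetical cancellable guard; the project's real SCOPE_GUARD may differ.
    #include <utility>

    template <typename F>
    class CancellableGuard {
    public:
        explicit CancellableGuard(F&& f) : m_f(std::forward<F>(f)) {}
        ~CancellableGuard() {
            if (m_active) {
                m_f(); // Only run the cleanup if the guard was never dismissed.
            }
        }
        void Cancel() {
            m_active = false; // Success path: keep the resource, skip the cleanup.
        }

    private:
        F m_f;
        bool m_active = true;
    };

Read this way, auto pg_guard = SCOPE_GUARD { pg.OpenFirst(); pg.Close(); }; arms a cleanup that releases the freshly allocated pages, and cancelling the guard on the success path would leave them mapped.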
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 1bcc42890..cb9a11a63 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -77,7 +77,9 @@ Result TerminateChildren(KernelCore& kernel, KProcess* process,
         }
 
         // Terminate and close the thread.
-        SCOPE_EXIT({ cur_child->Close(); });
+        SCOPE_EXIT {
+            cur_child->Close();
+        };
 
         if (const Result terminate_result = cur_child->Terminate();
             ResultTerminationRequested == terminate_result) {
@@ -466,11 +468,11 @@ void KProcess::DoWorkerTaskImpl() {
 
 Result KProcess::StartTermination() {
     // Finalize the handle table when we're done, if the process isn't immortal.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (!m_is_immortal) {
             this->FinalizeHandleTable();
         }
-    });
+    };
 
     // Terminate child threads other than the current one.
     R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));
@@ -964,7 +966,9 @@ Result KProcess::Run(s32 priority, size_t stack_size) {
     // Create a new thread for the process.
     KThread* main_thread = KThread::Create(m_kernel);
     R_UNLESS(main_thread != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ main_thread->Close(); });
+    SCOPE_EXIT {
+        main_thread->Close();
+    };
 
     // Initialize the thread.
     R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,
@@ -1155,7 +1159,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
         Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
 
     // Ensure we maintain a clean state on exit.
-    SCOPE_EXIT({ res_limit->Close(); });
+    SCOPE_EXIT {
+        res_limit->Close();
+    };
 
     // Declare flags and code address.
     Svc::CreateProcessFlag flag{};
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index adaabdd6d..40c3323ef 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -651,11 +651,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any special data.
     if (src_header.GetHasSpecialHeader()) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         // Process special data.
         R_TRY(ProcessMessageSpecialData<false>(offset, dst_process, src_process, src_thread,
@@ -665,11 +665,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any pointer buffers.
     for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         R_TRY(ProcessReceiveMessagePointerDescriptors(
             offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list,
@@ -680,11 +680,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any map alias buffers.
     for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         // We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite.
         const KMemoryPermission perm = (i >= src_header.GetSendCount())
@@ -702,11 +702,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any raw data.
     if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset + raw_count > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         // Get the offset and size.
         const size_t offset_words = offset * sizeof(u32);
@@ -1124,7 +1124,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
         client_thread->Open();
     }
 
-    SCOPE_EXIT({ client_thread->Close(); });
+    SCOPE_EXIT {
+        client_thread->Close();
+    };
 
     // Set the request as our current.
     m_current_request = request;
@@ -1174,7 +1176,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
     // Reply to the client.
     {
         // After we reply, close our reference to the request.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // Get the event to check whether the request is async.
         if (KEvent* event = request->GetEvent(); event != nullptr) {
@@ -1236,7 +1240,9 @@ Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buff
     }
 
     // Close reference to the request once we're done processing it.
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Extract relevant information from the request.
     const uint64_t client_message = request->GetAddress();
@@ -1394,7 +1400,9 @@ void KServerSession::CleanupRequests() {
         }
 
         // Close a reference to the request once it's cleaned up.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // Extract relevant information from the request.
         const uint64_t client_message = request->GetAddress();
@@ -1491,7 +1499,9 @@ void KServerSession::OnClientClosed() {
         ASSERT(thread != nullptr);
 
         // Ensure that we close the request when done.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // If we're terminating, close a reference to the thread and event.
         if (terminate) {
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index a632d1634..1952c0083 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -21,7 +21,9 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
     // Allocate a new page.
     KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
     R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
-    auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });
+    auto page_buf_guard = SCOPE_GUARD {
+        KPageBuffer::Free(kernel, page_buf);
+    };
 
     // Map the address in.
     const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index cbb1b02bb..09295e8ad 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -24,7 +24,9 @@ Result KTransferMemory::Initialize(KProcessAddress addr, std::size_t size,
 
     // Construct the page group, guarding to make sure our state is valid on exit.
     m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());
-    auto pg_guard = SCOPE_GUARD({ m_page_group.reset(); });
+    auto pg_guard = SCOPE_GUARD {
+        m_page_group.reset();
+    };
 
     // Lock the memory.
     R_TRY(page_table.LockForTransferMemory(std::addressof(*m_page_group), addr, size,
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 4f4b02fac..9e5eaeec4 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -109,7 +109,9 @@ struct KernelCore::Impl {
 
     void Shutdown() {
         is_shutting_down.store(true, std::memory_order_relaxed);
-        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
+        SCOPE_EXIT {
+            is_shutting_down.store(false, std::memory_order_relaxed);
+        };
 
         CloseServices();
 
@@ -1080,7 +1082,9 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
         process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
-    SCOPE_EXIT({ process->Close(); });
+    SCOPE_EXIT {
+        process->Close();
+    };
 
     // Register the new process.
     KProcess::Register(*this, process);
@@ -1108,7 +1112,9 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
         process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
-    SCOPE_EXIT({ process->Close(); });
+    SCOPE_EXIT {
+        process->Close();
+    };
 
     // Register the new process.
     KProcess::Register(*this, process);
diff --git a/src/core/hle/kernel/svc/svc_code_memory.cpp b/src/core/hle/kernel/svc/svc_code_memory.cpp
index bae4cb0cd..7be2802f0 100644
--- a/src/core/hle/kernel/svc/svc_code_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_code_memory.cpp
@@ -45,7 +45,9 @@ Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t
 
     KCodeMemory* code_mem = KCodeMemory::Create(kernel);
     R_UNLESS(code_mem != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ code_mem->Close(); });
+    SCOPE_EXIT {
+        code_mem->Close();
+    };
 
     // Verify that the region is in range.
     R_UNLESS(GetCurrentProcess(system.Kernel()).GetPageTable().Contains(address, size),
diff --git a/src/core/hle/kernel/svc/svc_device_address_space.cpp b/src/core/hle/kernel/svc/svc_device_address_space.cpp
index 42add9473..ac828320f 100644
--- a/src/core/hle/kernel/svc/svc_device_address_space.cpp
+++ b/src/core/hle/kernel/svc/svc_device_address_space.cpp
@@ -28,7 +28,9 @@ Result CreateDeviceAddressSpace(Core::System& system, Handle* out, uint64_t das_
     // Create the device address space.
     KDeviceAddressSpace* das = KDeviceAddressSpace::Create(system.Kernel());
     R_UNLESS(das != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ das->Close(); });
+    SCOPE_EXIT {
+        das->Close();
+    };
 
     // Initialize the device address space.
     R_TRY(das->Initialize(das_address, das_size));
diff --git a/src/core/hle/kernel/svc/svc_event.cpp b/src/core/hle/kernel/svc/svc_event.cpp
index 901202e6a..8e4beb396 100644
--- a/src/core/hle/kernel/svc/svc_event.cpp
+++ b/src/core/hle/kernel/svc/svc_event.cpp
@@ -72,10 +72,10 @@ Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
     event_reservation.Commit();
 
     // Ensure that we clean up the event (and its only references are handle table) on function end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         event->GetReadableEvent().Close();
         event->Close();
-    });
+    };
 
     // Register the event.
     KEvent::Register(kernel, event);
diff --git a/src/core/hle/kernel/svc/svc_ipc.cpp b/src/core/hle/kernel/svc/svc_ipc.cpp
index 85cc4f561..b619bd70a 100644
--- a/src/core/hle/kernel/svc/svc_ipc.cpp
+++ b/src/core/hle/kernel/svc/svc_ipc.cpp
@@ -129,11 +129,11 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
     }
 
     // Ensure handles are closed when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         for (auto i = 0; i < num_handles; ++i) {
             objs[i]->Close();
         }
-    });
+    };
 
     R_RETURN(ReplyAndReceiveImpl(kernel, out_index, message, buffer_size, message_paddr, objs,
                                  num_handles, reply_target, timeout_ns));
@@ -208,10 +208,10 @@ Result SendAsyncRequestWithUserBuffer(Core::System& system, Handle* out_event_ha
     event_reservation.Commit();
 
     // At end of scope, kill the standing references to the sub events.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         event->GetReadableEvent().Close();
         event->Close();
-    });
+    };
 
     // Register the event.
     KEvent::Register(system.Kernel(), event);
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
index 737749f7d..9a22dadaf 100644
--- a/src/core/hle/kernel/svc/svc_port.cpp
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -68,10 +68,10 @@ Result CreatePort(Core::System& system, Handle* out_server, Handle* out_client,
     port->Initialize(max_sessions, is_light, name);
 
     // Ensure that we clean up the port (and its only references are handle table) on function end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         port->GetServerPort().Close();
         port->GetClientPort().Close();
-    });
+    };
 
     // Register the port.
     KPort::Register(kernel, port);
@@ -150,10 +150,10 @@ Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t
     KPort::Register(system.Kernel(), port);
 
     // Ensure that our only reference to the port is in the handle table when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         port->GetClientPort().Close();
         port->GetServerPort().Close();
-    });
+    };
 
     // Register the handle in the table.
     R_TRY(handle_table.Add(out_server_handle, std::addressof(port->GetServerPort())));
diff --git a/src/core/hle/kernel/svc/svc_resource_limit.cpp b/src/core/hle/kernel/svc/svc_resource_limit.cpp
index c8e820b6a..6f3972482 100644
--- a/src/core/hle/kernel/svc/svc_resource_limit.cpp
+++ b/src/core/hle/kernel/svc/svc_resource_limit.cpp
@@ -18,7 +18,9 @@ Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
     R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
 
     // Ensure we don't leak a reference to the limit.
-    SCOPE_EXIT({ resource_limit->Close(); });
+    SCOPE_EXIT {
+        resource_limit->Close();
+    };
 
     // Initialize the resource limit.
     resource_limit->Initialize();
diff --git a/src/core/hle/kernel/svc/svc_session.cpp b/src/core/hle/kernel/svc/svc_session.cpp
index 2f5905f32..b034d21d1 100644
--- a/src/core/hle/kernel/svc/svc_session.cpp
+++ b/src/core/hle/kernel/svc/svc_session.cpp
@@ -69,10 +69,10 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
 
     // Ensure that we clean up the session (and its only references are handle table) on function
     // end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         session->GetClientSession().Close();
         session->GetServerSession().Close();
-    });
+    };
 
     // Register the session.
     T::Register(system.Kernel(), session);
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 6c79cfd8d..fb03908d7 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -78,11 +78,11 @@ Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_ha
     }
 
     // Ensure handles are closed when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         for (auto i = 0; i < num_handles; ++i) {
             objs[i]->Close();
         }
-    });
+    };
 
     // Convert the timeout from nanoseconds to ticks.
     s64 timeout;
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 7681afa33..7517bb9d3 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -51,7 +51,9 @@ Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u
     // Create the thread.
     KThread* thread = KThread::Create(kernel);
     R_UNLESS(thread != nullptr, ResultOutOfResource)
-    SCOPE_EXIT({ thread->Close(); });
+    SCOPE_EXIT {
+        thread->Close();
+    };
 
     // Initialize the thread.
     {
diff --git a/src/core/hle/kernel/svc/svc_transfer_memory.cpp b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
index 671bca23f..2ea0d4421 100644
--- a/src/core/hle/kernel/svc/svc_transfer_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_transfer_memory.cpp
@@ -52,7 +52,9 @@ Result CreateTransferMemory(Core::System& system, Handle* out, u64 address, u64
     R_UNLESS(trmem != nullptr, ResultOutOfResource);
 
     // Ensure the only reference is in the handle table when we're done.
-    SCOPE_EXIT({ trmem->Close(); });
+    SCOPE_EXIT {
+        trmem->Close();
+    };
 
     // Ensure that the region is in range.
     R_UNLESS(process.GetPageTable().Contains(address, size), ResultInvalidCurrentMemory);