author     Liam                2022-08-19 21:58:25 -0400
committer  Fernando Sahmkow    2022-10-06 21:00:54 +0200
commit     c80ed6d81fef5858508ac4b841defe8ee3a8663d (patch)
tree       3f2a193176de0b7e6dff6cefc47172aaf3d6c34e
parent     nvdisp: End system frame after requesting to swap buffers (diff)
general: rework usages of UNREACHABLE macro
-rw-r--r--  src/common/address_space.inc                            31
-rw-r--r--  src/core/hle/service/nvdrv/core/syncpoint_manager.cpp   14
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp    10
3 files changed, 28 insertions(+), 27 deletions(-)
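
The pattern applied throughout the diff below: call sites that used UNREACHABLE/UNREACHABLE_MSG to declare a state impossible now use ASSERT/ASSERT_MSG(false, ...) followed by a graceful error return, so a broken invariant is logged and recovered from instead of terminating execution. A minimal sketch of the difference, using hypothetical simplified stand-ins for yuzu's real macros (the actual definitions live in src/common/assert.h and route through yuzu's fmt-based logging, not fprintf):

#include <cstdio>
#include <cstdlib>

// Hypothetical, simplified stand-ins for yuzu's macros (not the real definitions).
// UNREACHABLE_MSG declares the path impossible and aborts, so any recovery code
// placed after it is dead; ASSERT_MSG(false, ...) only reports the broken
// invariant and lets execution fall through into the recovery path.
#define UNREACHABLE_MSG(...)                                                     \
    do {                                                                         \
        std::fprintf(stderr, __VA_ARGS__);                                       \
        std::abort();                                                            \
    } while (0)

#define ASSERT_MSG(cond, ...)                                                    \
    do {                                                                         \
        if (!(cond)) {                                                           \
            std::fprintf(stderr, __VA_ARGS__);                                   \
        }                                                                        \
    } while (0)

// Before: the fallback return below the macro could never execute.
unsigned ReserveBefore(bool in_use) {
    if (in_use) {
        UNREACHABLE_MSG("Requested syncpoint is in use\n");
        return 0; // dead code: abort() already ran
    }
    return 42;
}

// After: the same guard logs the failure, then the fallback actually runs.
unsigned ReserveAfter(bool in_use) {
    if (in_use) {
        ASSERT_MSG(false, "Requested syncpoint is in use\n");
        return 0; // reached: the caller receives a benign error value
    }
    return 42;
}

int main() {
    return static_cast<int>(ReserveAfter(true)); // logs the message, returns 0
}
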
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
index 9f957c81d..2195dabd5 100644
--- a/src/common/address_space.inc
+++ b/src/common/address_space.inc
@@ -34,7 +34,7 @@ MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_,
                                         std::function<void(VaType, VaType)> unmap_callback_)
     : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} {
     if (va_limit > VaMaximum) {
-        UNREACHABLE_MSG("Invalid VA limit!");
+        ASSERT_MSG(false, "Invalid VA limit!");
     }
 }

@@ -42,14 +42,14 @@ MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInf
     VaType virt_end{virt + size};

     if (virt_end > va_limit) {
-        UNREACHABLE_MSG(
-            "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end,
-            va_limit);
+        ASSERT_MSG(false,
+                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+                   virt_end, va_limit);
     }

     auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
     if (block_end_successor == blocks.begin()) {
-        UNREACHABLE_MSG("Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
+        ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
     }

     auto block_end_predecessor{std::prev(block_end_successor)};
@@ -124,7 +124,7 @@ MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInf

     // Check that the start successor is either the end block or something in between
     if (block_start_successor->virt > virt_end) {
-        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
     } else if (block_start_successor->virt == virt_end) {
         // We need to create a new block as there are none spare that we would overwrite
         blocks.insert(block_start_successor, Block(virt, phys, extra_info));
@@ -149,14 +149,15 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
     VaType virt_end{virt + size};

     if (virt_end > va_limit) {
-        UNREACHABLE_MSG(
-            "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}", virt_end,
-            va_limit);
+        ASSERT_MSG(false,
+                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+                   virt_end, va_limit);
     }

     auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
     if (block_end_successor == blocks.begin()) {
-        UNREACHABLE_MSG("Trying to unmap a block before the VA start: virt_end: 0x{:X}", virt_end);
+        ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}",
+                   virt_end);
     }

     auto block_end_predecessor{std::prev(block_end_successor)};
@@ -190,7 +191,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
     if (eraseEnd != blocks.end() &&
         (eraseEnd == block_start_successor ||
          (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) {
-        UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!");
+        ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!");
     }

     blocks.erase(block_start_successor, eraseEnd);
@@ -217,7 +218,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
         return; // The region is unmapped here and doesn't need splitting, bail out early
     } else if (block_end_successor == blocks.end()) {
         // This should never happen as the end should always follow an unmapped block
-        UNREACHABLE_MSG("Unexpected Memory Manager state!");
+        ASSERT_MSG(false, "Unexpected Memory Manager state!");
     } else if (block_end_successor->virt != virt_end) {
         // If one block is directly in front then we don't have to add a tail

@@ -256,7 +257,7 @@ MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
     auto block_start_successor{std::next(block_start_predecessor)};

     if (block_start_successor->virt > virt_end) {
-        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
     } else if (block_start_successor->virt == virt_end) {
         // There are no blocks between the start and the end that would let us skip inserting a new
         // one for head
@@ -298,7 +299,7 @@ ALLOC_MEMBER(VaType)::Allocate(VaType size) {
     auto alloc_end_successor{
         std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
     if (alloc_end_successor == this->blocks.begin()) {
-        UNREACHABLE_MSG("First block in AS map is invalid!");
+        ASSERT_MSG(false, "First block in AS map is invalid!");
     }

     auto alloc_end_predecessor{std::prev(alloc_end_successor)};
@@ -332,7 +333,7 @@ ALLOC_MEMBER(VaType)::Allocate(VaType size) {
         current_linear_alloc_end = alloc_start + size;
     } else { // If linear allocation overflows the AS then find a gap
         if (this->blocks.size() <= 2) {
-            UNREACHABLE_MSG("Unexpected allocator state!");
+            ASSERT_MSG(false, "Unexpected allocator state!");
         }

         auto search_predecessor{this->blocks.begin()};
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
index 0bb2aec97..072b3a22f 100644
--- a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -29,7 +29,7 @@ SyncpointManager::~SyncpointManager() = default;

 u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) {
     if (syncpoints.at(id).reserved) {
-        UNREACHABLE_MSG("Requested syncpoint is in use");
+        ASSERT_MSG(false, "Requested syncpoint is in use");
         return 0;
     }

@@ -45,7 +45,7 @@ u32 SyncpointManager::FindFreeSyncpoint() {
             return i;
         }
     }
-    UNREACHABLE_MSG("Failed to find a free syncpoint!");
+    ASSERT_MSG(false, "Failed to find a free syncpoint!");
     return 0;
 }

@@ -68,7 +68,7 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
     const SyncpointInfo& syncpoint{syncpoints.at(id)};

     if (!syncpoint.reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -83,7 +83,7 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {

 u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -92,7 +92,7 @@ u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {

 u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -101,7 +101,7 @@ u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {

 u32 SyncpointManager::UpdateMin(u32 id) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return 0;
     }

@@ -111,7 +111,7 @@ u32 SyncpointManager::UpdateMin(u32 id) {

 NvFence SyncpointManager::GetSyncpointFence(u32 id) {
     if (!syncpoints.at(id).reserved) {
-        UNREACHABLE();
+        ASSERT(false);
         return NvFence{};
     }

diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 7a95f5305..192503ffc 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -96,7 +96,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
     std::scoped_lock lock(mutex);

     if (vm.initialised) {
-        UNREACHABLE_MSG("Cannot initialise an address space twice!");
+        ASSERT_MSG(false, "Cannot initialise an address space twice!");
         return NvResult::InvalidState;
     }

@@ -174,7 +174,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
     } else {
         params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits;
         if (!params.offset) {
-            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
+            ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
             return NvResult::InsufficientMemory;
         }
     }
@@ -372,7 +372,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
         else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
             return false;
         else {
-            UNREACHABLE();
+            ASSERT(false);
             return false;
         }
     }()};
@@ -382,7 +382,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8

         if (alloc-- == allocation_map.begin() ||
             (params.offset - alloc->first) + size > alloc->second.size) {
-            UNREACHABLE_MSG("Cannot perform a fixed mapping into an unallocated region!");
+            ASSERT_MSG(false, "Cannot perform a fixed mapping into an unallocated region!");
             return NvResult::BadValue;
         }

@@ -403,7 +403,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
                             static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits)))
                         << page_size_bits;
         if (!params.offset) {
-            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
+            ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
             return NvResult::InsufficientMemory;
         }
