 m---------  externals/dynarmic                            |  0
 -rw-r--r--  src/video_core/memory_manager.cpp             | 96
 -rw-r--r--  src/video_core/memory_manager.h               |  5
 -rw-r--r--  src/video_core/texture_cache/texture_cache.h  |  4
 -rw-r--r--  src/yuzu/configuration/configure_per_game.cpp | 24
 5 files changed, 53 insertions, 76 deletions

diff --git a/externals/dynarmic b/externals/dynarmic
-Subproject 28714ee75aa079cbb706e38bdabc8ee1f6c6951
+Subproject 1635958d0613da376046532e0db5aed6316fbc1

diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index dce00e829..4ff3fa268 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -73,12 +73,12 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     }
     const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
 
-    for (const auto& map : submapped_ranges) {
+    for (const auto& [map_addr, map_size] : submapped_ranges) {
         // Flush and invalidate through the GPU interface, to be asynchronous if possible.
-        const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map.first);
+        const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
         ASSERT(cpu_addr);
 
-        rasterizer->UnmapMemory(*cpu_addr, map.second);
+        rasterizer->UnmapMemory(*cpu_addr, map_size);
     }
 
     UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
@@ -265,7 +265,8 @@ size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept {
     return it->second - (gpu_addr - it->first);
 }
 
-void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
+void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
+                                  bool is_safe) const {
     std::size_t remaining_size{size};
     std::size_t page_index{gpu_src_addr >> page_bits};
     std::size_t page_offset{gpu_src_addr & page_mask};
@@ -273,14 +274,18 @@ void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::siz
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
+        if (page_addr && *page_addr != 0) {
             const auto src_addr{*page_addr + page_offset};
-
-            // Flush must happen on the rasterizer interface, such that memory is always synchronous
-            // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
-            rasterizer->FlushRegion(src_addr, copy_amount);
+            if (is_safe) {
+                // Flush must happen on the rasterizer interface, such that memory is always
+                // synchronous when it is read (even when in asynchronous GPU mode).
+                // Fixes Dead Cells title menu.
+                rasterizer->FlushRegion(src_addr, copy_amount);
+            }
             system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
+        } else {
+            std::memset(dest_buffer, 0, copy_amount);
         }
 
         page_index++;
@@ -290,31 +295,17 @@ void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::siz
     }
 }
 
+void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
+    ReadBlockImpl(gpu_src_addr, dest_buffer, size, true);
+}
+
 void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
                                     const std::size_t size) const {
-    std::size_t remaining_size{size};
-    std::size_t page_index{gpu_src_addr >> page_bits};
-    std::size_t page_offset{gpu_src_addr & page_mask};
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount{
-            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
-            const auto src_addr{*page_addr + page_offset};
-            system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
-        } else {
-            std::memset(dest_buffer, 0, copy_amount);
-        }
-
-        page_index++;
-        page_offset = 0;
-        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
+    ReadBlockImpl(gpu_src_addr, dest_buffer, size, false);
 }
 
-void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
+void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
+                                   bool is_safe) {
     std::size_t remaining_size{size};
     std::size_t page_index{gpu_dest_addr >> page_bits};
     std::size_t page_offset{gpu_dest_addr & page_mask};
@@ -322,13 +313,15 @@ void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, s
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
+        if (page_addr && *page_addr != 0) {
             const auto dest_addr{*page_addr + page_offset};
 
-            // Invalidate must happen on the rasterizer interface, such that memory is always
-            // synchronous when it is written (even when in asynchronous GPU mode).
-            rasterizer->InvalidateRegion(dest_addr, copy_amount);
+            if (is_safe) {
+                // Invalidate must happen on the rasterizer interface, such that memory is always
+                // synchronous when it is written (even when in asynchronous GPU mode).
+                rasterizer->InvalidateRegion(dest_addr, copy_amount);
+            }
             system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
         }
 
@@ -339,26 +332,13 @@ void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, s
     }
 }
 
+void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
+    WriteBlockImpl(gpu_dest_addr, src_buffer, size, true);
+}
+
 void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
                                      std::size_t size) {
-    std::size_t remaining_size{size};
-    std::size_t page_index{gpu_dest_addr >> page_bits};
-    std::size_t page_offset{gpu_dest_addr & page_mask};
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount{
-            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
-            const auto dest_addr{*page_addr + page_offset};
-            system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
-        }
-
-        page_index++;
-        page_offset = 0;
-        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
+    WriteBlockImpl(gpu_dest_addr, src_buffer, size, false);
 }
 
 void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const {
@@ -435,15 +415,15 @@ std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     size_t page_offset{gpu_addr & page_mask};
     std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
     std::optional<VAddr> old_page_addr{};
-    const auto extend_size = [this, &last_segment, &page_index](std::size_t bytes) {
+    const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) {
         if (!last_segment) {
-            GPUVAddr new_base_addr = page_index << page_bits;
+            const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset;
             last_segment = {new_base_addr, bytes};
         } else {
             last_segment->second += bytes;
         }
     };
-    const auto split = [this, &last_segment, &result] {
+    const auto split = [&last_segment, &result] {
         if (last_segment) {
             result.push_back(*last_segment);
             last_segment = std::nullopt;
@@ -452,7 +432,7 @@ std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     while (remaining_size > 0) {
         const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
         const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (!page_addr) {
+        if (!page_addr || *page_addr == 0) {
             split();
         } else if (old_page_addr) {
             if (*old_page_addr + page_size != *page_addr) {
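
The refactor above collapses the duplicated safe/unsafe copy loops into a single ReadBlockImpl/WriteBlockImpl gated by an is_safe flag, and treats pages whose CPU address resolves to null as unmapped (reads zero-fill the destination, writes are skipped). A minimal sketch of that wrapper pattern, assuming a flat byte buffer in place of yuzu's GPU page table and rasterizer; the Copier class and everything inside it are hypothetical stand-ins:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Sketch of the "shared impl + is_safe flag" shape used by the diff, with a
// flat buffer instead of a page table. Only the ReadBlock/ReadBlockUnsafe
// naming mirrors the real code.
class Copier {
public:
    explicit Copier(std::vector<std::uint8_t> data) : memory(std::move(data)) {}

    // Safe variant: would flush GPU caches before reading (stubbed here).
    void ReadBlock(std::size_t src, void* dest, std::size_t size) const {
        ReadBlockImpl(src, dest, size, true);
    }

    // Unsafe variant: skips the flush, otherwise identical.
    void ReadBlockUnsafe(std::size_t src, void* dest, std::size_t size) const {
        ReadBlockImpl(src, dest, size, false);
    }

private:
    void ReadBlockImpl(std::size_t src, void* dest, std::size_t size, bool is_safe) const {
        if (is_safe) {
            // Real code: rasterizer->FlushRegion(...) so the copy is coherent.
        }
        if (src + size <= memory.size()) {
            std::memcpy(dest, memory.data() + src, size);
        } else {
            std::memset(dest, 0, size); // unmapped region reads back as zeros
        }
    }

    std::vector<std::uint8_t> memory;
};

Both public entry points stay source-compatible; only the duplicated loop body disappears.
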
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 99d13e7f6..38d8d9d74 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -155,6 +155,11 @@ private:
 
     void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
 
+    void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
+                       bool is_safe) const;
+    void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
+                        bool is_safe);
+
     [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
         return (gpu_addr >> page_bits) & page_table_mask;
     }

diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index b494152b8..198bb0cfb 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1376,9 +1376,7 @@ void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) {
     using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type;
     static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>;
    const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
-    for (auto& segment : segments) {
-        const auto gpu_addr = segment.first;
-        const auto size = segment.second;
+    for (const auto& [gpu_addr, size] : segments) {
         std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         ASSERT(cpu_addr);
         if constexpr (RETURNS_BOOL) {
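
Both the Unmap hunk above and this ForEachSparseSegment loop now unpack the GetSubmappedRange results with structured bindings instead of .first/.second. A small, self-contained sketch of a consumer, assuming only the pair layout declared in memory_manager.h; the stand-in GetSubmappedRange below and its return values are hypothetical:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

using GPUVAddr = std::uint64_t; // simplified alias for the sketch

// Hypothetical stand-in: contiguous mapped segments of [base, base + size)
// reported as (GPU address, byte count) pairs.
std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr base, std::size_t size) {
    return {{base, size / 2}, {base + size / 2 + 0x1000, size / 4}};
}

int main() {
    // Structured bindings name both halves of each pair at the loop header,
    // replacing the segment.first / segment.second accesses the diff removes.
    for (const auto& [gpu_addr, seg_size] : GetSubmappedRange(0x100000, 0x8000)) {
        std::printf("segment at 0x%llx, %zu bytes\n",
                    static_cast<unsigned long long>(gpu_addr), seg_size);
    }
    return 0;
}
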
diff --git a/src/yuzu/configuration/configure_per_game.cpp b/src/yuzu/configuration/configure_per_game.cpp
index 12699c126..f4cf25f05 100644
--- a/src/yuzu/configuration/configure_per_game.cpp
+++ b/src/yuzu/configuration/configure_per_game.cpp
@@ -45,20 +45,21 @@
 ConfigurePerGame::ConfigurePerGame(QWidget* parent, u64 title_id, const std::string& file_name,
                                    Core::System& system_)
     : QDialog(parent), ui(std::make_unique<Ui::ConfigurePerGame>()),
-      title_id(title_id), system{system_}, addons_tab{std::make_unique<ConfigurePerGameAddons>(
-                                               system_, this)},
-      audio_tab{std::make_unique<ConfigureAudio>(system_, this)},
-      cpu_tab{std::make_unique<ConfigureCpu>(system_, this)},
-      general_tab{std::make_unique<ConfigureGeneral>(system_, this)},
-      graphics_tab{std::make_unique<ConfigureGraphics>(system_, this)},
-      graphics_advanced_tab{std::make_unique<ConfigureGraphicsAdvanced>(system_, this)},
-      system_tab{std::make_unique<ConfigureSystem>(system_, this)} {
+      title_id(title_id), system{system_} {
     const auto file_path = std::filesystem::path(Common::FS::ToU8String(file_name));
     const auto config_file_name = title_id == 0 ? Common::FS::PathToUTF8String(file_path.filename())
                                                 : fmt::format("{:016X}", title_id);
     game_config =
         std::make_unique<Config>(system, config_file_name, Config::ConfigType::PerGameConfig);
 
+    addons_tab = std::make_unique<ConfigurePerGameAddons>(system_, this);
+    audio_tab = std::make_unique<ConfigureAudio>(system_, this);
+    cpu_tab = std::make_unique<ConfigureCpu>(system_, this);
+    general_tab = std::make_unique<ConfigureGeneral>(system_, this);
+    graphics_tab = std::make_unique<ConfigureGraphics>(system_, this);
+    graphics_advanced_tab = std::make_unique<ConfigureGraphicsAdvanced>(system_, this);
+    system_tab = std::make_unique<ConfigureSystem>(system_, this);
+
     ui->setupUi(this);
 
     ui->tabWidget->addTab(addons_tab.get(), tr("Add-Ons"));
@@ -187,11 +188,4 @@ void ConfigurePerGame::LoadConfiguration() {
 
     const auto valueText = ReadableByteSize(file->GetSize());
     ui->display_size->setText(valueText);
-
-    general_tab->SetConfiguration();
-    cpu_tab->SetConfiguration();
-    system_tab->SetConfiguration();
-    graphics_tab->SetConfiguration();
-    graphics_advanced_tab->SetConfiguration();
-    audio_tab->SetConfiguration();
 }
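
The constructor change moves tab construction out of the member-initializer list into the body, after game_config is created, so the per-game configuration is presumably already applied by the time each tab widget reads the current settings; that in turn makes the explicit SetConfiguration() calls removed from LoadConfiguration() unnecessary. A stripped-down, Qt-free sketch of that ordering concern; Config, Tab, Dialog, and Settings::resolution here are hypothetical stand-ins, not yuzu types:

#include <memory>
#include <string>

// Hypothetical global standing in for a yuzu setting.
namespace Settings {
inline std::string resolution = "default";
}

// Stand-in for Config: applies per-game overrides when constructed.
struct Config {
    explicit Config(std::string value) { Settings::resolution = std::move(value); }
};

// Stand-in for a configuration tab: snapshots the setting at construction.
struct Tab {
    std::string shown = Settings::resolution;
};

struct Dialog {
    Dialog() {
        // Construct the per-game config first so the override is in effect...
        game_config = std::make_unique<Config>("per-game 1080p");
        // ...then the tab, which now picks up the per-game value directly,
        // with no follow-up SetConfiguration() pass needed.
        graphics_tab = std::make_unique<Tab>();
    }
    std::unique_ptr<Config> game_config;
    std::unique_ptr<Tab> graphics_tab;
};

int main() {
    Dialog d;
    return d.graphics_tab->shown == "per-game 1080p" ? 0 : 1;
}

Had the tab been built in the member-initializer list, it would have run before game_config existed and shown the default value instead.
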