diff options
Diffstat (limited to 'src')
28 files changed, 244 insertions, 179 deletions
diff --git a/src/core/core.cpp b/src/core/core.cpp index 4d0ac72a5..ddc767e30 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp | |||
| @@ -256,6 +256,8 @@ struct System::Impl { | |||
| 256 | is_powered_on = false; | 256 | is_powered_on = false; |
| 257 | exit_lock = false; | 257 | exit_lock = false; |
| 258 | 258 | ||
| 259 | gpu_core->WaitIdle(); | ||
| 260 | |||
| 259 | // Shutdown emulation session | 261 | // Shutdown emulation session |
| 260 | renderer.reset(); | 262 | renderer.reset(); |
| 261 | GDBStub::Shutdown(); | 263 | GDBStub::Shutdown(); |
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index 941ebc93a..3a32d5b41 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp | |||
| @@ -1140,8 +1140,9 @@ void IApplicationFunctions::PopLaunchParameter(Kernel::HLERequestContext& ctx) { | |||
| 1140 | LOG_DEBUG(Service_AM, "called, kind={:08X}", static_cast<u8>(kind)); | 1140 | LOG_DEBUG(Service_AM, "called, kind={:08X}", static_cast<u8>(kind)); |
| 1141 | 1141 | ||
| 1142 | if (kind == LaunchParameterKind::ApplicationSpecific && !launch_popped_application_specific) { | 1142 | if (kind == LaunchParameterKind::ApplicationSpecific && !launch_popped_application_specific) { |
| 1143 | const auto backend = BCAT::CreateBackendFromSettings( | 1143 | const auto backend = BCAT::CreateBackendFromSettings(system, [this](u64 tid) { |
| 1144 | [this](u64 tid) { return system.GetFileSystemController().GetBCATDirectory(tid); }); | 1144 | return system.GetFileSystemController().GetBCATDirectory(tid); |
| 1145 | }); | ||
| 1145 | const auto build_id_full = system.GetCurrentProcessBuildID(); | 1146 | const auto build_id_full = system.GetCurrentProcessBuildID(); |
| 1146 | u64 build_id{}; | 1147 | u64 build_id{}; |
| 1147 | std::memcpy(&build_id, build_id_full.data(), sizeof(u64)); | 1148 | std::memcpy(&build_id, build_id_full.data(), sizeof(u64)); |
diff --git a/src/core/hle/service/bcat/backend/backend.cpp b/src/core/hle/service/bcat/backend/backend.cpp index 9d6946bc5..b86fda29a 100644 --- a/src/core/hle/service/bcat/backend/backend.cpp +++ b/src/core/hle/service/bcat/backend/backend.cpp | |||
| @@ -10,8 +10,8 @@ | |||
| 10 | 10 | ||
| 11 | namespace Service::BCAT { | 11 | namespace Service::BCAT { |
| 12 | 12 | ||
| 13 | ProgressServiceBackend::ProgressServiceBackend(std::string_view event_name) { | 13 | ProgressServiceBackend::ProgressServiceBackend(Kernel::KernelCore& kernel, |
| 14 | auto& kernel{Core::System::GetInstance().Kernel()}; | 14 | std::string_view event_name) { |
| 15 | event = Kernel::WritableEvent::CreateEventPair( | 15 | event = Kernel::WritableEvent::CreateEventPair( |
| 16 | kernel, Kernel::ResetType::Automatic, | 16 | kernel, Kernel::ResetType::Automatic, |
| 17 | std::string("ProgressServiceBackend:UpdateEvent:").append(event_name)); | 17 | std::string("ProgressServiceBackend:UpdateEvent:").append(event_name)); |
diff --git a/src/core/hle/service/bcat/backend/backend.h b/src/core/hle/service/bcat/backend/backend.h index 51dbd3316..ea4b16ad0 100644 --- a/src/core/hle/service/bcat/backend/backend.h +++ b/src/core/hle/service/bcat/backend/backend.h | |||
| @@ -15,6 +15,14 @@ | |||
| 15 | #include "core/hle/kernel/writable_event.h" | 15 | #include "core/hle/kernel/writable_event.h" |
| 16 | #include "core/hle/result.h" | 16 | #include "core/hle/result.h" |
| 17 | 17 | ||
| 18 | namespace Core { | ||
| 19 | class System; | ||
| 20 | } | ||
| 21 | |||
| 22 | namespace Kernel { | ||
| 23 | class KernelCore; | ||
| 24 | } | ||
| 25 | |||
| 18 | namespace Service::BCAT { | 26 | namespace Service::BCAT { |
| 19 | 27 | ||
| 20 | struct DeliveryCacheProgressImpl; | 28 | struct DeliveryCacheProgressImpl; |
| @@ -88,7 +96,7 @@ public: | |||
| 88 | void FinishDownload(ResultCode result); | 96 | void FinishDownload(ResultCode result); |
| 89 | 97 | ||
| 90 | private: | 98 | private: |
| 91 | explicit ProgressServiceBackend(std::string_view event_name); | 99 | explicit ProgressServiceBackend(Kernel::KernelCore& kernel, std::string_view event_name); |
| 92 | 100 | ||
| 93 | Kernel::SharedPtr<Kernel::ReadableEvent> GetEvent() const; | 101 | Kernel::SharedPtr<Kernel::ReadableEvent> GetEvent() const; |
| 94 | DeliveryCacheProgressImpl& GetImpl(); | 102 | DeliveryCacheProgressImpl& GetImpl(); |
| @@ -145,6 +153,6 @@ public: | |||
| 145 | std::optional<std::vector<u8>> GetLaunchParameter(TitleIDVersion title) override; | 153 | std::optional<std::vector<u8>> GetLaunchParameter(TitleIDVersion title) override; |
| 146 | }; | 154 | }; |
| 147 | 155 | ||
| 148 | std::unique_ptr<Backend> CreateBackendFromSettings(DirectoryGetter getter); | 156 | std::unique_ptr<Backend> CreateBackendFromSettings(Core::System& system, DirectoryGetter getter); |
| 149 | 157 | ||
| 150 | } // namespace Service::BCAT | 158 | } // namespace Service::BCAT |
diff --git a/src/core/hle/service/bcat/backend/boxcat.cpp b/src/core/hle/service/bcat/backend/boxcat.cpp index 64022982b..918159e11 100644 --- a/src/core/hle/service/bcat/backend/boxcat.cpp +++ b/src/core/hle/service/bcat/backend/boxcat.cpp | |||
| @@ -104,14 +104,15 @@ std::string GetZIPFilePath(u64 title_id) { | |||
| 104 | 104 | ||
| 105 | // If the error is something the user should know about (build ID mismatch, bad client version), | 105 | // If the error is something the user should know about (build ID mismatch, bad client version), |
| 106 | // display an error. | 106 | // display an error. |
| 107 | void HandleDownloadDisplayResult(DownloadResult res) { | 107 | void HandleDownloadDisplayResult(const AM::Applets::AppletManager& applet_manager, |
| 108 | DownloadResult res) { | ||
| 108 | if (res == DownloadResult::Success || res == DownloadResult::NoResponse || | 109 | if (res == DownloadResult::Success || res == DownloadResult::NoResponse || |
| 109 | res == DownloadResult::GeneralWebError || res == DownloadResult::GeneralFSError || | 110 | res == DownloadResult::GeneralWebError || res == DownloadResult::GeneralFSError || |
| 110 | res == DownloadResult::NoMatchTitleId || res == DownloadResult::InvalidContentType) { | 111 | res == DownloadResult::NoMatchTitleId || res == DownloadResult::InvalidContentType) { |
| 111 | return; | 112 | return; |
| 112 | } | 113 | } |
| 113 | 114 | ||
| 114 | const auto& frontend{Core::System::GetInstance().GetAppletManager().GetAppletFrontendSet()}; | 115 | const auto& frontend{applet_manager.GetAppletFrontendSet()}; |
| 115 | frontend.error->ShowCustomErrorText( | 116 | frontend.error->ShowCustomErrorText( |
| 116 | ResultCode(-1), "There was an error while attempting to use Boxcat.", | 117 | ResultCode(-1), "There was an error while attempting to use Boxcat.", |
| 117 | DOWNLOAD_RESULT_LOG_MESSAGES[static_cast<std::size_t>(res)], [] {}); | 118 | DOWNLOAD_RESULT_LOG_MESSAGES[static_cast<std::size_t>(res)], [] {}); |
| @@ -264,12 +265,13 @@ private: | |||
| 264 | u64 build_id; | 265 | u64 build_id; |
| 265 | }; | 266 | }; |
| 266 | 267 | ||
| 267 | Boxcat::Boxcat(DirectoryGetter getter) : Backend(std::move(getter)) {} | 268 | Boxcat::Boxcat(AM::Applets::AppletManager& applet_manager_, DirectoryGetter getter) |
| 269 | : Backend(std::move(getter)), applet_manager{applet_manager_} {} | ||
| 268 | 270 | ||
| 269 | Boxcat::~Boxcat() = default; | 271 | Boxcat::~Boxcat() = default; |
| 270 | 272 | ||
| 271 | void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title, | 273 | void SynchronizeInternal(AM::Applets::AppletManager& applet_manager, DirectoryGetter dir_getter, |
| 272 | ProgressServiceBackend& progress, | 274 | TitleIDVersion title, ProgressServiceBackend& progress, |
| 273 | std::optional<std::string> dir_name = {}) { | 275 | std::optional<std::string> dir_name = {}) { |
| 274 | progress.SetNeedHLELock(true); | 276 | progress.SetNeedHLELock(true); |
| 275 | 277 | ||
| @@ -295,7 +297,7 @@ void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title, | |||
| 295 | FileUtil::Delete(zip_path); | 297 | FileUtil::Delete(zip_path); |
| 296 | } | 298 | } |
| 297 | 299 | ||
| 298 | HandleDownloadDisplayResult(res); | 300 | HandleDownloadDisplayResult(applet_manager, res); |
| 299 | progress.FinishDownload(ERROR_GENERAL_BCAT_FAILURE); | 301 | progress.FinishDownload(ERROR_GENERAL_BCAT_FAILURE); |
| 300 | return; | 302 | return; |
| 301 | } | 303 | } |
| @@ -364,17 +366,24 @@ void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title, | |||
| 364 | 366 | ||
| 365 | bool Boxcat::Synchronize(TitleIDVersion title, ProgressServiceBackend& progress) { | 367 | bool Boxcat::Synchronize(TitleIDVersion title, ProgressServiceBackend& progress) { |
| 366 | is_syncing.exchange(true); | 368 | is_syncing.exchange(true); |
| 367 | std::thread([this, title, &progress] { SynchronizeInternal(dir_getter, title, progress); }) | 369 | |
| 370 | std::thread([this, title, &progress] { | ||
| 371 | SynchronizeInternal(applet_manager, dir_getter, title, progress); | ||
| 372 | }) | ||
| 368 | .detach(); | 373 | .detach(); |
| 374 | |||
| 369 | return true; | 375 | return true; |
| 370 | } | 376 | } |
| 371 | 377 | ||
| 372 | bool Boxcat::SynchronizeDirectory(TitleIDVersion title, std::string name, | 378 | bool Boxcat::SynchronizeDirectory(TitleIDVersion title, std::string name, |
| 373 | ProgressServiceBackend& progress) { | 379 | ProgressServiceBackend& progress) { |
| 374 | is_syncing.exchange(true); | 380 | is_syncing.exchange(true); |
| 375 | std::thread( | 381 | |
| 376 | [this, title, name, &progress] { SynchronizeInternal(dir_getter, title, progress, name); }) | 382 | std::thread([this, title, name, &progress] { |
| 383 | SynchronizeInternal(applet_manager, dir_getter, title, progress, name); | ||
| 384 | }) | ||
| 377 | .detach(); | 385 | .detach(); |
| 386 | |||
| 378 | return true; | 387 | return true; |
| 379 | } | 388 | } |
| 380 | 389 | ||
| @@ -420,7 +429,7 @@ std::optional<std::vector<u8>> Boxcat::GetLaunchParameter(TitleIDVersion title) | |||
| 420 | FileUtil::Delete(path); | 429 | FileUtil::Delete(path); |
| 421 | } | 430 | } |
| 422 | 431 | ||
| 423 | HandleDownloadDisplayResult(res); | 432 | HandleDownloadDisplayResult(applet_manager, res); |
| 424 | return std::nullopt; | 433 | return std::nullopt; |
| 425 | } | 434 | } |
| 426 | } | 435 | } |
diff --git a/src/core/hle/service/bcat/backend/boxcat.h b/src/core/hle/service/bcat/backend/boxcat.h index 601151189..d65b42e58 100644 --- a/src/core/hle/service/bcat/backend/boxcat.h +++ b/src/core/hle/service/bcat/backend/boxcat.h | |||
| @@ -9,6 +9,10 @@ | |||
| 9 | #include <optional> | 9 | #include <optional> |
| 10 | #include "core/hle/service/bcat/backend/backend.h" | 10 | #include "core/hle/service/bcat/backend/backend.h" |
| 11 | 11 | ||
| 12 | namespace Service::AM::Applets { | ||
| 13 | class AppletManager; | ||
| 14 | } | ||
| 15 | |||
| 12 | namespace Service::BCAT { | 16 | namespace Service::BCAT { |
| 13 | 17 | ||
| 14 | struct EventStatus { | 18 | struct EventStatus { |
| @@ -20,12 +24,13 @@ struct EventStatus { | |||
| 20 | /// Boxcat is yuzu's custom backend implementation of Nintendo's BCAT service. It is free to use and | 24 | /// Boxcat is yuzu's custom backend implementation of Nintendo's BCAT service. It is free to use and |
| 21 | /// doesn't require a switch or nintendo account. The content is controlled by the yuzu team. | 25 | /// doesn't require a switch or nintendo account. The content is controlled by the yuzu team. |
| 22 | class Boxcat final : public Backend { | 26 | class Boxcat final : public Backend { |
| 23 | friend void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title, | 27 | friend void SynchronizeInternal(AM::Applets::AppletManager& applet_manager, |
| 28 | DirectoryGetter dir_getter, TitleIDVersion title, | ||
| 24 | ProgressServiceBackend& progress, | 29 | ProgressServiceBackend& progress, |
| 25 | std::optional<std::string> dir_name); | 30 | std::optional<std::string> dir_name); |
| 26 | 31 | ||
| 27 | public: | 32 | public: |
| 28 | explicit Boxcat(DirectoryGetter getter); | 33 | explicit Boxcat(AM::Applets::AppletManager& applet_manager_, DirectoryGetter getter); |
| 29 | ~Boxcat() override; | 34 | ~Boxcat() override; |
| 30 | 35 | ||
| 31 | bool Synchronize(TitleIDVersion title, ProgressServiceBackend& progress) override; | 36 | bool Synchronize(TitleIDVersion title, ProgressServiceBackend& progress) override; |
| @@ -53,6 +58,7 @@ private: | |||
| 53 | 58 | ||
| 54 | class Client; | 59 | class Client; |
| 55 | std::unique_ptr<Client> client; | 60 | std::unique_ptr<Client> client; |
| 61 | AM::Applets::AppletManager& applet_manager; | ||
| 56 | }; | 62 | }; |
| 57 | 63 | ||
| 58 | } // namespace Service::BCAT | 64 | } // namespace Service::BCAT |
diff --git a/src/core/hle/service/bcat/module.cpp b/src/core/hle/service/bcat/module.cpp index 4e4aa758b..6d9d1527d 100644 --- a/src/core/hle/service/bcat/module.cpp +++ b/src/core/hle/service/bcat/module.cpp | |||
| @@ -125,7 +125,11 @@ private: | |||
| 125 | class IBcatService final : public ServiceFramework<IBcatService> { | 125 | class IBcatService final : public ServiceFramework<IBcatService> { |
| 126 | public: | 126 | public: |
| 127 | explicit IBcatService(Core::System& system_, Backend& backend_) | 127 | explicit IBcatService(Core::System& system_, Backend& backend_) |
| 128 | : ServiceFramework("IBcatService"), system{system_}, backend{backend_} { | 128 | : ServiceFramework("IBcatService"), system{system_}, backend{backend_}, |
| 129 | progress{{ | ||
| 130 | ProgressServiceBackend{system_.Kernel(), "Normal"}, | ||
| 131 | ProgressServiceBackend{system_.Kernel(), "Directory"}, | ||
| 132 | }} { | ||
| 129 | // clang-format off | 133 | // clang-format off |
| 130 | static const FunctionInfo functions[] = { | 134 | static const FunctionInfo functions[] = { |
| 131 | {10100, &IBcatService::RequestSyncDeliveryCache, "RequestSyncDeliveryCache"}, | 135 | {10100, &IBcatService::RequestSyncDeliveryCache, "RequestSyncDeliveryCache"}, |
| @@ -249,10 +253,7 @@ private: | |||
| 249 | Core::System& system; | 253 | Core::System& system; |
| 250 | Backend& backend; | 254 | Backend& backend; |
| 251 | 255 | ||
| 252 | std::array<ProgressServiceBackend, static_cast<std::size_t>(SyncType::Count)> progress{ | 256 | std::array<ProgressServiceBackend, static_cast<std::size_t>(SyncType::Count)> progress; |
| 253 | ProgressServiceBackend{"Normal"}, | ||
| 254 | ProgressServiceBackend{"Directory"}, | ||
| 255 | }; | ||
| 256 | }; | 257 | }; |
| 257 | 258 | ||
| 258 | void Module::Interface::CreateBcatService(Kernel::HLERequestContext& ctx) { | 259 | void Module::Interface::CreateBcatService(Kernel::HLERequestContext& ctx) { |
| @@ -557,12 +558,12 @@ void Module::Interface::CreateDeliveryCacheStorageServiceWithApplicationId( | |||
| 557 | rb.PushIpcInterface<IDeliveryCacheStorageService>(fsc.GetBCATDirectory(title_id)); | 558 | rb.PushIpcInterface<IDeliveryCacheStorageService>(fsc.GetBCATDirectory(title_id)); |
| 558 | } | 559 | } |
| 559 | 560 | ||
| 560 | std::unique_ptr<Backend> CreateBackendFromSettings(DirectoryGetter getter) { | 561 | std::unique_ptr<Backend> CreateBackendFromSettings([[maybe_unused]] Core::System& system, |
| 561 | const auto backend = Settings::values.bcat_backend; | 562 | DirectoryGetter getter) { |
| 562 | |||
| 563 | #ifdef YUZU_ENABLE_BOXCAT | 563 | #ifdef YUZU_ENABLE_BOXCAT |
| 564 | if (backend == "boxcat") | 564 | if (Settings::values.bcat_backend == "boxcat") { |
| 565 | return std::make_unique<Boxcat>(std::move(getter)); | 565 | return std::make_unique<Boxcat>(system.GetAppletManager(), std::move(getter)); |
| 566 | } | ||
| 566 | #endif | 567 | #endif |
| 567 | 568 | ||
| 568 | return std::make_unique<NullBackend>(std::move(getter)); | 569 | return std::make_unique<NullBackend>(std::move(getter)); |
| @@ -571,7 +572,8 @@ std::unique_ptr<Backend> CreateBackendFromSettings(DirectoryGetter getter) { | |||
| 571 | Module::Interface::Interface(Core::System& system_, std::shared_ptr<Module> module_, | 572 | Module::Interface::Interface(Core::System& system_, std::shared_ptr<Module> module_, |
| 572 | FileSystem::FileSystemController& fsc_, const char* name) | 573 | FileSystem::FileSystemController& fsc_, const char* name) |
| 573 | : ServiceFramework(name), fsc{fsc_}, module{std::move(module_)}, | 574 | : ServiceFramework(name), fsc{fsc_}, module{std::move(module_)}, |
| 574 | backend{CreateBackendFromSettings([&fsc_](u64 tid) { return fsc_.GetBCATDirectory(tid); })}, | 575 | backend{CreateBackendFromSettings(system_, |
| 576 | [&fsc_](u64 tid) { return fsc_.GetBCATDirectory(tid); })}, | ||
| 575 | system{system_} {} | 577 | system{system_} {} |
| 576 | 578 | ||
| 577 | Module::Interface::~Interface() = default; | 579 | Module::Interface::~Interface() = default; |
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index f764388bc..3f7b8e670 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/core_timing.h" | ||
| 8 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" | 9 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" |
| 9 | #include "core/hle/service/nvdrv/devices/nvmap.h" | 10 | #include "core/hle/service/nvdrv/devices/nvmap.h" |
| 10 | #include "core/perf_stats.h" | 11 | #include "core/perf_stats.h" |
| @@ -38,7 +39,10 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3 | |||
| 38 | transform, crop_rect}; | 39 | transform, crop_rect}; |
| 39 | 40 | ||
| 40 | system.GetPerfStats().EndGameFrame(); | 41 | system.GetPerfStats().EndGameFrame(); |
| 42 | system.GetPerfStats().EndSystemFrame(); | ||
| 41 | system.GPU().SwapBuffers(&framebuffer); | 43 | system.GPU().SwapBuffers(&framebuffer); |
| 44 | system.FrameLimiter().DoFrameLimiting(system.CoreTiming().GetGlobalTimeUs()); | ||
| 45 | system.GetPerfStats().BeginSystemFrame(); | ||
| 42 | } | 46 | } |
| 43 | 47 | ||
| 44 | } // namespace Service::Nvidia::Devices | 48 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index eb88fee1b..b27ee0502 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | |||
| @@ -63,16 +63,26 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& | |||
| 63 | return NvResult::BadParameter; | 63 | return NvResult::BadParameter; |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | u32 event_id = params.value & 0x00FF; | ||
| 67 | |||
| 68 | if (event_id >= MaxNvEvents) { | ||
| 69 | std::memcpy(output.data(), ¶ms, sizeof(params)); | ||
| 70 | return NvResult::BadParameter; | ||
| 71 | } | ||
| 72 | |||
| 73 | auto event = events_interface.events[event_id]; | ||
| 66 | auto& gpu = system.GPU(); | 74 | auto& gpu = system.GPU(); |
| 67 | // This is mostly to take into account unimplemented features. As synced | 75 | // This is mostly to take into account unimplemented features. As synced |
| 68 | // gpu is always synced. | 76 | // gpu is always synced. |
| 69 | if (!gpu.IsAsync()) { | 77 | if (!gpu.IsAsync()) { |
| 78 | event.writable->Signal(); | ||
| 70 | return NvResult::Success; | 79 | return NvResult::Success; |
| 71 | } | 80 | } |
| 72 | auto lock = gpu.LockSync(); | 81 | auto lock = gpu.LockSync(); |
| 73 | const u32 current_syncpoint_value = gpu.GetSyncpointValue(params.syncpt_id); | 82 | const u32 current_syncpoint_value = gpu.GetSyncpointValue(params.syncpt_id); |
| 74 | const s32 diff = current_syncpoint_value - params.threshold; | 83 | const s32 diff = current_syncpoint_value - params.threshold; |
| 75 | if (diff >= 0) { | 84 | if (diff >= 0) { |
| 85 | event.writable->Signal(); | ||
| 76 | params.value = current_syncpoint_value; | 86 | params.value = current_syncpoint_value; |
| 77 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 87 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 78 | return NvResult::Success; | 88 | return NvResult::Success; |
| @@ -88,27 +98,6 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& | |||
| 88 | return NvResult::Timeout; | 98 | return NvResult::Timeout; |
| 89 | } | 99 | } |
| 90 | 100 | ||
| 91 | u32 event_id; | ||
| 92 | if (is_async) { | ||
| 93 | event_id = params.value & 0x00FF; | ||
| 94 | if (event_id >= MaxNvEvents) { | ||
| 95 | std::memcpy(output.data(), ¶ms, sizeof(params)); | ||
| 96 | return NvResult::BadParameter; | ||
| 97 | } | ||
| 98 | } else { | ||
| 99 | if (ctrl.fresh_call) { | ||
| 100 | const auto result = events_interface.GetFreeEvent(); | ||
| 101 | if (result) { | ||
| 102 | event_id = *result; | ||
| 103 | } else { | ||
| 104 | LOG_CRITICAL(Service_NVDRV, "No Free Events available!"); | ||
| 105 | event_id = params.value & 0x00FF; | ||
| 106 | } | ||
| 107 | } else { | ||
| 108 | event_id = ctrl.event_id; | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 112 | EventState status = events_interface.status[event_id]; | 101 | EventState status = events_interface.status[event_id]; |
| 113 | if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) { | 102 | if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) { |
| 114 | events_interface.SetEventStatus(event_id, EventState::Waiting); | 103 | events_interface.SetEventStatus(event_id, EventState::Waiting); |
| @@ -120,7 +109,7 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& | |||
| 120 | params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000; | 109 | params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000; |
| 121 | } | 110 | } |
| 122 | params.value |= event_id; | 111 | params.value |= event_id; |
| 123 | events_interface.events[event_id].writable->Clear(); | 112 | event.writable->Clear(); |
| 124 | gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); | 113 | gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); |
| 125 | if (!is_async && ctrl.fresh_call) { | 114 | if (!is_async && ctrl.fresh_call) { |
| 126 | ctrl.must_delay = true; | 115 | ctrl.must_delay = true; |
diff --git a/src/core/hle/service/nvdrv/interface.cpp b/src/core/hle/service/nvdrv/interface.cpp index 5e0c23602..68d139cfb 100644 --- a/src/core/hle/service/nvdrv/interface.cpp +++ b/src/core/hle/service/nvdrv/interface.cpp | |||
| @@ -134,7 +134,9 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { | |||
| 134 | IPC::ResponseBuilder rb{ctx, 3, 1}; | 134 | IPC::ResponseBuilder rb{ctx, 3, 1}; |
| 135 | rb.Push(RESULT_SUCCESS); | 135 | rb.Push(RESULT_SUCCESS); |
| 136 | if (event_id < MaxNvEvents) { | 136 | if (event_id < MaxNvEvents) { |
| 137 | rb.PushCopyObjects(nvdrv->GetEvent(event_id)); | 137 | auto event = nvdrv->GetEvent(event_id); |
| 138 | event->Clear(); | ||
| 139 | rb.PushCopyObjects(event); | ||
| 138 | rb.Push<u32>(NvResult::Success); | 140 | rb.Push<u32>(NvResult::Success); |
| 139 | } else { | 141 | } else { |
| 140 | rb.Push<u32>(0); | 142 | rb.Push<u32>(0); |
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 307a7e928..7bfb99e34 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp | |||
| @@ -40,8 +40,8 @@ Module::Module(Core::System& system) { | |||
| 40 | auto& kernel = system.Kernel(); | 40 | auto& kernel = system.Kernel(); |
| 41 | for (u32 i = 0; i < MaxNvEvents; i++) { | 41 | for (u32 i = 0; i < MaxNvEvents; i++) { |
| 42 | std::string event_label = fmt::format("NVDRV::NvEvent_{}", i); | 42 | std::string event_label = fmt::format("NVDRV::NvEvent_{}", i); |
| 43 | events_interface.events[i] = Kernel::WritableEvent::CreateEventPair( | 43 | events_interface.events[i] = |
| 44 | kernel, Kernel::ResetType::Automatic, event_label); | 44 | Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual, event_label); |
| 45 | events_interface.status[i] = EventState::Free; | 45 | events_interface.status[i] = EventState::Free; |
| 46 | events_interface.registered[i] = false; | 46 | events_interface.registered[i] = false; |
| 47 | } | 47 | } |
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp index e1a07d3ee..55b68eb0c 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue.cpp | |||
| @@ -14,8 +14,8 @@ | |||
| 14 | 14 | ||
| 15 | namespace Service::NVFlinger { | 15 | namespace Service::NVFlinger { |
| 16 | 16 | ||
| 17 | BufferQueue::BufferQueue(u32 id, u64 layer_id) : id(id), layer_id(layer_id) { | 17 | BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id, u64 layer_id) |
| 18 | auto& kernel = Core::System::GetInstance().Kernel(); | 18 | : id(id), layer_id(layer_id) { |
| 19 | buffer_wait_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual, | 19 | buffer_wait_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual, |
| 20 | "BufferQueue NativeHandle"); | 20 | "BufferQueue NativeHandle"); |
| 21 | } | 21 | } |
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h index 356bedb81..8f9b18547 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.h +++ b/src/core/hle/service/nvflinger/buffer_queue.h | |||
| @@ -15,6 +15,10 @@ | |||
| 15 | #include "core/hle/kernel/writable_event.h" | 15 | #include "core/hle/kernel/writable_event.h" |
| 16 | #include "core/hle/service/nvdrv/nvdata.h" | 16 | #include "core/hle/service/nvdrv/nvdata.h" |
| 17 | 17 | ||
| 18 | namespace Kernel { | ||
| 19 | class KernelCore; | ||
| 20 | } | ||
| 21 | |||
| 18 | namespace Service::NVFlinger { | 22 | namespace Service::NVFlinger { |
| 19 | 23 | ||
| 20 | struct IGBPBuffer { | 24 | struct IGBPBuffer { |
| @@ -44,7 +48,7 @@ public: | |||
| 44 | NativeWindowFormat = 2, | 48 | NativeWindowFormat = 2, |
| 45 | }; | 49 | }; |
| 46 | 50 | ||
| 47 | BufferQueue(u32 id, u64 layer_id); | 51 | explicit BufferQueue(Kernel::KernelCore& kernel, u32 id, u64 layer_id); |
| 48 | ~BufferQueue(); | 52 | ~BufferQueue(); |
| 49 | 53 | ||
| 50 | enum class BufferTransformFlags : u32 { | 54 | enum class BufferTransformFlags : u32 { |
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 2e4d707b9..cc9522aad 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp | |||
| @@ -83,7 +83,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { | |||
| 83 | 83 | ||
| 84 | const u64 layer_id = next_layer_id++; | 84 | const u64 layer_id = next_layer_id++; |
| 85 | const u32 buffer_queue_id = next_buffer_queue_id++; | 85 | const u32 buffer_queue_id = next_buffer_queue_id++; |
| 86 | buffer_queues.emplace_back(buffer_queue_id, layer_id); | 86 | buffer_queues.emplace_back(system.Kernel(), buffer_queue_id, layer_id); |
| 87 | display->CreateLayer(layer_id, buffer_queues.back()); | 87 | display->CreateLayer(layer_id, buffer_queues.back()); |
| 88 | return layer_id; | 88 | return layer_id; |
| 89 | } | 89 | } |
| @@ -187,14 +187,18 @@ void NVFlinger::Compose() { | |||
| 187 | MicroProfileFlip(); | 187 | MicroProfileFlip(); |
| 188 | 188 | ||
| 189 | if (!buffer) { | 189 | if (!buffer) { |
| 190 | // There was no queued buffer to draw, render previous frame | ||
| 191 | system.GetPerfStats().EndGameFrame(); | ||
| 192 | system.GPU().SwapBuffers({}); | ||
| 193 | continue; | 190 | continue; |
| 194 | } | 191 | } |
| 195 | 192 | ||
| 196 | const auto& igbp_buffer = buffer->get().igbp_buffer; | 193 | const auto& igbp_buffer = buffer->get().igbp_buffer; |
| 197 | 194 | ||
| 195 | const auto& gpu = system.GPU(); | ||
| 196 | const auto& multi_fence = buffer->get().multi_fence; | ||
| 197 | for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) { | ||
| 198 | const auto& fence = multi_fence.fences[fence_id]; | ||
| 199 | gpu.WaitFence(fence.id, fence.value); | ||
| 200 | } | ||
| 201 | |||
| 198 | // Now send the buffer to the GPU for drawing. | 202 | // Now send the buffer to the GPU for drawing. |
| 199 | // TODO(Subv): Support more than just disp0. The display device selection is probably based | 203 | // TODO(Subv): Support more than just disp0. The display device selection is probably based |
| 200 | // on which display we're drawing (Default, Internal, External, etc) | 204 | // on which display we're drawing (Default, Internal, External, etc) |
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 76cfe8107..095660115 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | #include "common/microprofile.h" | ||
| 6 | #include "core/core.h" | 7 | #include "core/core.h" |
| 7 | #include "core/core_timing.h" | 8 | #include "core/core_timing.h" |
| 8 | #include "core/memory.h" | 9 | #include "core/memory.h" |
| @@ -17,6 +18,8 @@ | |||
| 17 | 18 | ||
| 18 | namespace Tegra { | 19 | namespace Tegra { |
| 19 | 20 | ||
| 21 | MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); | ||
| 22 | |||
| 20 | GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async) | 23 | GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async) |
| 21 | : system{system}, renderer{renderer}, is_async{is_async} { | 24 | : system{system}, renderer{renderer}, is_async{is_async} { |
| 22 | auto& rasterizer{renderer.Rasterizer()}; | 25 | auto& rasterizer{renderer.Rasterizer()}; |
| @@ -63,6 +66,16 @@ const DmaPusher& GPU::DmaPusher() const { | |||
| 63 | return *dma_pusher; | 66 | return *dma_pusher; |
| 64 | } | 67 | } |
| 65 | 68 | ||
| 69 | void GPU::WaitFence(u32 syncpoint_id, u32 value) const { | ||
| 70 | // Synced GPU, is always in sync | ||
| 71 | if (!is_async) { | ||
| 72 | return; | ||
| 73 | } | ||
| 74 | MICROPROFILE_SCOPE(GPU_wait); | ||
| 75 | while (syncpoints[syncpoint_id].load(std::memory_order_relaxed) < value) { | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 66 | void GPU::IncrementSyncPoint(const u32 syncpoint_id) { | 79 | void GPU::IncrementSyncPoint(const u32 syncpoint_id) { |
| 67 | syncpoints[syncpoint_id]++; | 80 | syncpoints[syncpoint_id]++; |
| 68 | std::lock_guard lock{sync_mutex}; | 81 | std::lock_guard lock{sync_mutex}; |
| @@ -326,7 +339,7 @@ void GPU::ProcessSemaphoreTriggerMethod() { | |||
| 326 | block.sequence = regs.semaphore_sequence; | 339 | block.sequence = regs.semaphore_sequence; |
| 327 | // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of | 340 | // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of |
| 328 | // CoreTiming | 341 | // CoreTiming |
| 329 | block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); | 342 | block.timestamp = system.CoreTiming().GetTicks(); |
| 330 | memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, | 343 | memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block, |
| 331 | sizeof(block)); | 344 | sizeof(block)); |
| 332 | } else { | 345 | } else { |
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 29fa8e95b..dbca19f35 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h | |||
| @@ -177,6 +177,12 @@ public: | |||
| 177 | /// Returns a reference to the GPU DMA pusher. | 177 | /// Returns a reference to the GPU DMA pusher. |
| 178 | Tegra::DmaPusher& DmaPusher(); | 178 | Tegra::DmaPusher& DmaPusher(); |
| 179 | 179 | ||
| 180 | // Waits for the GPU to finish working | ||
| 181 | virtual void WaitIdle() const = 0; | ||
| 182 | |||
| 183 | /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame. | ||
| 184 | void WaitFence(u32 syncpoint_id, u32 value) const; | ||
| 185 | |||
| 180 | void IncrementSyncPoint(u32 syncpoint_id); | 186 | void IncrementSyncPoint(u32 syncpoint_id); |
| 181 | 187 | ||
| 182 | u32 GetSyncpointValue(u32 syncpoint_id) const; | 188 | u32 GetSyncpointValue(u32 syncpoint_id) const; |
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp index f2a3a390e..04222d060 100644 --- a/src/video_core/gpu_asynch.cpp +++ b/src/video_core/gpu_asynch.cpp | |||
| @@ -44,4 +44,8 @@ void GPUAsynch::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) con | |||
| 44 | interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); | 44 | interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | void GPUAsynch::WaitIdle() const { | ||
| 48 | gpu_thread.WaitIdle(); | ||
| 49 | } | ||
| 50 | |||
| 47 | } // namespace VideoCommon | 51 | } // namespace VideoCommon |
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h index a12f9bac4..1241ade1d 100644 --- a/src/video_core/gpu_asynch.h +++ b/src/video_core/gpu_asynch.h | |||
| @@ -25,6 +25,7 @@ public: | |||
| 25 | void FlushRegion(CacheAddr addr, u64 size) override; | 25 | void FlushRegion(CacheAddr addr, u64 size) override; |
| 26 | void InvalidateRegion(CacheAddr addr, u64 size) override; | 26 | void InvalidateRegion(CacheAddr addr, u64 size) override; |
| 27 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; | 27 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; |
| 28 | void WaitIdle() const override; | ||
| 28 | 29 | ||
| 29 | protected: | 30 | protected: |
| 30 | void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override; | 31 | void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override; |
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h index 5eb1c461c..c71baee89 100644 --- a/src/video_core/gpu_synch.h +++ b/src/video_core/gpu_synch.h | |||
| @@ -24,6 +24,7 @@ public: | |||
| 24 | void FlushRegion(CacheAddr addr, u64 size) override; | 24 | void FlushRegion(CacheAddr addr, u64 size) override; |
| 25 | void InvalidateRegion(CacheAddr addr, u64 size) override; | 25 | void InvalidateRegion(CacheAddr addr, u64 size) override; |
| 26 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; | 26 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; |
| 27 | void WaitIdle() const override {} | ||
| 27 | 28 | ||
| 28 | protected: | 29 | protected: |
| 29 | void TriggerCpuInterrupt([[maybe_unused]] u32 syncpoint_id, | 30 | void TriggerCpuInterrupt([[maybe_unused]] u32 syncpoint_id, |
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 5f039e4fd..758a37f14 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp | |||
| @@ -5,8 +5,6 @@ | |||
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | #include "common/microprofile.h" | 6 | #include "common/microprofile.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/core_timing.h" | ||
| 9 | #include "core/core_timing_util.h" | ||
| 10 | #include "core/frontend/scope_acquire_window_context.h" | 8 | #include "core/frontend/scope_acquire_window_context.h" |
| 11 | #include "video_core/dma_pusher.h" | 9 | #include "video_core/dma_pusher.h" |
| 12 | #include "video_core/gpu.h" | 10 | #include "video_core/gpu.h" |
| @@ -68,14 +66,10 @@ ThreadManager::~ThreadManager() { | |||
| 68 | 66 | ||
| 69 | void ThreadManager::StartThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) { | 67 | void ThreadManager::StartThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) { |
| 70 | thread = std::thread{RunThread, std::ref(renderer), std::ref(dma_pusher), std::ref(state)}; | 68 | thread = std::thread{RunThread, std::ref(renderer), std::ref(dma_pusher), std::ref(state)}; |
| 71 | synchronization_event = system.CoreTiming().RegisterEvent( | ||
| 72 | "GPUThreadSynch", [this](u64 fence, s64) { state.WaitForSynchronization(fence); }); | ||
| 73 | } | 69 | } |
| 74 | 70 | ||
| 75 | void ThreadManager::SubmitList(Tegra::CommandList&& entries) { | 71 | void ThreadManager::SubmitList(Tegra::CommandList&& entries) { |
| 76 | const u64 fence{PushCommand(SubmitListCommand(std::move(entries)))}; | 72 | PushCommand(SubmitListCommand(std::move(entries))); |
| 77 | const s64 synchronization_ticks{Core::Timing::usToCycles(std::chrono::microseconds{9000})}; | ||
| 78 | system.CoreTiming().ScheduleEvent(synchronization_ticks, synchronization_event, fence); | ||
| 79 | } | 73 | } |
| 80 | 74 | ||
| 81 | void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | 75 | void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { |
| @@ -96,16 +90,15 @@ void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { | |||
| 96 | InvalidateRegion(addr, size); | 90 | InvalidateRegion(addr, size); |
| 97 | } | 91 | } |
| 98 | 92 | ||
| 93 | void ThreadManager::WaitIdle() const { | ||
| 94 | while (state.last_fence > state.signaled_fence.load(std::memory_order_relaxed)) { | ||
| 95 | } | ||
| 96 | } | ||
| 97 | |||
| 99 | u64 ThreadManager::PushCommand(CommandData&& command_data) { | 98 | u64 ThreadManager::PushCommand(CommandData&& command_data) { |
| 100 | const u64 fence{++state.last_fence}; | 99 | const u64 fence{++state.last_fence}; |
| 101 | state.queue.Push(CommandDataContainer(std::move(command_data), fence)); | 100 | state.queue.Push(CommandDataContainer(std::move(command_data), fence)); |
| 102 | return fence; | 101 | return fence; |
| 103 | } | 102 | } |
| 104 | 103 | ||
| 105 | MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); | ||
| 106 | void SynchState::WaitForSynchronization(u64 fence) { | ||
| 107 | while (signaled_fence.load() < fence) | ||
| 108 | ; | ||
| 109 | } | ||
| 110 | |||
| 111 | } // namespace VideoCommon::GPUThread | 104 | } // namespace VideoCommon::GPUThread |
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 3ae0ec9f3..08dc96bb3 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h | |||
| @@ -21,9 +21,6 @@ class DmaPusher; | |||
| 21 | 21 | ||
| 22 | namespace Core { | 22 | namespace Core { |
| 23 | class System; | 23 | class System; |
| 24 | namespace Timing { | ||
| 25 | struct EventType; | ||
| 26 | } // namespace Timing | ||
| 27 | } // namespace Core | 24 | } // namespace Core |
| 28 | 25 | ||
| 29 | namespace VideoCommon::GPUThread { | 26 | namespace VideoCommon::GPUThread { |
| @@ -89,8 +86,6 @@ struct CommandDataContainer { | |||
| 89 | struct SynchState final { | 86 | struct SynchState final { |
| 90 | std::atomic_bool is_running{true}; | 87 | std::atomic_bool is_running{true}; |
| 91 | 88 | ||
| 92 | void WaitForSynchronization(u64 fence); | ||
| 93 | |||
| 94 | using CommandQueue = Common::SPSCQueue<CommandDataContainer>; | 89 | using CommandQueue = Common::SPSCQueue<CommandDataContainer>; |
| 95 | CommandQueue queue; | 90 | CommandQueue queue; |
| 96 | u64 last_fence{}; | 91 | u64 last_fence{}; |
| @@ -121,6 +116,9 @@ public: | |||
| 121 | /// Notify rasterizer that any caches of the specified region should be flushed and invalidated | 116 | /// Notify rasterizer that any caches of the specified region should be flushed and invalidated |
| 122 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size); | 117 | void FlushAndInvalidateRegion(CacheAddr addr, u64 size); |
| 123 | 118 | ||
| 119 | // Wait until the gpu thread is idle. | ||
| 120 | void WaitIdle() const; | ||
| 121 | |||
| 124 | private: | 122 | private: |
| 125 | /// Pushes a command to be executed by the GPU thread | 123 | /// Pushes a command to be executed by the GPU thread |
| 126 | u64 PushCommand(CommandData&& command_data); | 124 | u64 PushCommand(CommandData&& command_data); |
| @@ -128,7 +126,6 @@ private: | |||
| 128 | private: | 126 | private: |
| 129 | SynchState state; | 127 | SynchState state; |
| 130 | Core::System& system; | 128 | Core::System& system; |
| 131 | Core::Timing::EventType* synchronization_event{}; | ||
| 132 | std::thread thread; | 129 | std::thread thread; |
| 133 | std::thread::id thread_id; | 130 | std::thread::id thread_id; |
| 134 | }; | 131 | }; |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index a85f730a8..cbcf81414 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -348,6 +348,7 @@ static constexpr auto RangeFromInterval(Map& map, const Interval& interval) { | |||
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { | 350 | void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { |
| 351 | std::lock_guard lock{pages_mutex}; | ||
| 351 | const u64 page_start{addr >> Memory::PAGE_BITS}; | 352 | const u64 page_start{addr >> Memory::PAGE_BITS}; |
| 352 | const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS}; | 353 | const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS}; |
| 353 | 354 | ||
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 9c10ebda3..c24a02d71 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <cstddef> | 9 | #include <cstddef> |
| 10 | #include <map> | 10 | #include <map> |
| 11 | #include <memory> | 11 | #include <memory> |
| 12 | #include <mutex> | ||
| 12 | #include <optional> | 13 | #include <optional> |
| 13 | #include <tuple> | 14 | #include <tuple> |
| 14 | #include <utility> | 15 | #include <utility> |
| @@ -230,6 +231,8 @@ private: | |||
| 230 | 231 | ||
| 231 | using CachedPageMap = boost::icl::interval_map<u64, int>; | 232 | using CachedPageMap = boost::icl::interval_map<u64, int>; |
| 232 | CachedPageMap cached_pages; | 233 | CachedPageMap cached_pages; |
| 234 | |||
| 235 | std::mutex pages_mutex; | ||
| 233 | }; | 236 | }; |
| 234 | 237 | ||
| 235 | } // namespace OpenGL | 238 | } // namespace OpenGL |
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 6a610a3bc..a3524a6a9 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp | |||
| @@ -1148,7 +1148,7 @@ private: | |||
| 1148 | for (const auto& variant : extras) { | 1148 | for (const auto& variant : extras) { |
| 1149 | if (const auto argument = std::get_if<TextureArgument>(&variant)) { | 1149 | if (const auto argument = std::get_if<TextureArgument>(&variant)) { |
| 1150 | expr += GenerateTextureArgument(*argument); | 1150 | expr += GenerateTextureArgument(*argument); |
| 1151 | } else if (std::get_if<TextureAoffi>(&variant)) { | 1151 | } else if (std::holds_alternative<TextureAoffi>(variant)) { |
| 1152 | expr += GenerateTextureAoffi(meta->aoffi); | 1152 | expr += GenerateTextureAoffi(meta->aoffi); |
| 1153 | } else { | 1153 | } else { |
| 1154 | UNREACHABLE(); | 1154 | UNREACHABLE(); |
| @@ -1158,8 +1158,8 @@ private: | |||
| 1158 | return expr + ')'; | 1158 | return expr + ')'; |
| 1159 | } | 1159 | } |
| 1160 | 1160 | ||
| 1161 | std::string GenerateTextureArgument(TextureArgument argument) { | 1161 | std::string GenerateTextureArgument(const TextureArgument& argument) { |
| 1162 | const auto [type, operand] = argument; | 1162 | const auto& [type, operand] = argument; |
| 1163 | if (operand == nullptr) { | 1163 | if (operand == nullptr) { |
| 1164 | return {}; | 1164 | return {}; |
| 1165 | } | 1165 | } |
| @@ -1235,7 +1235,7 @@ private: | |||
| 1235 | 1235 | ||
| 1236 | std::string BuildImageValues(Operation operation) { | 1236 | std::string BuildImageValues(Operation operation) { |
| 1237 | constexpr std::array constructors{"uint", "uvec2", "uvec3", "uvec4"}; | 1237 | constexpr std::array constructors{"uint", "uvec2", "uvec3", "uvec4"}; |
| 1238 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | 1238 | const auto& meta{std::get<MetaImage>(operation.GetMeta())}; |
| 1239 | 1239 | ||
| 1240 | const std::size_t values_count{meta.values.size()}; | 1240 | const std::size_t values_count{meta.values.size()}; |
| 1241 | std::string expr = fmt::format("{}(", constructors.at(values_count - 1)); | 1241 | std::string expr = fmt::format("{}(", constructors.at(values_count - 1)); |
| @@ -1780,14 +1780,14 @@ private: | |||
| 1780 | return {"0", Type::Int}; | 1780 | return {"0", Type::Int}; |
| 1781 | } | 1781 | } |
| 1782 | 1782 | ||
| 1783 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | 1783 | const auto& meta{std::get<MetaImage>(operation.GetMeta())}; |
| 1784 | return {fmt::format("imageLoad({}, {}){}", GetImage(meta.image), | 1784 | return {fmt::format("imageLoad({}, {}){}", GetImage(meta.image), |
| 1785 | BuildIntegerCoordinates(operation), GetSwizzle(meta.element)), | 1785 | BuildIntegerCoordinates(operation), GetSwizzle(meta.element)), |
| 1786 | Type::Uint}; | 1786 | Type::Uint}; |
| 1787 | } | 1787 | } |
| 1788 | 1788 | ||
| 1789 | Expression ImageStore(Operation operation) { | 1789 | Expression ImageStore(Operation operation) { |
| 1790 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | 1790 | const auto& meta{std::get<MetaImage>(operation.GetMeta())}; |
| 1791 | code.AddLine("imageStore({}, {}, {});", GetImage(meta.image), | 1791 | code.AddLine("imageStore({}, {}, {});", GetImage(meta.image), |
| 1792 | BuildIntegerCoordinates(operation), BuildImageValues(operation)); | 1792 | BuildIntegerCoordinates(operation), BuildImageValues(operation)); |
| 1793 | return {}; | 1793 | return {}; |
| @@ -1795,7 +1795,7 @@ private: | |||
| 1795 | 1795 | ||
| 1796 | template <const std::string_view& opname> | 1796 | template <const std::string_view& opname> |
| 1797 | Expression AtomicImage(Operation operation) { | 1797 | Expression AtomicImage(Operation operation) { |
| 1798 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | 1798 | const auto& meta{std::get<MetaImage>(operation.GetMeta())}; |
| 1799 | ASSERT(meta.values.size() == 1); | 1799 | ASSERT(meta.values.size() == 1); |
| 1800 | 1800 | ||
| 1801 | return {fmt::format("imageAtomic{}({}, {}, {})", opname, GetImage(meta.image), | 1801 | return {fmt::format("imageAtomic{}({}, {}, {})", opname, GetImage(meta.image), |
| @@ -2246,7 +2246,7 @@ private: | |||
| 2246 | code.AddLine("#ifdef SAMPLER_{}_IS_BUFFER", sampler.GetIndex()); | 2246 | code.AddLine("#ifdef SAMPLER_{}_IS_BUFFER", sampler.GetIndex()); |
| 2247 | } | 2247 | } |
| 2248 | 2248 | ||
| 2249 | std::string GetDeclarationWithSuffix(u32 index, const std::string& name) const { | 2249 | std::string GetDeclarationWithSuffix(u32 index, std::string_view name) const { |
| 2250 | return fmt::format("{}_{}_{}", name, index, suffix); | 2250 | return fmt::format("{}_{}_{}", name, index, suffix); |
| 2251 | } | 2251 | } |
| 2252 | 2252 | ||
| @@ -2271,17 +2271,15 @@ private: | |||
| 2271 | ShaderWriter code; | 2271 | ShaderWriter code; |
| 2272 | }; | 2272 | }; |
| 2273 | 2273 | ||
| 2274 | static constexpr std::string_view flow_var = "flow_var_"; | ||
| 2275 | |||
| 2276 | std::string GetFlowVariable(u32 i) { | 2274 | std::string GetFlowVariable(u32 i) { |
| 2277 | return fmt::format("{}{}", flow_var, i); | 2275 | return fmt::format("flow_var_{}", i); |
| 2278 | } | 2276 | } |
| 2279 | 2277 | ||
| 2280 | class ExprDecompiler { | 2278 | class ExprDecompiler { |
| 2281 | public: | 2279 | public: |
| 2282 | explicit ExprDecompiler(GLSLDecompiler& decomp) : decomp{decomp} {} | 2280 | explicit ExprDecompiler(GLSLDecompiler& decomp) : decomp{decomp} {} |
| 2283 | 2281 | ||
| 2284 | void operator()(VideoCommon::Shader::ExprAnd& expr) { | 2282 | void operator()(const ExprAnd& expr) { |
| 2285 | inner += "( "; | 2283 | inner += "( "; |
| 2286 | std::visit(*this, *expr.operand1); | 2284 | std::visit(*this, *expr.operand1); |
| 2287 | inner += " && "; | 2285 | inner += " && "; |
| @@ -2289,7 +2287,7 @@ public: | |||
| 2289 | inner += ')'; | 2287 | inner += ')'; |
| 2290 | } | 2288 | } |
| 2291 | 2289 | ||
| 2292 | void operator()(VideoCommon::Shader::ExprOr& expr) { | 2290 | void operator()(const ExprOr& expr) { |
| 2293 | inner += "( "; | 2291 | inner += "( "; |
| 2294 | std::visit(*this, *expr.operand1); | 2292 | std::visit(*this, *expr.operand1); |
| 2295 | inner += " || "; | 2293 | inner += " || "; |
| @@ -2297,17 +2295,17 @@ public: | |||
| 2297 | inner += ')'; | 2295 | inner += ')'; |
| 2298 | } | 2296 | } |
| 2299 | 2297 | ||
| 2300 | void operator()(VideoCommon::Shader::ExprNot& expr) { | 2298 | void operator()(const ExprNot& expr) { |
| 2301 | inner += '!'; | 2299 | inner += '!'; |
| 2302 | std::visit(*this, *expr.operand1); | 2300 | std::visit(*this, *expr.operand1); |
| 2303 | } | 2301 | } |
| 2304 | 2302 | ||
| 2305 | void operator()(VideoCommon::Shader::ExprPredicate& expr) { | 2303 | void operator()(const ExprPredicate& expr) { |
| 2306 | const auto pred = static_cast<Tegra::Shader::Pred>(expr.predicate); | 2304 | const auto pred = static_cast<Tegra::Shader::Pred>(expr.predicate); |
| 2307 | inner += decomp.GetPredicate(pred); | 2305 | inner += decomp.GetPredicate(pred); |
| 2308 | } | 2306 | } |
| 2309 | 2307 | ||
| 2310 | void operator()(VideoCommon::Shader::ExprCondCode& expr) { | 2308 | void operator()(const ExprCondCode& expr) { |
| 2311 | const Node cc = decomp.ir.GetConditionCode(expr.cc); | 2309 | const Node cc = decomp.ir.GetConditionCode(expr.cc); |
| 2312 | std::string target; | 2310 | std::string target; |
| 2313 | 2311 | ||
| @@ -2329,15 +2327,15 @@ public: | |||
| 2329 | inner += target; | 2327 | inner += target; |
| 2330 | } | 2328 | } |
| 2331 | 2329 | ||
| 2332 | void operator()(VideoCommon::Shader::ExprVar& expr) { | 2330 | void operator()(const ExprVar& expr) { |
| 2333 | inner += GetFlowVariable(expr.var_index); | 2331 | inner += GetFlowVariable(expr.var_index); |
| 2334 | } | 2332 | } |
| 2335 | 2333 | ||
| 2336 | void operator()(VideoCommon::Shader::ExprBoolean& expr) { | 2334 | void operator()(const ExprBoolean& expr) { |
| 2337 | inner += expr.value ? "true" : "false"; | 2335 | inner += expr.value ? "true" : "false"; |
| 2338 | } | 2336 | } |
| 2339 | 2337 | ||
| 2340 | std::string& GetResult() { | 2338 | const std::string& GetResult() const { |
| 2341 | return inner; | 2339 | return inner; |
| 2342 | } | 2340 | } |
| 2343 | 2341 | ||
| @@ -2350,7 +2348,7 @@ class ASTDecompiler { | |||
| 2350 | public: | 2348 | public: |
| 2351 | explicit ASTDecompiler(GLSLDecompiler& decomp) : decomp{decomp} {} | 2349 | explicit ASTDecompiler(GLSLDecompiler& decomp) : decomp{decomp} {} |
| 2352 | 2350 | ||
| 2353 | void operator()(VideoCommon::Shader::ASTProgram& ast) { | 2351 | void operator()(const ASTProgram& ast) { |
| 2354 | ASTNode current = ast.nodes.GetFirst(); | 2352 | ASTNode current = ast.nodes.GetFirst(); |
| 2355 | while (current) { | 2353 | while (current) { |
| 2356 | Visit(current); | 2354 | Visit(current); |
| @@ -2358,7 +2356,7 @@ public: | |||
| 2358 | } | 2356 | } |
| 2359 | } | 2357 | } |
| 2360 | 2358 | ||
| 2361 | void operator()(VideoCommon::Shader::ASTIfThen& ast) { | 2359 | void operator()(const ASTIfThen& ast) { |
| 2362 | ExprDecompiler expr_parser{decomp}; | 2360 | ExprDecompiler expr_parser{decomp}; |
| 2363 | std::visit(expr_parser, *ast.condition); | 2361 | std::visit(expr_parser, *ast.condition); |
| 2364 | decomp.code.AddLine("if ({}) {{", expr_parser.GetResult()); | 2362 | decomp.code.AddLine("if ({}) {{", expr_parser.GetResult()); |
| @@ -2372,7 +2370,7 @@ public: | |||
| 2372 | decomp.code.AddLine("}}"); | 2370 | decomp.code.AddLine("}}"); |
| 2373 | } | 2371 | } |
| 2374 | 2372 | ||
| 2375 | void operator()(VideoCommon::Shader::ASTIfElse& ast) { | 2373 | void operator()(const ASTIfElse& ast) { |
| 2376 | decomp.code.AddLine("else {{"); | 2374 | decomp.code.AddLine("else {{"); |
| 2377 | decomp.code.scope++; | 2375 | decomp.code.scope++; |
| 2378 | ASTNode current = ast.nodes.GetFirst(); | 2376 | ASTNode current = ast.nodes.GetFirst(); |
| @@ -2384,29 +2382,29 @@ public: | |||
| 2384 | decomp.code.AddLine("}}"); | 2382 | decomp.code.AddLine("}}"); |
| 2385 | } | 2383 | } |
| 2386 | 2384 | ||
| 2387 | void operator()(VideoCommon::Shader::ASTBlockEncoded& ast) { | 2385 | void operator()([[maybe_unused]] const ASTBlockEncoded& ast) { |
| 2388 | UNREACHABLE(); | 2386 | UNREACHABLE(); |
| 2389 | } | 2387 | } |
| 2390 | 2388 | ||
| 2391 | void operator()(VideoCommon::Shader::ASTBlockDecoded& ast) { | 2389 | void operator()(const ASTBlockDecoded& ast) { |
| 2392 | decomp.VisitBlock(ast.nodes); | 2390 | decomp.VisitBlock(ast.nodes); |
| 2393 | } | 2391 | } |
| 2394 | 2392 | ||
| 2395 | void operator()(VideoCommon::Shader::ASTVarSet& ast) { | 2393 | void operator()(const ASTVarSet& ast) { |
| 2396 | ExprDecompiler expr_parser{decomp}; | 2394 | ExprDecompiler expr_parser{decomp}; |
| 2397 | std::visit(expr_parser, *ast.condition); | 2395 | std::visit(expr_parser, *ast.condition); |
| 2398 | decomp.code.AddLine("{} = {};", GetFlowVariable(ast.index), expr_parser.GetResult()); | 2396 | decomp.code.AddLine("{} = {};", GetFlowVariable(ast.index), expr_parser.GetResult()); |
| 2399 | } | 2397 | } |
| 2400 | 2398 | ||
| 2401 | void operator()(VideoCommon::Shader::ASTLabel& ast) { | 2399 | void operator()(const ASTLabel& ast) { |
| 2402 | decomp.code.AddLine("// Label_{}:", ast.index); | 2400 | decomp.code.AddLine("// Label_{}:", ast.index); |
| 2403 | } | 2401 | } |
| 2404 | 2402 | ||
| 2405 | void operator()(VideoCommon::Shader::ASTGoto& ast) { | 2403 | void operator()([[maybe_unused]] const ASTGoto& ast) { |
| 2406 | UNREACHABLE(); | 2404 | UNREACHABLE(); |
| 2407 | } | 2405 | } |
| 2408 | 2406 | ||
| 2409 | void operator()(VideoCommon::Shader::ASTDoWhile& ast) { | 2407 | void operator()(const ASTDoWhile& ast) { |
| 2410 | ExprDecompiler expr_parser{decomp}; | 2408 | ExprDecompiler expr_parser{decomp}; |
| 2411 | std::visit(expr_parser, *ast.condition); | 2409 | std::visit(expr_parser, *ast.condition); |
| 2412 | decomp.code.AddLine("do {{"); | 2410 | decomp.code.AddLine("do {{"); |
| @@ -2420,7 +2418,7 @@ public: | |||
| 2420 | decomp.code.AddLine("}} while({});", expr_parser.GetResult()); | 2418 | decomp.code.AddLine("}} while({});", expr_parser.GetResult()); |
| 2421 | } | 2419 | } |
| 2422 | 2420 | ||
| 2423 | void operator()(VideoCommon::Shader::ASTReturn& ast) { | 2421 | void operator()(const ASTReturn& ast) { |
| 2424 | const bool is_true = VideoCommon::Shader::ExprIsTrue(ast.condition); | 2422 | const bool is_true = VideoCommon::Shader::ExprIsTrue(ast.condition); |
| 2425 | if (!is_true) { | 2423 | if (!is_true) { |
| 2426 | ExprDecompiler expr_parser{decomp}; | 2424 | ExprDecompiler expr_parser{decomp}; |
| @@ -2440,7 +2438,7 @@ public: | |||
| 2440 | } | 2438 | } |
| 2441 | } | 2439 | } |
| 2442 | 2440 | ||
| 2443 | void operator()(VideoCommon::Shader::ASTBreak& ast) { | 2441 | void operator()(const ASTBreak& ast) { |
| 2444 | const bool is_true = VideoCommon::Shader::ExprIsTrue(ast.condition); | 2442 | const bool is_true = VideoCommon::Shader::ExprIsTrue(ast.condition); |
| 2445 | if (!is_true) { | 2443 | if (!is_true) { |
| 2446 | ExprDecompiler expr_parser{decomp}; | 2444 | ExprDecompiler expr_parser{decomp}; |
| @@ -2455,7 +2453,7 @@ public: | |||
| 2455 | } | 2453 | } |
| 2456 | } | 2454 | } |
| 2457 | 2455 | ||
| 2458 | void Visit(VideoCommon::Shader::ASTNode& node) { | 2456 | void Visit(const ASTNode& node) { |
| 2459 | std::visit(*this, *node->GetInnerData()); | 2457 | std::visit(*this, *node->GetInnerData()); |
| 2460 | } | 2458 | } |
| 2461 | 2459 | ||
| @@ -2468,9 +2466,9 @@ void GLSLDecompiler::DecompileAST() { | |||
| 2468 | for (u32 i = 0; i < num_flow_variables; i++) { | 2466 | for (u32 i = 0; i < num_flow_variables; i++) { |
| 2469 | code.AddLine("bool {} = false;", GetFlowVariable(i)); | 2467 | code.AddLine("bool {} = false;", GetFlowVariable(i)); |
| 2470 | } | 2468 | } |
| 2469 | |||
| 2471 | ASTDecompiler decompiler{*this}; | 2470 | ASTDecompiler decompiler{*this}; |
| 2472 | VideoCommon::Shader::ASTNode program = ir.GetASTProgram(); | 2471 | decompiler.Visit(ir.GetASTProgram()); |
| 2473 | decompiler.Visit(program); | ||
| 2474 | } | 2472 | } |
| 2475 | 2473 | ||
| 2476 | } // Anonymous namespace | 2474 | } // Anonymous namespace |
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 1e6ef66ab..4bbd17b12 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp | |||
| @@ -102,8 +102,6 @@ RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::Syst | |||
| 102 | RendererOpenGL::~RendererOpenGL() = default; | 102 | RendererOpenGL::~RendererOpenGL() = default; |
| 103 | 103 | ||
| 104 | void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | 104 | void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { |
| 105 | system.GetPerfStats().EndSystemFrame(); | ||
| 106 | |||
| 107 | // Maintain the rasterizer's state as a priority | 105 | // Maintain the rasterizer's state as a priority |
| 108 | OpenGLState prev_state = OpenGLState::GetCurState(); | 106 | OpenGLState prev_state = OpenGLState::GetCurState(); |
| 109 | state.AllDirty(); | 107 | state.AllDirty(); |
| @@ -135,9 +133,6 @@ void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { | |||
| 135 | 133 | ||
| 136 | render_window.PollEvents(); | 134 | render_window.PollEvents(); |
| 137 | 135 | ||
| 138 | system.FrameLimiter().DoFrameLimiting(system.CoreTiming().GetGlobalTimeUs()); | ||
| 139 | system.GetPerfStats().BeginSystemFrame(); | ||
| 140 | |||
| 141 | // Restore the rasterizer state | 136 | // Restore the rasterizer state |
| 142 | prev_state.AllDirty(); | 137 | prev_state.AllDirty(); |
| 143 | prev_state.Apply(); | 138 | prev_state.Apply(); |
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h index 338bab17c..447fb5c1d 100644 --- a/src/video_core/shader/node.h +++ b/src/video_core/shader/node.h | |||
| @@ -410,7 +410,7 @@ public: | |||
| 410 | explicit OperationNode(OperationCode code) : OperationNode(code, Meta{}) {} | 410 | explicit OperationNode(OperationCode code) : OperationNode(code, Meta{}) {} |
| 411 | 411 | ||
| 412 | explicit OperationNode(OperationCode code, Meta meta) | 412 | explicit OperationNode(OperationCode code, Meta meta) |
| 413 | : OperationNode(code, meta, std::vector<Node>{}) {} | 413 | : OperationNode(code, std::move(meta), std::vector<Node>{}) {} |
| 414 | 414 | ||
| 415 | explicit OperationNode(OperationCode code, std::vector<Node> operands) | 415 | explicit OperationNode(OperationCode code, std::vector<Node> operands) |
| 416 | : OperationNode(code, Meta{}, std::move(operands)) {} | 416 | : OperationNode(code, Meta{}, std::move(operands)) {} |
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index 250afc6d6..ef6b3592e 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp | |||
| @@ -252,6 +252,7 @@ PixelFormat PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format, | |||
| 252 | default: | 252 | default: |
| 253 | break; | 253 | break; |
| 254 | } | 254 | } |
| 255 | break; | ||
| 255 | case Tegra::Texture::TextureFormat::R32_G32_B32_A32: | 256 | case Tegra::Texture::TextureFormat::R32_G32_B32_A32: |
| 256 | switch (component_type) { | 257 | switch (component_type) { |
| 257 | case Tegra::Texture::ComponentType::FLOAT: | 258 | case Tegra::Texture::ComponentType::FLOAT: |
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index ca2da8f97..6a92b22d3 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h | |||
| @@ -62,10 +62,10 @@ public: | |||
| 62 | } | 62 | } |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | /*** | 65 | /** |
| 66 | * `Guard` guarantees that rendertargets don't unregister themselves if the | 66 | * Guarantees that rendertargets don't unregister themselves if the |
| 67 | * collide. Protection is currently only done on 3D slices. | 67 | * collide. Protection is currently only done on 3D slices. |
| 68 | ***/ | 68 | */ |
| 69 | void GuardRenderTargets(bool new_guard) { | 69 | void GuardRenderTargets(bool new_guard) { |
| 70 | guard_render_targets = new_guard; | 70 | guard_render_targets = new_guard; |
| 71 | } | 71 | } |
| @@ -287,7 +287,7 @@ protected: | |||
| 287 | const Tegra::Engines::Fermi2D::Config& copy_config) = 0; | 287 | const Tegra::Engines::Fermi2D::Config& copy_config) = 0; |
| 288 | 288 | ||
| 289 | // Depending on the backend, a buffer copy can be slow as it means deoptimizing the texture | 289 | // Depending on the backend, a buffer copy can be slow as it means deoptimizing the texture |
| 290 | // and reading it from a sepparate buffer. | 290 | // and reading it from a separate buffer. |
| 291 | virtual void BufferCopy(TSurface& src_surface, TSurface& dst_surface) = 0; | 291 | virtual void BufferCopy(TSurface& src_surface, TSurface& dst_surface) = 0; |
| 292 | 292 | ||
| 293 | void ManageRenderTargetUnregister(TSurface& surface) { | 293 | void ManageRenderTargetUnregister(TSurface& surface) { |
| @@ -386,12 +386,13 @@ private: | |||
| 386 | }; | 386 | }; |
| 387 | 387 | ||
| 388 | /** | 388 | /** |
| 389 | * `PickStrategy` takes care of selecting a proper strategy to deal with a texture recycle. | 389 | * Takes care of selecting a proper strategy to deal with a texture recycle. |
| 390 | * @param overlaps, the overlapping surfaces registered in the cache. | 390 | * |
| 391 | * @param params, the paremeters on the new surface. | 391 | * @param overlaps The overlapping surfaces registered in the cache. |
| 392 | * @param gpu_addr, the starting address of the new surface. | 392 | * @param params The parameters on the new surface. |
| 393 | * @param untopological, tells the recycler that the texture has no way to match the overlaps | 393 | * @param gpu_addr The starting address of the new surface. |
| 394 | * due to topological reasons. | 394 | * @param untopological Indicates to the recycler that the texture has no way |
| 395 | * to match the overlaps due to topological reasons. | ||
| 395 | **/ | 396 | **/ |
| 396 | RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params, | 397 | RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params, |
| 397 | const GPUVAddr gpu_addr, const MatchTopologyResult untopological) { | 398 | const GPUVAddr gpu_addr, const MatchTopologyResult untopological) { |
| @@ -402,7 +403,7 @@ private: | |||
| 402 | if (params.block_depth > 1 || params.target == SurfaceTarget::Texture3D) { | 403 | if (params.block_depth > 1 || params.target == SurfaceTarget::Texture3D) { |
| 403 | return RecycleStrategy::Flush; | 404 | return RecycleStrategy::Flush; |
| 404 | } | 405 | } |
| 405 | for (auto s : overlaps) { | 406 | for (const auto& s : overlaps) { |
| 406 | const auto& s_params = s->GetSurfaceParams(); | 407 | const auto& s_params = s->GetSurfaceParams(); |
| 407 | if (s_params.block_depth > 1 || s_params.target == SurfaceTarget::Texture3D) { | 408 | if (s_params.block_depth > 1 || s_params.target == SurfaceTarget::Texture3D) { |
| 408 | return RecycleStrategy::Flush; | 409 | return RecycleStrategy::Flush; |
| @@ -419,16 +420,19 @@ private: | |||
| 419 | } | 420 | } |
| 420 | 421 | ||
| 421 | /** | 422 | /** |
| 422 | * `RecycleSurface` es a method we use to decide what to do with textures we can't resolve in | 423 | * Used to decide what to do with textures we can't resolve in the cache. It has 2 implemented |
| 423 | *the cache It has 2 implemented strategies: Ignore and Flush. Ignore just unregisters all the | 424 | * strategies: Ignore and Flush. |
| 424 | *overlaps and loads the new texture. Flush, flushes all the overlaps into memory and loads the | 425 | * |
| 425 | *new surface from that data. | 426 | * - Ignore: Just unregisters all the overlaps and loads the new texture. |
| 426 | * @param overlaps, the overlapping surfaces registered in the cache. | 427 | * - Flush: Flushes all the overlaps into memory and loads the new surface from that data. |
| 427 | * @param params, the paremeters on the new surface. | 428 | * |
| 428 | * @param gpu_addr, the starting address of the new surface. | 429 | * @param overlaps The overlapping surfaces registered in the cache. |
| 429 | * @param preserve_contents, tells if the new surface should be loaded from meory or left blank | 430 | * @param params The parameters for the new surface. |
| 430 | * @param untopological, tells the recycler that the texture has no way to match the overlaps | 431 | * @param gpu_addr The starting address of the new surface. |
| 431 | * due to topological reasons. | 432 | * @param preserve_contents Indicates that the new surface should be loaded from memory or left |
| 433 | * blank. | ||
| 434 | * @param untopological Indicates to the recycler that the texture has no way to match the | ||
| 435 | * overlaps due to topological reasons. | ||
| 432 | **/ | 436 | **/ |
| 433 | std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps, | 437 | std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps, |
| 434 | const SurfaceParams& params, const GPUVAddr gpu_addr, | 438 | const SurfaceParams& params, const GPUVAddr gpu_addr, |
| @@ -465,10 +469,12 @@ private: | |||
| 465 | } | 469 | } |
| 466 | 470 | ||
| 467 | /** | 471 | /** |
| 468 | * `RebuildSurface` this method takes a single surface and recreates into another that | 472 | * Takes a single surface and recreates into another that may differ in |
| 469 | * may differ in format, target or width alingment. | 473 | * format, target or width alignment. |
| 470 | * @param current_surface, the registered surface in the cache which we want to convert. | 474 | * |
| 471 | * @param params, the new surface params which we'll use to recreate the surface. | 475 | * @param current_surface The registered surface in the cache which we want to convert. |
| 476 | * @param params The new surface params which we'll use to recreate the surface. | ||
| 477 | * @param is_render Whether or not the surface is a render target. | ||
| 472 | **/ | 478 | **/ |
| 473 | std::pair<TSurface, TView> RebuildSurface(TSurface current_surface, const SurfaceParams& params, | 479 | std::pair<TSurface, TView> RebuildSurface(TSurface current_surface, const SurfaceParams& params, |
| 474 | bool is_render) { | 480 | bool is_render) { |
| @@ -502,12 +508,14 @@ private: | |||
| 502 | } | 508 | } |
| 503 | 509 | ||
| 504 | /** | 510 | /** |
| 505 | * `ManageStructuralMatch` this method takes a single surface and checks with the new surface's | 511 | * Takes a single surface and checks with the new surface's params if it's an exact |
| 506 | * params if it's an exact match, we return the main view of the registered surface. If it's | 512 | * match, we return the main view of the registered surface. If its formats don't |
| 507 | * formats don't match, we rebuild the surface. We call this last method a `Mirage`. If formats | 513 | * match, we rebuild the surface. We call this last method a `Mirage`. If formats |
| 508 | * match but the targets don't, we create an overview View of the registered surface. | 514 | * match but the targets don't, we create an overview View of the registered surface. |
| 509 | * @param current_surface, the registered surface in the cache which we want to convert. | 515 | * |
| 510 | * @param params, the new surface params which we want to check. | 516 | * @param current_surface The registered surface in the cache which we want to convert. |
| 517 | * @param params The new surface params which we want to check. | ||
| 518 | * @param is_render Whether or not the surface is a render target. | ||
| 511 | **/ | 519 | **/ |
| 512 | std::pair<TSurface, TView> ManageStructuralMatch(TSurface current_surface, | 520 | std::pair<TSurface, TView> ManageStructuralMatch(TSurface current_surface, |
| 513 | const SurfaceParams& params, bool is_render) { | 521 | const SurfaceParams& params, bool is_render) { |
| @@ -529,13 +537,14 @@ private: | |||
| 529 | } | 537 | } |
| 530 | 538 | ||
| 531 | /** | 539 | /** |
| 532 | * `TryReconstructSurface` unlike `RebuildSurface` where we know the registered surface | 540 | * Unlike RebuildSurface where we know whether or not registered surfaces match the candidate |
| 533 | * matches the candidate in some way, we got no guarantess here. We try to see if the overlaps | 541 | * in some way, we have no guarantees here. We try to see if the overlaps are sublayers/mipmaps |
| 534 | * are sublayers/mipmaps of the new surface, if they all match we end up recreating a surface | 542 | * of the new surface, if they all match we end up recreating a surface for them, |
| 535 | * for them, else we return nothing. | 543 | * else we return nothing. |
| 536 | * @param overlaps, the overlapping surfaces registered in the cache. | 544 | * |
| 537 | * @param params, the paremeters on the new surface. | 545 | * @param overlaps The overlapping surfaces registered in the cache. |
| 538 | * @param gpu_addr, the starting address of the new surface. | 546 | * @param params The parameters on the new surface. |
| 547 | * @param gpu_addr The starting address of the new surface. | ||
| 539 | **/ | 548 | **/ |
| 540 | std::optional<std::pair<TSurface, TView>> TryReconstructSurface(std::vector<TSurface>& overlaps, | 549 | std::optional<std::pair<TSurface, TView>> TryReconstructSurface(std::vector<TSurface>& overlaps, |
| 541 | const SurfaceParams& params, | 550 | const SurfaceParams& params, |
| @@ -575,7 +584,7 @@ private: | |||
| 575 | } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) { | 584 | } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) { |
| 576 | return {}; | 585 | return {}; |
| 577 | } | 586 | } |
| 578 | for (auto surface : overlaps) { | 587 | for (const auto& surface : overlaps) { |
| 579 | Unregister(surface); | 588 | Unregister(surface); |
| 580 | } | 589 | } |
| 581 | new_surface->MarkAsModified(modified, Tick()); | 590 | new_surface->MarkAsModified(modified, Tick()); |
| @@ -584,19 +593,27 @@ private: | |||
| 584 | } | 593 | } |
| 585 | 594 | ||
| 586 | /** | 595 | /** |
| 587 | * `GetSurface` gets the starting address and parameters of a candidate surface and tries | 596 | * Gets the starting address and parameters of a candidate surface and tries |
| 588 | * to find a matching surface within the cache. This is done in 3 big steps. The first is to | 597 | * to find a matching surface within the cache. This is done in 3 big steps: |
| 589 | * check the 1st Level Cache in order to find an exact match, if we fail, we move to step 2. | 598 | * |
| 590 | * Step 2 is checking if there are any overlaps at all, if none, we just load the texture from | 599 | * 1. Check the 1st Level Cache in order to find an exact match, if we fail, we move to step 2. |
| 591 | * memory else we move to step 3. Step 3 consists on figuring the relationship between the | 600 | * |
| 592 | * candidate texture and the overlaps. We divide the scenarios depending if there's 1 or many | 601 | * 2. Check if there are any overlaps at all, if there are none, we just load the texture from |
| 593 | * overlaps. If there's many, we just try to reconstruct a new surface out of them based on the | 602 | * memory else we move to step 3. |
| 594 | * candidate's parameters, if we fail, we recycle. When there's only 1 overlap then we have to | 603 | * |
| 595 | * check if the candidate is a view (layer/mipmap) of the overlap or if the registered surface | 604 | * 3. Consists of figuring out the relationship between the candidate texture and the |
| 596 | * is a mipmap/layer of the candidate. In this last case we reconstruct a new surface. | 605 | * overlaps. We divide the scenarios depending if there's 1 or many overlaps. If |
| 597 | * @param gpu_addr, the starting address of the candidate surface. | 606 | * there's many, we just try to reconstruct a new surface out of them based on the |
| 598 | * @param params, the paremeters on the candidate surface. | 607 | * candidate's parameters, if we fail, we recycle. When there's only 1 overlap then we |
| 599 | * @param preserve_contents, tells if the new surface should be loaded from meory or left blank. | 608 | * have to check if the candidate is a view (layer/mipmap) of the overlap or if the |
| 609 | * registered surface is a mipmap/layer of the candidate. In this last case we reconstruct | ||
| 610 | * a new surface. | ||
| 611 | * | ||
| 612 | * @param gpu_addr The starting address of the candidate surface. | ||
| 613 | * @param params The parameters on the candidate surface. | ||
| 614 | * @param preserve_contents Indicates that the new surface should be loaded from memory or | ||
| 615 | * left blank. | ||
| 616 | * @param is_render Whether or not the surface is a render target. | ||
| 600 | **/ | 617 | **/ |
| 601 | std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params, | 618 | std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params, |
| 602 | bool preserve_contents, bool is_render) { | 619 | bool preserve_contents, bool is_render) { |
| @@ -651,7 +668,7 @@ private: | |||
| 651 | // Step 3 | 668 | // Step 3 |
| 652 | // Now we need to figure the relationship between the texture and its overlaps | 669 | // Now we need to figure the relationship between the texture and its overlaps |
| 653 | // we do a topological test to ensure we can find some relationship. If it fails | 670 | // we do a topological test to ensure we can find some relationship. If it fails |
| 654 | // inmediatly recycle the texture | 671 | // immediately recycle the texture |
| 655 | for (const auto& surface : overlaps) { | 672 | for (const auto& surface : overlaps) { |
| 656 | const auto topological_result = surface->MatchesTopology(params); | 673 | const auto topological_result = surface->MatchesTopology(params); |
| 657 | if (topological_result != MatchTopologyResult::FullMatch) { | 674 | if (topological_result != MatchTopologyResult::FullMatch) { |
| @@ -720,12 +737,13 @@ private: | |||
| 720 | } | 737 | } |
| 721 | 738 | ||
| 722 | /** | 739 | /** |
| 723 | * `DeduceSurface` gets the starting address and parameters of a candidate surface and tries | 740 | * Gets the starting address and parameters of a candidate surface and tries to find a |
| 724 | * to find a matching surface within the cache that's similar to it. If there are many textures | 741 | * matching surface within the cache that's similar to it. If there are many textures |
| 725 | * or the texture found is entirely incompatible, it will fail. If no texture is found, the | 742 | * or the texture found is entirely incompatible, it will fail. If no texture is found, the |
| 726 | * blit will be unsuccessful. | 743 | * blit will be unsuccessful. |
| 727 | * @param gpu_addr, the starting address of the candidate surface. | 744 | * |
| 728 | * @param params, the paremeters on the candidate surface. | 745 | * @param gpu_addr The starting address of the candidate surface. |
| 746 | * @param params The parameters on the candidate surface. | ||
| 729 | **/ | 747 | **/ |
| 730 | Deduction DeduceSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) { | 748 | Deduction DeduceSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) { |
| 731 | const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; | 749 | const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; |
| @@ -777,11 +795,14 @@ private: | |||
| 777 | } | 795 | } |
| 778 | 796 | ||
| 779 | /** | 797 | /** |
| 780 | * `DeduceBestBlit` gets the a source and destination starting address and parameters, | 798 | * Gets a source and destination starting address and parameters, |
| 781 | * and tries to deduce if they are supposed to be depth textures. If so, their | 799 | * and tries to deduce if they are supposed to be depth textures. If so, their |
| 782 | * parameters are modified and fixed accordingly. | 800 | * parameters are modified and fixed accordingly. |
| 783 | * @param gpu_addr, the starting address of the candidate surface. | 801 | * |
| 784 | * @param params, the parameters on the candidate surface. | 802 | * @param src_params The parameters of the candidate surface. |
| 803 | * @param dst_params The parameters of the destination surface. | ||
| 804 | * @param src_gpu_addr The starting address of the candidate surface. | ||
| 805 | * @param dst_gpu_addr The starting address of the destination surface. | ||
| 785 | **/ | 806 | **/ |
| 786 | void DeduceBestBlit(SurfaceParams& src_params, SurfaceParams& dst_params, | 807 | void DeduceBestBlit(SurfaceParams& src_params, SurfaceParams& dst_params, |
| 787 | const GPUVAddr src_gpu_addr, const GPUVAddr dst_gpu_addr) { | 808 | const GPUVAddr src_gpu_addr, const GPUVAddr dst_gpu_addr) { |