diff options
Diffstat (limited to 'src/core')
129 files changed, 2249 insertions, 1575 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index f61bcd40d..16920e2e9 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt | |||
| @@ -107,6 +107,8 @@ add_library(core STATIC | |||
| 107 | hle/kernel/client_port.h | 107 | hle/kernel/client_port.h |
| 108 | hle/kernel/client_session.cpp | 108 | hle/kernel/client_session.cpp |
| 109 | hle/kernel/client_session.h | 109 | hle/kernel/client_session.h |
| 110 | hle/kernel/code_set.cpp | ||
| 111 | hle/kernel/code_set.h | ||
| 110 | hle/kernel/errors.h | 112 | hle/kernel/errors.h |
| 111 | hle/kernel/handle_table.cpp | 113 | hle/kernel/handle_table.cpp |
| 112 | hle/kernel/handle_table.h | 114 | hle/kernel/handle_table.h |
| @@ -217,6 +219,7 @@ add_library(core STATIC | |||
| 217 | hle/service/audio/audren_u.h | 219 | hle/service/audio/audren_u.h |
| 218 | hle/service/audio/codecctl.cpp | 220 | hle/service/audio/codecctl.cpp |
| 219 | hle/service/audio/codecctl.h | 221 | hle/service/audio/codecctl.h |
| 222 | hle/service/audio/errors.h | ||
| 220 | hle/service/audio/hwopus.cpp | 223 | hle/service/audio/hwopus.cpp |
| 221 | hle/service/audio/hwopus.h | 224 | hle/service/audio/hwopus.h |
| 222 | hle/service/bcat/bcat.cpp | 225 | hle/service/bcat/bcat.cpp |
| @@ -400,6 +403,10 @@ add_library(core STATIC | |||
| 400 | hle/service/time/time.h | 403 | hle/service/time/time.h |
| 401 | hle/service/usb/usb.cpp | 404 | hle/service/usb/usb.cpp |
| 402 | hle/service/usb/usb.h | 405 | hle/service/usb/usb.h |
| 406 | hle/service/vi/display/vi_display.cpp | ||
| 407 | hle/service/vi/display/vi_display.h | ||
| 408 | hle/service/vi/layer/vi_layer.cpp | ||
| 409 | hle/service/vi/layer/vi_layer.h | ||
| 403 | hle/service/vi/vi.cpp | 410 | hle/service/vi/vi.cpp |
| 404 | hle/service/vi/vi.h | 411 | hle/service/vi/vi.h |
| 405 | hle/service/vi/vi_m.cpp | 412 | hle/service/vi/vi_m.cpp |
| @@ -414,8 +421,6 @@ add_library(core STATIC | |||
| 414 | loader/deconstructed_rom_directory.h | 421 | loader/deconstructed_rom_directory.h |
| 415 | loader/elf.cpp | 422 | loader/elf.cpp |
| 416 | loader/elf.h | 423 | loader/elf.h |
| 417 | loader/linker.cpp | ||
| 418 | loader/linker.h | ||
| 419 | loader/loader.cpp | 424 | loader/loader.cpp |
| 420 | loader/loader.h | 425 | loader/loader.h |
| 421 | loader/nax.cpp | 426 | loader/nax.cpp |
| @@ -432,8 +437,6 @@ add_library(core STATIC | |||
| 432 | loader/xci.h | 437 | loader/xci.h |
| 433 | memory.cpp | 438 | memory.cpp |
| 434 | memory.h | 439 | memory.h |
| 435 | memory_hook.cpp | ||
| 436 | memory_hook.h | ||
| 437 | memory_setup.h | 440 | memory_setup.h |
| 438 | perf_stats.cpp | 441 | perf_stats.cpp |
| 439 | perf_stats.h | 442 | perf_stats.h |
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp index afbda8d8b..4fdc12f11 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic.cpp | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include "core/core.h" | 12 | #include "core/core.h" |
| 13 | #include "core/core_cpu.h" | 13 | #include "core/core_cpu.h" |
| 14 | #include "core/core_timing.h" | 14 | #include "core/core_timing.h" |
| 15 | #include "core/core_timing_util.h" | ||
| 15 | #include "core/gdbstub/gdbstub.h" | 16 | #include "core/gdbstub/gdbstub.h" |
| 16 | #include "core/hle/kernel/process.h" | 17 | #include "core/hle/kernel/process.h" |
| 17 | #include "core/hle/kernel/svc.h" | 18 | #include "core/hle/kernel/svc.h" |
| @@ -112,14 +113,14 @@ public: | |||
| 112 | // Always execute at least one tick. | 113 | // Always execute at least one tick. |
| 113 | amortized_ticks = std::max<u64>(amortized_ticks, 1); | 114 | amortized_ticks = std::max<u64>(amortized_ticks, 1); |
| 114 | 115 | ||
| 115 | CoreTiming::AddTicks(amortized_ticks); | 116 | parent.core_timing.AddTicks(amortized_ticks); |
| 116 | num_interpreted_instructions = 0; | 117 | num_interpreted_instructions = 0; |
| 117 | } | 118 | } |
| 118 | u64 GetTicksRemaining() override { | 119 | u64 GetTicksRemaining() override { |
| 119 | return std::max(CoreTiming::GetDowncount(), 0); | 120 | return std::max(parent.core_timing.GetDowncount(), 0); |
| 120 | } | 121 | } |
| 121 | u64 GetCNTPCT() override { | 122 | u64 GetCNTPCT() override { |
| 122 | return CoreTiming::GetTicks(); | 123 | return Timing::CpuCyclesToClockCycles(parent.core_timing.GetTicks()); |
| 123 | } | 124 | } |
| 124 | 125 | ||
| 125 | ARM_Dynarmic& parent; | 126 | ARM_Dynarmic& parent; |
| @@ -151,7 +152,7 @@ std::unique_ptr<Dynarmic::A64::Jit> ARM_Dynarmic::MakeJit() const { | |||
| 151 | config.tpidr_el0 = &cb->tpidr_el0; | 152 | config.tpidr_el0 = &cb->tpidr_el0; |
| 152 | config.dczid_el0 = 4; | 153 | config.dczid_el0 = 4; |
| 153 | config.ctr_el0 = 0x8444c004; | 154 | config.ctr_el0 = 0x8444c004; |
| 154 | config.cntfrq_el0 = 19200000; // Value from fusee. | 155 | config.cntfrq_el0 = Timing::CNTFREQ; |
| 155 | 156 | ||
| 156 | // Unpredictable instructions | 157 | // Unpredictable instructions |
| 157 | config.define_unpredictable_behaviour = true; | 158 | config.define_unpredictable_behaviour = true; |
| @@ -172,8 +173,10 @@ void ARM_Dynarmic::Step() { | |||
| 172 | cb->InterpreterFallback(jit->GetPC(), 1); | 173 | cb->InterpreterFallback(jit->GetPC(), 1); |
| 173 | } | 174 | } |
| 174 | 175 | ||
| 175 | ARM_Dynarmic::ARM_Dynarmic(ExclusiveMonitor& exclusive_monitor, std::size_t core_index) | 176 | ARM_Dynarmic::ARM_Dynarmic(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, |
| 176 | : cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), core_index{core_index}, | 177 | std::size_t core_index) |
| 178 | : cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), inner_unicorn{core_timing}, | ||
| 179 | core_index{core_index}, core_timing{core_timing}, | ||
| 177 | exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} { | 180 | exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} { |
| 178 | ThreadContext ctx{}; | 181 | ThreadContext ctx{}; |
| 179 | inner_unicorn.SaveContext(ctx); | 182 | inner_unicorn.SaveContext(ctx); |
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h index 512bf8ce9..aada1e862 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.h +++ b/src/core/arm/dynarmic/arm_dynarmic.h | |||
| @@ -12,10 +12,14 @@ | |||
| 12 | #include "core/arm/exclusive_monitor.h" | 12 | #include "core/arm/exclusive_monitor.h" |
| 13 | #include "core/arm/unicorn/arm_unicorn.h" | 13 | #include "core/arm/unicorn/arm_unicorn.h" |
| 14 | 14 | ||
| 15 | namespace Memory { | 15 | namespace Common { |
| 16 | struct PageTable; | 16 | struct PageTable; |
| 17 | } | 17 | } |
| 18 | 18 | ||
| 19 | namespace Core::Timing { | ||
| 20 | class CoreTiming; | ||
| 21 | } | ||
| 22 | |||
| 19 | namespace Core { | 23 | namespace Core { |
| 20 | 24 | ||
| 21 | class ARM_Dynarmic_Callbacks; | 25 | class ARM_Dynarmic_Callbacks; |
| @@ -23,7 +27,8 @@ class DynarmicExclusiveMonitor; | |||
| 23 | 27 | ||
| 24 | class ARM_Dynarmic final : public ARM_Interface { | 28 | class ARM_Dynarmic final : public ARM_Interface { |
| 25 | public: | 29 | public: |
| 26 | ARM_Dynarmic(ExclusiveMonitor& exclusive_monitor, std::size_t core_index); | 30 | ARM_Dynarmic(Timing::CoreTiming& core_timing, ExclusiveMonitor& exclusive_monitor, |
| 31 | std::size_t core_index); | ||
| 27 | ~ARM_Dynarmic(); | 32 | ~ARM_Dynarmic(); |
| 28 | 33 | ||
| 29 | void MapBackingMemory(VAddr address, std::size_t size, u8* memory, | 34 | void MapBackingMemory(VAddr address, std::size_t size, u8* memory, |
| @@ -62,9 +67,10 @@ private: | |||
| 62 | ARM_Unicorn inner_unicorn; | 67 | ARM_Unicorn inner_unicorn; |
| 63 | 68 | ||
| 64 | std::size_t core_index; | 69 | std::size_t core_index; |
| 70 | Timing::CoreTiming& core_timing; | ||
| 65 | DynarmicExclusiveMonitor& exclusive_monitor; | 71 | DynarmicExclusiveMonitor& exclusive_monitor; |
| 66 | 72 | ||
| 67 | Memory::PageTable* current_page_table = nullptr; | 73 | Common::PageTable* current_page_table = nullptr; |
| 68 | }; | 74 | }; |
| 69 | 75 | ||
| 70 | class DynarmicExclusiveMonitor final : public ExclusiveMonitor { | 76 | class DynarmicExclusiveMonitor final : public ExclusiveMonitor { |
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp index c455c81fb..a542a098b 100644 --- a/src/core/arm/unicorn/arm_unicorn.cpp +++ b/src/core/arm/unicorn/arm_unicorn.cpp | |||
| @@ -72,7 +72,7 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si | |||
| 72 | return {}; | 72 | return {}; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | ARM_Unicorn::ARM_Unicorn() { | 75 | ARM_Unicorn::ARM_Unicorn(Timing::CoreTiming& core_timing) : core_timing{core_timing} { |
| 76 | CHECKED(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc)); | 76 | CHECKED(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc)); |
| 77 | 77 | ||
| 78 | auto fpv = 3 << 20; | 78 | auto fpv = 3 << 20; |
| @@ -177,7 +177,7 @@ void ARM_Unicorn::Run() { | |||
| 177 | if (GDBStub::IsServerEnabled()) { | 177 | if (GDBStub::IsServerEnabled()) { |
| 178 | ExecuteInstructions(std::max(4000000, 0)); | 178 | ExecuteInstructions(std::max(4000000, 0)); |
| 179 | } else { | 179 | } else { |
| 180 | ExecuteInstructions(std::max(CoreTiming::GetDowncount(), 0)); | 180 | ExecuteInstructions(std::max(core_timing.GetDowncount(), 0)); |
| 181 | } | 181 | } |
| 182 | } | 182 | } |
| 183 | 183 | ||
| @@ -190,7 +190,7 @@ MICROPROFILE_DEFINE(ARM_Jit_Unicorn, "ARM JIT", "Unicorn", MP_RGB(255, 64, 64)); | |||
| 190 | void ARM_Unicorn::ExecuteInstructions(int num_instructions) { | 190 | void ARM_Unicorn::ExecuteInstructions(int num_instructions) { |
| 191 | MICROPROFILE_SCOPE(ARM_Jit_Unicorn); | 191 | MICROPROFILE_SCOPE(ARM_Jit_Unicorn); |
| 192 | CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); | 192 | CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); |
| 193 | CoreTiming::AddTicks(num_instructions); | 193 | core_timing.AddTicks(num_instructions); |
| 194 | if (GDBStub::IsServerEnabled()) { | 194 | if (GDBStub::IsServerEnabled()) { |
| 195 | if (last_bkpt_hit) { | 195 | if (last_bkpt_hit) { |
| 196 | uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address); | 196 | uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address); |
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h index 75761950b..dbd6955ea 100644 --- a/src/core/arm/unicorn/arm_unicorn.h +++ b/src/core/arm/unicorn/arm_unicorn.h | |||
| @@ -9,12 +9,17 @@ | |||
| 9 | #include "core/arm/arm_interface.h" | 9 | #include "core/arm/arm_interface.h" |
| 10 | #include "core/gdbstub/gdbstub.h" | 10 | #include "core/gdbstub/gdbstub.h" |
| 11 | 11 | ||
| 12 | namespace Core::Timing { | ||
| 13 | class CoreTiming; | ||
| 14 | } | ||
| 15 | |||
| 12 | namespace Core { | 16 | namespace Core { |
| 13 | 17 | ||
| 14 | class ARM_Unicorn final : public ARM_Interface { | 18 | class ARM_Unicorn final : public ARM_Interface { |
| 15 | public: | 19 | public: |
| 16 | ARM_Unicorn(); | 20 | explicit ARM_Unicorn(Timing::CoreTiming& core_timing); |
| 17 | ~ARM_Unicorn(); | 21 | ~ARM_Unicorn(); |
| 22 | |||
| 18 | void MapBackingMemory(VAddr address, std::size_t size, u8* memory, | 23 | void MapBackingMemory(VAddr address, std::size_t size, u8* memory, |
| 19 | Kernel::VMAPermission perms) override; | 24 | Kernel::VMAPermission perms) override; |
| 20 | void UnmapMemory(VAddr address, std::size_t size) override; | 25 | void UnmapMemory(VAddr address, std::size_t size) override; |
| @@ -43,6 +48,7 @@ public: | |||
| 43 | 48 | ||
| 44 | private: | 49 | private: |
| 45 | uc_engine* uc{}; | 50 | uc_engine* uc{}; |
| 51 | Timing::CoreTiming& core_timing; | ||
| 46 | GDBStub::BreakpointAddress last_bkpt{}; | 52 | GDBStub::BreakpointAddress last_bkpt{}; |
| 47 | bool last_bkpt_hit; | 53 | bool last_bkpt_hit; |
| 48 | }; | 54 | }; |
diff --git a/src/core/core.cpp b/src/core/core.cpp index 572814e4b..89b3fb418 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp | |||
| @@ -36,7 +36,8 @@ | |||
| 36 | #include "frontend/applets/software_keyboard.h" | 36 | #include "frontend/applets/software_keyboard.h" |
| 37 | #include "frontend/applets/web_browser.h" | 37 | #include "frontend/applets/web_browser.h" |
| 38 | #include "video_core/debug_utils/debug_utils.h" | 38 | #include "video_core/debug_utils/debug_utils.h" |
| 39 | #include "video_core/gpu.h" | 39 | #include "video_core/gpu_asynch.h" |
| 40 | #include "video_core/gpu_synch.h" | ||
| 40 | #include "video_core/renderer_base.h" | 41 | #include "video_core/renderer_base.h" |
| 41 | #include "video_core/video_core.h" | 42 | #include "video_core/video_core.h" |
| 42 | 43 | ||
| @@ -78,6 +79,7 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs, | |||
| 78 | return vfs->OpenFile(path, FileSys::Mode::Read); | 79 | return vfs->OpenFile(path, FileSys::Mode::Read); |
| 79 | } | 80 | } |
| 80 | struct System::Impl { | 81 | struct System::Impl { |
| 82 | explicit Impl(System& system) : kernel{system} {} | ||
| 81 | 83 | ||
| 82 | Cpu& CurrentCpuCore() { | 84 | Cpu& CurrentCpuCore() { |
| 83 | return cpu_core_manager.GetCurrentCore(); | 85 | return cpu_core_manager.GetCurrentCore(); |
| @@ -94,7 +96,7 @@ struct System::Impl { | |||
| 94 | ResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { | 96 | ResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { |
| 95 | LOG_DEBUG(HW_Memory, "initialized OK"); | 97 | LOG_DEBUG(HW_Memory, "initialized OK"); |
| 96 | 98 | ||
| 97 | CoreTiming::Init(); | 99 | core_timing.Initialize(); |
| 98 | kernel.Initialize(); | 100 | kernel.Initialize(); |
| 99 | 101 | ||
| 100 | const auto current_time = std::chrono::duration_cast<std::chrono::seconds>( | 102 | const auto current_time = std::chrono::duration_cast<std::chrono::seconds>( |
| @@ -114,24 +116,30 @@ struct System::Impl { | |||
| 114 | if (web_browser == nullptr) | 116 | if (web_browser == nullptr) |
| 115 | web_browser = std::make_unique<Core::Frontend::DefaultWebBrowserApplet>(); | 117 | web_browser = std::make_unique<Core::Frontend::DefaultWebBrowserApplet>(); |
| 116 | 118 | ||
| 117 | auto main_process = Kernel::Process::Create(kernel, "main"); | 119 | auto main_process = Kernel::Process::Create(system, "main"); |
| 118 | kernel.MakeCurrentProcess(main_process.get()); | 120 | kernel.MakeCurrentProcess(main_process.get()); |
| 119 | 121 | ||
| 120 | telemetry_session = std::make_unique<Core::TelemetrySession>(); | 122 | telemetry_session = std::make_unique<Core::TelemetrySession>(); |
| 121 | service_manager = std::make_shared<Service::SM::ServiceManager>(); | 123 | service_manager = std::make_shared<Service::SM::ServiceManager>(); |
| 122 | 124 | ||
| 123 | Service::Init(service_manager, *virtual_filesystem); | 125 | Service::Init(service_manager, system, *virtual_filesystem); |
| 124 | GDBStub::Init(); | 126 | GDBStub::Init(); |
| 125 | 127 | ||
| 126 | renderer = VideoCore::CreateRenderer(emu_window); | 128 | renderer = VideoCore::CreateRenderer(emu_window, system); |
| 127 | if (!renderer->Init()) { | 129 | if (!renderer->Init()) { |
| 128 | return ResultStatus::ErrorVideoCore; | 130 | return ResultStatus::ErrorVideoCore; |
| 129 | } | 131 | } |
| 130 | 132 | ||
| 131 | gpu_core = std::make_unique<Tegra::GPU>(renderer->Rasterizer()); | 133 | is_powered_on = true; |
| 134 | |||
| 135 | if (Settings::values.use_asynchronous_gpu_emulation) { | ||
| 136 | gpu_core = std::make_unique<VideoCommon::GPUAsynch>(system, *renderer); | ||
| 137 | } else { | ||
| 138 | gpu_core = std::make_unique<VideoCommon::GPUSynch>(system, *renderer); | ||
| 139 | } | ||
| 132 | 140 | ||
| 133 | cpu_core_manager.Initialize(system); | 141 | cpu_core_manager.Initialize(system); |
| 134 | is_powered_on = true; | 142 | |
| 135 | LOG_DEBUG(Core, "Initialized OK"); | 143 | LOG_DEBUG(Core, "Initialized OK"); |
| 136 | 144 | ||
| 137 | // Reset counters and set time origin to current frame | 145 | // Reset counters and set time origin to current frame |
| @@ -175,19 +183,20 @@ struct System::Impl { | |||
| 175 | return static_cast<ResultStatus>(static_cast<u32>(ResultStatus::ErrorLoader) + | 183 | return static_cast<ResultStatus>(static_cast<u32>(ResultStatus::ErrorLoader) + |
| 176 | static_cast<u32>(load_result)); | 184 | static_cast<u32>(load_result)); |
| 177 | } | 185 | } |
| 186 | |||
| 178 | status = ResultStatus::Success; | 187 | status = ResultStatus::Success; |
| 179 | return status; | 188 | return status; |
| 180 | } | 189 | } |
| 181 | 190 | ||
| 182 | void Shutdown() { | 191 | void Shutdown() { |
| 183 | // Log last frame performance stats | 192 | // Log last frame performance stats |
| 184 | auto perf_results = GetAndResetPerfStats(); | 193 | const auto perf_results = GetAndResetPerfStats(); |
| 185 | Telemetry().AddField(Telemetry::FieldType::Performance, "Shutdown_EmulationSpeed", | 194 | telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_EmulationSpeed", |
| 186 | perf_results.emulation_speed * 100.0); | 195 | perf_results.emulation_speed * 100.0); |
| 187 | Telemetry().AddField(Telemetry::FieldType::Performance, "Shutdown_Framerate", | 196 | telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Framerate", |
| 188 | perf_results.game_fps); | 197 | perf_results.game_fps); |
| 189 | Telemetry().AddField(Telemetry::FieldType::Performance, "Shutdown_Frametime", | 198 | telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Frametime", |
| 190 | perf_results.frametime * 1000.0); | 199 | perf_results.frametime * 1000.0); |
| 191 | 200 | ||
| 192 | is_powered_on = false; | 201 | is_powered_on = false; |
| 193 | 202 | ||
| @@ -204,7 +213,7 @@ struct System::Impl { | |||
| 204 | 213 | ||
| 205 | // Shutdown kernel and core timing | 214 | // Shutdown kernel and core timing |
| 206 | kernel.Shutdown(); | 215 | kernel.Shutdown(); |
| 207 | CoreTiming::Shutdown(); | 216 | core_timing.Shutdown(); |
| 208 | 217 | ||
| 209 | // Close app loader | 218 | // Close app loader |
| 210 | app_loader.reset(); | 219 | app_loader.reset(); |
| @@ -231,9 +240,10 @@ struct System::Impl { | |||
| 231 | } | 240 | } |
| 232 | 241 | ||
| 233 | PerfStatsResults GetAndResetPerfStats() { | 242 | PerfStatsResults GetAndResetPerfStats() { |
| 234 | return perf_stats.GetAndResetStats(CoreTiming::GetGlobalTimeUs()); | 243 | return perf_stats.GetAndResetStats(core_timing.GetGlobalTimeUs()); |
| 235 | } | 244 | } |
| 236 | 245 | ||
| 246 | Timing::CoreTiming core_timing; | ||
| 237 | Kernel::KernelCore kernel; | 247 | Kernel::KernelCore kernel; |
| 238 | /// RealVfsFilesystem instance | 248 | /// RealVfsFilesystem instance |
| 239 | FileSys::VirtualFilesystem virtual_filesystem; | 249 | FileSys::VirtualFilesystem virtual_filesystem; |
| @@ -263,7 +273,7 @@ struct System::Impl { | |||
| 263 | Core::FrameLimiter frame_limiter; | 273 | Core::FrameLimiter frame_limiter; |
| 264 | }; | 274 | }; |
| 265 | 275 | ||
| 266 | System::System() : impl{std::make_unique<Impl>()} {} | 276 | System::System() : impl{std::make_unique<Impl>(*this)} {} |
| 267 | System::~System() = default; | 277 | System::~System() = default; |
| 268 | 278 | ||
| 269 | Cpu& System::CurrentCpuCore() { | 279 | Cpu& System::CurrentCpuCore() { |
| @@ -395,6 +405,14 @@ const Kernel::KernelCore& System::Kernel() const { | |||
| 395 | return impl->kernel; | 405 | return impl->kernel; |
| 396 | } | 406 | } |
| 397 | 407 | ||
| 408 | Timing::CoreTiming& System::CoreTiming() { | ||
| 409 | return impl->core_timing; | ||
| 410 | } | ||
| 411 | |||
| 412 | const Timing::CoreTiming& System::CoreTiming() const { | ||
| 413 | return impl->core_timing; | ||
| 414 | } | ||
| 415 | |||
| 398 | Core::PerfStats& System::GetPerfStats() { | 416 | Core::PerfStats& System::GetPerfStats() { |
| 399 | return impl->perf_stats; | 417 | return impl->perf_stats; |
| 400 | } | 418 | } |
diff --git a/src/core/core.h b/src/core/core.h index 511a5ad3a..ba76a41d8 100644 --- a/src/core/core.h +++ b/src/core/core.h | |||
| @@ -47,6 +47,10 @@ namespace VideoCore { | |||
| 47 | class RendererBase; | 47 | class RendererBase; |
| 48 | } // namespace VideoCore | 48 | } // namespace VideoCore |
| 49 | 49 | ||
| 50 | namespace Core::Timing { | ||
| 51 | class CoreTiming; | ||
| 52 | } | ||
| 53 | |||
| 50 | namespace Core { | 54 | namespace Core { |
| 51 | 55 | ||
| 52 | class ARM_Interface; | 56 | class ARM_Interface; |
| @@ -205,6 +209,12 @@ public: | |||
| 205 | /// Provides a constant pointer to the current process. | 209 | /// Provides a constant pointer to the current process. |
| 206 | const Kernel::Process* CurrentProcess() const; | 210 | const Kernel::Process* CurrentProcess() const; |
| 207 | 211 | ||
| 212 | /// Provides a reference to the core timing instance. | ||
| 213 | Timing::CoreTiming& CoreTiming(); | ||
| 214 | |||
| 215 | /// Provides a constant reference to the core timing instance. | ||
| 216 | const Timing::CoreTiming& CoreTiming() const; | ||
| 217 | |||
| 208 | /// Provides a reference to the kernel instance. | 218 | /// Provides a reference to the kernel instance. |
| 209 | Kernel::KernelCore& Kernel(); | 219 | Kernel::KernelCore& Kernel(); |
| 210 | 220 | ||
| @@ -283,10 +293,6 @@ inline ARM_Interface& CurrentArmInterface() { | |||
| 283 | return System::GetInstance().CurrentArmInterface(); | 293 | return System::GetInstance().CurrentArmInterface(); |
| 284 | } | 294 | } |
| 285 | 295 | ||
| 286 | inline TelemetrySession& Telemetry() { | ||
| 287 | return System::GetInstance().TelemetrySession(); | ||
| 288 | } | ||
| 289 | |||
| 290 | inline Kernel::Process* CurrentProcess() { | 296 | inline Kernel::Process* CurrentProcess() { |
| 291 | return System::GetInstance().CurrentProcess(); | 297 | return System::GetInstance().CurrentProcess(); |
| 292 | } | 298 | } |
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index fffda8a99..1eefed6d0 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #endif | 11 | #endif |
| 12 | #include "core/arm/exclusive_monitor.h" | 12 | #include "core/arm/exclusive_monitor.h" |
| 13 | #include "core/arm/unicorn/arm_unicorn.h" | 13 | #include "core/arm/unicorn/arm_unicorn.h" |
| 14 | #include "core/core.h" | ||
| 14 | #include "core/core_cpu.h" | 15 | #include "core/core_cpu.h" |
| 15 | #include "core/core_timing.h" | 16 | #include "core/core_timing.h" |
| 16 | #include "core/hle/kernel/scheduler.h" | 17 | #include "core/hle/kernel/scheduler.h" |
| @@ -49,20 +50,21 @@ bool CpuBarrier::Rendezvous() { | |||
| 49 | return false; | 50 | return false; |
| 50 | } | 51 | } |
| 51 | 52 | ||
| 52 | Cpu::Cpu(ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, std::size_t core_index) | 53 | Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, |
| 53 | : cpu_barrier{cpu_barrier}, core_index{core_index} { | 54 | std::size_t core_index) |
| 55 | : cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} { | ||
| 54 | if (Settings::values.use_cpu_jit) { | 56 | if (Settings::values.use_cpu_jit) { |
| 55 | #ifdef ARCHITECTURE_x86_64 | 57 | #ifdef ARCHITECTURE_x86_64 |
| 56 | arm_interface = std::make_unique<ARM_Dynarmic>(exclusive_monitor, core_index); | 58 | arm_interface = std::make_unique<ARM_Dynarmic>(core_timing, exclusive_monitor, core_index); |
| 57 | #else | 59 | #else |
| 58 | arm_interface = std::make_unique<ARM_Unicorn>(); | 60 | arm_interface = std::make_unique<ARM_Unicorn>(); |
| 59 | LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); | 61 | LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); |
| 60 | #endif | 62 | #endif |
| 61 | } else { | 63 | } else { |
| 62 | arm_interface = std::make_unique<ARM_Unicorn>(); | 64 | arm_interface = std::make_unique<ARM_Unicorn>(core_timing); |
| 63 | } | 65 | } |
| 64 | 66 | ||
| 65 | scheduler = std::make_unique<Kernel::Scheduler>(*arm_interface); | 67 | scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface); |
| 66 | } | 68 | } |
| 67 | 69 | ||
| 68 | Cpu::~Cpu() = default; | 70 | Cpu::~Cpu() = default; |
| @@ -93,14 +95,14 @@ void Cpu::RunLoop(bool tight_loop) { | |||
| 93 | 95 | ||
| 94 | if (IsMainCore()) { | 96 | if (IsMainCore()) { |
| 95 | // TODO(Subv): Only let CoreTiming idle if all 4 cores are idling. | 97 | // TODO(Subv): Only let CoreTiming idle if all 4 cores are idling. |
| 96 | CoreTiming::Idle(); | 98 | core_timing.Idle(); |
| 97 | CoreTiming::Advance(); | 99 | core_timing.Advance(); |
| 98 | } | 100 | } |
| 99 | 101 | ||
| 100 | PrepareReschedule(); | 102 | PrepareReschedule(); |
| 101 | } else { | 103 | } else { |
| 102 | if (IsMainCore()) { | 104 | if (IsMainCore()) { |
| 103 | CoreTiming::Advance(); | 105 | core_timing.Advance(); |
| 104 | } | 106 | } |
| 105 | 107 | ||
| 106 | if (tight_loop) { | 108 | if (tight_loop) { |
diff --git a/src/core/core_cpu.h b/src/core/core_cpu.h index 1d2bdc6cd..7589beb8c 100644 --- a/src/core/core_cpu.h +++ b/src/core/core_cpu.h | |||
| @@ -16,6 +16,14 @@ class Scheduler; | |||
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | namespace Core { | 18 | namespace Core { |
| 19 | class System; | ||
| 20 | } | ||
| 21 | |||
| 22 | namespace Core::Timing { | ||
| 23 | class CoreTiming; | ||
| 24 | } | ||
| 25 | |||
| 26 | namespace Core { | ||
| 19 | 27 | ||
| 20 | class ARM_Interface; | 28 | class ARM_Interface; |
| 21 | class ExclusiveMonitor; | 29 | class ExclusiveMonitor; |
| @@ -41,7 +49,8 @@ private: | |||
| 41 | 49 | ||
| 42 | class Cpu { | 50 | class Cpu { |
| 43 | public: | 51 | public: |
| 44 | Cpu(ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, std::size_t core_index); | 52 | Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, |
| 53 | std::size_t core_index); | ||
| 45 | ~Cpu(); | 54 | ~Cpu(); |
| 46 | 55 | ||
| 47 | void RunLoop(bool tight_loop = true); | 56 | void RunLoop(bool tight_loop = true); |
| @@ -82,6 +91,7 @@ private: | |||
| 82 | std::unique_ptr<ARM_Interface> arm_interface; | 91 | std::unique_ptr<ARM_Interface> arm_interface; |
| 83 | CpuBarrier& cpu_barrier; | 92 | CpuBarrier& cpu_barrier; |
| 84 | std::unique_ptr<Kernel::Scheduler> scheduler; | 93 | std::unique_ptr<Kernel::Scheduler> scheduler; |
| 94 | Timing::CoreTiming& core_timing; | ||
| 85 | 95 | ||
| 86 | std::atomic<bool> reschedule_pending = false; | 96 | std::atomic<bool> reschedule_pending = false; |
| 87 | std::size_t core_index; | 97 | std::size_t core_index; |
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 7953c8720..a0dd5db24 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp | |||
| @@ -8,149 +8,98 @@ | |||
| 8 | #include <mutex> | 8 | #include <mutex> |
| 9 | #include <string> | 9 | #include <string> |
| 10 | #include <tuple> | 10 | #include <tuple> |
| 11 | #include <unordered_map> | 11 | |
| 12 | #include <vector> | ||
| 13 | #include "common/assert.h" | 12 | #include "common/assert.h" |
| 14 | #include "common/thread.h" | 13 | #include "common/thread.h" |
| 15 | #include "common/threadsafe_queue.h" | ||
| 16 | #include "core/core_timing_util.h" | 14 | #include "core/core_timing_util.h" |
| 17 | 15 | ||
| 18 | namespace CoreTiming { | 16 | namespace Core::Timing { |
| 19 | |||
| 20 | static s64 global_timer; | ||
| 21 | static int slice_length; | ||
| 22 | static int downcount; | ||
| 23 | 17 | ||
| 24 | struct EventType { | 18 | constexpr int MAX_SLICE_LENGTH = 20000; |
| 25 | TimedCallback callback; | ||
| 26 | const std::string* name; | ||
| 27 | }; | ||
| 28 | 19 | ||
| 29 | struct Event { | 20 | struct CoreTiming::Event { |
| 30 | s64 time; | 21 | s64 time; |
| 31 | u64 fifo_order; | 22 | u64 fifo_order; |
| 32 | u64 userdata; | 23 | u64 userdata; |
| 33 | const EventType* type; | 24 | const EventType* type; |
| 34 | }; | ||
| 35 | |||
| 36 | // Sort by time, unless the times are the same, in which case sort by the order added to the queue | ||
| 37 | static bool operator>(const Event& left, const Event& right) { | ||
| 38 | return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order); | ||
| 39 | } | ||
| 40 | |||
| 41 | static bool operator<(const Event& left, const Event& right) { | ||
| 42 | return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order); | ||
| 43 | } | ||
| 44 | |||
| 45 | // unordered_map stores each element separately as a linked list node so pointers to elements | ||
| 46 | // remain stable regardless of rehashes/resizing. | ||
| 47 | static std::unordered_map<std::string, EventType> event_types; | ||
| 48 | 25 | ||
| 49 | // The queue is a min-heap using std::make_heap/push_heap/pop_heap. | 26 | // Sort by time, unless the times are the same, in which case sort by |
| 50 | // We don't use std::priority_queue because we need to be able to serialize, unserialize and | 27 | // the order added to the queue |
| 51 | // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't accomodated | 28 | friend bool operator>(const Event& left, const Event& right) { |
| 52 | // by the standard adaptor class. | 29 | return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order); |
| 53 | static std::vector<Event> event_queue; | 30 | } |
| 54 | static u64 event_fifo_id; | ||
| 55 | // the queue for storing the events from other threads threadsafe until they will be added | ||
| 56 | // to the event_queue by the emu thread | ||
| 57 | static Common::MPSCQueue<Event, false> ts_queue; | ||
| 58 | |||
| 59 | // the queue for unscheduling the events from other threads threadsafe | ||
| 60 | static Common::MPSCQueue<std::pair<const EventType*, u64>, false> unschedule_queue; | ||
| 61 | |||
| 62 | constexpr int MAX_SLICE_LENGTH = 20000; | ||
| 63 | |||
| 64 | static s64 idled_cycles; | ||
| 65 | |||
| 66 | // Are we in a function that has been called from Advance() | ||
| 67 | // If events are sheduled from a function that gets called from Advance(), | ||
| 68 | // don't change slice_length and downcount. | ||
| 69 | static bool is_global_timer_sane; | ||
| 70 | |||
| 71 | static EventType* ev_lost = nullptr; | ||
| 72 | |||
| 73 | static void EmptyTimedCallback(u64 userdata, s64 cyclesLate) {} | ||
| 74 | |||
| 75 | EventType* RegisterEvent(const std::string& name, TimedCallback callback) { | ||
| 76 | // check for existing type with same name. | ||
| 77 | // we want event type names to remain unique so that we can use them for serialization. | ||
| 78 | ASSERT_MSG(event_types.find(name) == event_types.end(), | ||
| 79 | "CoreTiming Event \"{}\" is already registered. Events should only be registered " | ||
| 80 | "during Init to avoid breaking save states.", | ||
| 81 | name.c_str()); | ||
| 82 | 31 | ||
| 83 | auto info = event_types.emplace(name, EventType{callback, nullptr}); | 32 | friend bool operator<(const Event& left, const Event& right) { |
| 84 | EventType* event_type = &info.first->second; | 33 | return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order); |
| 85 | event_type->name = &info.first->first; | 34 | } |
| 86 | return event_type; | 35 | }; |
| 87 | } | ||
| 88 | 36 | ||
| 89 | void UnregisterAllEvents() { | 37 | CoreTiming::CoreTiming() = default; |
| 90 | ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending"); | 38 | CoreTiming::~CoreTiming() = default; |
| 91 | event_types.clear(); | ||
| 92 | } | ||
| 93 | 39 | ||
| 94 | void Init() { | 40 | void CoreTiming::Initialize() { |
| 95 | downcount = MAX_SLICE_LENGTH; | 41 | downcount = MAX_SLICE_LENGTH; |
| 96 | slice_length = MAX_SLICE_LENGTH; | 42 | slice_length = MAX_SLICE_LENGTH; |
| 97 | global_timer = 0; | 43 | global_timer = 0; |
| 98 | idled_cycles = 0; | 44 | idled_cycles = 0; |
| 99 | 45 | ||
| 100 | // The time between CoreTiming being intialized and the first call to Advance() is considered | 46 | // The time between CoreTiming being initialized and the first call to Advance() is considered |
| 101 | // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before | 47 | // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before |
| 102 | // executing the first cycle of each slice to prepare the slice length and downcount for | 48 | // executing the first cycle of each slice to prepare the slice length and downcount for |
| 103 | // that slice. | 49 | // that slice. |
| 104 | is_global_timer_sane = true; | 50 | is_global_timer_sane = true; |
| 105 | 51 | ||
| 106 | event_fifo_id = 0; | 52 | event_fifo_id = 0; |
| 107 | ev_lost = RegisterEvent("_lost_event", &EmptyTimedCallback); | 53 | |
| 54 | const auto empty_timed_callback = [](u64, s64) {}; | ||
| 55 | ev_lost = RegisterEvent("_lost_event", empty_timed_callback); | ||
| 108 | } | 56 | } |
| 109 | 57 | ||
| 110 | void Shutdown() { | 58 | void CoreTiming::Shutdown() { |
| 111 | MoveEvents(); | 59 | MoveEvents(); |
| 112 | ClearPendingEvents(); | 60 | ClearPendingEvents(); |
| 113 | UnregisterAllEvents(); | 61 | UnregisterAllEvents(); |
| 114 | } | 62 | } |
| 115 | 63 | ||
| 116 | // This should only be called from the CPU thread. If you are calling | 64 | EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) { |
| 117 | // it from any other thread, you are doing something evil | 65 | // check for existing type with same name. |
| 118 | u64 GetTicks() { | 66 | // we want event type names to remain unique so that we can use them for serialization. |
| 119 | u64 ticks = static_cast<u64>(global_timer); | 67 | ASSERT_MSG(event_types.find(name) == event_types.end(), |
| 120 | if (!is_global_timer_sane) { | 68 | "CoreTiming Event \"{}\" is already registered. Events should only be registered " |
| 121 | ticks += slice_length - downcount; | 69 | "during Init to avoid breaking save states.", |
| 122 | } | 70 | name.c_str()); |
| 123 | return ticks; | ||
| 124 | } | ||
| 125 | |||
| 126 | void AddTicks(u64 ticks) { | ||
| 127 | downcount -= static_cast<int>(ticks); | ||
| 128 | } | ||
| 129 | 71 | ||
| 130 | u64 GetIdleTicks() { | 72 | auto info = event_types.emplace(name, EventType{callback, nullptr}); |
| 131 | return static_cast<u64>(idled_cycles); | 73 | EventType* event_type = &info.first->second; |
| 74 | event_type->name = &info.first->first; | ||
| 75 | return event_type; | ||
| 132 | } | 76 | } |
| 133 | 77 | ||
| 134 | void ClearPendingEvents() { | 78 | void CoreTiming::UnregisterAllEvents() { |
| 135 | event_queue.clear(); | 79 | ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending"); |
| 80 | event_types.clear(); | ||
| 136 | } | 81 | } |
| 137 | 82 | ||
| 138 | void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) { | 83 | void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) { |
| 139 | ASSERT(event_type != nullptr); | 84 | ASSERT(event_type != nullptr); |
| 140 | s64 timeout = GetTicks() + cycles_into_future; | 85 | const s64 timeout = GetTicks() + cycles_into_future; |
| 86 | |||
| 141 | // If this event needs to be scheduled before the next advance(), force one early | 87 | // If this event needs to be scheduled before the next advance(), force one early |
| 142 | if (!is_global_timer_sane) | 88 | if (!is_global_timer_sane) { |
| 143 | ForceExceptionCheck(cycles_into_future); | 89 | ForceExceptionCheck(cycles_into_future); |
| 90 | } | ||
| 91 | |||
| 144 | event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type}); | 92 | event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type}); |
| 145 | std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); | 93 | std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); |
| 146 | } | 94 | } |
| 147 | 95 | ||
| 148 | void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, u64 userdata) { | 96 | void CoreTiming::ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, |
| 97 | u64 userdata) { | ||
| 149 | ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type}); | 98 | ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type}); |
| 150 | } | 99 | } |
| 151 | 100 | ||
| 152 | void UnscheduleEvent(const EventType* event_type, u64 userdata) { | 101 | void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) { |
| 153 | auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { | 102 | const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { |
| 154 | return e.type == event_type && e.userdata == userdata; | 103 | return e.type == event_type && e.userdata == userdata; |
| 155 | }); | 104 | }); |
| 156 | 105 | ||
| @@ -161,13 +110,33 @@ void UnscheduleEvent(const EventType* event_type, u64 userdata) { | |||
| 161 | } | 110 | } |
| 162 | } | 111 | } |
| 163 | 112 | ||
| 164 | void UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata) { | 113 | void CoreTiming::UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata) { |
| 165 | unschedule_queue.Push(std::make_pair(event_type, userdata)); | 114 | unschedule_queue.Push(std::make_pair(event_type, userdata)); |
| 166 | } | 115 | } |
| 167 | 116 | ||
| 168 | void RemoveEvent(const EventType* event_type) { | 117 | u64 CoreTiming::GetTicks() const { |
| 169 | auto itr = std::remove_if(event_queue.begin(), event_queue.end(), | 118 | u64 ticks = static_cast<u64>(global_timer); |
| 170 | [&](const Event& e) { return e.type == event_type; }); | 119 | if (!is_global_timer_sane) { |
| 120 | ticks += slice_length - downcount; | ||
| 121 | } | ||
| 122 | return ticks; | ||
| 123 | } | ||
| 124 | |||
| 125 | u64 CoreTiming::GetIdleTicks() const { | ||
| 126 | return static_cast<u64>(idled_cycles); | ||
| 127 | } | ||
| 128 | |||
| 129 | void CoreTiming::AddTicks(u64 ticks) { | ||
| 130 | downcount -= static_cast<int>(ticks); | ||
| 131 | } | ||
| 132 | |||
| 133 | void CoreTiming::ClearPendingEvents() { | ||
| 134 | event_queue.clear(); | ||
| 135 | } | ||
| 136 | |||
| 137 | void CoreTiming::RemoveEvent(const EventType* event_type) { | ||
| 138 | const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), | ||
| 139 | [&](const Event& e) { return e.type == event_type; }); | ||
| 171 | 140 | ||
| 172 | // Removing random items breaks the invariant so we have to re-establish it. | 141 | // Removing random items breaks the invariant so we have to re-establish it. |
| 173 | if (itr != event_queue.end()) { | 142 | if (itr != event_queue.end()) { |
| @@ -176,22 +145,24 @@ void RemoveEvent(const EventType* event_type) { | |||
| 176 | } | 145 | } |
| 177 | } | 146 | } |
| 178 | 147 | ||
| 179 | void RemoveNormalAndThreadsafeEvent(const EventType* event_type) { | 148 | void CoreTiming::RemoveNormalAndThreadsafeEvent(const EventType* event_type) { |
| 180 | MoveEvents(); | 149 | MoveEvents(); |
| 181 | RemoveEvent(event_type); | 150 | RemoveEvent(event_type); |
| 182 | } | 151 | } |
| 183 | 152 | ||
| 184 | void ForceExceptionCheck(s64 cycles) { | 153 | void CoreTiming::ForceExceptionCheck(s64 cycles) { |
| 185 | cycles = std::max<s64>(0, cycles); | 154 | cycles = std::max<s64>(0, cycles); |
| 186 | if (downcount > cycles) { | 155 | if (downcount <= cycles) { |
| 187 | // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int | 156 | return; |
| 188 | // here. Account for cycles already executed by adjusting the g.slice_length | ||
| 189 | slice_length -= downcount - static_cast<int>(cycles); | ||
| 190 | downcount = static_cast<int>(cycles); | ||
| 191 | } | 157 | } |
| 158 | |||
| 159 | // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int | ||
| 160 | // here. Account for cycles already executed by adjusting the g.slice_length | ||
| 161 | slice_length -= downcount - static_cast<int>(cycles); | ||
| 162 | downcount = static_cast<int>(cycles); | ||
| 192 | } | 163 | } |
| 193 | 164 | ||
| 194 | void MoveEvents() { | 165 | void CoreTiming::MoveEvents() { |
| 195 | for (Event ev; ts_queue.Pop(ev);) { | 166 | for (Event ev; ts_queue.Pop(ev);) { |
| 196 | ev.fifo_order = event_fifo_id++; | 167 | ev.fifo_order = event_fifo_id++; |
| 197 | event_queue.emplace_back(std::move(ev)); | 168 | event_queue.emplace_back(std::move(ev)); |
| @@ -199,13 +170,13 @@ void MoveEvents() { | |||
| 199 | } | 170 | } |
| 200 | } | 171 | } |
| 201 | 172 | ||
| 202 | void Advance() { | 173 | void CoreTiming::Advance() { |
| 203 | MoveEvents(); | 174 | MoveEvents(); |
| 204 | for (std::pair<const EventType*, u64> ev; unschedule_queue.Pop(ev);) { | 175 | for (std::pair<const EventType*, u64> ev; unschedule_queue.Pop(ev);) { |
| 205 | UnscheduleEvent(ev.first, ev.second); | 176 | UnscheduleEvent(ev.first, ev.second); |
| 206 | } | 177 | } |
| 207 | 178 | ||
| 208 | int cycles_executed = slice_length - downcount; | 179 | const int cycles_executed = slice_length - downcount; |
| 209 | global_timer += cycles_executed; | 180 | global_timer += cycles_executed; |
| 210 | slice_length = MAX_SLICE_LENGTH; | 181 | slice_length = MAX_SLICE_LENGTH; |
| 211 | 182 | ||
| @@ -229,17 +200,17 @@ void Advance() { | |||
| 229 | downcount = slice_length; | 200 | downcount = slice_length; |
| 230 | } | 201 | } |
| 231 | 202 | ||
| 232 | void Idle() { | 203 | void CoreTiming::Idle() { |
| 233 | idled_cycles += downcount; | 204 | idled_cycles += downcount; |
| 234 | downcount = 0; | 205 | downcount = 0; |
| 235 | } | 206 | } |
| 236 | 207 | ||
| 237 | std::chrono::microseconds GetGlobalTimeUs() { | 208 | std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const { |
| 238 | return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE}; | 209 | return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE}; |
| 239 | } | 210 | } |
| 240 | 211 | ||
| 241 | int GetDowncount() { | 212 | int CoreTiming::GetDowncount() const { |
| 242 | return downcount; | 213 | return downcount; |
| 243 | } | 214 | } |
| 244 | 215 | ||
| 245 | } // namespace CoreTiming | 216 | } // namespace Core::Timing |
diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 9ed757bd7..59163bae1 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h | |||
| @@ -4,6 +4,27 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <chrono> | ||
| 8 | #include <functional> | ||
| 9 | #include <string> | ||
| 10 | #include <unordered_map> | ||
| 11 | #include <vector> | ||
| 12 | #include "common/common_types.h" | ||
| 13 | #include "common/threadsafe_queue.h" | ||
| 14 | |||
| 15 | namespace Core::Timing { | ||
| 16 | |||
| 17 | /// A callback that may be scheduled for a particular core timing event. | ||
| 18 | using TimedCallback = std::function<void(u64 userdata, int cycles_late)>; | ||
| 19 | |||
| 20 | /// Contains the characteristics of a particular event. | ||
| 21 | struct EventType { | ||
| 22 | /// The event's callback function. | ||
| 23 | TimedCallback callback; | ||
| 24 | /// A pointer to the name of the event. | ||
| 25 | const std::string* name; | ||
| 26 | }; | ||
| 27 | |||
| 7 | /** | 28 | /** |
| 8 | * This is a system to schedule events into the emulated machine's future. Time is measured | 29 | * This is a system to schedule events into the emulated machine's future. Time is measured |
| 9 | * in main CPU clock cycles. | 30 | * in main CPU clock cycles. |
| @@ -16,80 +37,120 @@ | |||
| 16 | * inside callback: | 37 | * inside callback: |
| 17 | * ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever") | 38 | * ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever") |
| 18 | */ | 39 | */ |
| 19 | 40 | class CoreTiming { | |
| 20 | #include <chrono> | 41 | public: |
| 21 | #include <functional> | 42 | CoreTiming(); |
| 22 | #include <string> | 43 | ~CoreTiming(); |
| 23 | #include "common/common_types.h" | 44 | |
| 24 | 45 | CoreTiming(const CoreTiming&) = delete; | |
| 25 | namespace CoreTiming { | 46 | CoreTiming(CoreTiming&&) = delete; |
| 26 | 47 | ||
| 27 | struct EventType; | 48 | CoreTiming& operator=(const CoreTiming&) = delete; |
| 28 | 49 | CoreTiming& operator=(CoreTiming&&) = delete; | |
| 29 | using TimedCallback = std::function<void(u64 userdata, int cycles_late)>; | 50 | |
| 30 | 51 | /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is | |
| 31 | /** | 52 | /// required to end slice - 1 and start slice 0 before the first cycle of code is executed. |
| 32 | * CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is | 53 | void Initialize(); |
| 33 | * required to end slice -1 and start slice 0 before the first cycle of code is executed. | 54 | |
| 34 | */ | 55 | /// Tears down all timing related functionality. |
| 35 | void Init(); | 56 | void Shutdown(); |
| 36 | void Shutdown(); | 57 | |
| 37 | 58 | /// Registers a core timing event with the given name and callback. | |
| 38 | /** | 59 | /// |
| 39 | * This should only be called from the emu thread, if you are calling it any other thread, you are | 60 | /// @param name The name of the core timing event to register. |
| 40 | * doing something evil | 61 | /// @param callback The callback to execute for the event. |
| 41 | */ | 62 | /// |
| 42 | u64 GetTicks(); | 63 | /// @returns An EventType instance representing the registered event. |
| 43 | u64 GetIdleTicks(); | 64 | /// |
| 44 | void AddTicks(u64 ticks); | 65 | /// @pre The name of the event being registered must be unique among all |
| 45 | 66 | /// registered events. | |
| 46 | /** | 67 | /// |
| 47 | * Returns the event_type identifier. if name is not unique, it will assert. | 68 | EventType* RegisterEvent(const std::string& name, TimedCallback callback); |
| 48 | */ | 69 | |
| 49 | EventType* RegisterEvent(const std::string& name, TimedCallback callback); | 70 | /// Unregisters all registered events thus far. |
| 50 | void UnregisterAllEvents(); | 71 | void UnregisterAllEvents(); |
| 51 | 72 | ||
| 52 | /** | 73 | /// After the first Advance, the slice lengths and the downcount will be reduced whenever an |
| 53 | * After the first Advance, the slice lengths and the downcount will be reduced whenever an event | 74 | /// event is scheduled earlier than the current values. |
| 54 | * is scheduled earlier than the current values. | 75 | /// |
| 55 | * Scheduling from a callback will not update the downcount until the Advance() completes. | 76 | /// Scheduling from a callback will not update the downcount until the Advance() completes. |
| 56 | */ | 77 | void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0); |
| 57 | void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0); | 78 | |
| 58 | 79 | /// This is to be called when outside of hle threads, such as the graphics thread, wants to | |
| 59 | /** | 80 | /// schedule things to be executed on the main thread. |
| 60 | * This is to be called when outside of hle threads, such as the graphics thread, wants to | 81 | /// |
| 61 | * schedule things to be executed on the main thread. | 82 | /// @note This doesn't change slice_length and thus events scheduled by this might be |
| 62 | * Not that this doesn't change slice_length and thus events scheduled by this might be called | 83 | /// called with a delay of up to MAX_SLICE_LENGTH |
| 63 | * with a delay of up to MAX_SLICE_LENGTH | 84 | void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, |
| 64 | */ | 85 | u64 userdata = 0); |
| 65 | void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, u64 userdata); | 86 | |
| 66 | 87 | void UnscheduleEvent(const EventType* event_type, u64 userdata); | |
| 67 | void UnscheduleEvent(const EventType* event_type, u64 userdata); | 88 | void UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata); |
| 68 | void UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata); | 89 | |
| 69 | 90 | /// We only permit one event of each type in the queue at a time. | |
| 70 | /// We only permit one event of each type in the queue at a time. | 91 | void RemoveEvent(const EventType* event_type); |
| 71 | void RemoveEvent(const EventType* event_type); | 92 | void RemoveNormalAndThreadsafeEvent(const EventType* event_type); |
| 72 | void RemoveNormalAndThreadsafeEvent(const EventType* event_type); | 93 | |
| 73 | 94 | void ForceExceptionCheck(s64 cycles); | |
| 74 | /** Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends | 95 | |
| 75 | * the previous timing slice and begins the next one, you must Advance from the previous | 96 | /// This should only be called from the emu thread, if you are calling it any other thread, |
| 76 | * slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an | 97 | /// you are doing something evil |
| 77 | * Advance() is required to initialize the slice length before the first cycle of emulated | 98 | u64 GetTicks() const; |
| 78 | * instructions is executed. | 99 | |
| 79 | */ | 100 | u64 GetIdleTicks() const; |
| 80 | void Advance(); | 101 | |
| 81 | void MoveEvents(); | 102 | void AddTicks(u64 ticks); |
| 82 | 103 | ||
| 83 | /// Pretend that the main CPU has executed enough cycles to reach the next event. | 104 | /// Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends |
| 84 | void Idle(); | 105 | /// the previous timing slice and begins the next one, you must Advance from the previous |
| 85 | 106 | /// slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an | |
| 86 | /// Clear all pending events. This should ONLY be done on exit. | 107 | /// Advance() is required to initialize the slice length before the first cycle of emulated |
| 87 | void ClearPendingEvents(); | 108 | /// instructions is executed. |
| 88 | 109 | void Advance(); | |
| 89 | void ForceExceptionCheck(s64 cycles); | 110 | |
| 90 | 111 | /// Pretend that the main CPU has executed enough cycles to reach the next event. | |
| 91 | std::chrono::microseconds GetGlobalTimeUs(); | 112 | void Idle(); |
| 92 | 113 | ||
| 93 | int GetDowncount(); | 114 | std::chrono::microseconds GetGlobalTimeUs() const; |
| 94 | 115 | ||
| 95 | } // namespace CoreTiming | 116 | int GetDowncount() const; |
| 117 | |||
| 118 | private: | ||
| 119 | struct Event; | ||
| 120 | |||
| 121 | /// Clear all pending events. This should ONLY be done on exit. | ||
| 122 | void ClearPendingEvents(); | ||
| 123 | void MoveEvents(); | ||
| 124 | |||
| 125 | s64 global_timer = 0; | ||
| 126 | s64 idled_cycles = 0; | ||
| 127 | int slice_length = 0; | ||
| 128 | int downcount = 0; | ||
| 129 | |||
| 130 | // Are we in a function that has been called from Advance() | ||
| 131 | // If events are scheduled from a function that gets called from Advance(), | ||
| 132 | // don't change slice_length and downcount. | ||
| 133 | bool is_global_timer_sane = false; | ||
| 134 | |||
| 135 | // The queue is a min-heap using std::make_heap/push_heap/pop_heap. | ||
| 136 | // We don't use std::priority_queue because we need to be able to serialize, unserialize and | ||
| 137 | // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't | ||
| 138 | // accomodated by the standard adaptor class. | ||
| 139 | std::vector<Event> event_queue; | ||
| 140 | u64 event_fifo_id = 0; | ||
| 141 | |||
| 142 | // Stores each element separately as a linked list node so pointers to elements | ||
| 143 | // remain stable regardless of rehashes/resizing. | ||
| 144 | std::unordered_map<std::string, EventType> event_types; | ||
| 145 | |||
| 146 | // The queue for storing the events from other threads threadsafe until they will be added | ||
| 147 | // to the event_queue by the emu thread | ||
| 148 | Common::MPSCQueue<Event> ts_queue; | ||
| 149 | |||
| 150 | // The queue for unscheduling the events from other threads threadsafe | ||
| 151 | Common::MPSCQueue<std::pair<const EventType*, u64>> unschedule_queue; | ||
| 152 | |||
| 153 | EventType* ev_lost = nullptr; | ||
| 154 | }; | ||
| 155 | |||
| 156 | } // namespace Core::Timing | ||
diff --git a/src/core/core_timing_util.cpp b/src/core/core_timing_util.cpp index 73dea4edb..7942f30d6 100644 --- a/src/core/core_timing_util.cpp +++ b/src/core/core_timing_util.cpp | |||
| @@ -7,8 +7,9 @@ | |||
| 7 | #include <cinttypes> | 7 | #include <cinttypes> |
| 8 | #include <limits> | 8 | #include <limits> |
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "common/uint128.h" | ||
| 10 | 11 | ||
| 11 | namespace CoreTiming { | 12 | namespace Core::Timing { |
| 12 | 13 | ||
| 13 | constexpr u64 MAX_VALUE_TO_MULTIPLY = std::numeric_limits<s64>::max() / BASE_CLOCK_RATE; | 14 | constexpr u64 MAX_VALUE_TO_MULTIPLY = std::numeric_limits<s64>::max() / BASE_CLOCK_RATE; |
| 14 | 15 | ||
| @@ -60,4 +61,9 @@ s64 nsToCycles(u64 ns) { | |||
| 60 | return (BASE_CLOCK_RATE * static_cast<s64>(ns)) / 1000000000; | 61 | return (BASE_CLOCK_RATE * static_cast<s64>(ns)) / 1000000000; |
| 61 | } | 62 | } |
| 62 | 63 | ||
| 63 | } // namespace CoreTiming | 64 | u64 CpuCyclesToClockCycles(u64 ticks) { |
| 65 | const u128 temporal = Common::Multiply64Into128(ticks, CNTFREQ); | ||
| 66 | return Common::Divide128On32(temporal, static_cast<u32>(BASE_CLOCK_RATE)).first; | ||
| 67 | } | ||
| 68 | |||
| 69 | } // namespace Core::Timing | ||
diff --git a/src/core/core_timing_util.h b/src/core/core_timing_util.h index 5c3718782..679aa3123 100644 --- a/src/core/core_timing_util.h +++ b/src/core/core_timing_util.h | |||
| @@ -6,11 +6,12 @@ | |||
| 6 | 6 | ||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | 8 | ||
| 9 | namespace CoreTiming { | 9 | namespace Core::Timing { |
| 10 | 10 | ||
| 11 | // The below clock rate is based on Switch's clockspeed being widely known as 1.020GHz | 11 | // The below clock rate is based on Switch's clockspeed being widely known as 1.020GHz |
| 12 | // The exact value used is of course unverified. | 12 | // The exact value used is of course unverified. |
| 13 | constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch clock speed is 1020MHz un/docked | 13 | constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch clock speed is 1020MHz un/docked |
| 14 | constexpr u64 CNTFREQ = 19200000; // Value from fusee. | ||
| 14 | 15 | ||
| 15 | inline s64 msToCycles(int ms) { | 16 | inline s64 msToCycles(int ms) { |
| 16 | // since ms is int there is no way to overflow | 17 | // since ms is int there is no way to overflow |
| @@ -61,4 +62,6 @@ inline u64 cyclesToMs(s64 cycles) { | |||
| 61 | return cycles * 1000 / BASE_CLOCK_RATE; | 62 | return cycles * 1000 / BASE_CLOCK_RATE; |
| 62 | } | 63 | } |
| 63 | 64 | ||
| 64 | } // namespace CoreTiming | 65 | u64 CpuCyclesToClockCycles(u64 ticks); |
| 66 | |||
| 67 | } // namespace Core::Timing | ||
diff --git a/src/core/cpu_core_manager.cpp b/src/core/cpu_core_manager.cpp index 769a6fefa..93bc5619c 100644 --- a/src/core/cpu_core_manager.cpp +++ b/src/core/cpu_core_manager.cpp | |||
| @@ -27,7 +27,7 @@ void CpuCoreManager::Initialize(System& system) { | |||
| 27 | exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size()); | 27 | exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size()); |
| 28 | 28 | ||
| 29 | for (std::size_t index = 0; index < cores.size(); ++index) { | 29 | for (std::size_t index = 0; index < cores.size(); ++index) { |
| 30 | cores[index] = std::make_unique<Cpu>(*exclusive_monitor, *barrier, index); | 30 | cores[index] = std::make_unique<Cpu>(system, *exclusive_monitor, *barrier, index); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | // Create threads for CPU cores 1-3, and build thread_to_cpu map | 33 | // Create threads for CPU cores 1-3, and build thread_to_cpu map |
diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp index ca12fb4ab..dfac9a4b3 100644 --- a/src/core/crypto/key_manager.cpp +++ b/src/core/crypto/key_manager.cpp | |||
| @@ -398,7 +398,8 @@ static bool ValidCryptoRevisionString(std::string_view base, size_t begin, size_ | |||
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | void KeyManager::LoadFromFile(const std::string& filename, bool is_title_keys) { | 400 | void KeyManager::LoadFromFile(const std::string& filename, bool is_title_keys) { |
| 401 | std::ifstream file(filename); | 401 | std::ifstream file; |
| 402 | OpenFStream(file, filename, std::ios_base::in); | ||
| 402 | if (!file.is_open()) | 403 | if (!file.is_open()) |
| 403 | return; | 404 | return; |
| 404 | 405 | ||
diff --git a/src/core/file_sys/content_archive.h b/src/core/file_sys/content_archive.h index 5d4d05c82..15b9e6624 100644 --- a/src/core/file_sys/content_archive.h +++ b/src/core/file_sys/content_archive.h | |||
| @@ -24,13 +24,26 @@ namespace FileSys { | |||
| 24 | 24 | ||
| 25 | union NCASectionHeader; | 25 | union NCASectionHeader; |
| 26 | 26 | ||
| 27 | /// Describes the type of content within an NCA archive. | ||
| 27 | enum class NCAContentType : u8 { | 28 | enum class NCAContentType : u8 { |
| 29 | /// Executable-related data | ||
| 28 | Program = 0, | 30 | Program = 0, |
| 31 | |||
| 32 | /// Metadata. | ||
| 29 | Meta = 1, | 33 | Meta = 1, |
| 34 | |||
| 35 | /// Access control data. | ||
| 30 | Control = 2, | 36 | Control = 2, |
| 37 | |||
| 38 | /// Information related to the game manual | ||
| 39 | /// e.g. Legal information, etc. | ||
| 31 | Manual = 3, | 40 | Manual = 3, |
| 41 | |||
| 42 | /// System data. | ||
| 32 | Data = 4, | 43 | Data = 4, |
| 33 | Data_Unknown5 = 5, ///< Seems to be used on some system archives | 44 | |
| 45 | /// Data that can be accessed by applications. | ||
| 46 | PublicData = 5, | ||
| 34 | }; | 47 | }; |
| 35 | 48 | ||
| 36 | enum class NCASectionCryptoType : u8 { | 49 | enum class NCASectionCryptoType : u8 { |
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp index 128199063..1c6bacace 100644 --- a/src/core/file_sys/registered_cache.cpp +++ b/src/core/file_sys/registered_cache.cpp | |||
| @@ -94,7 +94,7 @@ static ContentRecordType GetCRTypeFromNCAType(NCAContentType type) { | |||
| 94 | case NCAContentType::Control: | 94 | case NCAContentType::Control: |
| 95 | return ContentRecordType::Control; | 95 | return ContentRecordType::Control; |
| 96 | case NCAContentType::Data: | 96 | case NCAContentType::Data: |
| 97 | case NCAContentType::Data_Unknown5: | 97 | case NCAContentType::PublicData: |
| 98 | return ContentRecordType::Data; | 98 | return ContentRecordType::Data; |
| 99 | case NCAContentType::Manual: | 99 | case NCAContentType::Manual: |
| 100 | // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal. | 100 | // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal. |
diff --git a/src/core/file_sys/vfs_vector.cpp b/src/core/file_sys/vfs_vector.cpp index 515626658..75fc04302 100644 --- a/src/core/file_sys/vfs_vector.cpp +++ b/src/core/file_sys/vfs_vector.cpp | |||
| @@ -47,7 +47,7 @@ std::size_t VectorVfsFile::Write(const u8* data_, std::size_t length, std::size_ | |||
| 47 | if (offset + length > data.size()) | 47 | if (offset + length > data.size()) |
| 48 | data.resize(offset + length); | 48 | data.resize(offset + length); |
| 49 | const auto write = std::min(length, data.size() - offset); | 49 | const auto write = std::min(length, data.size() - offset); |
| 50 | std::memcpy(data.data(), data_, write); | 50 | std::memcpy(data.data() + offset, data_, write); |
| 51 | return write; | 51 | return write; |
| 52 | } | 52 | } |
| 53 | 53 | ||
diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp index 9dd493efb..e29afd630 100644 --- a/src/core/frontend/emu_window.cpp +++ b/src/core/frontend/emu_window.cpp | |||
| @@ -67,7 +67,7 @@ static bool IsWithinTouchscreen(const Layout::FramebufferLayout& layout, unsigne | |||
| 67 | framebuffer_x >= layout.screen.left && framebuffer_x < layout.screen.right); | 67 | framebuffer_x >= layout.screen.left && framebuffer_x < layout.screen.right); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | std::tuple<unsigned, unsigned> EmuWindow::ClipToTouchScreen(unsigned new_x, unsigned new_y) { | 70 | std::tuple<unsigned, unsigned> EmuWindow::ClipToTouchScreen(unsigned new_x, unsigned new_y) const { |
| 71 | new_x = std::max(new_x, framebuffer_layout.screen.left); | 71 | new_x = std::max(new_x, framebuffer_layout.screen.left); |
| 72 | new_x = std::min(new_x, framebuffer_layout.screen.right - 1); | 72 | new_x = std::min(new_x, framebuffer_layout.screen.right - 1); |
| 73 | 73 | ||
diff --git a/src/core/frontend/emu_window.h b/src/core/frontend/emu_window.h index 7006a37b3..d0bcb4660 100644 --- a/src/core/frontend/emu_window.h +++ b/src/core/frontend/emu_window.h | |||
| @@ -166,7 +166,7 @@ private: | |||
| 166 | /** | 166 | /** |
| 167 | * Clip the provided coordinates to be inside the touchscreen area. | 167 | * Clip the provided coordinates to be inside the touchscreen area. |
| 168 | */ | 168 | */ |
| 169 | std::tuple<unsigned, unsigned> ClipToTouchScreen(unsigned new_x, unsigned new_y); | 169 | std::tuple<unsigned, unsigned> ClipToTouchScreen(unsigned new_x, unsigned new_y) const; |
| 170 | }; | 170 | }; |
| 171 | 171 | ||
| 172 | } // namespace Core::Frontend | 172 | } // namespace Core::Frontend |
diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp index f8662d193..a1357179f 100644 --- a/src/core/frontend/framebuffer_layout.cpp +++ b/src/core/frontend/framebuffer_layout.cpp | |||
| @@ -12,12 +12,12 @@ namespace Layout { | |||
| 12 | 12 | ||
| 13 | // Finds the largest size subrectangle contained in window area that is confined to the aspect ratio | 13 | // Finds the largest size subrectangle contained in window area that is confined to the aspect ratio |
| 14 | template <class T> | 14 | template <class T> |
| 15 | static MathUtil::Rectangle<T> maxRectangle(MathUtil::Rectangle<T> window_area, | 15 | static Common::Rectangle<T> MaxRectangle(Common::Rectangle<T> window_area, |
| 16 | float screen_aspect_ratio) { | 16 | float screen_aspect_ratio) { |
| 17 | float scale = std::min(static_cast<float>(window_area.GetWidth()), | 17 | float scale = std::min(static_cast<float>(window_area.GetWidth()), |
| 18 | window_area.GetHeight() / screen_aspect_ratio); | 18 | window_area.GetHeight() / screen_aspect_ratio); |
| 19 | return MathUtil::Rectangle<T>{0, 0, static_cast<T>(std::round(scale)), | 19 | return Common::Rectangle<T>{0, 0, static_cast<T>(std::round(scale)), |
| 20 | static_cast<T>(std::round(scale * screen_aspect_ratio))}; | 20 | static_cast<T>(std::round(scale * screen_aspect_ratio))}; |
| 21 | } | 21 | } |
| 22 | 22 | ||
| 23 | FramebufferLayout DefaultFrameLayout(unsigned width, unsigned height) { | 23 | FramebufferLayout DefaultFrameLayout(unsigned width, unsigned height) { |
| @@ -29,8 +29,8 @@ FramebufferLayout DefaultFrameLayout(unsigned width, unsigned height) { | |||
| 29 | 29 | ||
| 30 | const float emulation_aspect_ratio{static_cast<float>(ScreenUndocked::Height) / | 30 | const float emulation_aspect_ratio{static_cast<float>(ScreenUndocked::Height) / |
| 31 | ScreenUndocked::Width}; | 31 | ScreenUndocked::Width}; |
| 32 | MathUtil::Rectangle<unsigned> screen_window_area{0, 0, width, height}; | 32 | Common::Rectangle<unsigned> screen_window_area{0, 0, width, height}; |
| 33 | MathUtil::Rectangle<unsigned> screen = maxRectangle(screen_window_area, emulation_aspect_ratio); | 33 | Common::Rectangle<unsigned> screen = MaxRectangle(screen_window_area, emulation_aspect_ratio); |
| 34 | 34 | ||
| 35 | float window_aspect_ratio = static_cast<float>(height) / width; | 35 | float window_aspect_ratio = static_cast<float>(height) / width; |
| 36 | 36 | ||
diff --git a/src/core/frontend/framebuffer_layout.h b/src/core/frontend/framebuffer_layout.h index e06647794..c2c63d08c 100644 --- a/src/core/frontend/framebuffer_layout.h +++ b/src/core/frontend/framebuffer_layout.h | |||
| @@ -16,7 +16,7 @@ struct FramebufferLayout { | |||
| 16 | unsigned width{ScreenUndocked::Width}; | 16 | unsigned width{ScreenUndocked::Width}; |
| 17 | unsigned height{ScreenUndocked::Height}; | 17 | unsigned height{ScreenUndocked::Height}; |
| 18 | 18 | ||
| 19 | MathUtil::Rectangle<unsigned> screen; | 19 | Common::Rectangle<unsigned> screen; |
| 20 | 20 | ||
| 21 | /** | 21 | /** |
| 22 | * Returns the ratio of pixel size of the screen, compared to the native size of the undocked | 22 | * Returns the ratio of pixel size of the screen, compared to the native size of the undocked |
diff --git a/src/core/frontend/input.h b/src/core/frontend/input.h index 16fdcd376..7c11d7546 100644 --- a/src/core/frontend/input.h +++ b/src/core/frontend/input.h | |||
| @@ -124,7 +124,7 @@ using AnalogDevice = InputDevice<std::tuple<float, float>>; | |||
| 124 | * Orientation is determined by right-hand rule. | 124 | * Orientation is determined by right-hand rule. |
| 125 | * Units: deg/sec | 125 | * Units: deg/sec |
| 126 | */ | 126 | */ |
| 127 | using MotionDevice = InputDevice<std::tuple<Math::Vec3<float>, Math::Vec3<float>>>; | 127 | using MotionDevice = InputDevice<std::tuple<Common::Vec3<float>, Common::Vec3<float>>>; |
| 128 | 128 | ||
| 129 | /** | 129 | /** |
| 130 | * A touch device is an input device that returns a tuple of two floats and a bool. The floats are | 130 | * A touch device is an input device that returns a tuple of two floats and a bool. The floats are |
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp index a1cad4fcb..dafb32aae 100644 --- a/src/core/gdbstub/gdbstub.cpp +++ b/src/core/gdbstub/gdbstub.cpp | |||
| @@ -507,8 +507,11 @@ static void RemoveBreakpoint(BreakpointType type, VAddr addr) { | |||
| 507 | 507 | ||
| 508 | LOG_DEBUG(Debug_GDBStub, "gdb: removed a breakpoint: {:016X} bytes at {:016X} of type {}", | 508 | LOG_DEBUG(Debug_GDBStub, "gdb: removed a breakpoint: {:016X} bytes at {:016X} of type {}", |
| 509 | bp->second.len, bp->second.addr, static_cast<int>(type)); | 509 | bp->second.len, bp->second.addr, static_cast<int>(type)); |
| 510 | Memory::WriteBlock(bp->second.addr, bp->second.inst.data(), bp->second.inst.size()); | 510 | |
| 511 | Core::System::GetInstance().InvalidateCpuInstructionCaches(); | 511 | if (type == BreakpointType::Execute) { |
| 512 | Memory::WriteBlock(bp->second.addr, bp->second.inst.data(), bp->second.inst.size()); | ||
| 513 | Core::System::GetInstance().InvalidateCpuInstructionCaches(); | ||
| 514 | } | ||
| 512 | p.erase(addr); | 515 | p.erase(addr); |
| 513 | } | 516 | } |
| 514 | 517 | ||
| @@ -1057,9 +1060,12 @@ static bool CommitBreakpoint(BreakpointType type, VAddr addr, u64 len) { | |||
| 1057 | breakpoint.addr = addr; | 1060 | breakpoint.addr = addr; |
| 1058 | breakpoint.len = len; | 1061 | breakpoint.len = len; |
| 1059 | Memory::ReadBlock(addr, breakpoint.inst.data(), breakpoint.inst.size()); | 1062 | Memory::ReadBlock(addr, breakpoint.inst.data(), breakpoint.inst.size()); |
| 1063 | |||
| 1060 | static constexpr std::array<u8, 4> btrap{0x00, 0x7d, 0x20, 0xd4}; | 1064 | static constexpr std::array<u8, 4> btrap{0x00, 0x7d, 0x20, 0xd4}; |
| 1061 | Memory::WriteBlock(addr, btrap.data(), btrap.size()); | 1065 | if (type == BreakpointType::Execute) { |
| 1062 | Core::System::GetInstance().InvalidateCpuInstructionCaches(); | 1066 | Memory::WriteBlock(addr, btrap.data(), btrap.size()); |
| 1067 | Core::System::GetInstance().InvalidateCpuInstructionCaches(); | ||
| 1068 | } | ||
| 1063 | p.insert({addr, breakpoint}); | 1069 | p.insert({addr, breakpoint}); |
| 1064 | 1070 | ||
| 1065 | LOG_DEBUG(Debug_GDBStub, "gdb: added {} breakpoint: {:016X} bytes at {:016X}", | 1071 | LOG_DEBUG(Debug_GDBStub, "gdb: added {} breakpoint: {:016X} bytes at {:016X}", |
diff --git a/src/core/hle/ipc.h b/src/core/hle/ipc.h index 96c8677d2..fae54bcc7 100644 --- a/src/core/hle/ipc.h +++ b/src/core/hle/ipc.h | |||
| @@ -4,10 +4,10 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "common/bit_field.h" | ||
| 8 | #include "common/common_funcs.h" | ||
| 7 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 8 | #include "common/swap.h" | 10 | #include "common/swap.h" |
| 9 | #include "core/hle/kernel/errors.h" | ||
| 10 | #include "core/memory.h" | ||
| 11 | 11 | ||
| 12 | namespace IPC { | 12 | namespace IPC { |
| 13 | 13 | ||
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index 90f276ee8..68406eb63 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h | |||
| @@ -19,9 +19,12 @@ | |||
| 19 | #include "core/hle/kernel/hle_ipc.h" | 19 | #include "core/hle/kernel/hle_ipc.h" |
| 20 | #include "core/hle/kernel/object.h" | 20 | #include "core/hle/kernel/object.h" |
| 21 | #include "core/hle/kernel/server_session.h" | 21 | #include "core/hle/kernel/server_session.h" |
| 22 | #include "core/hle/result.h" | ||
| 22 | 23 | ||
| 23 | namespace IPC { | 24 | namespace IPC { |
| 24 | 25 | ||
| 26 | constexpr ResultCode ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301}; | ||
| 27 | |||
| 25 | class RequestHelperBase { | 28 | class RequestHelperBase { |
| 26 | protected: | 29 | protected: |
| 27 | Kernel::HLERequestContext* context = nullptr; | 30 | Kernel::HLERequestContext* context = nullptr; |
| @@ -272,6 +275,20 @@ inline void ResponseBuilder::Push(u64 value) { | |||
| 272 | } | 275 | } |
| 273 | 276 | ||
| 274 | template <> | 277 | template <> |
| 278 | inline void ResponseBuilder::Push(float value) { | ||
| 279 | u32 integral; | ||
| 280 | std::memcpy(&integral, &value, sizeof(u32)); | ||
| 281 | Push(integral); | ||
| 282 | } | ||
| 283 | |||
| 284 | template <> | ||
| 285 | inline void ResponseBuilder::Push(double value) { | ||
| 286 | u64 integral; | ||
| 287 | std::memcpy(&integral, &value, sizeof(u64)); | ||
| 288 | Push(integral); | ||
| 289 | } | ||
| 290 | |||
| 291 | template <> | ||
| 275 | inline void ResponseBuilder::Push(bool value) { | 292 | inline void ResponseBuilder::Push(bool value) { |
| 276 | Push(static_cast<u8>(value)); | 293 | Push(static_cast<u8>(value)); |
| 277 | } | 294 | } |
| @@ -350,7 +367,7 @@ public: | |||
| 350 | template <class T> | 367 | template <class T> |
| 351 | std::shared_ptr<T> PopIpcInterface() { | 368 | std::shared_ptr<T> PopIpcInterface() { |
| 352 | ASSERT(context->Session()->IsDomain()); | 369 | ASSERT(context->Session()->IsDomain()); |
| 353 | ASSERT(context->GetDomainMessageHeader()->input_object_count > 0); | 370 | ASSERT(context->GetDomainMessageHeader().input_object_count > 0); |
| 354 | return context->GetDomainRequestHandler<T>(Pop<u32>() - 1); | 371 | return context->GetDomainRequestHandler<T>(Pop<u32>() - 1); |
| 355 | } | 372 | } |
| 356 | }; | 373 | }; |
| @@ -362,6 +379,11 @@ inline u32 RequestParser::Pop() { | |||
| 362 | return cmdbuf[index++]; | 379 | return cmdbuf[index++]; |
| 363 | } | 380 | } |
| 364 | 381 | ||
| 382 | template <> | ||
| 383 | inline s32 RequestParser::Pop() { | ||
| 384 | return static_cast<s32>(Pop<u32>()); | ||
| 385 | } | ||
| 386 | |||
| 365 | template <typename T> | 387 | template <typename T> |
| 366 | void RequestParser::PopRaw(T& value) { | 388 | void RequestParser::PopRaw(T& value) { |
| 367 | std::memcpy(&value, cmdbuf + index, sizeof(T)); | 389 | std::memcpy(&value, cmdbuf + index, sizeof(T)); |
| @@ -393,11 +415,37 @@ inline u64 RequestParser::Pop() { | |||
| 393 | } | 415 | } |
| 394 | 416 | ||
| 395 | template <> | 417 | template <> |
| 418 | inline s8 RequestParser::Pop() { | ||
| 419 | return static_cast<s8>(Pop<u8>()); | ||
| 420 | } | ||
| 421 | |||
| 422 | template <> | ||
| 423 | inline s16 RequestParser::Pop() { | ||
| 424 | return static_cast<s16>(Pop<u16>()); | ||
| 425 | } | ||
| 426 | |||
| 427 | template <> | ||
| 396 | inline s64 RequestParser::Pop() { | 428 | inline s64 RequestParser::Pop() { |
| 397 | return static_cast<s64>(Pop<u64>()); | 429 | return static_cast<s64>(Pop<u64>()); |
| 398 | } | 430 | } |
| 399 | 431 | ||
| 400 | template <> | 432 | template <> |
| 433 | inline float RequestParser::Pop() { | ||
| 434 | const u32 value = Pop<u32>(); | ||
| 435 | float real; | ||
| 436 | std::memcpy(&real, &value, sizeof(real)); | ||
| 437 | return real; | ||
| 438 | } | ||
| 439 | |||
| 440 | template <> | ||
| 441 | inline double RequestParser::Pop() { | ||
| 442 | const u64 value = Pop<u64>(); | ||
| 443 | float real; | ||
| 444 | std::memcpy(&real, &value, sizeof(real)); | ||
| 445 | return real; | ||
| 446 | } | ||
| 447 | |||
| 448 | template <> | ||
| 401 | inline bool RequestParser::Pop() { | 449 | inline bool RequestParser::Pop() { |
| 402 | return Pop<u8>() != 0; | 450 | return Pop<u8>() != 0; |
| 403 | } | 451 | } |
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 57157beb4..352190da8 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/core_cpu.h" | 11 | #include "core/core_cpu.h" |
| 12 | #include "core/hle/kernel/address_arbiter.h" | ||
| 12 | #include "core/hle/kernel/errors.h" | 13 | #include "core/hle/kernel/errors.h" |
| 13 | #include "core/hle/kernel/object.h" | 14 | #include "core/hle/kernel/object.h" |
| 14 | #include "core/hle/kernel/process.h" | 15 | #include "core/hle/kernel/process.h" |
| @@ -18,58 +19,15 @@ | |||
| 18 | #include "core/memory.h" | 19 | #include "core/memory.h" |
| 19 | 20 | ||
| 20 | namespace Kernel { | 21 | namespace Kernel { |
| 21 | namespace AddressArbiter { | 22 | namespace { |
| 22 | |||
| 23 | // Performs actual address waiting logic. | ||
| 24 | static ResultCode WaitForAddress(VAddr address, s64 timeout) { | ||
| 25 | SharedPtr<Thread> current_thread = GetCurrentThread(); | ||
| 26 | current_thread->SetArbiterWaitAddress(address); | ||
| 27 | current_thread->SetStatus(ThreadStatus::WaitArb); | ||
| 28 | current_thread->InvalidateWakeupCallback(); | ||
| 29 | |||
| 30 | current_thread->WakeAfterDelay(timeout); | ||
| 31 | |||
| 32 | Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule(); | ||
| 33 | return RESULT_TIMEOUT; | ||
| 34 | } | ||
| 35 | |||
| 36 | // Gets the threads waiting on an address. | ||
| 37 | static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) { | ||
| 38 | const auto RetrieveWaitingThreads = [](std::size_t core_index, | ||
| 39 | std::vector<SharedPtr<Thread>>& waiting_threads, | ||
| 40 | VAddr arb_addr) { | ||
| 41 | const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); | ||
| 42 | const auto& thread_list = scheduler.GetThreadList(); | ||
| 43 | |||
| 44 | for (const auto& thread : thread_list) { | ||
| 45 | if (thread->GetArbiterWaitAddress() == arb_addr) | ||
| 46 | waiting_threads.push_back(thread); | ||
| 47 | } | ||
| 48 | }; | ||
| 49 | |||
| 50 | // Retrieve all threads that are waiting for this address. | ||
| 51 | std::vector<SharedPtr<Thread>> threads; | ||
| 52 | RetrieveWaitingThreads(0, threads, address); | ||
| 53 | RetrieveWaitingThreads(1, threads, address); | ||
| 54 | RetrieveWaitingThreads(2, threads, address); | ||
| 55 | RetrieveWaitingThreads(3, threads, address); | ||
| 56 | |||
| 57 | // Sort them by priority, such that the highest priority ones come first. | ||
| 58 | std::sort(threads.begin(), threads.end(), | ||
| 59 | [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) { | ||
| 60 | return lhs->GetPriority() < rhs->GetPriority(); | ||
| 61 | }); | ||
| 62 | |||
| 63 | return threads; | ||
| 64 | } | ||
| 65 | |||
| 66 | // Wake up num_to_wake (or all) threads in a vector. | 23 | // Wake up num_to_wake (or all) threads in a vector. |
| 67 | static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { | 24 | void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { |
| 68 | // Only process up to 'target' threads, unless 'target' is <= 0, in which case process | 25 | // Only process up to 'target' threads, unless 'target' is <= 0, in which case process |
| 69 | // them all. | 26 | // them all. |
| 70 | std::size_t last = waiting_threads.size(); | 27 | std::size_t last = waiting_threads.size(); |
| 71 | if (num_to_wake > 0) | 28 | if (num_to_wake > 0) { |
| 72 | last = num_to_wake; | 29 | last = num_to_wake; |
| 30 | } | ||
| 73 | 31 | ||
| 74 | // Signal the waiting threads. | 32 | // Signal the waiting threads. |
| 75 | for (std::size_t i = 0; i < last; i++) { | 33 | for (std::size_t i = 0; i < last; i++) { |
| @@ -79,42 +37,55 @@ static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num | |||
| 79 | waiting_threads[i]->ResumeFromWait(); | 37 | waiting_threads[i]->ResumeFromWait(); |
| 80 | } | 38 | } |
| 81 | } | 39 | } |
| 40 | } // Anonymous namespace | ||
| 41 | |||
| 42 | AddressArbiter::AddressArbiter(Core::System& system) : system{system} {} | ||
| 43 | AddressArbiter::~AddressArbiter() = default; | ||
| 44 | |||
| 45 | ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value, | ||
| 46 | s32 num_to_wake) { | ||
| 47 | switch (type) { | ||
| 48 | case SignalType::Signal: | ||
| 49 | return SignalToAddressOnly(address, num_to_wake); | ||
| 50 | case SignalType::IncrementAndSignalIfEqual: | ||
| 51 | return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake); | ||
| 52 | case SignalType::ModifyByWaitingCountAndSignalIfEqual: | ||
| 53 | return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake); | ||
| 54 | default: | ||
| 55 | return ERR_INVALID_ENUM_VALUE; | ||
| 56 | } | ||
| 57 | } | ||
| 82 | 58 | ||
| 83 | // Signals an address being waited on. | 59 | ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { |
| 84 | ResultCode SignalToAddress(VAddr address, s32 num_to_wake) { | 60 | const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); |
| 85 | std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); | ||
| 86 | |||
| 87 | WakeThreads(waiting_threads, num_to_wake); | 61 | WakeThreads(waiting_threads, num_to_wake); |
| 88 | return RESULT_SUCCESS; | 62 | return RESULT_SUCCESS; |
| 89 | } | 63 | } |
| 90 | 64 | ||
| 91 | // Signals an address being waited on and increments its value if equal to the value argument. | 65 | ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, |
| 92 | ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { | 66 | s32 num_to_wake) { |
| 93 | // Ensure that we can write to the address. | 67 | // Ensure that we can write to the address. |
| 94 | if (!Memory::IsValidVirtualAddress(address)) { | 68 | if (!Memory::IsValidVirtualAddress(address)) { |
| 95 | return ERR_INVALID_ADDRESS_STATE; | 69 | return ERR_INVALID_ADDRESS_STATE; |
| 96 | } | 70 | } |
| 97 | 71 | ||
| 98 | if (static_cast<s32>(Memory::Read32(address)) == value) { | 72 | if (static_cast<s32>(Memory::Read32(address)) != value) { |
| 99 | Memory::Write32(address, static_cast<u32>(value + 1)); | ||
| 100 | } else { | ||
| 101 | return ERR_INVALID_STATE; | 73 | return ERR_INVALID_STATE; |
| 102 | } | 74 | } |
| 103 | 75 | ||
| 104 | return SignalToAddress(address, num_to_wake); | 76 | Memory::Write32(address, static_cast<u32>(value + 1)); |
| 77 | return SignalToAddressOnly(address, num_to_wake); | ||
| 105 | } | 78 | } |
| 106 | 79 | ||
| 107 | // Signals an address being waited on and modifies its value based on waiting thread count if equal | 80 | ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, |
| 108 | // to the value argument. | 81 | s32 num_to_wake) { |
| 109 | ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, | ||
| 110 | s32 num_to_wake) { | ||
| 111 | // Ensure that we can write to the address. | 82 | // Ensure that we can write to the address. |
| 112 | if (!Memory::IsValidVirtualAddress(address)) { | 83 | if (!Memory::IsValidVirtualAddress(address)) { |
| 113 | return ERR_INVALID_ADDRESS_STATE; | 84 | return ERR_INVALID_ADDRESS_STATE; |
| 114 | } | 85 | } |
| 115 | 86 | ||
| 116 | // Get threads waiting on the address. | 87 | // Get threads waiting on the address. |
| 117 | std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); | 88 | const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); |
| 118 | 89 | ||
| 119 | // Determine the modified value depending on the waiting count. | 90 | // Determine the modified value depending on the waiting count. |
| 120 | s32 updated_value; | 91 | s32 updated_value; |
| @@ -126,41 +97,54 @@ ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 valu | |||
| 126 | updated_value = value; | 97 | updated_value = value; |
| 127 | } | 98 | } |
| 128 | 99 | ||
| 129 | if (static_cast<s32>(Memory::Read32(address)) == value) { | 100 | if (static_cast<s32>(Memory::Read32(address)) != value) { |
| 130 | Memory::Write32(address, static_cast<u32>(updated_value)); | ||
| 131 | } else { | ||
| 132 | return ERR_INVALID_STATE; | 101 | return ERR_INVALID_STATE; |
| 133 | } | 102 | } |
| 134 | 103 | ||
| 104 | Memory::Write32(address, static_cast<u32>(updated_value)); | ||
| 135 | WakeThreads(waiting_threads, num_to_wake); | 105 | WakeThreads(waiting_threads, num_to_wake); |
| 136 | return RESULT_SUCCESS; | 106 | return RESULT_SUCCESS; |
| 137 | } | 107 | } |
| 138 | 108 | ||
| 139 | // Waits on an address if the value passed is less than the argument value, optionally decrementing. | 109 | ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value, |
| 140 | ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement) { | 110 | s64 timeout_ns) { |
| 111 | switch (type) { | ||
| 112 | case ArbitrationType::WaitIfLessThan: | ||
| 113 | return WaitForAddressIfLessThan(address, value, timeout_ns, false); | ||
| 114 | case ArbitrationType::DecrementAndWaitIfLessThan: | ||
| 115 | return WaitForAddressIfLessThan(address, value, timeout_ns, true); | ||
| 116 | case ArbitrationType::WaitIfEqual: | ||
| 117 | return WaitForAddressIfEqual(address, value, timeout_ns); | ||
| 118 | default: | ||
| 119 | return ERR_INVALID_ENUM_VALUE; | ||
| 120 | } | ||
| 121 | } | ||
| 122 | |||
| 123 | ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, | ||
| 124 | bool should_decrement) { | ||
| 141 | // Ensure that we can read the address. | 125 | // Ensure that we can read the address. |
| 142 | if (!Memory::IsValidVirtualAddress(address)) { | 126 | if (!Memory::IsValidVirtualAddress(address)) { |
| 143 | return ERR_INVALID_ADDRESS_STATE; | 127 | return ERR_INVALID_ADDRESS_STATE; |
| 144 | } | 128 | } |
| 145 | 129 | ||
| 146 | s32 cur_value = static_cast<s32>(Memory::Read32(address)); | 130 | const s32 cur_value = static_cast<s32>(Memory::Read32(address)); |
| 147 | if (cur_value < value) { | 131 | if (cur_value >= value) { |
| 148 | if (should_decrement) { | ||
| 149 | Memory::Write32(address, static_cast<u32>(cur_value - 1)); | ||
| 150 | } | ||
| 151 | } else { | ||
| 152 | return ERR_INVALID_STATE; | 132 | return ERR_INVALID_STATE; |
| 153 | } | 133 | } |
| 134 | |||
| 135 | if (should_decrement) { | ||
| 136 | Memory::Write32(address, static_cast<u32>(cur_value - 1)); | ||
| 137 | } | ||
| 138 | |||
| 154 | // Short-circuit without rescheduling, if timeout is zero. | 139 | // Short-circuit without rescheduling, if timeout is zero. |
| 155 | if (timeout == 0) { | 140 | if (timeout == 0) { |
| 156 | return RESULT_TIMEOUT; | 141 | return RESULT_TIMEOUT; |
| 157 | } | 142 | } |
| 158 | 143 | ||
| 159 | return WaitForAddress(address, timeout); | 144 | return WaitForAddressImpl(address, timeout); |
| 160 | } | 145 | } |
| 161 | 146 | ||
| 162 | // Waits on an address if the value passed is equal to the argument value. | 147 | ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { |
| 163 | ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { | ||
| 164 | // Ensure that we can read the address. | 148 | // Ensure that we can read the address. |
| 165 | if (!Memory::IsValidVirtualAddress(address)) { | 149 | if (!Memory::IsValidVirtualAddress(address)) { |
| 166 | return ERR_INVALID_ADDRESS_STATE; | 150 | return ERR_INVALID_ADDRESS_STATE; |
| @@ -174,7 +158,48 @@ ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { | |||
| 174 | return RESULT_TIMEOUT; | 158 | return RESULT_TIMEOUT; |
| 175 | } | 159 | } |
| 176 | 160 | ||
| 177 | return WaitForAddress(address, timeout); | 161 | return WaitForAddressImpl(address, timeout); |
| 162 | } | ||
| 163 | |||
| 164 | ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) { | ||
| 165 | SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread(); | ||
| 166 | current_thread->SetArbiterWaitAddress(address); | ||
| 167 | current_thread->SetStatus(ThreadStatus::WaitArb); | ||
| 168 | current_thread->InvalidateWakeupCallback(); | ||
| 169 | |||
| 170 | current_thread->WakeAfterDelay(timeout); | ||
| 171 | |||
| 172 | system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule(); | ||
| 173 | return RESULT_TIMEOUT; | ||
| 174 | } | ||
| 175 | |||
| 176 | std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const { | ||
| 177 | const auto RetrieveWaitingThreads = [this](std::size_t core_index, | ||
| 178 | std::vector<SharedPtr<Thread>>& waiting_threads, | ||
| 179 | VAddr arb_addr) { | ||
| 180 | const auto& scheduler = system.Scheduler(core_index); | ||
| 181 | const auto& thread_list = scheduler.GetThreadList(); | ||
| 182 | |||
| 183 | for (const auto& thread : thread_list) { | ||
| 184 | if (thread->GetArbiterWaitAddress() == arb_addr) { | ||
| 185 | waiting_threads.push_back(thread); | ||
| 186 | } | ||
| 187 | } | ||
| 188 | }; | ||
| 189 | |||
| 190 | // Retrieve all threads that are waiting for this address. | ||
| 191 | std::vector<SharedPtr<Thread>> threads; | ||
| 192 | RetrieveWaitingThreads(0, threads, address); | ||
| 193 | RetrieveWaitingThreads(1, threads, address); | ||
| 194 | RetrieveWaitingThreads(2, threads, address); | ||
| 195 | RetrieveWaitingThreads(3, threads, address); | ||
| 196 | |||
| 197 | // Sort them by priority, such that the highest priority ones come first. | ||
| 198 | std::sort(threads.begin(), threads.end(), | ||
| 199 | [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) { | ||
| 200 | return lhs->GetPriority() < rhs->GetPriority(); | ||
| 201 | }); | ||
| 202 | |||
| 203 | return threads; | ||
| 178 | } | 204 | } |
| 179 | } // namespace AddressArbiter | ||
| 180 | } // namespace Kernel | 205 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h index e3657b8e9..ed0d0e69f 100644 --- a/src/core/hle/kernel/address_arbiter.h +++ b/src/core/hle/kernel/address_arbiter.h | |||
| @@ -4,31 +4,77 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <vector> | ||
| 8 | |||
| 7 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/hle/kernel/object.h" | ||
| 8 | 11 | ||
| 9 | union ResultCode; | 12 | union ResultCode; |
| 10 | 13 | ||
| 14 | namespace Core { | ||
| 15 | class System; | ||
| 16 | } | ||
| 17 | |||
| 11 | namespace Kernel { | 18 | namespace Kernel { |
| 12 | 19 | ||
| 13 | namespace AddressArbiter { | 20 | class Thread; |
| 14 | enum class ArbitrationType { | ||
| 15 | WaitIfLessThan = 0, | ||
| 16 | DecrementAndWaitIfLessThan = 1, | ||
| 17 | WaitIfEqual = 2, | ||
| 18 | }; | ||
| 19 | 21 | ||
| 20 | enum class SignalType { | 22 | class AddressArbiter { |
| 21 | Signal = 0, | 23 | public: |
| 22 | IncrementAndSignalIfEqual = 1, | 24 | enum class ArbitrationType { |
| 23 | ModifyByWaitingCountAndSignalIfEqual = 2, | 25 | WaitIfLessThan = 0, |
| 24 | }; | 26 | DecrementAndWaitIfLessThan = 1, |
| 27 | WaitIfEqual = 2, | ||
| 28 | }; | ||
| 29 | |||
| 30 | enum class SignalType { | ||
| 31 | Signal = 0, | ||
| 32 | IncrementAndSignalIfEqual = 1, | ||
| 33 | ModifyByWaitingCountAndSignalIfEqual = 2, | ||
| 34 | }; | ||
| 35 | |||
| 36 | explicit AddressArbiter(Core::System& system); | ||
| 37 | ~AddressArbiter(); | ||
| 38 | |||
| 39 | AddressArbiter(const AddressArbiter&) = delete; | ||
| 40 | AddressArbiter& operator=(const AddressArbiter&) = delete; | ||
| 41 | |||
| 42 | AddressArbiter(AddressArbiter&&) = default; | ||
| 43 | AddressArbiter& operator=(AddressArbiter&&) = delete; | ||
| 44 | |||
| 45 | /// Signals an address being waited on with a particular signaling type. | ||
| 46 | ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake); | ||
| 25 | 47 | ||
| 26 | ResultCode SignalToAddress(VAddr address, s32 num_to_wake); | 48 | /// Waits on an address with a particular arbitration type. |
| 27 | ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); | 49 | ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns); |
| 28 | ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); | ||
| 29 | 50 | ||
| 30 | ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement); | 51 | private: |
| 31 | ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); | 52 | /// Signals an address being waited on. |
| 32 | } // namespace AddressArbiter | 53 | ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake); |
| 54 | |||
| 55 | /// Signals an address being waited on and increments its value if equal to the value argument. | ||
| 56 | ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); | ||
| 57 | |||
| 58 | /// Signals an address being waited on and modifies its value based on waiting thread count if | ||
| 59 | /// equal to the value argument. | ||
| 60 | ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, | ||
| 61 | s32 num_to_wake); | ||
| 62 | |||
| 63 | /// Waits on an address if the value passed is less than the argument value, | ||
| 64 | /// optionally decrementing. | ||
| 65 | ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, | ||
| 66 | bool should_decrement); | ||
| 67 | |||
| 68 | /// Waits on an address if the value passed is equal to the argument value. | ||
| 69 | ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); | ||
| 70 | |||
| 71 | // Waits on the given address with a timeout in nanoseconds | ||
| 72 | ResultCode WaitForAddressImpl(VAddr address, s64 timeout); | ||
| 73 | |||
| 74 | // Gets the threads waiting on an address. | ||
| 75 | std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const; | ||
| 76 | |||
| 77 | Core::System& system; | ||
| 78 | }; | ||
| 33 | 79 | ||
| 34 | } // namespace Kernel | 80 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp index d4c91d529..aa432658e 100644 --- a/src/core/hle/kernel/client_port.cpp +++ b/src/core/hle/kernel/client_port.cpp | |||
| @@ -33,10 +33,11 @@ ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() { | |||
| 33 | // Create a new session pair, let the created sessions inherit the parent port's HLE handler. | 33 | // Create a new session pair, let the created sessions inherit the parent port's HLE handler. |
| 34 | auto sessions = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this); | 34 | auto sessions = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this); |
| 35 | 35 | ||
| 36 | if (server_port->hle_handler) | 36 | if (server_port->HasHLEHandler()) { |
| 37 | server_port->hle_handler->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions)); | 37 | server_port->GetHLEHandler()->ClientConnected(std::get<SharedPtr<ServerSession>>(sessions)); |
| 38 | else | 38 | } else { |
| 39 | server_port->pending_sessions.push_back(std::get<SharedPtr<ServerSession>>(sessions)); | 39 | server_port->AppendPendingSession(std::get<SharedPtr<ServerSession>>(sessions)); |
| 40 | } | ||
| 40 | 41 | ||
| 41 | // Wake the threads waiting on the ServerPort | 42 | // Wake the threads waiting on the ServerPort |
| 42 | server_port->WakeupAllWaitingThreads(); | 43 | server_port->WakeupAllWaitingThreads(); |
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp index 704e82824..c17baa50a 100644 --- a/src/core/hle/kernel/client_session.cpp +++ b/src/core/hle/kernel/client_session.cpp | |||
| @@ -17,21 +17,11 @@ ClientSession::~ClientSession() { | |||
| 17 | // This destructor will be called automatically when the last ClientSession handle is closed by | 17 | // This destructor will be called automatically when the last ClientSession handle is closed by |
| 18 | // the emulated application. | 18 | // the emulated application. |
| 19 | 19 | ||
| 20 | // Local references to ServerSession and SessionRequestHandler are necessary to guarantee they | 20 | // A local reference to the ServerSession is necessary to guarantee it |
| 21 | // will be kept alive until after ClientDisconnected() returns. | 21 | // will be kept alive until after ClientDisconnected() returns. |
| 22 | SharedPtr<ServerSession> server = parent->server; | 22 | SharedPtr<ServerSession> server = parent->server; |
| 23 | if (server) { | 23 | if (server) { |
| 24 | std::shared_ptr<SessionRequestHandler> hle_handler = server->hle_handler; | 24 | server->ClientDisconnected(); |
| 25 | if (hle_handler) | ||
| 26 | hle_handler->ClientDisconnected(server); | ||
| 27 | |||
| 28 | // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set | ||
| 29 | // their WaitSynchronization result to 0xC920181A. | ||
| 30 | |||
| 31 | // Clean up the list of client threads with pending requests, they are unneeded now that the | ||
| 32 | // client endpoint is closed. | ||
| 33 | server->pending_requesting_threads.clear(); | ||
| 34 | server->currently_handling = nullptr; | ||
| 35 | } | 25 | } |
| 36 | 26 | ||
| 37 | parent->client = nullptr; | 27 | parent->client = nullptr; |
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h index 4c18de69c..b1f39aad7 100644 --- a/src/core/hle/kernel/client_session.h +++ b/src/core/hle/kernel/client_session.h | |||
| @@ -36,14 +36,15 @@ public: | |||
| 36 | 36 | ||
| 37 | ResultCode SendSyncRequest(SharedPtr<Thread> thread); | 37 | ResultCode SendSyncRequest(SharedPtr<Thread> thread); |
| 38 | 38 | ||
| 39 | std::string name; ///< Name of client port (optional) | 39 | private: |
| 40 | explicit ClientSession(KernelCore& kernel); | ||
| 41 | ~ClientSession() override; | ||
| 40 | 42 | ||
| 41 | /// The parent session, which links to the server endpoint. | 43 | /// The parent session, which links to the server endpoint. |
| 42 | std::shared_ptr<Session> parent; | 44 | std::shared_ptr<Session> parent; |
| 43 | 45 | ||
| 44 | private: | 46 | /// Name of the client session (optional) |
| 45 | explicit ClientSession(KernelCore& kernel); | 47 | std::string name; |
| 46 | ~ClientSession() override; | ||
| 47 | }; | 48 | }; |
| 48 | 49 | ||
| 49 | } // namespace Kernel | 50 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp new file mode 100644 index 000000000..1f434e9af --- /dev/null +++ b/src/core/hle/kernel/code_set.cpp | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "core/hle/kernel/code_set.h" | ||
| 6 | |||
| 7 | namespace Kernel { | ||
| 8 | |||
| 9 | CodeSet::CodeSet() = default; | ||
| 10 | CodeSet::~CodeSet() = default; | ||
| 11 | |||
| 12 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h new file mode 100644 index 000000000..834fd23d2 --- /dev/null +++ b/src/core/hle/kernel/code_set.h | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <cstddef> | ||
| 8 | #include <memory> | ||
| 9 | #include <vector> | ||
| 10 | |||
| 11 | #include "common/common_types.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | /** | ||
| 16 | * Represents executable data that may be loaded into a kernel process. | ||
| 17 | * | ||
| 18 | * A code set consists of three basic segments: | ||
| 19 | * - A code (AKA text) segment, | ||
| 20 | * - A read-only data segment (rodata) | ||
| 21 | * - A data segment | ||
| 22 | * | ||
| 23 | * The code segment is the portion of the object file that contains | ||
| 24 | * executable instructions. | ||
| 25 | * | ||
| 26 | * The read-only data segment in the portion of the object file that | ||
| 27 | * contains (as one would expect) read-only data, such as fixed constant | ||
| 28 | * values and data structures. | ||
| 29 | * | ||
| 30 | * The data segment is similar to the read-only data segment -- it contains | ||
| 31 | * variables and data structures that have predefined values, however, | ||
| 32 | * entities within this segment can be modified. | ||
| 33 | */ | ||
| 34 | struct CodeSet final { | ||
| 35 | /// A single segment within a code set. | ||
| 36 | struct Segment final { | ||
| 37 | /// The byte offset that this segment is located at. | ||
| 38 | std::size_t offset = 0; | ||
| 39 | |||
| 40 | /// The address to map this segment to. | ||
| 41 | VAddr addr = 0; | ||
| 42 | |||
| 43 | /// The size of this segment in bytes. | ||
| 44 | u32 size = 0; | ||
| 45 | }; | ||
| 46 | |||
| 47 | explicit CodeSet(); | ||
| 48 | ~CodeSet(); | ||
| 49 | |||
| 50 | CodeSet(const CodeSet&) = delete; | ||
| 51 | CodeSet& operator=(const CodeSet&) = delete; | ||
| 52 | |||
| 53 | CodeSet(CodeSet&&) = default; | ||
| 54 | CodeSet& operator=(CodeSet&&) = default; | ||
| 55 | |||
| 56 | Segment& CodeSegment() { | ||
| 57 | return segments[0]; | ||
| 58 | } | ||
| 59 | |||
| 60 | const Segment& CodeSegment() const { | ||
| 61 | return segments[0]; | ||
| 62 | } | ||
| 63 | |||
| 64 | Segment& RODataSegment() { | ||
| 65 | return segments[1]; | ||
| 66 | } | ||
| 67 | |||
| 68 | const Segment& RODataSegment() const { | ||
| 69 | return segments[1]; | ||
| 70 | } | ||
| 71 | |||
| 72 | Segment& DataSegment() { | ||
| 73 | return segments[2]; | ||
| 74 | } | ||
| 75 | |||
| 76 | const Segment& DataSegment() const { | ||
| 77 | return segments[2]; | ||
| 78 | } | ||
| 79 | |||
| 80 | /// The overall data that backs this code set. | ||
| 81 | std::shared_ptr<std::vector<u8>> memory; | ||
| 82 | |||
| 83 | /// The segments that comprise this code set. | ||
| 84 | std::array<Segment, 3> segments; | ||
| 85 | |||
| 86 | /// The entry point address for this code set. | ||
| 87 | VAddr entrypoint = 0; | ||
| 88 | }; | ||
| 89 | |||
| 90 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index d17eb0cb6..8097b3863 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h | |||
| @@ -14,6 +14,7 @@ constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; | |||
| 14 | constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; | 14 | constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; |
| 15 | constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; | 15 | constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; |
| 16 | constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; | 16 | constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; |
| 17 | constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104}; | ||
| 17 | constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; | 18 | constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; |
| 18 | constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; | 19 | constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; |
| 19 | constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108}; | 20 | constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108}; |
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index c8acde5b1..bdfaa977f 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp | |||
| @@ -14,32 +14,47 @@ | |||
| 14 | namespace Kernel { | 14 | namespace Kernel { |
| 15 | namespace { | 15 | namespace { |
| 16 | constexpr u16 GetSlot(Handle handle) { | 16 | constexpr u16 GetSlot(Handle handle) { |
| 17 | return handle >> 15; | 17 | return static_cast<u16>(handle >> 15); |
| 18 | } | 18 | } |
| 19 | 19 | ||
| 20 | constexpr u16 GetGeneration(Handle handle) { | 20 | constexpr u16 GetGeneration(Handle handle) { |
| 21 | return handle & 0x7FFF; | 21 | return static_cast<u16>(handle & 0x7FFF); |
| 22 | } | 22 | } |
| 23 | } // Anonymous namespace | 23 | } // Anonymous namespace |
| 24 | 24 | ||
| 25 | HandleTable::HandleTable() { | 25 | HandleTable::HandleTable() { |
| 26 | next_generation = 1; | ||
| 27 | Clear(); | 26 | Clear(); |
| 28 | } | 27 | } |
| 29 | 28 | ||
| 30 | HandleTable::~HandleTable() = default; | 29 | HandleTable::~HandleTable() = default; |
| 31 | 30 | ||
| 31 | ResultCode HandleTable::SetSize(s32 handle_table_size) { | ||
| 32 | if (static_cast<u32>(handle_table_size) > MAX_COUNT) { | ||
| 33 | return ERR_OUT_OF_MEMORY; | ||
| 34 | } | ||
| 35 | |||
| 36 | // Values less than or equal to zero indicate to use the maximum allowable | ||
| 37 | // size for the handle table in the actual kernel, so we ignore the given | ||
| 38 | // value in that case, since we assume this by default unless this function | ||
| 39 | // is called. | ||
| 40 | if (handle_table_size > 0) { | ||
| 41 | table_size = static_cast<u16>(handle_table_size); | ||
| 42 | } | ||
| 43 | |||
| 44 | return RESULT_SUCCESS; | ||
| 45 | } | ||
| 46 | |||
| 32 | ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) { | 47 | ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) { |
| 33 | DEBUG_ASSERT(obj != nullptr); | 48 | DEBUG_ASSERT(obj != nullptr); |
| 34 | 49 | ||
| 35 | u16 slot = next_free_slot; | 50 | const u16 slot = next_free_slot; |
| 36 | if (slot >= generations.size()) { | 51 | if (slot >= table_size) { |
| 37 | LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use."); | 52 | LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use."); |
| 38 | return ERR_HANDLE_TABLE_FULL; | 53 | return ERR_HANDLE_TABLE_FULL; |
| 39 | } | 54 | } |
| 40 | next_free_slot = generations[slot]; | 55 | next_free_slot = generations[slot]; |
| 41 | 56 | ||
| 42 | u16 generation = next_generation++; | 57 | const u16 generation = next_generation++; |
| 43 | 58 | ||
| 44 | // Overflow count so it fits in the 15 bits dedicated to the generation in the handle. | 59 | // Overflow count so it fits in the 15 bits dedicated to the generation in the handle. |
| 45 | // Horizon OS uses zero to represent an invalid handle, so skip to 1. | 60 | // Horizon OS uses zero to represent an invalid handle, so skip to 1. |
| @@ -64,10 +79,11 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) { | |||
| 64 | } | 79 | } |
| 65 | 80 | ||
| 66 | ResultCode HandleTable::Close(Handle handle) { | 81 | ResultCode HandleTable::Close(Handle handle) { |
| 67 | if (!IsValid(handle)) | 82 | if (!IsValid(handle)) { |
| 68 | return ERR_INVALID_HANDLE; | 83 | return ERR_INVALID_HANDLE; |
| 84 | } | ||
| 69 | 85 | ||
| 70 | u16 slot = GetSlot(handle); | 86 | const u16 slot = GetSlot(handle); |
| 71 | 87 | ||
| 72 | objects[slot] = nullptr; | 88 | objects[slot] = nullptr; |
| 73 | 89 | ||
| @@ -77,10 +93,10 @@ ResultCode HandleTable::Close(Handle handle) { | |||
| 77 | } | 93 | } |
| 78 | 94 | ||
| 79 | bool HandleTable::IsValid(Handle handle) const { | 95 | bool HandleTable::IsValid(Handle handle) const { |
| 80 | std::size_t slot = GetSlot(handle); | 96 | const std::size_t slot = GetSlot(handle); |
| 81 | u16 generation = GetGeneration(handle); | 97 | const u16 generation = GetGeneration(handle); |
| 82 | 98 | ||
| 83 | return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation; | 99 | return slot < table_size && objects[slot] != nullptr && generations[slot] == generation; |
| 84 | } | 100 | } |
| 85 | 101 | ||
| 86 | SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const { | 102 | SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const { |
| @@ -97,7 +113,7 @@ SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const { | |||
| 97 | } | 113 | } |
| 98 | 114 | ||
| 99 | void HandleTable::Clear() { | 115 | void HandleTable::Clear() { |
| 100 | for (u16 i = 0; i < MAX_COUNT; ++i) { | 116 | for (u16 i = 0; i < table_size; ++i) { |
| 101 | generations[i] = i + 1; | 117 | generations[i] = i + 1; |
| 102 | objects[i] = nullptr; | 118 | objects[i] = nullptr; |
| 103 | } | 119 | } |
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h index 89a3bc740..44901391b 100644 --- a/src/core/hle/kernel/handle_table.h +++ b/src/core/hle/kernel/handle_table.h | |||
| @@ -50,6 +50,20 @@ public: | |||
| 50 | ~HandleTable(); | 50 | ~HandleTable(); |
| 51 | 51 | ||
| 52 | /** | 52 | /** |
| 53 | * Sets the number of handles that may be in use at one time | ||
| 54 | * for this handle table. | ||
| 55 | * | ||
| 56 | * @param handle_table_size The desired size to limit the handle table to. | ||
| 57 | * | ||
| 58 | * @returns an error code indicating if initialization was successful. | ||
| 59 | * If initialization was not successful, then ERR_OUT_OF_MEMORY | ||
| 60 | * will be returned. | ||
| 61 | * | ||
| 62 | * @pre handle_table_size must be within the range [0, 1024] | ||
| 63 | */ | ||
| 64 | ResultCode SetSize(s32 handle_table_size); | ||
| 65 | |||
| 66 | /** | ||
| 53 | * Allocates a handle for the given object. | 67 | * Allocates a handle for the given object. |
| 54 | * @return The created Handle or one of the following errors: | 68 | * @return The created Handle or one of the following errors: |
| 55 | * - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded. | 69 | * - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded. |
| @@ -104,13 +118,20 @@ private: | |||
| 104 | std::array<u16, MAX_COUNT> generations; | 118 | std::array<u16, MAX_COUNT> generations; |
| 105 | 119 | ||
| 106 | /** | 120 | /** |
| 121 | * The limited size of the handle table. This can be specified by process | ||
| 122 | * capabilities in order to restrict the overall number of handles that | ||
| 123 | * can be created in a process instance | ||
| 124 | */ | ||
| 125 | u16 table_size = static_cast<u16>(MAX_COUNT); | ||
| 126 | |||
| 127 | /** | ||
| 107 | * Global counter of the number of created handles. Stored in `generations` when a handle is | 128 | * Global counter of the number of created handles. Stored in `generations` when a handle is |
| 108 | * created, and wraps around to 1 when it hits 0x8000. | 129 | * created, and wraps around to 1 when it hits 0x8000. |
| 109 | */ | 130 | */ |
| 110 | u16 next_generation; | 131 | u16 next_generation = 1; |
| 111 | 132 | ||
| 112 | /// Head of the free slots linked list. | 133 | /// Head of the free slots linked list. |
| 113 | u16 next_free_slot; | 134 | u16 next_free_slot = 0; |
| 114 | }; | 135 | }; |
| 115 | 136 | ||
| 116 | } // namespace Kernel | 137 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 5dd855db8..fe710eb6e 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp | |||
| @@ -86,7 +86,7 @@ HLERequestContext::~HLERequestContext() = default; | |||
| 86 | void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, | 86 | void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, |
| 87 | bool incoming) { | 87 | bool incoming) { |
| 88 | IPC::RequestParser rp(src_cmdbuf); | 88 | IPC::RequestParser rp(src_cmdbuf); |
| 89 | command_header = std::make_shared<IPC::CommandHeader>(rp.PopRaw<IPC::CommandHeader>()); | 89 | command_header = rp.PopRaw<IPC::CommandHeader>(); |
| 90 | 90 | ||
| 91 | if (command_header->type == IPC::CommandType::Close) { | 91 | if (command_header->type == IPC::CommandType::Close) { |
| 92 | // Close does not populate the rest of the IPC header | 92 | // Close does not populate the rest of the IPC header |
| @@ -95,8 +95,7 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_ | |||
| 95 | 95 | ||
| 96 | // If handle descriptor is present, add size of it | 96 | // If handle descriptor is present, add size of it |
| 97 | if (command_header->enable_handle_descriptor) { | 97 | if (command_header->enable_handle_descriptor) { |
| 98 | handle_descriptor_header = | 98 | handle_descriptor_header = rp.PopRaw<IPC::HandleDescriptorHeader>(); |
| 99 | std::make_shared<IPC::HandleDescriptorHeader>(rp.PopRaw<IPC::HandleDescriptorHeader>()); | ||
| 100 | if (handle_descriptor_header->send_current_pid) { | 99 | if (handle_descriptor_header->send_current_pid) { |
| 101 | rp.Skip(2, false); | 100 | rp.Skip(2, false); |
| 102 | } | 101 | } |
| @@ -140,16 +139,15 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_ | |||
| 140 | // If this is an incoming message, only CommandType "Request" has a domain header | 139 | // If this is an incoming message, only CommandType "Request" has a domain header |
| 141 | // All outgoing domain messages have the domain header, if only incoming has it | 140 | // All outgoing domain messages have the domain header, if only incoming has it |
| 142 | if (incoming || domain_message_header) { | 141 | if (incoming || domain_message_header) { |
| 143 | domain_message_header = | 142 | domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>(); |
| 144 | std::make_shared<IPC::DomainMessageHeader>(rp.PopRaw<IPC::DomainMessageHeader>()); | ||
| 145 | } else { | 143 | } else { |
| 146 | if (Session()->IsDomain()) | 144 | if (Session()->IsDomain()) { |
| 147 | LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); | 145 | LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); |
| 146 | } | ||
| 148 | } | 147 | } |
| 149 | } | 148 | } |
| 150 | 149 | ||
| 151 | data_payload_header = | 150 | data_payload_header = rp.PopRaw<IPC::DataPayloadHeader>(); |
| 152 | std::make_shared<IPC::DataPayloadHeader>(rp.PopRaw<IPC::DataPayloadHeader>()); | ||
| 153 | 151 | ||
| 154 | data_payload_offset = rp.GetCurrentOffset(); | 152 | data_payload_offset = rp.GetCurrentOffset(); |
| 155 | 153 | ||
| @@ -264,11 +262,11 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { | |||
| 264 | // Write the domain objects to the command buffer, these go after the raw untranslated data. | 262 | // Write the domain objects to the command buffer, these go after the raw untranslated data. |
| 265 | // TODO(Subv): This completely ignores C buffers. | 263 | // TODO(Subv): This completely ignores C buffers. |
| 266 | std::size_t domain_offset = size - domain_message_header->num_objects; | 264 | std::size_t domain_offset = size - domain_message_header->num_objects; |
| 267 | auto& request_handlers = server_session->domain_request_handlers; | ||
| 268 | 265 | ||
| 269 | for (auto& object : domain_objects) { | 266 | for (const auto& object : domain_objects) { |
| 270 | request_handlers.emplace_back(object); | 267 | server_session->AppendDomainRequestHandler(object); |
| 271 | dst_cmdbuf[domain_offset++] = static_cast<u32_le>(request_handlers.size()); | 268 | dst_cmdbuf[domain_offset++] = |
| 269 | static_cast<u32_le>(server_session->NumDomainRequestHandlers()); | ||
| 272 | } | 270 | } |
| 273 | } | 271 | } |
| 274 | 272 | ||
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index cb1c5aff3..2bdd9f02c 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <memory> | 8 | #include <memory> |
| 9 | #include <optional> | ||
| 9 | #include <string> | 10 | #include <string> |
| 10 | #include <type_traits> | 11 | #include <type_traits> |
| 11 | #include <vector> | 12 | #include <vector> |
| @@ -15,6 +16,8 @@ | |||
| 15 | #include "core/hle/ipc.h" | 16 | #include "core/hle/ipc.h" |
| 16 | #include "core/hle/kernel/object.h" | 17 | #include "core/hle/kernel/object.h" |
| 17 | 18 | ||
| 19 | union ResultCode; | ||
| 20 | |||
| 18 | namespace Service { | 21 | namespace Service { |
| 19 | class ServiceFrameworkBase; | 22 | class ServiceFrameworkBase; |
| 20 | } | 23 | } |
| @@ -166,12 +169,12 @@ public: | |||
| 166 | return buffer_c_desciptors; | 169 | return buffer_c_desciptors; |
| 167 | } | 170 | } |
| 168 | 171 | ||
| 169 | const IPC::DomainMessageHeader* GetDomainMessageHeader() const { | 172 | const IPC::DomainMessageHeader& GetDomainMessageHeader() const { |
| 170 | return domain_message_header.get(); | 173 | return domain_message_header.value(); |
| 171 | } | 174 | } |
| 172 | 175 | ||
| 173 | bool HasDomainMessageHeader() const { | 176 | bool HasDomainMessageHeader() const { |
| 174 | return domain_message_header != nullptr; | 177 | return domain_message_header.has_value(); |
| 175 | } | 178 | } |
| 176 | 179 | ||
| 177 | /// Helper function to read a buffer using the appropriate buffer descriptor | 180 | /// Helper function to read a buffer using the appropriate buffer descriptor |
| @@ -208,14 +211,12 @@ public: | |||
| 208 | 211 | ||
| 209 | template <typename T> | 212 | template <typename T> |
| 210 | SharedPtr<T> GetCopyObject(std::size_t index) { | 213 | SharedPtr<T> GetCopyObject(std::size_t index) { |
| 211 | ASSERT(index < copy_objects.size()); | 214 | return DynamicObjectCast<T>(copy_objects.at(index)); |
| 212 | return DynamicObjectCast<T>(copy_objects[index]); | ||
| 213 | } | 215 | } |
| 214 | 216 | ||
| 215 | template <typename T> | 217 | template <typename T> |
| 216 | SharedPtr<T> GetMoveObject(std::size_t index) { | 218 | SharedPtr<T> GetMoveObject(std::size_t index) { |
| 217 | ASSERT(index < move_objects.size()); | 219 | return DynamicObjectCast<T>(move_objects.at(index)); |
| 218 | return DynamicObjectCast<T>(move_objects[index]); | ||
| 219 | } | 220 | } |
| 220 | 221 | ||
| 221 | void AddMoveObject(SharedPtr<Object> object) { | 222 | void AddMoveObject(SharedPtr<Object> object) { |
| @@ -232,7 +233,7 @@ public: | |||
| 232 | 233 | ||
| 233 | template <typename T> | 234 | template <typename T> |
| 234 | std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const { | 235 | std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const { |
| 235 | return std::static_pointer_cast<T>(domain_request_handlers[index]); | 236 | return std::static_pointer_cast<T>(domain_request_handlers.at(index)); |
| 236 | } | 237 | } |
| 237 | 238 | ||
| 238 | void SetDomainRequestHandlers( | 239 | void SetDomainRequestHandlers( |
| @@ -272,10 +273,10 @@ private: | |||
| 272 | boost::container::small_vector<SharedPtr<Object>, 8> copy_objects; | 273 | boost::container::small_vector<SharedPtr<Object>, 8> copy_objects; |
| 273 | boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects; | 274 | boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects; |
| 274 | 275 | ||
| 275 | std::shared_ptr<IPC::CommandHeader> command_header; | 276 | std::optional<IPC::CommandHeader> command_header; |
| 276 | std::shared_ptr<IPC::HandleDescriptorHeader> handle_descriptor_header; | 277 | std::optional<IPC::HandleDescriptorHeader> handle_descriptor_header; |
| 277 | std::shared_ptr<IPC::DataPayloadHeader> data_payload_header; | 278 | std::optional<IPC::DataPayloadHeader> data_payload_header; |
| 278 | std::shared_ptr<IPC::DomainMessageHeader> domain_message_header; | 279 | std::optional<IPC::DomainMessageHeader> domain_message_header; |
| 279 | std::vector<IPC::BufferDescriptorX> buffer_x_desciptors; | 280 | std::vector<IPC::BufferDescriptorX> buffer_x_desciptors; |
| 280 | std::vector<IPC::BufferDescriptorABW> buffer_a_desciptors; | 281 | std::vector<IPC::BufferDescriptorABW> buffer_a_desciptors; |
| 281 | std::vector<IPC::BufferDescriptorABW> buffer_b_desciptors; | 282 | std::vector<IPC::BufferDescriptorABW> buffer_b_desciptors; |
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 7a524ce5a..4d224d01d 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include "core/core.h" | 13 | #include "core/core.h" |
| 14 | #include "core/core_timing.h" | 14 | #include "core/core_timing.h" |
| 15 | #include "core/hle/kernel/address_arbiter.h" | ||
| 15 | #include "core/hle/kernel/client_port.h" | 16 | #include "core/hle/kernel/client_port.h" |
| 16 | #include "core/hle/kernel/handle_table.h" | 17 | #include "core/hle/kernel/handle_table.h" |
| 17 | #include "core/hle/kernel/kernel.h" | 18 | #include "core/hle/kernel/kernel.h" |
| @@ -86,6 +87,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_ | |||
| 86 | } | 87 | } |
| 87 | 88 | ||
| 88 | struct KernelCore::Impl { | 89 | struct KernelCore::Impl { |
| 90 | explicit Impl(Core::System& system) : system{system} {} | ||
| 91 | |||
| 89 | void Initialize(KernelCore& kernel) { | 92 | void Initialize(KernelCore& kernel) { |
| 90 | Shutdown(); | 93 | Shutdown(); |
| 91 | 94 | ||
| @@ -124,7 +127,7 @@ struct KernelCore::Impl { | |||
| 124 | 127 | ||
| 125 | void InitializeThreads() { | 128 | void InitializeThreads() { |
| 126 | thread_wakeup_event_type = | 129 | thread_wakeup_event_type = |
| 127 | CoreTiming::RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback); | 130 | system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback); |
| 128 | } | 131 | } |
| 129 | 132 | ||
| 130 | std::atomic<u32> next_object_id{0}; | 133 | std::atomic<u32> next_object_id{0}; |
| @@ -137,7 +140,7 @@ struct KernelCore::Impl { | |||
| 137 | 140 | ||
| 138 | SharedPtr<ResourceLimit> system_resource_limit; | 141 | SharedPtr<ResourceLimit> system_resource_limit; |
| 139 | 142 | ||
| 140 | CoreTiming::EventType* thread_wakeup_event_type = nullptr; | 143 | Core::Timing::EventType* thread_wakeup_event_type = nullptr; |
| 141 | // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, | 144 | // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, |
| 142 | // allowing us to simply use a pool index or similar. | 145 | // allowing us to simply use a pool index or similar. |
| 143 | Kernel::HandleTable thread_wakeup_callback_handle_table; | 146 | Kernel::HandleTable thread_wakeup_callback_handle_table; |
| @@ -145,9 +148,12 @@ struct KernelCore::Impl { | |||
| 145 | /// Map of named ports managed by the kernel, which can be retrieved using | 148 | /// Map of named ports managed by the kernel, which can be retrieved using |
| 146 | /// the ConnectToPort SVC. | 149 | /// the ConnectToPort SVC. |
| 147 | NamedPortTable named_ports; | 150 | NamedPortTable named_ports; |
| 151 | |||
| 152 | // System context | ||
| 153 | Core::System& system; | ||
| 148 | }; | 154 | }; |
| 149 | 155 | ||
| 150 | KernelCore::KernelCore() : impl{std::make_unique<Impl>()} {} | 156 | KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system)} {} |
| 151 | KernelCore::~KernelCore() { | 157 | KernelCore::~KernelCore() { |
| 152 | Shutdown(); | 158 | Shutdown(); |
| 153 | } | 159 | } |
| @@ -213,7 +219,7 @@ u64 KernelCore::CreateNewProcessID() { | |||
| 213 | return impl->next_process_id++; | 219 | return impl->next_process_id++; |
| 214 | } | 220 | } |
| 215 | 221 | ||
| 216 | CoreTiming::EventType* KernelCore::ThreadWakeupCallbackEventType() const { | 222 | Core::Timing::EventType* KernelCore::ThreadWakeupCallbackEventType() const { |
| 217 | return impl->thread_wakeup_event_type; | 223 | return impl->thread_wakeup_event_type; |
| 218 | } | 224 | } |
| 219 | 225 | ||
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index c643a6401..ff17ff865 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -11,12 +11,18 @@ | |||
| 11 | template <typename T> | 11 | template <typename T> |
| 12 | class ResultVal; | 12 | class ResultVal; |
| 13 | 13 | ||
| 14 | namespace CoreTiming { | 14 | namespace Core { |
| 15 | struct EventType; | 15 | class System; |
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | namespace Core::Timing { | ||
| 19 | class CoreTiming; | ||
| 20 | struct EventType; | ||
| 21 | } // namespace Core::Timing | ||
| 22 | |||
| 18 | namespace Kernel { | 23 | namespace Kernel { |
| 19 | 24 | ||
| 25 | class AddressArbiter; | ||
| 20 | class ClientPort; | 26 | class ClientPort; |
| 21 | class HandleTable; | 27 | class HandleTable; |
| 22 | class Process; | 28 | class Process; |
| @@ -29,7 +35,14 @@ private: | |||
| 29 | using NamedPortTable = std::unordered_map<std::string, SharedPtr<ClientPort>>; | 35 | using NamedPortTable = std::unordered_map<std::string, SharedPtr<ClientPort>>; |
| 30 | 36 | ||
| 31 | public: | 37 | public: |
| 32 | KernelCore(); | 38 | /// Constructs an instance of the kernel using the given System |
| 39 | /// instance as a context for any necessary system-related state, | ||
| 40 | /// such as threads, CPU core state, etc. | ||
| 41 | /// | ||
| 42 | /// @post After execution of the constructor, the provided System | ||
| 43 | /// object *must* outlive the kernel instance itself. | ||
| 44 | /// | ||
| 45 | explicit KernelCore(Core::System& system); | ||
| 33 | ~KernelCore(); | 46 | ~KernelCore(); |
| 34 | 47 | ||
| 35 | KernelCore(const KernelCore&) = delete; | 48 | KernelCore(const KernelCore&) = delete; |
| @@ -89,7 +102,7 @@ private: | |||
| 89 | u64 CreateNewThreadID(); | 102 | u64 CreateNewThreadID(); |
| 90 | 103 | ||
| 91 | /// Retrieves the event type used for thread wakeup callbacks. | 104 | /// Retrieves the event type used for thread wakeup callbacks. |
| 92 | CoreTiming::EventType* ThreadWakeupCallbackEventType() const; | 105 | Core::Timing::EventType* ThreadWakeupCallbackEventType() const; |
| 93 | 106 | ||
| 94 | /// Provides a reference to the thread wakeup callback handle table. | 107 | /// Provides a reference to the thread wakeup callback handle table. |
| 95 | Kernel::HandleTable& ThreadWakeupCallbackHandleTable(); | 108 | Kernel::HandleTable& ThreadWakeupCallbackHandleTable(); |
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index c5aa19afa..15a16ae14 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/file_sys/program_metadata.h" | 11 | #include "core/file_sys/program_metadata.h" |
| 12 | #include "core/hle/kernel/code_set.h" | ||
| 12 | #include "core/hle/kernel/errors.h" | 13 | #include "core/hle/kernel/errors.h" |
| 13 | #include "core/hle/kernel/kernel.h" | 14 | #include "core/hle/kernel/kernel.h" |
| 14 | #include "core/hle/kernel/process.h" | 15 | #include "core/hle/kernel/process.h" |
| @@ -31,7 +32,7 @@ namespace { | |||
| 31 | */ | 32 | */ |
| 32 | void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) { | 33 | void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) { |
| 33 | // Setup page table so we can write to memory | 34 | // Setup page table so we can write to memory |
| 34 | SetCurrentPageTable(&owner_process.VMManager().page_table); | 35 | Memory::SetCurrentPageTable(&owner_process.VMManager().page_table); |
| 35 | 36 | ||
| 36 | // Initialize new "main" thread | 37 | // Initialize new "main" thread |
| 37 | const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress(); | 38 | const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress(); |
| @@ -50,12 +51,10 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_poi | |||
| 50 | } | 51 | } |
| 51 | } // Anonymous namespace | 52 | } // Anonymous namespace |
| 52 | 53 | ||
| 53 | CodeSet::CodeSet() = default; | 54 | SharedPtr<Process> Process::Create(Core::System& system, std::string&& name) { |
| 54 | CodeSet::~CodeSet() = default; | 55 | auto& kernel = system.Kernel(); |
| 55 | |||
| 56 | SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) { | ||
| 57 | SharedPtr<Process> process(new Process(kernel)); | ||
| 58 | 56 | ||
| 57 | SharedPtr<Process> process(new Process(system)); | ||
| 59 | process->name = std::move(name); | 58 | process->name = std::move(name); |
| 60 | process->resource_limit = kernel.GetSystemResourceLimit(); | 59 | process->resource_limit = kernel.GetSystemResourceLimit(); |
| 61 | process->status = ProcessStatus::Created; | 60 | process->status = ProcessStatus::Created; |
| @@ -99,7 +98,13 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { | |||
| 99 | vm_manager.Reset(metadata.GetAddressSpaceType()); | 98 | vm_manager.Reset(metadata.GetAddressSpaceType()); |
| 100 | 99 | ||
| 101 | const auto& caps = metadata.GetKernelCapabilities(); | 100 | const auto& caps = metadata.GetKernelCapabilities(); |
| 102 | return capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager); | 101 | const auto capability_init_result = |
| 102 | capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager); | ||
| 103 | if (capability_init_result.IsError()) { | ||
| 104 | return capability_init_result; | ||
| 105 | } | ||
| 106 | |||
| 107 | return handle_table.SetSize(capabilities.GetHandleTableSize()); | ||
| 103 | } | 108 | } |
| 104 | 109 | ||
| 105 | void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { | 110 | void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { |
| @@ -126,7 +131,7 @@ void Process::PrepareForTermination() { | |||
| 126 | if (thread->GetOwnerProcess() != this) | 131 | if (thread->GetOwnerProcess() != this) |
| 127 | continue; | 132 | continue; |
| 128 | 133 | ||
| 129 | if (thread == GetCurrentThread()) | 134 | if (thread == system.CurrentScheduler().GetCurrentThread()) |
| 130 | continue; | 135 | continue; |
| 131 | 136 | ||
| 132 | // TODO(Subv): When are the other running/ready threads terminated? | 137 | // TODO(Subv): When are the other running/ready threads terminated? |
| @@ -138,7 +143,6 @@ void Process::PrepareForTermination() { | |||
| 138 | } | 143 | } |
| 139 | }; | 144 | }; |
| 140 | 145 | ||
| 141 | const auto& system = Core::System::GetInstance(); | ||
| 142 | stop_threads(system.Scheduler(0).GetThreadList()); | 146 | stop_threads(system.Scheduler(0).GetThreadList()); |
| 143 | stop_threads(system.Scheduler(1).GetThreadList()); | 147 | stop_threads(system.Scheduler(1).GetThreadList()); |
| 144 | stop_threads(system.Scheduler(2).GetThreadList()); | 148 | stop_threads(system.Scheduler(2).GetThreadList()); |
| @@ -206,7 +210,7 @@ void Process::FreeTLSSlot(VAddr tls_address) { | |||
| 206 | } | 210 | } |
| 207 | 211 | ||
| 208 | void Process::LoadModule(CodeSet module_, VAddr base_addr) { | 212 | void Process::LoadModule(CodeSet module_, VAddr base_addr) { |
| 209 | const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, | 213 | const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, |
| 210 | MemoryState memory_state) { | 214 | MemoryState memory_state) { |
| 211 | const auto vma = vm_manager | 215 | const auto vma = vm_manager |
| 212 | .MapMemoryBlock(segment.addr + base_addr, module_.memory, | 216 | .MapMemoryBlock(segment.addr + base_addr, module_.memory, |
| @@ -221,14 +225,12 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) { | |||
| 221 | MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable); | 225 | MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeMutable); |
| 222 | 226 | ||
| 223 | // Clear instruction cache in CPU JIT | 227 | // Clear instruction cache in CPU JIT |
| 224 | Core::System::GetInstance().ArmInterface(0).ClearInstructionCache(); | 228 | system.InvalidateCpuInstructionCaches(); |
| 225 | Core::System::GetInstance().ArmInterface(1).ClearInstructionCache(); | ||
| 226 | Core::System::GetInstance().ArmInterface(2).ClearInstructionCache(); | ||
| 227 | Core::System::GetInstance().ArmInterface(3).ClearInstructionCache(); | ||
| 228 | } | 229 | } |
| 229 | 230 | ||
| 230 | Kernel::Process::Process(KernelCore& kernel) : WaitObject{kernel} {} | 231 | Process::Process(Core::System& system) |
| 231 | Kernel::Process::~Process() {} | 232 | : WaitObject{system.Kernel()}, address_arbiter{system}, system{system} {} |
| 233 | Process::~Process() = default; | ||
| 232 | 234 | ||
| 233 | void Process::Acquire(Thread* thread) { | 235 | void Process::Acquire(Thread* thread) { |
| 234 | ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); | 236 | ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); |
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index dcc57ae9f..3ae7c922c 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h | |||
| @@ -7,17 +7,21 @@ | |||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <bitset> | 8 | #include <bitset> |
| 9 | #include <cstddef> | 9 | #include <cstddef> |
| 10 | #include <memory> | ||
| 11 | #include <string> | 10 | #include <string> |
| 12 | #include <vector> | 11 | #include <vector> |
| 13 | #include <boost/container/static_vector.hpp> | 12 | #include <boost/container/static_vector.hpp> |
| 14 | #include "common/common_types.h" | 13 | #include "common/common_types.h" |
| 14 | #include "core/hle/kernel/address_arbiter.h" | ||
| 15 | #include "core/hle/kernel/handle_table.h" | 15 | #include "core/hle/kernel/handle_table.h" |
| 16 | #include "core/hle/kernel/process_capability.h" | 16 | #include "core/hle/kernel/process_capability.h" |
| 17 | #include "core/hle/kernel/vm_manager.h" | 17 | #include "core/hle/kernel/vm_manager.h" |
| 18 | #include "core/hle/kernel/wait_object.h" | 18 | #include "core/hle/kernel/wait_object.h" |
| 19 | #include "core/hle/result.h" | 19 | #include "core/hle/result.h" |
| 20 | 20 | ||
| 21 | namespace Core { | ||
| 22 | class System; | ||
| 23 | } | ||
| 24 | |||
| 21 | namespace FileSys { | 25 | namespace FileSys { |
| 22 | class ProgramMetadata; | 26 | class ProgramMetadata; |
| 23 | } | 27 | } |
| @@ -28,6 +32,8 @@ class KernelCore; | |||
| 28 | class ResourceLimit; | 32 | class ResourceLimit; |
| 29 | class Thread; | 33 | class Thread; |
| 30 | 34 | ||
| 35 | struct CodeSet; | ||
| 36 | |||
| 31 | struct AddressMapping { | 37 | struct AddressMapping { |
| 32 | // Address and size must be page-aligned | 38 | // Address and size must be page-aligned |
| 33 | VAddr address; | 39 | VAddr address; |
| @@ -60,46 +66,6 @@ enum class ProcessStatus { | |||
| 60 | DebugBreak, | 66 | DebugBreak, |
| 61 | }; | 67 | }; |
| 62 | 68 | ||
| 63 | struct CodeSet final { | ||
| 64 | struct Segment { | ||
| 65 | std::size_t offset = 0; | ||
| 66 | VAddr addr = 0; | ||
| 67 | u32 size = 0; | ||
| 68 | }; | ||
| 69 | |||
| 70 | explicit CodeSet(); | ||
| 71 | ~CodeSet(); | ||
| 72 | |||
| 73 | Segment& CodeSegment() { | ||
| 74 | return segments[0]; | ||
| 75 | } | ||
| 76 | |||
| 77 | const Segment& CodeSegment() const { | ||
| 78 | return segments[0]; | ||
| 79 | } | ||
| 80 | |||
| 81 | Segment& RODataSegment() { | ||
| 82 | return segments[1]; | ||
| 83 | } | ||
| 84 | |||
| 85 | const Segment& RODataSegment() const { | ||
| 86 | return segments[1]; | ||
| 87 | } | ||
| 88 | |||
| 89 | Segment& DataSegment() { | ||
| 90 | return segments[2]; | ||
| 91 | } | ||
| 92 | |||
| 93 | const Segment& DataSegment() const { | ||
| 94 | return segments[2]; | ||
| 95 | } | ||
| 96 | |||
| 97 | std::shared_ptr<std::vector<u8>> memory; | ||
| 98 | |||
| 99 | std::array<Segment, 3> segments; | ||
| 100 | VAddr entrypoint = 0; | ||
| 101 | }; | ||
| 102 | |||
| 103 | class Process final : public WaitObject { | 69 | class Process final : public WaitObject { |
| 104 | public: | 70 | public: |
| 105 | enum : u64 { | 71 | enum : u64 { |
| @@ -116,7 +82,7 @@ public: | |||
| 116 | 82 | ||
| 117 | static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; | 83 | static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; |
| 118 | 84 | ||
| 119 | static SharedPtr<Process> Create(KernelCore& kernel, std::string&& name); | 85 | static SharedPtr<Process> Create(Core::System& system, std::string&& name); |
| 120 | 86 | ||
| 121 | std::string GetTypeName() const override { | 87 | std::string GetTypeName() const override { |
| 122 | return "Process"; | 88 | return "Process"; |
| @@ -150,6 +116,16 @@ public: | |||
| 150 | return handle_table; | 116 | return handle_table; |
| 151 | } | 117 | } |
| 152 | 118 | ||
| 119 | /// Gets a reference to the process' address arbiter. | ||
| 120 | AddressArbiter& GetAddressArbiter() { | ||
| 121 | return address_arbiter; | ||
| 122 | } | ||
| 123 | |||
| 124 | /// Gets a const reference to the process' address arbiter. | ||
| 125 | const AddressArbiter& GetAddressArbiter() const { | ||
| 126 | return address_arbiter; | ||
| 127 | } | ||
| 128 | |||
| 153 | /// Gets the current status of the process | 129 | /// Gets the current status of the process |
| 154 | ProcessStatus GetStatus() const { | 130 | ProcessStatus GetStatus() const { |
| 155 | return status; | 131 | return status; |
| @@ -251,7 +227,7 @@ public: | |||
| 251 | void FreeTLSSlot(VAddr tls_address); | 227 | void FreeTLSSlot(VAddr tls_address); |
| 252 | 228 | ||
| 253 | private: | 229 | private: |
| 254 | explicit Process(KernelCore& kernel); | 230 | explicit Process(Core::System& system); |
| 255 | ~Process() override; | 231 | ~Process() override; |
| 256 | 232 | ||
| 257 | /// Checks if the specified thread should wait until this process is available. | 233 | /// Checks if the specified thread should wait until this process is available. |
| @@ -309,9 +285,16 @@ private: | |||
| 309 | /// Per-process handle table for storing created object handles in. | 285 | /// Per-process handle table for storing created object handles in. |
| 310 | HandleTable handle_table; | 286 | HandleTable handle_table; |
| 311 | 287 | ||
| 288 | /// Per-process address arbiter. | ||
| 289 | AddressArbiter address_arbiter; | ||
| 290 | |||
| 312 | /// Random values for svcGetInfo RandomEntropy | 291 | /// Random values for svcGetInfo RandomEntropy |
| 313 | std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; | 292 | std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; |
| 314 | 293 | ||
| 294 | /// System context | ||
| 295 | Core::System& system; | ||
| 296 | |||
| 297 | /// Name of this process | ||
| 315 | std::string name; | 298 | std::string name; |
| 316 | }; | 299 | }; |
| 317 | 300 | ||
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp index 3a2164b25..583e35b79 100644 --- a/src/core/hle/kernel/process_capability.cpp +++ b/src/core/hle/kernel/process_capability.cpp | |||
| @@ -96,7 +96,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() { | |||
| 96 | interrupt_capabilities.set(); | 96 | interrupt_capabilities.set(); |
| 97 | 97 | ||
| 98 | // Allow using the maximum possible amount of handles | 98 | // Allow using the maximum possible amount of handles |
| 99 | handle_table_size = static_cast<u32>(HandleTable::MAX_COUNT); | 99 | handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT); |
| 100 | 100 | ||
| 101 | // Allow all debugging capabilities. | 101 | // Allow all debugging capabilities. |
| 102 | is_debuggable = true; | 102 | is_debuggable = true; |
| @@ -337,7 +337,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) { | |||
| 337 | return ERR_RESERVED_VALUE; | 337 | return ERR_RESERVED_VALUE; |
| 338 | } | 338 | } |
| 339 | 339 | ||
| 340 | handle_table_size = (flags >> 16) & 0x3FF; | 340 | handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF); |
| 341 | return RESULT_SUCCESS; | 341 | return RESULT_SUCCESS; |
| 342 | } | 342 | } |
| 343 | 343 | ||
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h index fbc8812a3..5cdd80747 100644 --- a/src/core/hle/kernel/process_capability.h +++ b/src/core/hle/kernel/process_capability.h | |||
| @@ -156,7 +156,7 @@ public: | |||
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | /// Gets the number of total allowable handles for the process' handle table. | 158 | /// Gets the number of total allowable handles for the process' handle table. |
| 159 | u32 GetHandleTableSize() const { | 159 | s32 GetHandleTableSize() const { |
| 160 | return handle_table_size; | 160 | return handle_table_size; |
| 161 | } | 161 | } |
| 162 | 162 | ||
| @@ -252,7 +252,7 @@ private: | |||
| 252 | u64 core_mask = 0; | 252 | u64 core_mask = 0; |
| 253 | u64 priority_mask = 0; | 253 | u64 priority_mask = 0; |
| 254 | 254 | ||
| 255 | u32 handle_table_size = 0; | 255 | s32 handle_table_size = 0; |
| 256 | u32 kernel_version = 0; | 256 | u32 kernel_version = 0; |
| 257 | 257 | ||
| 258 | ProgramType program_type = ProgramType::SysModule; | 258 | ProgramType program_type = ProgramType::SysModule; |
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index df4d6cf0a..cc189cc64 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -19,7 +19,8 @@ namespace Kernel { | |||
| 19 | 19 | ||
| 20 | std::mutex Scheduler::scheduler_mutex; | 20 | std::mutex Scheduler::scheduler_mutex; |
| 21 | 21 | ||
| 22 | Scheduler::Scheduler(Core::ARM_Interface& cpu_core) : cpu_core(cpu_core) {} | 22 | Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core) |
| 23 | : cpu_core{cpu_core}, system{system} {} | ||
| 23 | 24 | ||
| 24 | Scheduler::~Scheduler() { | 25 | Scheduler::~Scheduler() { |
| 25 | for (auto& thread : thread_list) { | 26 | for (auto& thread : thread_list) { |
| @@ -61,7 +62,7 @@ Thread* Scheduler::PopNextReadyThread() { | |||
| 61 | 62 | ||
| 62 | void Scheduler::SwitchContext(Thread* new_thread) { | 63 | void Scheduler::SwitchContext(Thread* new_thread) { |
| 63 | Thread* const previous_thread = GetCurrentThread(); | 64 | Thread* const previous_thread = GetCurrentThread(); |
| 64 | Process* const previous_process = Core::CurrentProcess(); | 65 | Process* const previous_process = system.Kernel().CurrentProcess(); |
| 65 | 66 | ||
| 66 | UpdateLastContextSwitchTime(previous_thread, previous_process); | 67 | UpdateLastContextSwitchTime(previous_thread, previous_process); |
| 67 | 68 | ||
| @@ -94,8 +95,8 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 94 | 95 | ||
| 95 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); | 96 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); |
| 96 | if (previous_process != thread_owner_process) { | 97 | if (previous_process != thread_owner_process) { |
| 97 | Core::System::GetInstance().Kernel().MakeCurrentProcess(thread_owner_process); | 98 | system.Kernel().MakeCurrentProcess(thread_owner_process); |
| 98 | SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table); | 99 | Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table); |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 101 | cpu_core.LoadContext(new_thread->GetContext()); | 102 | cpu_core.LoadContext(new_thread->GetContext()); |
| @@ -111,7 +112,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 111 | 112 | ||
| 112 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | 113 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { |
| 113 | const u64 prev_switch_ticks = last_context_switch_time; | 114 | const u64 prev_switch_ticks = last_context_switch_time; |
| 114 | const u64 most_recent_switch_ticks = CoreTiming::GetTicks(); | 115 | const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); |
| 115 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | 116 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; |
| 116 | 117 | ||
| 117 | if (thread != nullptr) { | 118 | if (thread != nullptr) { |
| @@ -198,8 +199,7 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { | |||
| 198 | ASSERT(thread->GetPriority() < THREADPRIO_COUNT); | 199 | ASSERT(thread->GetPriority() < THREADPRIO_COUNT); |
| 199 | 200 | ||
| 200 | // Yield this thread -- sleep for zero time and force reschedule to different thread | 201 | // Yield this thread -- sleep for zero time and force reschedule to different thread |
| 201 | WaitCurrentThread_Sleep(); | 202 | GetCurrentThread()->Sleep(0); |
| 202 | GetCurrentThread()->WakeAfterDelay(0); | ||
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | void Scheduler::YieldWithLoadBalancing(Thread* thread) { | 205 | void Scheduler::YieldWithLoadBalancing(Thread* thread) { |
| @@ -214,8 +214,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) { | |||
| 214 | ASSERT(priority < THREADPRIO_COUNT); | 214 | ASSERT(priority < THREADPRIO_COUNT); |
| 215 | 215 | ||
| 216 | // Sleep for zero time to be able to force reschedule to different thread | 216 | // Sleep for zero time to be able to force reschedule to different thread |
| 217 | WaitCurrentThread_Sleep(); | 217 | GetCurrentThread()->Sleep(0); |
| 218 | GetCurrentThread()->WakeAfterDelay(0); | ||
| 219 | 218 | ||
| 220 | Thread* suggested_thread = nullptr; | 219 | Thread* suggested_thread = nullptr; |
| 221 | 220 | ||
| @@ -223,8 +222,7 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) { | |||
| 223 | // Take the first non-nullptr one | 222 | // Take the first non-nullptr one |
| 224 | for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { | 223 | for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { |
| 225 | const auto res = | 224 | const auto res = |
| 226 | Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread( | 225 | system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority); |
| 227 | core, priority); | ||
| 228 | 226 | ||
| 229 | // If scheduler provides a suggested thread | 227 | // If scheduler provides a suggested thread |
| 230 | if (res != nullptr) { | 228 | if (res != nullptr) { |
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 97ced4dfc..1c5bf57d9 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h | |||
| @@ -13,7 +13,8 @@ | |||
| 13 | 13 | ||
| 14 | namespace Core { | 14 | namespace Core { |
| 15 | class ARM_Interface; | 15 | class ARM_Interface; |
| 16 | } | 16 | class System; |
| 17 | } // namespace Core | ||
| 17 | 18 | ||
| 18 | namespace Kernel { | 19 | namespace Kernel { |
| 19 | 20 | ||
| @@ -21,7 +22,7 @@ class Process; | |||
| 21 | 22 | ||
| 22 | class Scheduler final { | 23 | class Scheduler final { |
| 23 | public: | 24 | public: |
| 24 | explicit Scheduler(Core::ARM_Interface& cpu_core); | 25 | explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core); |
| 25 | ~Scheduler(); | 26 | ~Scheduler(); |
| 26 | 27 | ||
| 27 | /// Returns whether there are any threads that are ready to run. | 28 | /// Returns whether there are any threads that are ready to run. |
| @@ -162,6 +163,7 @@ private: | |||
| 162 | Core::ARM_Interface& cpu_core; | 163 | Core::ARM_Interface& cpu_core; |
| 163 | u64 last_context_switch_time = 0; | 164 | u64 last_context_switch_time = 0; |
| 164 | 165 | ||
| 166 | Core::System& system; | ||
| 165 | static std::mutex scheduler_mutex; | 167 | static std::mutex scheduler_mutex; |
| 166 | }; | 168 | }; |
| 167 | 169 | ||
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp index d6ceeb2da..0e1515c89 100644 --- a/src/core/hle/kernel/server_port.cpp +++ b/src/core/hle/kernel/server_port.cpp | |||
| @@ -26,6 +26,10 @@ ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() { | |||
| 26 | return MakeResult(std::move(session)); | 26 | return MakeResult(std::move(session)); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session) { | ||
| 30 | pending_sessions.push_back(std::move(pending_session)); | ||
| 31 | } | ||
| 32 | |||
| 29 | bool ServerPort::ShouldWait(Thread* thread) const { | 33 | bool ServerPort::ShouldWait(Thread* thread) const { |
| 30 | // If there are no pending sessions, we wait until a new one is added. | 34 | // If there are no pending sessions, we wait until a new one is added. |
| 31 | return pending_sessions.empty(); | 35 | return pending_sessions.empty(); |
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h index e52f8245f..9bc667cf2 100644 --- a/src/core/hle/kernel/server_port.h +++ b/src/core/hle/kernel/server_port.h | |||
| @@ -22,6 +22,8 @@ class SessionRequestHandler; | |||
| 22 | 22 | ||
| 23 | class ServerPort final : public WaitObject { | 23 | class ServerPort final : public WaitObject { |
| 24 | public: | 24 | public: |
| 25 | using HLEHandler = std::shared_ptr<SessionRequestHandler>; | ||
| 26 | |||
| 25 | /** | 27 | /** |
| 26 | * Creates a pair of ServerPort and an associated ClientPort. | 28 | * Creates a pair of ServerPort and an associated ClientPort. |
| 27 | * | 29 | * |
| @@ -51,22 +53,27 @@ public: | |||
| 51 | */ | 53 | */ |
| 52 | ResultVal<SharedPtr<ServerSession>> Accept(); | 54 | ResultVal<SharedPtr<ServerSession>> Accept(); |
| 53 | 55 | ||
| 56 | /// Whether or not this server port has an HLE handler available. | ||
| 57 | bool HasHLEHandler() const { | ||
| 58 | return hle_handler != nullptr; | ||
| 59 | } | ||
| 60 | |||
| 61 | /// Gets the HLE handler for this port. | ||
| 62 | HLEHandler GetHLEHandler() const { | ||
| 63 | return hle_handler; | ||
| 64 | } | ||
| 65 | |||
| 54 | /** | 66 | /** |
| 55 | * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port | 67 | * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port |
| 56 | * will inherit a reference to this handler. | 68 | * will inherit a reference to this handler. |
| 57 | */ | 69 | */ |
| 58 | void SetHleHandler(std::shared_ptr<SessionRequestHandler> hle_handler_) { | 70 | void SetHleHandler(HLEHandler hle_handler_) { |
| 59 | hle_handler = std::move(hle_handler_); | 71 | hle_handler = std::move(hle_handler_); |
| 60 | } | 72 | } |
| 61 | 73 | ||
| 62 | std::string name; ///< Name of port (optional) | 74 | /// Appends a ServerSession to the collection of ServerSessions |
| 63 | 75 | /// waiting to be accepted by this port. | |
| 64 | /// ServerSessions waiting to be accepted by the port | 76 | void AppendPendingSession(SharedPtr<ServerSession> pending_session); |
| 65 | std::vector<SharedPtr<ServerSession>> pending_sessions; | ||
| 66 | |||
| 67 | /// This session's HLE request handler template (optional) | ||
| 68 | /// ServerSessions created from this port inherit a reference to this handler. | ||
| 69 | std::shared_ptr<SessionRequestHandler> hle_handler; | ||
| 70 | 77 | ||
| 71 | bool ShouldWait(Thread* thread) const override; | 78 | bool ShouldWait(Thread* thread) const override; |
| 72 | void Acquire(Thread* thread) override; | 79 | void Acquire(Thread* thread) override; |
| @@ -74,6 +81,16 @@ public: | |||
| 74 | private: | 81 | private: |
| 75 | explicit ServerPort(KernelCore& kernel); | 82 | explicit ServerPort(KernelCore& kernel); |
| 76 | ~ServerPort() override; | 83 | ~ServerPort() override; |
| 84 | |||
| 85 | /// ServerSessions waiting to be accepted by the port | ||
| 86 | std::vector<SharedPtr<ServerSession>> pending_sessions; | ||
| 87 | |||
| 88 | /// This session's HLE request handler template (optional) | ||
| 89 | /// ServerSessions created from this port inherit a reference to this handler. | ||
| 90 | HLEHandler hle_handler; | ||
| 91 | |||
| 92 | /// Name of the port (optional) | ||
| 93 | std::string name; | ||
| 77 | }; | 94 | }; |
| 78 | 95 | ||
| 79 | } // namespace Kernel | 96 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 027434f92..4d8a337a7 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp | |||
| @@ -63,42 +63,71 @@ void ServerSession::Acquire(Thread* thread) { | |||
| 63 | pending_requesting_threads.pop_back(); | 63 | pending_requesting_threads.pop_back(); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | void ServerSession::ClientDisconnected() { | ||
| 67 | // We keep a shared pointer to the hle handler to keep it alive throughout | ||
| 68 | // the call to ClientDisconnected, as ClientDisconnected invalidates the | ||
| 69 | // hle_handler member itself during the course of the function executing. | ||
| 70 | std::shared_ptr<SessionRequestHandler> handler = hle_handler; | ||
| 71 | if (handler) { | ||
| 72 | // Note that after this returns, this server session's hle_handler is | ||
| 73 | // invalidated (set to null). | ||
| 74 | handler->ClientDisconnected(this); | ||
| 75 | } | ||
| 76 | |||
| 77 | // TODO(Subv): Force a wake up of all the ServerSession's waiting threads and set | ||
| 78 | // their WaitSynchronization result to 0xC920181A. | ||
| 79 | |||
| 80 | // Clean up the list of client threads with pending requests, they are unneeded now that the | ||
| 81 | // client endpoint is closed. | ||
| 82 | pending_requesting_threads.clear(); | ||
| 83 | currently_handling = nullptr; | ||
| 84 | } | ||
| 85 | |||
| 86 | void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) { | ||
| 87 | domain_request_handlers.push_back(std::move(handler)); | ||
| 88 | } | ||
| 89 | |||
| 90 | std::size_t ServerSession::NumDomainRequestHandlers() const { | ||
| 91 | return domain_request_handlers.size(); | ||
| 92 | } | ||
| 93 | |||
| 66 | ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { | 94 | ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { |
| 67 | auto* const domain_message_header = context.GetDomainMessageHeader(); | 95 | if (!context.HasDomainMessageHeader()) { |
| 68 | if (domain_message_header) { | 96 | return RESULT_SUCCESS; |
| 69 | // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs | 97 | } |
| 70 | context.SetDomainRequestHandlers(domain_request_handlers); | 98 | |
| 71 | 99 | // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs | |
| 72 | // If there is a DomainMessageHeader, then this is CommandType "Request" | 100 | context.SetDomainRequestHandlers(domain_request_handlers); |
| 73 | const u32 object_id{context.GetDomainMessageHeader()->object_id}; | 101 | |
| 74 | switch (domain_message_header->command) { | 102 | // If there is a DomainMessageHeader, then this is CommandType "Request" |
| 75 | case IPC::DomainMessageHeader::CommandType::SendMessage: | 103 | const auto& domain_message_header = context.GetDomainMessageHeader(); |
| 76 | if (object_id > domain_request_handlers.size()) { | 104 | const u32 object_id{domain_message_header.object_id}; |
| 77 | LOG_CRITICAL(IPC, | 105 | switch (domain_message_header.command) { |
| 78 | "object_id {} is too big! This probably means a recent service call " | 106 | case IPC::DomainMessageHeader::CommandType::SendMessage: |
| 79 | "to {} needed to return a new interface!", | 107 | if (object_id > domain_request_handlers.size()) { |
| 80 | object_id, name); | 108 | LOG_CRITICAL(IPC, |
| 81 | UNREACHABLE(); | 109 | "object_id {} is too big! This probably means a recent service call " |
| 82 | return RESULT_SUCCESS; // Ignore error if asserts are off | 110 | "to {} needed to return a new interface!", |
| 83 | } | 111 | object_id, name); |
| 84 | return domain_request_handlers[object_id - 1]->HandleSyncRequest(context); | 112 | UNREACHABLE(); |
| 85 | 113 | return RESULT_SUCCESS; // Ignore error if asserts are off | |
| 86 | case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { | ||
| 87 | LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); | ||
| 88 | |||
| 89 | domain_request_handlers[object_id - 1] = nullptr; | ||
| 90 | |||
| 91 | IPC::ResponseBuilder rb{context, 2}; | ||
| 92 | rb.Push(RESULT_SUCCESS); | ||
| 93 | return RESULT_SUCCESS; | ||
| 94 | } | ||
| 95 | } | 114 | } |
| 115 | return domain_request_handlers[object_id - 1]->HandleSyncRequest(context); | ||
| 96 | 116 | ||
| 97 | LOG_CRITICAL(IPC, "Unknown domain command={}", | 117 | case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { |
| 98 | static_cast<int>(domain_message_header->command.Value())); | 118 | LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); |
| 99 | ASSERT(false); | 119 | |
| 120 | domain_request_handlers[object_id - 1] = nullptr; | ||
| 121 | |||
| 122 | IPC::ResponseBuilder rb{context, 2}; | ||
| 123 | rb.Push(RESULT_SUCCESS); | ||
| 124 | return RESULT_SUCCESS; | ||
| 125 | } | ||
| 100 | } | 126 | } |
| 101 | 127 | ||
| 128 | LOG_CRITICAL(IPC, "Unknown domain command={}", | ||
| 129 | static_cast<int>(domain_message_header.command.Value())); | ||
| 130 | ASSERT(false); | ||
| 102 | return RESULT_SUCCESS; | 131 | return RESULT_SUCCESS; |
| 103 | } | 132 | } |
| 104 | 133 | ||
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index e0e9d64c8..aea4ccfeb 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h | |||
| @@ -46,6 +46,14 @@ public: | |||
| 46 | return HANDLE_TYPE; | 46 | return HANDLE_TYPE; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | Session* GetParent() { | ||
| 50 | return parent.get(); | ||
| 51 | } | ||
| 52 | |||
| 53 | const Session* GetParent() const { | ||
| 54 | return parent.get(); | ||
| 55 | } | ||
| 56 | |||
| 49 | using SessionPair = std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>>; | 57 | using SessionPair = std::tuple<SharedPtr<ServerSession>, SharedPtr<ClientSession>>; |
| 50 | 58 | ||
| 51 | /** | 59 | /** |
| @@ -78,23 +86,16 @@ public: | |||
| 78 | 86 | ||
| 79 | void Acquire(Thread* thread) override; | 87 | void Acquire(Thread* thread) override; |
| 80 | 88 | ||
| 81 | std::string name; ///< The name of this session (optional) | 89 | /// Called when a client disconnection occurs. |
| 82 | std::shared_ptr<Session> parent; ///< The parent session, which links to the client endpoint. | 90 | void ClientDisconnected(); |
| 83 | std::shared_ptr<SessionRequestHandler> | ||
| 84 | hle_handler; ///< This session's HLE request handler (applicable when not a domain) | ||
| 85 | 91 | ||
| 86 | /// This is the list of domain request handlers (after conversion to a domain) | 92 | /// Adds a new domain request handler to the collection of request handlers within |
| 87 | std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; | 93 | /// this ServerSession instance. |
| 88 | 94 | void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler); | |
| 89 | /// List of threads that are pending a response after a sync request. This list is processed in | ||
| 90 | /// a LIFO manner, thus, the last request will be dispatched first. | ||
| 91 | /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test. | ||
| 92 | std::vector<SharedPtr<Thread>> pending_requesting_threads; | ||
| 93 | 95 | ||
| 94 | /// Thread whose request is currently being handled. A request is considered "handled" when a | 96 | /// Retrieves the total number of domain request handlers that have been |
| 95 | /// response is sent via svcReplyAndReceive. | 97 | /// appended to this ServerSession instance. |
| 96 | /// TODO(Subv): Find a better name for this. | 98 | std::size_t NumDomainRequestHandlers() const; |
| 97 | SharedPtr<Thread> currently_handling; | ||
| 98 | 99 | ||
| 99 | /// Returns true if the session has been converted to a domain, otherwise False | 100 | /// Returns true if the session has been converted to a domain, otherwise False |
| 100 | bool IsDomain() const { | 101 | bool IsDomain() const { |
| @@ -129,8 +130,30 @@ private: | |||
| 129 | /// object handle. | 130 | /// object handle. |
| 130 | ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context); | 131 | ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context); |
| 131 | 132 | ||
| 133 | /// The parent session, which links to the client endpoint. | ||
| 134 | std::shared_ptr<Session> parent; | ||
| 135 | |||
| 136 | /// This session's HLE request handler (applicable when not a domain) | ||
| 137 | std::shared_ptr<SessionRequestHandler> hle_handler; | ||
| 138 | |||
| 139 | /// This is the list of domain request handlers (after conversion to a domain) | ||
| 140 | std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; | ||
| 141 | |||
| 142 | /// List of threads that are pending a response after a sync request. This list is processed in | ||
| 143 | /// a LIFO manner, thus, the last request will be dispatched first. | ||
| 144 | /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test. | ||
| 145 | std::vector<SharedPtr<Thread>> pending_requesting_threads; | ||
| 146 | |||
| 147 | /// Thread whose request is currently being handled. A request is considered "handled" when a | ||
| 148 | /// response is sent via svcReplyAndReceive. | ||
| 149 | /// TODO(Subv): Find a better name for this. | ||
| 150 | SharedPtr<Thread> currently_handling; | ||
| 151 | |||
| 132 | /// When set to True, converts the session to a domain at the end of the command | 152 | /// When set to True, converts the session to a domain at the end of the command |
| 133 | bool convert_to_domain{}; | 153 | bool convert_to_domain{}; |
| 154 | |||
| 155 | /// The name of this session (optional) | ||
| 156 | std::string name; | ||
| 134 | }; | 157 | }; |
| 135 | 158 | ||
| 136 | } // namespace Kernel | 159 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index 22d0c1dd5..62861da36 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | 6 | ||
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/logging/log.h" | 8 | #include "common/logging/log.h" |
| 9 | #include "core/core.h" | ||
| 10 | #include "core/hle/kernel/errors.h" | 9 | #include "core/hle/kernel/errors.h" |
| 11 | #include "core/hle/kernel/kernel.h" | 10 | #include "core/hle/kernel/kernel.h" |
| 12 | #include "core/hle/kernel/shared_memory.h" | 11 | #include "core/hle/kernel/shared_memory.h" |
| @@ -34,8 +33,8 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_ | |||
| 34 | shared_memory->backing_block_offset = 0; | 33 | shared_memory->backing_block_offset = 0; |
| 35 | 34 | ||
| 36 | // Refresh the address mappings for the current process. | 35 | // Refresh the address mappings for the current process. |
| 37 | if (Core::CurrentProcess() != nullptr) { | 36 | if (kernel.CurrentProcess() != nullptr) { |
| 38 | Core::CurrentProcess()->VMManager().RefreshMemoryBlockMappings( | 37 | kernel.CurrentProcess()->VMManager().RefreshMemoryBlockMappings( |
| 39 | shared_memory->backing_block.get()); | 38 | shared_memory->backing_block.get()); |
| 40 | } | 39 | } |
| 41 | } else { | 40 | } else { |
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 7cfecb68c..047fa0c19 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include "core/hle/kernel/address_arbiter.h" | 20 | #include "core/hle/kernel/address_arbiter.h" |
| 21 | #include "core/hle/kernel/client_port.h" | 21 | #include "core/hle/kernel/client_port.h" |
| 22 | #include "core/hle/kernel/client_session.h" | 22 | #include "core/hle/kernel/client_session.h" |
| 23 | #include "core/hle/kernel/errors.h" | ||
| 23 | #include "core/hle/kernel/handle_table.h" | 24 | #include "core/hle/kernel/handle_table.h" |
| 24 | #include "core/hle/kernel/kernel.h" | 25 | #include "core/hle/kernel/kernel.h" |
| 25 | #include "core/hle/kernel/mutex.h" | 26 | #include "core/hle/kernel/mutex.h" |
| @@ -47,23 +48,6 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) { | |||
| 47 | return address + size > address; | 48 | return address + size > address; |
| 48 | } | 49 | } |
| 49 | 50 | ||
| 50 | // Checks if a given address range lies within a larger address range. | ||
| 51 | constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin, | ||
| 52 | VAddr address_range_end) { | ||
| 53 | const VAddr end_address = address + size - 1; | ||
| 54 | return address_range_begin <= address && end_address <= address_range_end - 1; | ||
| 55 | } | ||
| 56 | |||
| 57 | bool IsInsideAddressSpace(const VMManager& vm, VAddr address, u64 size) { | ||
| 58 | return IsInsideAddressRange(address, size, vm.GetAddressSpaceBaseAddress(), | ||
| 59 | vm.GetAddressSpaceEndAddress()); | ||
| 60 | } | ||
| 61 | |||
| 62 | bool IsInsideNewMapRegion(const VMManager& vm, VAddr address, u64 size) { | ||
| 63 | return IsInsideAddressRange(address, size, vm.GetNewMapRegionBaseAddress(), | ||
| 64 | vm.GetNewMapRegionEndAddress()); | ||
| 65 | } | ||
| 66 | |||
| 67 | // 8 GiB | 51 | // 8 GiB |
| 68 | constexpr u64 MAIN_MEMORY_SIZE = 0x200000000; | 52 | constexpr u64 MAIN_MEMORY_SIZE = 0x200000000; |
| 69 | 53 | ||
| @@ -105,14 +89,14 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add | |||
| 105 | return ERR_INVALID_ADDRESS_STATE; | 89 | return ERR_INVALID_ADDRESS_STATE; |
| 106 | } | 90 | } |
| 107 | 91 | ||
| 108 | if (!IsInsideAddressSpace(vm_manager, src_addr, size)) { | 92 | if (!vm_manager.IsWithinAddressSpace(src_addr, size)) { |
| 109 | LOG_ERROR(Kernel_SVC, | 93 | LOG_ERROR(Kernel_SVC, |
| 110 | "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", | 94 | "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", |
| 111 | src_addr, size); | 95 | src_addr, size); |
| 112 | return ERR_INVALID_ADDRESS_STATE; | 96 | return ERR_INVALID_ADDRESS_STATE; |
| 113 | } | 97 | } |
| 114 | 98 | ||
| 115 | if (!IsInsideNewMapRegion(vm_manager, dst_addr, size)) { | 99 | if (!vm_manager.IsWithinNewMapRegion(dst_addr, size)) { |
| 116 | LOG_ERROR(Kernel_SVC, | 100 | LOG_ERROR(Kernel_SVC, |
| 117 | "Destination is not within the new map region, addr=0x{:016X}, size=0x{:016X}", | 101 | "Destination is not within the new map region, addr=0x{:016X}, size=0x{:016X}", |
| 118 | dst_addr, size); | 102 | dst_addr, size); |
| @@ -238,7 +222,7 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) { | |||
| 238 | auto* const current_process = Core::CurrentProcess(); | 222 | auto* const current_process = Core::CurrentProcess(); |
| 239 | auto& vm_manager = current_process->VMManager(); | 223 | auto& vm_manager = current_process->VMManager(); |
| 240 | 224 | ||
| 241 | if (!IsInsideAddressSpace(vm_manager, addr, size)) { | 225 | if (!vm_manager.IsWithinAddressSpace(addr, size)) { |
| 242 | LOG_ERROR(Kernel_SVC, | 226 | LOG_ERROR(Kernel_SVC, |
| 243 | "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, | 227 | "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, |
| 244 | size); | 228 | size); |
| @@ -299,7 +283,7 @@ static ResultCode SetMemoryAttribute(VAddr address, u64 size, u32 mask, u32 attr | |||
| 299 | } | 283 | } |
| 300 | 284 | ||
| 301 | auto& vm_manager = Core::CurrentProcess()->VMManager(); | 285 | auto& vm_manager = Core::CurrentProcess()->VMManager(); |
| 302 | if (!IsInsideAddressSpace(vm_manager, address, size)) { | 286 | if (!vm_manager.IsWithinAddressSpace(address, size)) { |
| 303 | LOG_ERROR(Kernel_SVC, | 287 | LOG_ERROR(Kernel_SVC, |
| 304 | "Given address (0x{:016X}) is outside the bounds of the address space.", address); | 288 | "Given address (0x{:016X}) is outside the bounds of the address space.", address); |
| 305 | return ERR_INVALID_ADDRESS_STATE; | 289 | return ERR_INVALID_ADDRESS_STATE; |
| @@ -918,6 +902,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) | |||
| 918 | } | 902 | } |
| 919 | 903 | ||
| 920 | const auto& system = Core::System::GetInstance(); | 904 | const auto& system = Core::System::GetInstance(); |
| 905 | const auto& core_timing = system.CoreTiming(); | ||
| 921 | const auto& scheduler = system.CurrentScheduler(); | 906 | const auto& scheduler = system.CurrentScheduler(); |
| 922 | const auto* const current_thread = scheduler.GetCurrentThread(); | 907 | const auto* const current_thread = scheduler.GetCurrentThread(); |
| 923 | const bool same_thread = current_thread == thread; | 908 | const bool same_thread = current_thread == thread; |
| @@ -927,9 +912,9 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) | |||
| 927 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { | 912 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { |
| 928 | const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks(); | 913 | const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks(); |
| 929 | 914 | ||
| 930 | out_ticks = thread_ticks + (CoreTiming::GetTicks() - prev_ctx_ticks); | 915 | out_ticks = thread_ticks + (core_timing.GetTicks() - prev_ctx_ticks); |
| 931 | } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { | 916 | } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { |
| 932 | out_ticks = CoreTiming::GetTicks() - prev_ctx_ticks; | 917 | out_ticks = core_timing.GetTicks() - prev_ctx_ticks; |
| 933 | } | 918 | } |
| 934 | 919 | ||
| 935 | *result = out_ticks; | 920 | *result = out_ticks; |
| @@ -1299,10 +1284,14 @@ static ResultCode StartThread(Handle thread_handle) { | |||
| 1299 | 1284 | ||
| 1300 | /// Called when a thread exits | 1285 | /// Called when a thread exits |
| 1301 | static void ExitThread() { | 1286 | static void ExitThread() { |
| 1302 | LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC()); | 1287 | auto& system = Core::System::GetInstance(); |
| 1303 | 1288 | ||
| 1304 | ExitCurrentThread(); | 1289 | LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); |
| 1305 | Core::System::GetInstance().PrepareReschedule(); | 1290 | |
| 1291 | auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); | ||
| 1292 | current_thread->Stop(); | ||
| 1293 | system.CurrentScheduler().RemoveThread(current_thread); | ||
| 1294 | system.PrepareReschedule(); | ||
| 1306 | } | 1295 | } |
| 1307 | 1296 | ||
| 1308 | /// Sleep the current thread | 1297 | /// Sleep the current thread |
| @@ -1315,32 +1304,32 @@ static void SleepThread(s64 nanoseconds) { | |||
| 1315 | YieldAndWaitForLoadBalancing = -2, | 1304 | YieldAndWaitForLoadBalancing = -2, |
| 1316 | }; | 1305 | }; |
| 1317 | 1306 | ||
| 1307 | auto& system = Core::System::GetInstance(); | ||
| 1308 | auto& scheduler = system.CurrentScheduler(); | ||
| 1309 | auto* const current_thread = scheduler.GetCurrentThread(); | ||
| 1310 | |||
| 1318 | if (nanoseconds <= 0) { | 1311 | if (nanoseconds <= 0) { |
| 1319 | auto& scheduler{Core::System::GetInstance().CurrentScheduler()}; | ||
| 1320 | switch (static_cast<SleepType>(nanoseconds)) { | 1312 | switch (static_cast<SleepType>(nanoseconds)) { |
| 1321 | case SleepType::YieldWithoutLoadBalancing: | 1313 | case SleepType::YieldWithoutLoadBalancing: |
| 1322 | scheduler.YieldWithoutLoadBalancing(GetCurrentThread()); | 1314 | scheduler.YieldWithoutLoadBalancing(current_thread); |
| 1323 | break; | 1315 | break; |
| 1324 | case SleepType::YieldWithLoadBalancing: | 1316 | case SleepType::YieldWithLoadBalancing: |
| 1325 | scheduler.YieldWithLoadBalancing(GetCurrentThread()); | 1317 | scheduler.YieldWithLoadBalancing(current_thread); |
| 1326 | break; | 1318 | break; |
| 1327 | case SleepType::YieldAndWaitForLoadBalancing: | 1319 | case SleepType::YieldAndWaitForLoadBalancing: |
| 1328 | scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread()); | 1320 | scheduler.YieldAndWaitForLoadBalancing(current_thread); |
| 1329 | break; | 1321 | break; |
| 1330 | default: | 1322 | default: |
| 1331 | UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); | 1323 | UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); |
| 1332 | } | 1324 | } |
| 1333 | } else { | 1325 | } else { |
| 1334 | // Sleep current thread and check for next thread to schedule | 1326 | current_thread->Sleep(nanoseconds); |
| 1335 | WaitCurrentThread_Sleep(); | ||
| 1336 | |||
| 1337 | // Create an event to wake the thread up after the specified nanosecond delay has passed | ||
| 1338 | GetCurrentThread()->WakeAfterDelay(nanoseconds); | ||
| 1339 | } | 1327 | } |
| 1340 | 1328 | ||
| 1341 | // Reschedule all CPU cores | 1329 | // Reschedule all CPU cores |
| 1342 | for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) | 1330 | for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) { |
| 1343 | Core::System::GetInstance().CpuCore(i).PrepareReschedule(); | 1331 | system.CpuCore(i).PrepareReschedule(); |
| 1332 | } | ||
| 1344 | } | 1333 | } |
| 1345 | 1334 | ||
| 1346 | /// Wait process wide key atomic | 1335 | /// Wait process wide key atomic |
| @@ -1494,20 +1483,10 @@ static ResultCode WaitForAddress(VAddr address, u32 type, s32 value, s64 timeout | |||
| 1494 | return ERR_INVALID_ADDRESS; | 1483 | return ERR_INVALID_ADDRESS; |
| 1495 | } | 1484 | } |
| 1496 | 1485 | ||
| 1497 | switch (static_cast<AddressArbiter::ArbitrationType>(type)) { | 1486 | const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); |
| 1498 | case AddressArbiter::ArbitrationType::WaitIfLessThan: | 1487 | auto& address_arbiter = |
| 1499 | return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, false); | 1488 | Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter(); |
| 1500 | case AddressArbiter::ArbitrationType::DecrementAndWaitIfLessThan: | 1489 | return address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); |
| 1501 | return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, true); | ||
| 1502 | case AddressArbiter::ArbitrationType::WaitIfEqual: | ||
| 1503 | return AddressArbiter::WaitForAddressIfEqual(address, value, timeout); | ||
| 1504 | default: | ||
| 1505 | LOG_ERROR(Kernel_SVC, | ||
| 1506 | "Invalid arbitration type, expected WaitIfLessThan, DecrementAndWaitIfLessThan " | ||
| 1507 | "or WaitIfEqual but got {}", | ||
| 1508 | type); | ||
| 1509 | return ERR_INVALID_ENUM_VALUE; | ||
| 1510 | } | ||
| 1511 | } | 1490 | } |
| 1512 | 1491 | ||
| 1513 | // Signals to an address (via Address Arbiter) | 1492 | // Signals to an address (via Address Arbiter) |
| @@ -1525,31 +1504,21 @@ static ResultCode SignalToAddress(VAddr address, u32 type, s32 value, s32 num_to | |||
| 1525 | return ERR_INVALID_ADDRESS; | 1504 | return ERR_INVALID_ADDRESS; |
| 1526 | } | 1505 | } |
| 1527 | 1506 | ||
| 1528 | switch (static_cast<AddressArbiter::SignalType>(type)) { | 1507 | const auto signal_type = static_cast<AddressArbiter::SignalType>(type); |
| 1529 | case AddressArbiter::SignalType::Signal: | 1508 | auto& address_arbiter = |
| 1530 | return AddressArbiter::SignalToAddress(address, num_to_wake); | 1509 | Core::System::GetInstance().Kernel().CurrentProcess()->GetAddressArbiter(); |
| 1531 | case AddressArbiter::SignalType::IncrementAndSignalIfEqual: | 1510 | return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); |
| 1532 | return AddressArbiter::IncrementAndSignalToAddressIfEqual(address, value, num_to_wake); | ||
| 1533 | case AddressArbiter::SignalType::ModifyByWaitingCountAndSignalIfEqual: | ||
| 1534 | return AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, | ||
| 1535 | num_to_wake); | ||
| 1536 | default: | ||
| 1537 | LOG_ERROR(Kernel_SVC, | ||
| 1538 | "Invalid signal type, expected Signal, IncrementAndSignalIfEqual " | ||
| 1539 | "or ModifyByWaitingCountAndSignalIfEqual but got {}", | ||
| 1540 | type); | ||
| 1541 | return ERR_INVALID_ENUM_VALUE; | ||
| 1542 | } | ||
| 1543 | } | 1511 | } |
| 1544 | 1512 | ||
| 1545 | /// This returns the total CPU ticks elapsed since the CPU was powered-on | 1513 | /// This returns the total CPU ticks elapsed since the CPU was powered-on |
| 1546 | static u64 GetSystemTick() { | 1514 | static u64 GetSystemTick() { |
| 1547 | LOG_TRACE(Kernel_SVC, "called"); | 1515 | LOG_TRACE(Kernel_SVC, "called"); |
| 1548 | 1516 | ||
| 1549 | const u64 result{CoreTiming::GetTicks()}; | 1517 | auto& core_timing = Core::System::GetInstance().CoreTiming(); |
| 1518 | const u64 result{core_timing.GetTicks()}; | ||
| 1550 | 1519 | ||
| 1551 | // Advance time to defeat dumb games that busy-wait for the frame to end. | 1520 | // Advance time to defeat dumb games that busy-wait for the frame to end. |
| 1552 | CoreTiming::AddTicks(400); | 1521 | core_timing.AddTicks(400); |
| 1553 | 1522 | ||
| 1554 | return result; | 1523 | return result; |
| 1555 | } | 1524 | } |
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index d3984dfc4..3b22e8e0d 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -7,8 +7,6 @@ | |||
| 7 | #include <optional> | 7 | #include <optional> |
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | 9 | ||
| 10 | #include <boost/range/algorithm_ext/erase.hpp> | ||
| 11 | |||
| 12 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 13 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 14 | #include "common/logging/log.h" | 12 | #include "common/logging/log.h" |
| @@ -43,7 +41,8 @@ Thread::~Thread() = default; | |||
| 43 | 41 | ||
| 44 | void Thread::Stop() { | 42 | void Thread::Stop() { |
| 45 | // Cancel any outstanding wakeup events for this thread | 43 | // Cancel any outstanding wakeup events for this thread |
| 46 | CoreTiming::UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), callback_handle); | 44 | Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), |
| 45 | callback_handle); | ||
| 47 | kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle); | 46 | kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle); |
| 48 | callback_handle = 0; | 47 | callback_handle = 0; |
| 49 | 48 | ||
| @@ -67,17 +66,6 @@ void Thread::Stop() { | |||
| 67 | owner_process->FreeTLSSlot(tls_address); | 66 | owner_process->FreeTLSSlot(tls_address); |
| 68 | } | 67 | } |
| 69 | 68 | ||
| 70 | void WaitCurrentThread_Sleep() { | ||
| 71 | Thread* thread = GetCurrentThread(); | ||
| 72 | thread->SetStatus(ThreadStatus::WaitSleep); | ||
| 73 | } | ||
| 74 | |||
| 75 | void ExitCurrentThread() { | ||
| 76 | Thread* thread = GetCurrentThread(); | ||
| 77 | thread->Stop(); | ||
| 78 | Core::System::GetInstance().CurrentScheduler().RemoveThread(thread); | ||
| 79 | } | ||
| 80 | |||
| 81 | void Thread::WakeAfterDelay(s64 nanoseconds) { | 69 | void Thread::WakeAfterDelay(s64 nanoseconds) { |
| 82 | // Don't schedule a wakeup if the thread wants to wait forever | 70 | // Don't schedule a wakeup if the thread wants to wait forever |
| 83 | if (nanoseconds == -1) | 71 | if (nanoseconds == -1) |
| @@ -85,12 +73,14 @@ void Thread::WakeAfterDelay(s64 nanoseconds) { | |||
| 85 | 73 | ||
| 86 | // This function might be called from any thread so we have to be cautious and use the | 74 | // This function might be called from any thread so we have to be cautious and use the |
| 87 | // thread-safe version of ScheduleEvent. | 75 | // thread-safe version of ScheduleEvent. |
| 88 | CoreTiming::ScheduleEventThreadsafe(CoreTiming::nsToCycles(nanoseconds), | 76 | Core::System::GetInstance().CoreTiming().ScheduleEventThreadsafe( |
| 89 | kernel.ThreadWakeupCallbackEventType(), callback_handle); | 77 | Core::Timing::nsToCycles(nanoseconds), kernel.ThreadWakeupCallbackEventType(), |
| 78 | callback_handle); | ||
| 90 | } | 79 | } |
| 91 | 80 | ||
| 92 | void Thread::CancelWakeupTimer() { | 81 | void Thread::CancelWakeupTimer() { |
| 93 | CoreTiming::UnscheduleEventThreadsafe(kernel.ThreadWakeupCallbackEventType(), callback_handle); | 82 | Core::System::GetInstance().CoreTiming().UnscheduleEventThreadsafe( |
| 83 | kernel.ThreadWakeupCallbackEventType(), callback_handle); | ||
| 94 | } | 84 | } |
| 95 | 85 | ||
| 96 | static std::optional<s32> GetNextProcessorId(u64 mask) { | 86 | static std::optional<s32> GetNextProcessorId(u64 mask) { |
| @@ -181,14 +171,13 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name | |||
| 181 | return ERR_INVALID_PROCESSOR_ID; | 171 | return ERR_INVALID_PROCESSOR_ID; |
| 182 | } | 172 | } |
| 183 | 173 | ||
| 184 | // TODO(yuriks): Other checks, returning 0xD9001BEA | ||
| 185 | |||
| 186 | if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) { | 174 | if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) { |
| 187 | LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); | 175 | LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); |
| 188 | // TODO (bunnei): Find the correct error code to use here | 176 | // TODO (bunnei): Find the correct error code to use here |
| 189 | return ResultCode(-1); | 177 | return ResultCode(-1); |
| 190 | } | 178 | } |
| 191 | 179 | ||
| 180 | auto& system = Core::System::GetInstance(); | ||
| 192 | SharedPtr<Thread> thread(new Thread(kernel)); | 181 | SharedPtr<Thread> thread(new Thread(kernel)); |
| 193 | 182 | ||
| 194 | thread->thread_id = kernel.CreateNewThreadID(); | 183 | thread->thread_id = kernel.CreateNewThreadID(); |
| @@ -197,7 +186,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name | |||
| 197 | thread->stack_top = stack_top; | 186 | thread->stack_top = stack_top; |
| 198 | thread->tpidr_el0 = 0; | 187 | thread->tpidr_el0 = 0; |
| 199 | thread->nominal_priority = thread->current_priority = priority; | 188 | thread->nominal_priority = thread->current_priority = priority; |
| 200 | thread->last_running_ticks = CoreTiming::GetTicks(); | 189 | thread->last_running_ticks = system.CoreTiming().GetTicks(); |
| 201 | thread->processor_id = processor_id; | 190 | thread->processor_id = processor_id; |
| 202 | thread->ideal_core = processor_id; | 191 | thread->ideal_core = processor_id; |
| 203 | thread->affinity_mask = 1ULL << processor_id; | 192 | thread->affinity_mask = 1ULL << processor_id; |
| @@ -208,7 +197,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name | |||
| 208 | thread->name = std::move(name); | 197 | thread->name = std::move(name); |
| 209 | thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); | 198 | thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); |
| 210 | thread->owner_process = &owner_process; | 199 | thread->owner_process = &owner_process; |
| 211 | thread->scheduler = &Core::System::GetInstance().Scheduler(processor_id); | 200 | thread->scheduler = &system.Scheduler(processor_id); |
| 212 | thread->scheduler->AddThread(thread, priority); | 201 | thread->scheduler->AddThread(thread, priority); |
| 213 | thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); | 202 | thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); |
| 214 | 203 | ||
| @@ -257,7 +246,7 @@ void Thread::SetStatus(ThreadStatus new_status) { | |||
| 257 | } | 246 | } |
| 258 | 247 | ||
| 259 | if (status == ThreadStatus::Running) { | 248 | if (status == ThreadStatus::Running) { |
| 260 | last_running_ticks = CoreTiming::GetTicks(); | 249 | last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); |
| 261 | } | 250 | } |
| 262 | 251 | ||
| 263 | status = new_status; | 252 | status = new_status; |
| @@ -267,8 +256,8 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { | |||
| 267 | if (thread->lock_owner == this) { | 256 | if (thread->lock_owner == this) { |
| 268 | // If the thread is already waiting for this thread to release the mutex, ensure that the | 257 | // If the thread is already waiting for this thread to release the mutex, ensure that the |
| 269 | // waiters list is consistent and return without doing anything. | 258 | // waiters list is consistent and return without doing anything. |
| 270 | auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 259 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); |
| 271 | ASSERT(itr != wait_mutex_threads.end()); | 260 | ASSERT(iter != wait_mutex_threads.end()); |
| 272 | return; | 261 | return; |
| 273 | } | 262 | } |
| 274 | 263 | ||
| @@ -276,11 +265,16 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { | |||
| 276 | ASSERT(thread->lock_owner == nullptr); | 265 | ASSERT(thread->lock_owner == nullptr); |
| 277 | 266 | ||
| 278 | // Ensure that the thread is not already in the list of mutex waiters | 267 | // Ensure that the thread is not already in the list of mutex waiters |
| 279 | auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 268 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); |
| 280 | ASSERT(itr == wait_mutex_threads.end()); | 269 | ASSERT(iter == wait_mutex_threads.end()); |
| 281 | 270 | ||
| 271 | // Keep the list in an ordered fashion | ||
| 272 | const auto insertion_point = std::find_if( | ||
| 273 | wait_mutex_threads.begin(), wait_mutex_threads.end(), | ||
| 274 | [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); }); | ||
| 275 | wait_mutex_threads.insert(insertion_point, thread); | ||
| 282 | thread->lock_owner = this; | 276 | thread->lock_owner = this; |
| 283 | wait_mutex_threads.emplace_back(std::move(thread)); | 277 | |
| 284 | UpdatePriority(); | 278 | UpdatePriority(); |
| 285 | } | 279 | } |
| 286 | 280 | ||
| @@ -288,32 +282,44 @@ void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) { | |||
| 288 | ASSERT(thread->lock_owner == this); | 282 | ASSERT(thread->lock_owner == this); |
| 289 | 283 | ||
| 290 | // Ensure that the thread is in the list of mutex waiters | 284 | // Ensure that the thread is in the list of mutex waiters |
| 291 | auto itr = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 285 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); |
| 292 | ASSERT(itr != wait_mutex_threads.end()); | 286 | ASSERT(iter != wait_mutex_threads.end()); |
| 287 | |||
| 288 | wait_mutex_threads.erase(iter); | ||
| 293 | 289 | ||
| 294 | boost::remove_erase(wait_mutex_threads, thread); | ||
| 295 | thread->lock_owner = nullptr; | 290 | thread->lock_owner = nullptr; |
| 296 | UpdatePriority(); | 291 | UpdatePriority(); |
| 297 | } | 292 | } |
| 298 | 293 | ||
| 299 | void Thread::UpdatePriority() { | 294 | void Thread::UpdatePriority() { |
| 300 | // Find the highest priority among all the threads that are waiting for this thread's lock | 295 | // If any of the threads waiting on the mutex have a higher priority |
| 296 | // (taking into account priority inheritance), then this thread inherits | ||
| 297 | // that thread's priority. | ||
| 301 | u32 new_priority = nominal_priority; | 298 | u32 new_priority = nominal_priority; |
| 302 | for (const auto& thread : wait_mutex_threads) { | 299 | if (!wait_mutex_threads.empty()) { |
| 303 | if (thread->nominal_priority < new_priority) | 300 | if (wait_mutex_threads.front()->current_priority < new_priority) { |
| 304 | new_priority = thread->nominal_priority; | 301 | new_priority = wait_mutex_threads.front()->current_priority; |
| 302 | } | ||
| 305 | } | 303 | } |
| 306 | 304 | ||
| 307 | if (new_priority == current_priority) | 305 | if (new_priority == current_priority) { |
| 308 | return; | 306 | return; |
| 307 | } | ||
| 309 | 308 | ||
| 310 | scheduler->SetThreadPriority(this, new_priority); | 309 | scheduler->SetThreadPriority(this, new_priority); |
| 311 | |||
| 312 | current_priority = new_priority; | 310 | current_priority = new_priority; |
| 313 | 311 | ||
| 312 | if (!lock_owner) { | ||
| 313 | return; | ||
| 314 | } | ||
| 315 | |||
| 316 | // Ensure that the thread is within the correct location in the waiting list. | ||
| 317 | auto old_owner = lock_owner; | ||
| 318 | lock_owner->RemoveMutexWaiter(this); | ||
| 319 | old_owner->AddMutexWaiter(this); | ||
| 320 | |||
| 314 | // Recursively update the priority of the thread that depends on the priority of this one. | 321 | // Recursively update the priority of the thread that depends on the priority of this one. |
| 315 | if (lock_owner) | 322 | lock_owner->UpdatePriority(); |
| 316 | lock_owner->UpdatePriority(); | ||
| 317 | } | 323 | } |
| 318 | 324 | ||
| 319 | void Thread::ChangeCore(u32 core, u64 mask) { | 325 | void Thread::ChangeCore(u32 core, u64 mask) { |
| @@ -389,6 +395,14 @@ void Thread::SetActivity(ThreadActivity value) { | |||
| 389 | } | 395 | } |
| 390 | } | 396 | } |
| 391 | 397 | ||
| 398 | void Thread::Sleep(s64 nanoseconds) { | ||
| 399 | // Sleep current thread and check for next thread to schedule | ||
| 400 | SetStatus(ThreadStatus::WaitSleep); | ||
| 401 | |||
| 402 | // Create an event to wake the thread up after the specified nanosecond delay has passed | ||
| 403 | WakeAfterDelay(nanoseconds); | ||
| 404 | } | ||
| 405 | |||
| 392 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 406 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 393 | 407 | ||
| 394 | /** | 408 | /** |
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index c48b21aba..faad5f391 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -383,6 +383,9 @@ public: | |||
| 383 | 383 | ||
| 384 | void SetActivity(ThreadActivity value); | 384 | void SetActivity(ThreadActivity value); |
| 385 | 385 | ||
| 386 | /// Sleeps this thread for the given amount of nanoseconds. | ||
| 387 | void Sleep(s64 nanoseconds); | ||
| 388 | |||
| 386 | private: | 389 | private: |
| 387 | explicit Thread(KernelCore& kernel); | 390 | explicit Thread(KernelCore& kernel); |
| 388 | ~Thread() override; | 391 | ~Thread() override; |
| @@ -398,8 +401,14 @@ private: | |||
| 398 | VAddr entry_point = 0; | 401 | VAddr entry_point = 0; |
| 399 | VAddr stack_top = 0; | 402 | VAddr stack_top = 0; |
| 400 | 403 | ||
| 401 | u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application | 404 | /// Nominal thread priority, as set by the emulated application. |
| 402 | u32 current_priority = 0; ///< Current thread priority, can be temporarily changed | 405 | /// The nominal priority is the thread priority without priority |
| 406 | /// inheritance taken into account. | ||
| 407 | u32 nominal_priority = 0; | ||
| 408 | |||
| 409 | /// Current thread priority. This may change over the course of the | ||
| 410 | /// thread's lifetime in order to facilitate priority inheritance. | ||
| 411 | u32 current_priority = 0; | ||
| 403 | 412 | ||
| 404 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. | 413 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. |
| 405 | u64 last_running_ticks = 0; ///< CPU tick when thread was last running | 414 | u64 last_running_ticks = 0; ///< CPU tick when thread was last running |
| @@ -460,14 +469,4 @@ private: | |||
| 460 | */ | 469 | */ |
| 461 | Thread* GetCurrentThread(); | 470 | Thread* GetCurrentThread(); |
| 462 | 471 | ||
| 463 | /** | ||
| 464 | * Waits the current thread on a sleep | ||
| 465 | */ | ||
| 466 | void WaitCurrentThread_Sleep(); | ||
| 467 | |||
| 468 | /** | ||
| 469 | * Stops the current thread and removes it from the thread_list | ||
| 470 | */ | ||
| 471 | void ExitCurrentThread(); | ||
| 472 | |||
| 473 | } // namespace Kernel | 472 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 10ad94aa6..3def3e52c 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp | |||
| @@ -7,18 +7,18 @@ | |||
| 7 | #include <utility> | 7 | #include <utility> |
| 8 | #include "common/assert.h" | 8 | #include "common/assert.h" |
| 9 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 10 | #include "common/memory_hook.h" | ||
| 10 | #include "core/arm/arm_interface.h" | 11 | #include "core/arm/arm_interface.h" |
| 11 | #include "core/core.h" | 12 | #include "core/core.h" |
| 12 | #include "core/file_sys/program_metadata.h" | 13 | #include "core/file_sys/program_metadata.h" |
| 13 | #include "core/hle/kernel/errors.h" | 14 | #include "core/hle/kernel/errors.h" |
| 14 | #include "core/hle/kernel/vm_manager.h" | 15 | #include "core/hle/kernel/vm_manager.h" |
| 15 | #include "core/memory.h" | 16 | #include "core/memory.h" |
| 16 | #include "core/memory_hook.h" | ||
| 17 | #include "core/memory_setup.h" | 17 | #include "core/memory_setup.h" |
| 18 | 18 | ||
| 19 | namespace Kernel { | 19 | namespace Kernel { |
| 20 | 20 | namespace { | |
| 21 | static const char* GetMemoryStateName(MemoryState state) { | 21 | const char* GetMemoryStateName(MemoryState state) { |
| 22 | static constexpr const char* names[] = { | 22 | static constexpr const char* names[] = { |
| 23 | "Unmapped", "Io", | 23 | "Unmapped", "Io", |
| 24 | "Normal", "CodeStatic", | 24 | "Normal", "CodeStatic", |
| @@ -35,6 +35,14 @@ static const char* GetMemoryStateName(MemoryState state) { | |||
| 35 | return names[ToSvcMemoryState(state)]; | 35 | return names[ToSvcMemoryState(state)]; |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | // Checks if a given address range lies within a larger address range. | ||
| 39 | constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin, | ||
| 40 | VAddr address_range_end) { | ||
| 41 | const VAddr end_address = address + size - 1; | ||
| 42 | return address_range_begin <= address && end_address <= address_range_end - 1; | ||
| 43 | } | ||
| 44 | } // Anonymous namespace | ||
| 45 | |||
| 38 | bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { | 46 | bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { |
| 39 | ASSERT(base + size == next.base); | 47 | ASSERT(base + size == next.base); |
| 40 | if (permissions != next.permissions || state != next.state || attribute != next.attribute || | 48 | if (permissions != next.permissions || state != next.state || attribute != next.attribute || |
| @@ -169,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const { | |||
| 169 | 177 | ||
| 170 | ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, | 178 | ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, |
| 171 | MemoryState state, | 179 | MemoryState state, |
| 172 | Memory::MemoryHookPointer mmio_handler) { | 180 | Common::MemoryHookPointer mmio_handler) { |
| 173 | // This is the appropriately sized VMA that will turn into our allocation. | 181 | // This is the appropriately sized VMA that will turn into our allocation. |
| 174 | CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); | 182 | CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); |
| 175 | VirtualMemoryArea& final_vma = vma_handle->second; | 183 | VirtualMemoryArea& final_vma = vma_handle->second; |
| @@ -249,8 +257,7 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p | |||
| 249 | } | 257 | } |
| 250 | 258 | ||
| 251 | ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { | 259 | ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { |
| 252 | if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() || | 260 | if (!IsWithinHeapRegion(target, size)) { |
| 253 | target + size < target) { | ||
| 254 | return ERR_INVALID_ADDRESS; | 261 | return ERR_INVALID_ADDRESS; |
| 255 | } | 262 | } |
| 256 | 263 | ||
| @@ -285,8 +292,7 @@ ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission p | |||
| 285 | } | 292 | } |
| 286 | 293 | ||
| 287 | ResultCode VMManager::HeapFree(VAddr target, u64 size) { | 294 | ResultCode VMManager::HeapFree(VAddr target, u64 size) { |
| 288 | if (target < GetHeapRegionBaseAddress() || target + size > GetHeapRegionEndAddress() || | 295 | if (!IsWithinHeapRegion(target, size)) { |
| 289 | target + size < target) { | ||
| 290 | return ERR_INVALID_ADDRESS; | 296 | return ERR_INVALID_ADDRESS; |
| 291 | } | 297 | } |
| 292 | 298 | ||
| @@ -618,7 +624,7 @@ void VMManager::ClearPageTable() { | |||
| 618 | std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); | 624 | std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); |
| 619 | page_table.special_regions.clear(); | 625 | page_table.special_regions.clear(); |
| 620 | std::fill(page_table.attributes.begin(), page_table.attributes.end(), | 626 | std::fill(page_table.attributes.begin(), page_table.attributes.end(), |
| 621 | Memory::PageType::Unmapped); | 627 | Common::PageType::Unmapped); |
| 622 | } | 628 | } |
| 623 | 629 | ||
| 624 | VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask, | 630 | VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask, |
| @@ -706,6 +712,11 @@ u64 VMManager::GetAddressSpaceWidth() const { | |||
| 706 | return address_space_width; | 712 | return address_space_width; |
| 707 | } | 713 | } |
| 708 | 714 | ||
| 715 | bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const { | ||
| 716 | return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(), | ||
| 717 | GetAddressSpaceEndAddress()); | ||
| 718 | } | ||
| 719 | |||
| 709 | VAddr VMManager::GetASLRRegionBaseAddress() const { | 720 | VAddr VMManager::GetASLRRegionBaseAddress() const { |
| 710 | return aslr_region_base; | 721 | return aslr_region_base; |
| 711 | } | 722 | } |
| @@ -750,6 +761,11 @@ u64 VMManager::GetCodeRegionSize() const { | |||
| 750 | return code_region_end - code_region_base; | 761 | return code_region_end - code_region_base; |
| 751 | } | 762 | } |
| 752 | 763 | ||
| 764 | bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const { | ||
| 765 | return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(), | ||
| 766 | GetCodeRegionEndAddress()); | ||
| 767 | } | ||
| 768 | |||
| 753 | VAddr VMManager::GetHeapRegionBaseAddress() const { | 769 | VAddr VMManager::GetHeapRegionBaseAddress() const { |
| 754 | return heap_region_base; | 770 | return heap_region_base; |
| 755 | } | 771 | } |
| @@ -762,6 +778,11 @@ u64 VMManager::GetHeapRegionSize() const { | |||
| 762 | return heap_region_end - heap_region_base; | 778 | return heap_region_end - heap_region_base; |
| 763 | } | 779 | } |
| 764 | 780 | ||
| 781 | bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { | ||
| 782 | return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), | ||
| 783 | GetHeapRegionEndAddress()); | ||
| 784 | } | ||
| 785 | |||
| 765 | VAddr VMManager::GetMapRegionBaseAddress() const { | 786 | VAddr VMManager::GetMapRegionBaseAddress() const { |
| 766 | return map_region_base; | 787 | return map_region_base; |
| 767 | } | 788 | } |
| @@ -774,6 +795,10 @@ u64 VMManager::GetMapRegionSize() const { | |||
| 774 | return map_region_end - map_region_base; | 795 | return map_region_end - map_region_base; |
| 775 | } | 796 | } |
| 776 | 797 | ||
| 798 | bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const { | ||
| 799 | return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress()); | ||
| 800 | } | ||
| 801 | |||
| 777 | VAddr VMManager::GetNewMapRegionBaseAddress() const { | 802 | VAddr VMManager::GetNewMapRegionBaseAddress() const { |
| 778 | return new_map_region_base; | 803 | return new_map_region_base; |
| 779 | } | 804 | } |
| @@ -786,6 +811,11 @@ u64 VMManager::GetNewMapRegionSize() const { | |||
| 786 | return new_map_region_end - new_map_region_base; | 811 | return new_map_region_end - new_map_region_base; |
| 787 | } | 812 | } |
| 788 | 813 | ||
| 814 | bool VMManager::IsWithinNewMapRegion(VAddr address, u64 size) const { | ||
| 815 | return IsInsideAddressRange(address, size, GetNewMapRegionBaseAddress(), | ||
| 816 | GetNewMapRegionEndAddress()); | ||
| 817 | } | ||
| 818 | |||
| 789 | VAddr VMManager::GetTLSIORegionBaseAddress() const { | 819 | VAddr VMManager::GetTLSIORegionBaseAddress() const { |
| 790 | return tls_io_region_base; | 820 | return tls_io_region_base; |
| 791 | } | 821 | } |
| @@ -798,4 +828,9 @@ u64 VMManager::GetTLSIORegionSize() const { | |||
| 798 | return tls_io_region_end - tls_io_region_base; | 828 | return tls_io_region_end - tls_io_region_base; |
| 799 | } | 829 | } |
| 800 | 830 | ||
| 831 | bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const { | ||
| 832 | return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(), | ||
| 833 | GetTLSIORegionEndAddress()); | ||
| 834 | } | ||
| 835 | |||
| 801 | } // namespace Kernel | 836 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 6091533bc..b96980f8f 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h | |||
| @@ -9,9 +9,10 @@ | |||
| 9 | #include <tuple> | 9 | #include <tuple> |
| 10 | #include <vector> | 10 | #include <vector> |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/memory_hook.h" | ||
| 13 | #include "common/page_table.h" | ||
| 12 | #include "core/hle/result.h" | 14 | #include "core/hle/result.h" |
| 13 | #include "core/memory.h" | 15 | #include "core/memory.h" |
| 14 | #include "core/memory_hook.h" | ||
| 15 | 16 | ||
| 16 | namespace FileSys { | 17 | namespace FileSys { |
| 17 | enum class ProgramAddressSpaceType : u8; | 18 | enum class ProgramAddressSpaceType : u8; |
| @@ -290,7 +291,7 @@ struct VirtualMemoryArea { | |||
| 290 | // Settings for type = MMIO | 291 | // Settings for type = MMIO |
| 291 | /// Physical address of the register area this VMA maps to. | 292 | /// Physical address of the register area this VMA maps to. |
| 292 | PAddr paddr = 0; | 293 | PAddr paddr = 0; |
| 293 | Memory::MemoryHookPointer mmio_handler = nullptr; | 294 | Common::MemoryHookPointer mmio_handler = nullptr; |
| 294 | 295 | ||
| 295 | /// Tests if this area can be merged to the right with `next`. | 296 | /// Tests if this area can be merged to the right with `next`. |
| 296 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; | 297 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; |
| @@ -368,7 +369,7 @@ public: | |||
| 368 | * @param mmio_handler The handler that will implement read and write for this MMIO region. | 369 | * @param mmio_handler The handler that will implement read and write for this MMIO region. |
| 369 | */ | 370 | */ |
| 370 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, | 371 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, |
| 371 | Memory::MemoryHookPointer mmio_handler); | 372 | Common::MemoryHookPointer mmio_handler); |
| 372 | 373 | ||
| 373 | /// Unmaps a range of addresses, splitting VMAs as necessary. | 374 | /// Unmaps a range of addresses, splitting VMAs as necessary. |
| 374 | ResultCode UnmapRange(VAddr target, u64 size); | 375 | ResultCode UnmapRange(VAddr target, u64 size); |
| @@ -432,18 +433,21 @@ public: | |||
| 432 | /// Gets the address space width in bits. | 433 | /// Gets the address space width in bits. |
| 433 | u64 GetAddressSpaceWidth() const; | 434 | u64 GetAddressSpaceWidth() const; |
| 434 | 435 | ||
| 436 | /// Determines whether or not the given address range lies within the address space. | ||
| 437 | bool IsWithinAddressSpace(VAddr address, u64 size) const; | ||
| 438 | |||
| 435 | /// Gets the base address of the ASLR region. | 439 | /// Gets the base address of the ASLR region. |
| 436 | VAddr GetASLRRegionBaseAddress() const; | 440 | VAddr GetASLRRegionBaseAddress() const; |
| 437 | 441 | ||
| 438 | /// Gets the end address of the ASLR region. | 442 | /// Gets the end address of the ASLR region. |
| 439 | VAddr GetASLRRegionEndAddress() const; | 443 | VAddr GetASLRRegionEndAddress() const; |
| 440 | 444 | ||
| 441 | /// Determines whether or not the specified address range is within the ASLR region. | ||
| 442 | bool IsWithinASLRRegion(VAddr address, u64 size) const; | ||
| 443 | |||
| 444 | /// Gets the size of the ASLR region | 445 | /// Gets the size of the ASLR region |
| 445 | u64 GetASLRRegionSize() const; | 446 | u64 GetASLRRegionSize() const; |
| 446 | 447 | ||
| 448 | /// Determines whether or not the specified address range is within the ASLR region. | ||
| 449 | bool IsWithinASLRRegion(VAddr address, u64 size) const; | ||
| 450 | |||
| 447 | /// Gets the base address of the code region. | 451 | /// Gets the base address of the code region. |
| 448 | VAddr GetCodeRegionBaseAddress() const; | 452 | VAddr GetCodeRegionBaseAddress() const; |
| 449 | 453 | ||
| @@ -453,6 +457,9 @@ public: | |||
| 453 | /// Gets the total size of the code region in bytes. | 457 | /// Gets the total size of the code region in bytes. |
| 454 | u64 GetCodeRegionSize() const; | 458 | u64 GetCodeRegionSize() const; |
| 455 | 459 | ||
| 460 | /// Determines whether or not the specified range is within the code region. | ||
| 461 | bool IsWithinCodeRegion(VAddr address, u64 size) const; | ||
| 462 | |||
| 456 | /// Gets the base address of the heap region. | 463 | /// Gets the base address of the heap region. |
| 457 | VAddr GetHeapRegionBaseAddress() const; | 464 | VAddr GetHeapRegionBaseAddress() const; |
| 458 | 465 | ||
| @@ -462,6 +469,9 @@ public: | |||
| 462 | /// Gets the total size of the heap region in bytes. | 469 | /// Gets the total size of the heap region in bytes. |
| 463 | u64 GetHeapRegionSize() const; | 470 | u64 GetHeapRegionSize() const; |
| 464 | 471 | ||
| 472 | /// Determines whether or not the specified range is within the heap region. | ||
| 473 | bool IsWithinHeapRegion(VAddr address, u64 size) const; | ||
| 474 | |||
| 465 | /// Gets the base address of the map region. | 475 | /// Gets the base address of the map region. |
| 466 | VAddr GetMapRegionBaseAddress() const; | 476 | VAddr GetMapRegionBaseAddress() const; |
| 467 | 477 | ||
| @@ -471,6 +481,9 @@ public: | |||
| 471 | /// Gets the total size of the map region in bytes. | 481 | /// Gets the total size of the map region in bytes. |
| 472 | u64 GetMapRegionSize() const; | 482 | u64 GetMapRegionSize() const; |
| 473 | 483 | ||
| 484 | /// Determines whether or not the specified range is within the map region. | ||
| 485 | bool IsWithinMapRegion(VAddr address, u64 size) const; | ||
| 486 | |||
| 474 | /// Gets the base address of the new map region. | 487 | /// Gets the base address of the new map region. |
| 475 | VAddr GetNewMapRegionBaseAddress() const; | 488 | VAddr GetNewMapRegionBaseAddress() const; |
| 476 | 489 | ||
| @@ -480,6 +493,9 @@ public: | |||
| 480 | /// Gets the total size of the new map region in bytes. | 493 | /// Gets the total size of the new map region in bytes. |
| 481 | u64 GetNewMapRegionSize() const; | 494 | u64 GetNewMapRegionSize() const; |
| 482 | 495 | ||
| 496 | /// Determines whether or not the given address range is within the new map region | ||
| 497 | bool IsWithinNewMapRegion(VAddr address, u64 size) const; | ||
| 498 | |||
| 483 | /// Gets the base address of the TLS IO region. | 499 | /// Gets the base address of the TLS IO region. |
| 484 | VAddr GetTLSIORegionBaseAddress() const; | 500 | VAddr GetTLSIORegionBaseAddress() const; |
| 485 | 501 | ||
| @@ -489,9 +505,12 @@ public: | |||
| 489 | /// Gets the total size of the TLS IO region in bytes. | 505 | /// Gets the total size of the TLS IO region in bytes. |
| 490 | u64 GetTLSIORegionSize() const; | 506 | u64 GetTLSIORegionSize() const; |
| 491 | 507 | ||
| 508 | /// Determines if the given address range is within the TLS IO region. | ||
| 509 | bool IsWithinTLSIORegion(VAddr address, u64 size) const; | ||
| 510 | |||
| 492 | /// Each VMManager has its own page table, which is set as the main one when the owning process | 511 | /// Each VMManager has its own page table, which is set as the main one when the owning process |
| 493 | /// is scheduled. | 512 | /// is scheduled. |
| 494 | Memory::PageTable page_table; | 513 | Common::PageTable page_table{Memory::PAGE_BITS}; |
| 495 | 514 | ||
| 496 | private: | 515 | private: |
| 497 | using VMAIter = VMAMap::iterator; | 516 | using VMAIter = VMAMap::iterator; |
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index bfb77cc31..ab84f5ddc 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -8,20 +8,11 @@ | |||
| 8 | #include <utility> | 8 | #include <utility> |
| 9 | #include "common/assert.h" | 9 | #include "common/assert.h" |
| 10 | #include "common/bit_field.h" | 10 | #include "common/bit_field.h" |
| 11 | #include "common/common_funcs.h" | ||
| 12 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 13 | 12 | ||
| 14 | // All the constants in this file come from http://switchbrew.org/index.php?title=Error_codes | 13 | // All the constants in this file come from http://switchbrew.org/index.php?title=Error_codes |
| 15 | 14 | ||
| 16 | /** | 15 | /** |
| 17 | * Detailed description of the error. Code 0 always means success. | ||
| 18 | */ | ||
| 19 | enum class ErrorDescription : u32 { | ||
| 20 | Success = 0, | ||
| 21 | RemoteProcessDead = 301, | ||
| 22 | }; | ||
| 23 | |||
| 24 | /** | ||
| 25 | * Identifies the module which caused the error. Error codes can be propagated through a call | 16 | * Identifies the module which caused the error. Error codes can be propagated through a call |
| 26 | * chain, meaning that this doesn't always correspond to the module where the API call made is | 17 | * chain, meaning that this doesn't always correspond to the module where the API call made is |
| 27 | * contained. | 18 | * contained. |
| @@ -121,7 +112,7 @@ enum class ErrorModule : u32 { | |||
| 121 | ShopN = 811, | 112 | ShopN = 811, |
| 122 | }; | 113 | }; |
| 123 | 114 | ||
| 124 | /// Encapsulates a CTR-OS error code, allowing it to be separated into its constituent fields. | 115 | /// Encapsulates a Horizon OS error code, allowing it to be separated into its constituent fields. |
| 125 | union ResultCode { | 116 | union ResultCode { |
| 126 | u32 raw; | 117 | u32 raw; |
| 127 | 118 | ||
| @@ -134,17 +125,9 @@ union ResultCode { | |||
| 134 | 125 | ||
| 135 | constexpr explicit ResultCode(u32 raw) : raw(raw) {} | 126 | constexpr explicit ResultCode(u32 raw) : raw(raw) {} |
| 136 | 127 | ||
| 137 | constexpr ResultCode(ErrorModule module, ErrorDescription description) | ||
| 138 | : ResultCode(module, static_cast<u32>(description)) {} | ||
| 139 | |||
| 140 | constexpr ResultCode(ErrorModule module_, u32 description_) | 128 | constexpr ResultCode(ErrorModule module_, u32 description_) |
| 141 | : raw(module.FormatValue(module_) | description.FormatValue(description_)) {} | 129 | : raw(module.FormatValue(module_) | description.FormatValue(description_)) {} |
| 142 | 130 | ||
| 143 | constexpr ResultCode& operator=(const ResultCode& o) { | ||
| 144 | raw = o.raw; | ||
| 145 | return *this; | ||
| 146 | } | ||
| 147 | |||
| 148 | constexpr bool IsSuccess() const { | 131 | constexpr bool IsSuccess() const { |
| 149 | return raw == 0; | 132 | return raw == 0; |
| 150 | } | 133 | } |
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index d1cbe0e44..c750d70ac 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp | |||
| @@ -2,10 +2,10 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | ||
| 5 | #include <array> | 6 | #include <array> |
| 6 | #include <cinttypes> | 7 | #include <cinttypes> |
| 7 | #include <cstring> | 8 | #include <cstring> |
| 8 | #include <stack> | ||
| 9 | #include "audio_core/audio_renderer.h" | 9 | #include "audio_core/audio_renderer.h" |
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/file_sys/savedata_factory.h" | 11 | #include "core/file_sys/savedata_factory.h" |
| @@ -93,38 +93,84 @@ void IWindowController::AcquireForegroundRights(Kernel::HLERequestContext& ctx) | |||
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | IAudioController::IAudioController() : ServiceFramework("IAudioController") { | 95 | IAudioController::IAudioController() : ServiceFramework("IAudioController") { |
| 96 | // clang-format off | ||
| 96 | static const FunctionInfo functions[] = { | 97 | static const FunctionInfo functions[] = { |
| 97 | {0, &IAudioController::SetExpectedMasterVolume, "SetExpectedMasterVolume"}, | 98 | {0, &IAudioController::SetExpectedMasterVolume, "SetExpectedMasterVolume"}, |
| 98 | {1, &IAudioController::GetMainAppletExpectedMasterVolume, | 99 | {1, &IAudioController::GetMainAppletExpectedMasterVolume, "GetMainAppletExpectedMasterVolume"}, |
| 99 | "GetMainAppletExpectedMasterVolume"}, | 100 | {2, &IAudioController::GetLibraryAppletExpectedMasterVolume, "GetLibraryAppletExpectedMasterVolume"}, |
| 100 | {2, &IAudioController::GetLibraryAppletExpectedMasterVolume, | 101 | {3, &IAudioController::ChangeMainAppletMasterVolume, "ChangeMainAppletMasterVolume"}, |
| 101 | "GetLibraryAppletExpectedMasterVolume"}, | 102 | {4, &IAudioController::SetTransparentAudioRate, "SetTransparentVolumeRate"}, |
| 102 | {3, nullptr, "ChangeMainAppletMasterVolume"}, | ||
| 103 | {4, nullptr, "SetTransparentVolumeRate"}, | ||
| 104 | }; | 103 | }; |
| 104 | // clang-format on | ||
| 105 | |||
| 105 | RegisterHandlers(functions); | 106 | RegisterHandlers(functions); |
| 106 | } | 107 | } |
| 107 | 108 | ||
| 108 | IAudioController::~IAudioController() = default; | 109 | IAudioController::~IAudioController() = default; |
| 109 | 110 | ||
| 110 | void IAudioController::SetExpectedMasterVolume(Kernel::HLERequestContext& ctx) { | 111 | void IAudioController::SetExpectedMasterVolume(Kernel::HLERequestContext& ctx) { |
| 111 | LOG_WARNING(Service_AM, "(STUBBED) called"); | 112 | IPC::RequestParser rp{ctx}; |
| 113 | const float main_applet_volume_tmp = rp.Pop<float>(); | ||
| 114 | const float library_applet_volume_tmp = rp.Pop<float>(); | ||
| 115 | |||
| 116 | LOG_DEBUG(Service_AM, "called. main_applet_volume={}, library_applet_volume={}", | ||
| 117 | main_applet_volume_tmp, library_applet_volume_tmp); | ||
| 118 | |||
| 119 | // Ensure the volume values remain within the 0-100% range | ||
| 120 | main_applet_volume = std::clamp(main_applet_volume_tmp, min_allowed_volume, max_allowed_volume); | ||
| 121 | library_applet_volume = | ||
| 122 | std::clamp(library_applet_volume_tmp, min_allowed_volume, max_allowed_volume); | ||
| 123 | |||
| 112 | IPC::ResponseBuilder rb{ctx, 2}; | 124 | IPC::ResponseBuilder rb{ctx, 2}; |
| 113 | rb.Push(RESULT_SUCCESS); | 125 | rb.Push(RESULT_SUCCESS); |
| 114 | } | 126 | } |
| 115 | 127 | ||
| 116 | void IAudioController::GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { | 128 | void IAudioController::GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { |
| 117 | LOG_WARNING(Service_AM, "(STUBBED) called"); | 129 | LOG_DEBUG(Service_AM, "called. main_applet_volume={}", main_applet_volume); |
| 118 | IPC::ResponseBuilder rb{ctx, 3}; | 130 | IPC::ResponseBuilder rb{ctx, 3}; |
| 119 | rb.Push(RESULT_SUCCESS); | 131 | rb.Push(RESULT_SUCCESS); |
| 120 | rb.Push(volume); | 132 | rb.Push(main_applet_volume); |
| 121 | } | 133 | } |
| 122 | 134 | ||
| 123 | void IAudioController::GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { | 135 | void IAudioController::GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx) { |
| 124 | LOG_WARNING(Service_AM, "(STUBBED) called"); | 136 | LOG_DEBUG(Service_AM, "called. library_applet_volume={}", library_applet_volume); |
| 125 | IPC::ResponseBuilder rb{ctx, 3}; | 137 | IPC::ResponseBuilder rb{ctx, 3}; |
| 126 | rb.Push(RESULT_SUCCESS); | 138 | rb.Push(RESULT_SUCCESS); |
| 127 | rb.Push(volume); | 139 | rb.Push(library_applet_volume); |
| 140 | } | ||
| 141 | |||
| 142 | void IAudioController::ChangeMainAppletMasterVolume(Kernel::HLERequestContext& ctx) { | ||
| 143 | struct Parameters { | ||
| 144 | float volume; | ||
| 145 | s64 fade_time_ns; | ||
| 146 | }; | ||
| 147 | static_assert(sizeof(Parameters) == 16); | ||
| 148 | |||
| 149 | IPC::RequestParser rp{ctx}; | ||
| 150 | const auto parameters = rp.PopRaw<Parameters>(); | ||
| 151 | |||
| 152 | LOG_DEBUG(Service_AM, "called. volume={}, fade_time_ns={}", parameters.volume, | ||
| 153 | parameters.fade_time_ns); | ||
| 154 | |||
| 155 | main_applet_volume = std::clamp(parameters.volume, min_allowed_volume, max_allowed_volume); | ||
| 156 | fade_time_ns = std::chrono::nanoseconds{parameters.fade_time_ns}; | ||
| 157 | |||
| 158 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 159 | rb.Push(RESULT_SUCCESS); | ||
| 160 | } | ||
| 161 | |||
| 162 | void IAudioController::SetTransparentAudioRate(Kernel::HLERequestContext& ctx) { | ||
| 163 | IPC::RequestParser rp{ctx}; | ||
| 164 | const float transparent_volume_rate_tmp = rp.Pop<float>(); | ||
| 165 | |||
| 166 | LOG_DEBUG(Service_AM, "called. transparent_volume_rate={}", transparent_volume_rate_tmp); | ||
| 167 | |||
| 168 | // Clamp volume range to 0-100%. | ||
| 169 | transparent_volume_rate = | ||
| 170 | std::clamp(transparent_volume_rate_tmp, min_allowed_volume, max_allowed_volume); | ||
| 171 | |||
| 172 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 173 | rb.Push(RESULT_SUCCESS); | ||
| 128 | } | 174 | } |
| 129 | 175 | ||
| 130 | IDisplayController::IDisplayController() : ServiceFramework("IDisplayController") { | 176 | IDisplayController::IDisplayController() : ServiceFramework("IDisplayController") { |
| @@ -322,14 +368,15 @@ void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& c | |||
| 322 | 368 | ||
| 323 | void ISelfController::CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx) { | 369 | void ISelfController::CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx) { |
| 324 | LOG_WARNING(Service_AM, "(STUBBED) called"); | 370 | LOG_WARNING(Service_AM, "(STUBBED) called"); |
| 371 | |||
| 325 | // TODO(Subv): Find out how AM determines the display to use, for now just | 372 | // TODO(Subv): Find out how AM determines the display to use, for now just |
| 326 | // create the layer in the Default display. | 373 | // create the layer in the Default display. |
| 327 | u64 display_id = nvflinger->OpenDisplay("Default"); | 374 | const auto display_id = nvflinger->OpenDisplay("Default"); |
| 328 | u64 layer_id = nvflinger->CreateLayer(display_id); | 375 | const auto layer_id = nvflinger->CreateLayer(*display_id); |
| 329 | 376 | ||
| 330 | IPC::ResponseBuilder rb{ctx, 4}; | 377 | IPC::ResponseBuilder rb{ctx, 4}; |
| 331 | rb.Push(RESULT_SUCCESS); | 378 | rb.Push(RESULT_SUCCESS); |
| 332 | rb.Push(layer_id); | 379 | rb.Push(*layer_id); |
| 333 | } | 380 | } |
| 334 | 381 | ||
| 335 | void ISelfController::SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx) { | 382 | void ISelfController::SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx) { |
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h index b6113cfdd..565dd8e9e 100644 --- a/src/core/hle/service/am/am.h +++ b/src/core/hle/service/am/am.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <chrono> | ||
| 7 | #include <memory> | 8 | #include <memory> |
| 8 | #include <queue> | 9 | #include <queue> |
| 9 | #include "core/hle/kernel/writable_event.h" | 10 | #include "core/hle/kernel/writable_event.h" |
| @@ -81,8 +82,21 @@ private: | |||
| 81 | void SetExpectedMasterVolume(Kernel::HLERequestContext& ctx); | 82 | void SetExpectedMasterVolume(Kernel::HLERequestContext& ctx); |
| 82 | void GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); | 83 | void GetMainAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); |
| 83 | void GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); | 84 | void GetLibraryAppletExpectedMasterVolume(Kernel::HLERequestContext& ctx); |
| 85 | void ChangeMainAppletMasterVolume(Kernel::HLERequestContext& ctx); | ||
| 86 | void SetTransparentAudioRate(Kernel::HLERequestContext& ctx); | ||
| 84 | 87 | ||
| 85 | u32 volume{100}; | 88 | static constexpr float min_allowed_volume = 0.0f; |
| 89 | static constexpr float max_allowed_volume = 1.0f; | ||
| 90 | |||
| 91 | float main_applet_volume{0.25f}; | ||
| 92 | float library_applet_volume{max_allowed_volume}; | ||
| 93 | float transparent_volume_rate{min_allowed_volume}; | ||
| 94 | |||
| 95 | // Volume transition fade time in nanoseconds. | ||
| 96 | // e.g. If the main applet volume was 0% and was changed to 50% | ||
| 97 | // with a fade of 50ns, then over the course of 50ns, | ||
| 98 | // the volume will gradually fade up to 50% | ||
| 99 | std::chrono::nanoseconds fade_time_ns{0}; | ||
| 86 | }; | 100 | }; |
| 87 | 101 | ||
| 88 | class IDisplayController final : public ServiceFramework<IDisplayController> { | 102 | class IDisplayController final : public ServiceFramework<IDisplayController> { |
diff --git a/src/core/hle/service/am/applets/software_keyboard.cpp b/src/core/hle/service/am/applets/software_keyboard.cpp index f255f74b5..8c5bd6059 100644 --- a/src/core/hle/service/am/applets/software_keyboard.cpp +++ b/src/core/hle/service/am/applets/software_keyboard.cpp | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include "common/string_util.h" | 7 | #include "common/string_util.h" |
| 8 | #include "core/core.h" | 8 | #include "core/core.h" |
| 9 | #include "core/frontend/applets/software_keyboard.h" | 9 | #include "core/frontend/applets/software_keyboard.h" |
| 10 | #include "core/hle/result.h" | ||
| 10 | #include "core/hle/service/am/am.h" | 11 | #include "core/hle/service/am/am.h" |
| 11 | #include "core/hle/service/am/applets/software_keyboard.h" | 12 | #include "core/hle/service/am/applets/software_keyboard.h" |
| 12 | 13 | ||
diff --git a/src/core/hle/service/am/applets/software_keyboard.h b/src/core/hle/service/am/applets/software_keyboard.h index efd5753a1..b93a30d28 100644 --- a/src/core/hle/service/am/applets/software_keyboard.h +++ b/src/core/hle/service/am/applets/software_keyboard.h | |||
| @@ -9,10 +9,13 @@ | |||
| 9 | #include <vector> | 9 | #include <vector> |
| 10 | 10 | ||
| 11 | #include "common/common_funcs.h" | 11 | #include "common/common_funcs.h" |
| 12 | #include "common/common_types.h" | ||
| 12 | #include "common/swap.h" | 13 | #include "common/swap.h" |
| 13 | #include "core/hle/service/am/am.h" | 14 | #include "core/hle/service/am/am.h" |
| 14 | #include "core/hle/service/am/applets/applets.h" | 15 | #include "core/hle/service/am/applets/applets.h" |
| 15 | 16 | ||
| 17 | union ResultCode; | ||
| 18 | |||
| 16 | namespace Service::AM::Applets { | 19 | namespace Service::AM::Applets { |
| 17 | 20 | ||
| 18 | enum class KeysetDisable : u32 { | 21 | enum class KeysetDisable : u32 { |
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index dc6a6b188..21f5e64c7 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp | |||
| @@ -18,17 +18,11 @@ | |||
| 18 | #include "core/hle/kernel/readable_event.h" | 18 | #include "core/hle/kernel/readable_event.h" |
| 19 | #include "core/hle/kernel/writable_event.h" | 19 | #include "core/hle/kernel/writable_event.h" |
| 20 | #include "core/hle/service/audio/audout_u.h" | 20 | #include "core/hle/service/audio/audout_u.h" |
| 21 | #include "core/hle/service/audio/errors.h" | ||
| 21 | #include "core/memory.h" | 22 | #include "core/memory.h" |
| 22 | 23 | ||
| 23 | namespace Service::Audio { | 24 | namespace Service::Audio { |
| 24 | 25 | ||
| 25 | namespace ErrCodes { | ||
| 26 | enum { | ||
| 27 | ErrorUnknown = 2, | ||
| 28 | BufferCountExceeded = 8, | ||
| 29 | }; | ||
| 30 | } | ||
| 31 | |||
| 32 | constexpr std::array<char, 10> DefaultDevice{{"DeviceOut"}}; | 26 | constexpr std::array<char, 10> DefaultDevice{{"DeviceOut"}}; |
| 33 | constexpr int DefaultSampleRate{48000}; | 27 | constexpr int DefaultSampleRate{48000}; |
| 34 | 28 | ||
| @@ -68,12 +62,12 @@ public: | |||
| 68 | RegisterHandlers(functions); | 62 | RegisterHandlers(functions); |
| 69 | 63 | ||
| 70 | // This is the event handle used to check if the audio buffer was released | 64 | // This is the event handle used to check if the audio buffer was released |
| 71 | auto& kernel = Core::System::GetInstance().Kernel(); | 65 | auto& system = Core::System::GetInstance(); |
| 72 | buffer_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky, | 66 | buffer_event = Kernel::WritableEvent::CreateEventPair( |
| 73 | "IAudioOutBufferReleased"); | 67 | system.Kernel(), Kernel::ResetType::Sticky, "IAudioOutBufferReleased"); |
| 74 | 68 | ||
| 75 | stream = audio_core.OpenStream(audio_params.sample_rate, audio_params.channel_count, | 69 | stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate, |
| 76 | std::move(unique_name), | 70 | audio_params.channel_count, std::move(unique_name), |
| 77 | [=]() { buffer_event.writable->Signal(); }); | 71 | [=]() { buffer_event.writable->Signal(); }); |
| 78 | } | 72 | } |
| 79 | 73 | ||
| @@ -100,7 +94,7 @@ private: | |||
| 100 | 94 | ||
| 101 | if (stream->IsPlaying()) { | 95 | if (stream->IsPlaying()) { |
| 102 | IPC::ResponseBuilder rb{ctx, 2}; | 96 | IPC::ResponseBuilder rb{ctx, 2}; |
| 103 | rb.Push(ResultCode(ErrorModule::Audio, ErrCodes::ErrorUnknown)); | 97 | rb.Push(ERR_OPERATION_FAILED); |
| 104 | return; | 98 | return; |
| 105 | } | 99 | } |
| 106 | 100 | ||
| @@ -113,7 +107,9 @@ private: | |||
| 113 | void StopAudioOut(Kernel::HLERequestContext& ctx) { | 107 | void StopAudioOut(Kernel::HLERequestContext& ctx) { |
| 114 | LOG_DEBUG(Service_Audio, "called"); | 108 | LOG_DEBUG(Service_Audio, "called"); |
| 115 | 109 | ||
| 116 | audio_core.StopStream(stream); | 110 | if (stream->IsPlaying()) { |
| 111 | audio_core.StopStream(stream); | ||
| 112 | } | ||
| 117 | 113 | ||
| 118 | IPC::ResponseBuilder rb{ctx, 2}; | 114 | IPC::ResponseBuilder rb{ctx, 2}; |
| 119 | rb.Push(RESULT_SUCCESS); | 115 | rb.Push(RESULT_SUCCESS); |
| @@ -143,7 +139,8 @@ private: | |||
| 143 | 139 | ||
| 144 | if (!audio_core.QueueBuffer(stream, tag, std::move(samples))) { | 140 | if (!audio_core.QueueBuffer(stream, tag, std::move(samples))) { |
| 145 | IPC::ResponseBuilder rb{ctx, 2}; | 141 | IPC::ResponseBuilder rb{ctx, 2}; |
| 146 | rb.Push(ResultCode(ErrorModule::Audio, ErrCodes::BufferCountExceeded)); | 142 | rb.Push(ERR_BUFFER_COUNT_EXCEEDED); |
| 143 | return; | ||
| 147 | } | 144 | } |
| 148 | 145 | ||
| 149 | IPC::ResponseBuilder rb{ctx, 2}; | 146 | IPC::ResponseBuilder rb{ctx, 2}; |
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index 76cc48254..c9de10a24 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include "core/hle/kernel/readable_event.h" | 17 | #include "core/hle/kernel/readable_event.h" |
| 18 | #include "core/hle/kernel/writable_event.h" | 18 | #include "core/hle/kernel/writable_event.h" |
| 19 | #include "core/hle/service/audio/audren_u.h" | 19 | #include "core/hle/service/audio/audren_u.h" |
| 20 | #include "core/hle/service/audio/errors.h" | ||
| 20 | 21 | ||
| 21 | namespace Service::Audio { | 22 | namespace Service::Audio { |
| 22 | 23 | ||
| @@ -37,15 +38,16 @@ public: | |||
| 37 | {8, &IAudioRenderer::SetRenderingTimeLimit, "SetRenderingTimeLimit"}, | 38 | {8, &IAudioRenderer::SetRenderingTimeLimit, "SetRenderingTimeLimit"}, |
| 38 | {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"}, | 39 | {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"}, |
| 39 | {10, &IAudioRenderer::RequestUpdateImpl, "RequestUpdateAuto"}, | 40 | {10, &IAudioRenderer::RequestUpdateImpl, "RequestUpdateAuto"}, |
| 40 | {11, nullptr, "ExecuteAudioRendererRendering"}, | 41 | {11, &IAudioRenderer::ExecuteAudioRendererRendering, "ExecuteAudioRendererRendering"}, |
| 41 | }; | 42 | }; |
| 42 | // clang-format on | 43 | // clang-format on |
| 43 | RegisterHandlers(functions); | 44 | RegisterHandlers(functions); |
| 44 | 45 | ||
| 45 | auto& kernel = Core::System::GetInstance().Kernel(); | 46 | auto& system = Core::System::GetInstance(); |
| 46 | system_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky, | 47 | system_event = Kernel::WritableEvent::CreateEventPair( |
| 47 | "IAudioRenderer:SystemEvent"); | 48 | system.Kernel(), Kernel::ResetType::Sticky, "IAudioRenderer:SystemEvent"); |
| 48 | renderer = std::make_unique<AudioCore::AudioRenderer>(audren_params, system_event.writable); | 49 | renderer = std::make_unique<AudioCore::AudioRenderer>(system.CoreTiming(), audren_params, |
| 50 | system_event.writable); | ||
| 49 | } | 51 | } |
| 50 | 52 | ||
| 51 | private: | 53 | private: |
| @@ -137,6 +139,17 @@ private: | |||
| 137 | rb.Push(rendering_time_limit_percent); | 139 | rb.Push(rendering_time_limit_percent); |
| 138 | } | 140 | } |
| 139 | 141 | ||
| 142 | void ExecuteAudioRendererRendering(Kernel::HLERequestContext& ctx) { | ||
| 143 | LOG_DEBUG(Service_Audio, "called"); | ||
| 144 | |||
| 145 | // This service command currently only reports an unsupported operation | ||
| 146 | // error code, or aborts. Given that, we just always return an error | ||
| 147 | // code in this case. | ||
| 148 | |||
| 149 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 150 | rb.Push(ERR_NOT_SUPPORTED); | ||
| 151 | } | ||
| 152 | |||
| 140 | Kernel::EventPair system_event; | 153 | Kernel::EventPair system_event; |
| 141 | std::unique_ptr<AudioCore::AudioRenderer> renderer; | 154 | std::unique_ptr<AudioCore::AudioRenderer> renderer; |
| 142 | u32 rendering_time_limit_percent = 100; | 155 | u32 rendering_time_limit_percent = 100; |
| @@ -234,7 +247,7 @@ AudRenU::AudRenU() : ServiceFramework("audren:u") { | |||
| 234 | {0, &AudRenU::OpenAudioRenderer, "OpenAudioRenderer"}, | 247 | {0, &AudRenU::OpenAudioRenderer, "OpenAudioRenderer"}, |
| 235 | {1, &AudRenU::GetAudioRendererWorkBufferSize, "GetAudioRendererWorkBufferSize"}, | 248 | {1, &AudRenU::GetAudioRendererWorkBufferSize, "GetAudioRendererWorkBufferSize"}, |
| 236 | {2, &AudRenU::GetAudioDeviceService, "GetAudioDeviceService"}, | 249 | {2, &AudRenU::GetAudioDeviceService, "GetAudioDeviceService"}, |
| 237 | {3, nullptr, "OpenAudioRendererAuto"}, | 250 | {3, &AudRenU::OpenAudioRendererAuto, "OpenAudioRendererAuto"}, |
| 238 | {4, &AudRenU::GetAudioDeviceServiceWithRevisionInfo, "GetAudioDeviceServiceWithRevisionInfo"}, | 251 | {4, &AudRenU::GetAudioDeviceServiceWithRevisionInfo, "GetAudioDeviceServiceWithRevisionInfo"}, |
| 239 | }; | 252 | }; |
| 240 | // clang-format on | 253 | // clang-format on |
| @@ -247,12 +260,7 @@ AudRenU::~AudRenU() = default; | |||
| 247 | void AudRenU::OpenAudioRenderer(Kernel::HLERequestContext& ctx) { | 260 | void AudRenU::OpenAudioRenderer(Kernel::HLERequestContext& ctx) { |
| 248 | LOG_DEBUG(Service_Audio, "called"); | 261 | LOG_DEBUG(Service_Audio, "called"); |
| 249 | 262 | ||
| 250 | IPC::RequestParser rp{ctx}; | 263 | OpenAudioRendererImpl(ctx); |
| 251 | auto params = rp.PopRaw<AudioCore::AudioRendererParameter>(); | ||
| 252 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | ||
| 253 | |||
| 254 | rb.Push(RESULT_SUCCESS); | ||
| 255 | rb.PushIpcInterface<Audio::IAudioRenderer>(std::move(params)); | ||
| 256 | } | 264 | } |
| 257 | 265 | ||
| 258 | void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { | 266 | void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { |
| @@ -261,20 +269,20 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { | |||
| 261 | LOG_DEBUG(Service_Audio, "called"); | 269 | LOG_DEBUG(Service_Audio, "called"); |
| 262 | 270 | ||
| 263 | u64 buffer_sz = Common::AlignUp(4 * params.mix_buffer_count, 0x40); | 271 | u64 buffer_sz = Common::AlignUp(4 * params.mix_buffer_count, 0x40); |
| 264 | buffer_sz += params.unknown_c * 1024; | 272 | buffer_sz += params.submix_count * 1024; |
| 265 | buffer_sz += 0x940 * (params.unknown_c + 1); | 273 | buffer_sz += 0x940 * (params.submix_count + 1); |
| 266 | buffer_sz += 0x3F0 * params.voice_count; | 274 | buffer_sz += 0x3F0 * params.voice_count; |
| 267 | buffer_sz += Common::AlignUp(8 * (params.unknown_c + 1), 0x10); | 275 | buffer_sz += Common::AlignUp(8 * (params.submix_count + 1), 0x10); |
| 268 | buffer_sz += Common::AlignUp(8 * params.voice_count, 0x10); | 276 | buffer_sz += Common::AlignUp(8 * params.voice_count, 0x10); |
| 269 | buffer_sz += | 277 | buffer_sz += Common::AlignUp( |
| 270 | Common::AlignUp((0x3C0 * (params.sink_count + params.unknown_c) + 4 * params.sample_count) * | 278 | (0x3C0 * (params.sink_count + params.submix_count) + 4 * params.sample_count) * |
| 271 | (params.mix_buffer_count + 6), | 279 | (params.mix_buffer_count + 6), |
| 272 | 0x40); | 280 | 0x40); |
| 273 | 281 | ||
| 274 | if (IsFeatureSupported(AudioFeatures::Splitter, params.revision)) { | 282 | if (IsFeatureSupported(AudioFeatures::Splitter, params.revision)) { |
| 275 | u32 count = params.unknown_c + 1; | 283 | const u32 count = params.submix_count + 1; |
| 276 | u64 node_count = Common::AlignUp(count, 0x40); | 284 | u64 node_count = Common::AlignUp(count, 0x40); |
| 277 | u64 node_state_buffer_sz = | 285 | const u64 node_state_buffer_sz = |
| 278 | 4 * (node_count * node_count) + 0xC * node_count + 2 * (node_count / 8); | 286 | 4 * (node_count * node_count) + 0xC * node_count + 2 * (node_count / 8); |
| 279 | u64 edge_matrix_buffer_sz = 0; | 287 | u64 edge_matrix_buffer_sz = 0; |
| 280 | node_count = Common::AlignUp(count * count, 0x40); | 288 | node_count = Common::AlignUp(count * count, 0x40); |
| @@ -288,19 +296,19 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { | |||
| 288 | 296 | ||
| 289 | buffer_sz += 0x20 * (params.effect_count + 4 * params.voice_count) + 0x50; | 297 | buffer_sz += 0x20 * (params.effect_count + 4 * params.voice_count) + 0x50; |
| 290 | if (IsFeatureSupported(AudioFeatures::Splitter, params.revision)) { | 298 | if (IsFeatureSupported(AudioFeatures::Splitter, params.revision)) { |
| 291 | buffer_sz += 0xE0 * params.unknown_2c; | 299 | buffer_sz += 0xE0 * params.num_splitter_send_channels; |
| 292 | buffer_sz += 0x20 * params.splitter_count; | 300 | buffer_sz += 0x20 * params.splitter_count; |
| 293 | buffer_sz += Common::AlignUp(4 * params.unknown_2c, 0x10); | 301 | buffer_sz += Common::AlignUp(4 * params.num_splitter_send_channels, 0x10); |
| 294 | } | 302 | } |
| 295 | buffer_sz = Common::AlignUp(buffer_sz, 0x40) + 0x170 * params.sink_count; | 303 | buffer_sz = Common::AlignUp(buffer_sz, 0x40) + 0x170 * params.sink_count; |
| 296 | u64 output_sz = buffer_sz + 0x280 * params.sink_count + 0x4B0 * params.effect_count + | 304 | u64 output_sz = buffer_sz + 0x280 * params.sink_count + 0x4B0 * params.effect_count + |
| 297 | ((params.voice_count * 256) | 0x40); | 305 | ((params.voice_count * 256) | 0x40); |
| 298 | 306 | ||
| 299 | if (params.unknown_1c >= 1) { | 307 | if (params.performance_frame_count >= 1) { |
| 300 | output_sz = Common::AlignUp(((16 * params.sink_count + 16 * params.effect_count + | 308 | output_sz = Common::AlignUp(((16 * params.sink_count + 16 * params.effect_count + |
| 301 | 16 * params.voice_count + 16) + | 309 | 16 * params.voice_count + 16) + |
| 302 | 0x658) * | 310 | 0x658) * |
| 303 | (params.unknown_1c + 1) + | 311 | (params.performance_frame_count + 1) + |
| 304 | 0xc0, | 312 | 0xc0, |
| 305 | 0x40) + | 313 | 0x40) + |
| 306 | output_sz; | 314 | output_sz; |
| @@ -324,6 +332,12 @@ void AudRenU::GetAudioDeviceService(Kernel::HLERequestContext& ctx) { | |||
| 324 | rb.PushIpcInterface<Audio::IAudioDevice>(); | 332 | rb.PushIpcInterface<Audio::IAudioDevice>(); |
| 325 | } | 333 | } |
| 326 | 334 | ||
| 335 | void AudRenU::OpenAudioRendererAuto(Kernel::HLERequestContext& ctx) { | ||
| 336 | LOG_DEBUG(Service_Audio, "called"); | ||
| 337 | |||
| 338 | OpenAudioRendererImpl(ctx); | ||
| 339 | } | ||
| 340 | |||
| 327 | void AudRenU::GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx) { | 341 | void AudRenU::GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx) { |
| 328 | LOG_WARNING(Service_Audio, "(STUBBED) called"); | 342 | LOG_WARNING(Service_Audio, "(STUBBED) called"); |
| 329 | 343 | ||
| @@ -334,6 +348,15 @@ void AudRenU::GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& c | |||
| 334 | // based on the current revision | 348 | // based on the current revision |
| 335 | } | 349 | } |
| 336 | 350 | ||
| 351 | void AudRenU::OpenAudioRendererImpl(Kernel::HLERequestContext& ctx) { | ||
| 352 | IPC::RequestParser rp{ctx}; | ||
| 353 | const auto params = rp.PopRaw<AudioCore::AudioRendererParameter>(); | ||
| 354 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | ||
| 355 | |||
| 356 | rb.Push(RESULT_SUCCESS); | ||
| 357 | rb.PushIpcInterface<IAudioRenderer>(params); | ||
| 358 | } | ||
| 359 | |||
| 337 | bool AudRenU::IsFeatureSupported(AudioFeatures feature, u32_le revision) const { | 360 | bool AudRenU::IsFeatureSupported(AudioFeatures feature, u32_le revision) const { |
| 338 | u32_be version_num = (revision - Common::MakeMagic('R', 'E', 'V', '0')); // Byte swap | 361 | u32_be version_num = (revision - Common::MakeMagic('R', 'E', 'V', '0')); // Byte swap |
| 339 | switch (feature) { | 362 | switch (feature) { |
diff --git a/src/core/hle/service/audio/audren_u.h b/src/core/hle/service/audio/audren_u.h index 3d63388fb..e55d25973 100644 --- a/src/core/hle/service/audio/audren_u.h +++ b/src/core/hle/service/audio/audren_u.h | |||
| @@ -21,8 +21,11 @@ private: | |||
| 21 | void OpenAudioRenderer(Kernel::HLERequestContext& ctx); | 21 | void OpenAudioRenderer(Kernel::HLERequestContext& ctx); |
| 22 | void GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx); | 22 | void GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx); |
| 23 | void GetAudioDeviceService(Kernel::HLERequestContext& ctx); | 23 | void GetAudioDeviceService(Kernel::HLERequestContext& ctx); |
| 24 | void OpenAudioRendererAuto(Kernel::HLERequestContext& ctx); | ||
| 24 | void GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx); | 25 | void GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx); |
| 25 | 26 | ||
| 27 | void OpenAudioRendererImpl(Kernel::HLERequestContext& ctx); | ||
| 28 | |||
| 26 | enum class AudioFeatures : u32 { | 29 | enum class AudioFeatures : u32 { |
| 27 | Splitter, | 30 | Splitter, |
| 28 | }; | 31 | }; |
diff --git a/src/core/hle/service/audio/errors.h b/src/core/hle/service/audio/errors.h new file mode 100644 index 000000000..6f8c09bcf --- /dev/null +++ b/src/core/hle/service/audio/errors.h | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "core/hle/result.h" | ||
| 8 | |||
| 9 | namespace Service::Audio { | ||
| 10 | |||
| 11 | constexpr ResultCode ERR_OPERATION_FAILED{ErrorModule::Audio, 2}; | ||
| 12 | constexpr ResultCode ERR_BUFFER_COUNT_EXCEEDED{ErrorModule::Audio, 8}; | ||
| 13 | constexpr ResultCode ERR_NOT_SUPPORTED{ErrorModule::Audio, 513}; | ||
| 14 | |||
| 15 | } // namespace Service::Audio | ||
diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp index 11eba4a12..cb4a1160d 100644 --- a/src/core/hle/service/audio/hwopus.cpp +++ b/src/core/hle/service/audio/hwopus.cpp | |||
| @@ -8,44 +8,34 @@ | |||
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | 9 | ||
| 10 | #include <opus.h> | 10 | #include <opus.h> |
| 11 | #include <opus_multistream.h> | ||
| 11 | 12 | ||
| 12 | #include "common/common_funcs.h" | 13 | #include "common/assert.h" |
| 13 | #include "common/logging/log.h" | 14 | #include "common/logging/log.h" |
| 14 | #include "core/hle/ipc_helpers.h" | 15 | #include "core/hle/ipc_helpers.h" |
| 15 | #include "core/hle/kernel/hle_ipc.h" | 16 | #include "core/hle/kernel/hle_ipc.h" |
| 16 | #include "core/hle/service/audio/hwopus.h" | 17 | #include "core/hle/service/audio/hwopus.h" |
| 17 | 18 | ||
| 18 | namespace Service::Audio { | 19 | namespace Service::Audio { |
| 19 | 20 | namespace { | |
| 20 | struct OpusDeleter { | 21 | struct OpusDeleter { |
| 21 | void operator()(void* ptr) const { | 22 | void operator()(OpusMSDecoder* ptr) const { |
| 22 | operator delete(ptr); | 23 | opus_multistream_decoder_destroy(ptr); |
| 23 | } | 24 | } |
| 24 | }; | 25 | }; |
| 25 | 26 | ||
| 26 | class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { | 27 | using OpusDecoderPtr = std::unique_ptr<OpusMSDecoder, OpusDeleter>; |
| 27 | public: | ||
| 28 | IHardwareOpusDecoderManager(std::unique_ptr<OpusDecoder, OpusDeleter> decoder, u32 sample_rate, | ||
| 29 | u32 channel_count) | ||
| 30 | : ServiceFramework("IHardwareOpusDecoderManager"), decoder(std::move(decoder)), | ||
| 31 | sample_rate(sample_rate), channel_count(channel_count) { | ||
| 32 | // clang-format off | ||
| 33 | static const FunctionInfo functions[] = { | ||
| 34 | {0, &IHardwareOpusDecoderManager::DecodeInterleavedOld, "DecodeInterleavedOld"}, | ||
| 35 | {1, nullptr, "SetContext"}, | ||
| 36 | {2, nullptr, "DecodeInterleavedForMultiStreamOld"}, | ||
| 37 | {3, nullptr, "SetContextForMultiStream"}, | ||
| 38 | {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerfOld, "DecodeInterleavedWithPerfOld"}, | ||
| 39 | {5, nullptr, "DecodeInterleavedForMultiStreamWithPerfOld"}, | ||
| 40 | {6, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, | ||
| 41 | {7, nullptr, "DecodeInterleavedForMultiStream"}, | ||
| 42 | }; | ||
| 43 | // clang-format on | ||
| 44 | 28 | ||
| 45 | RegisterHandlers(functions); | 29 | struct OpusPacketHeader { |
| 46 | } | 30 | // Packet size in bytes. |
| 31 | u32_be size; | ||
| 32 | // Indicates the final range of the codec's entropy coder. | ||
| 33 | u32_be final_range; | ||
| 34 | }; | ||
| 35 | static_assert(sizeof(OpusPacketHeader) == 0x8, "OpusHeader is an invalid size"); | ||
| 47 | 36 | ||
| 48 | private: | 37 | class OpusDecoderState { |
| 38 | public: | ||
| 49 | /// Describes extra behavior that may be asked of the decoding context. | 39 | /// Describes extra behavior that may be asked of the decoding context. |
| 50 | enum class ExtraBehavior { | 40 | enum class ExtraBehavior { |
| 51 | /// No extra behavior. | 41 | /// No extra behavior. |
| @@ -55,30 +45,27 @@ private: | |||
| 55 | ResetContext, | 45 | ResetContext, |
| 56 | }; | 46 | }; |
| 57 | 47 | ||
| 58 | void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { | 48 | enum class PerfTime { |
| 59 | LOG_DEBUG(Audio, "called"); | 49 | Disabled, |
| 60 | 50 | Enabled, | |
| 61 | DecodeInterleavedHelper(ctx, nullptr, ExtraBehavior::None); | 51 | }; |
| 62 | } | ||
| 63 | |||
| 64 | void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { | ||
| 65 | LOG_DEBUG(Audio, "called"); | ||
| 66 | |||
| 67 | u64 performance = 0; | ||
| 68 | DecodeInterleavedHelper(ctx, &performance, ExtraBehavior::None); | ||
| 69 | } | ||
| 70 | |||
| 71 | void DecodeInterleaved(Kernel::HLERequestContext& ctx) { | ||
| 72 | LOG_DEBUG(Audio, "called"); | ||
| 73 | |||
| 74 | IPC::RequestParser rp{ctx}; | ||
| 75 | const auto extra_behavior = | ||
| 76 | rp.Pop<bool>() ? ExtraBehavior::ResetContext : ExtraBehavior::None; | ||
| 77 | 52 | ||
| 78 | u64 performance = 0; | 53 | explicit OpusDecoderState(OpusDecoderPtr decoder, u32 sample_rate, u32 channel_count) |
| 79 | DecodeInterleavedHelper(ctx, &performance, extra_behavior); | 54 | : decoder{std::move(decoder)}, sample_rate{sample_rate}, channel_count{channel_count} {} |
| 55 | |||
| 56 | // Decodes interleaved Opus packets. Optionally allows reporting time taken to | ||
| 57 | // perform the decoding, as well as any relevant extra behavior. | ||
| 58 | void DecodeInterleaved(Kernel::HLERequestContext& ctx, PerfTime perf_time, | ||
| 59 | ExtraBehavior extra_behavior) { | ||
| 60 | if (perf_time == PerfTime::Disabled) { | ||
| 61 | DecodeInterleavedHelper(ctx, nullptr, extra_behavior); | ||
| 62 | } else { | ||
| 63 | u64 performance = 0; | ||
| 64 | DecodeInterleavedHelper(ctx, &performance, extra_behavior); | ||
| 65 | } | ||
| 80 | } | 66 | } |
| 81 | 67 | ||
| 68 | private: | ||
| 82 | void DecodeInterleavedHelper(Kernel::HLERequestContext& ctx, u64* performance, | 69 | void DecodeInterleavedHelper(Kernel::HLERequestContext& ctx, u64* performance, |
| 83 | ExtraBehavior extra_behavior) { | 70 | ExtraBehavior extra_behavior) { |
| 84 | u32 consumed = 0; | 71 | u32 consumed = 0; |
| @@ -89,8 +76,7 @@ private: | |||
| 89 | ResetDecoderContext(); | 76 | ResetDecoderContext(); |
| 90 | } | 77 | } |
| 91 | 78 | ||
| 92 | if (!Decoder_DecodeInterleaved(consumed, sample_count, ctx.ReadBuffer(), samples, | 79 | if (!DecodeOpusData(consumed, sample_count, ctx.ReadBuffer(), samples, performance)) { |
| 93 | performance)) { | ||
| 94 | LOG_ERROR(Audio, "Failed to decode opus data"); | 80 | LOG_ERROR(Audio, "Failed to decode opus data"); |
| 95 | IPC::ResponseBuilder rb{ctx, 2}; | 81 | IPC::ResponseBuilder rb{ctx, 2}; |
| 96 | // TODO(ogniK): Use correct error code | 82 | // TODO(ogniK): Use correct error code |
| @@ -109,27 +95,27 @@ private: | |||
| 109 | ctx.WriteBuffer(samples.data(), samples.size() * sizeof(s16)); | 95 | ctx.WriteBuffer(samples.data(), samples.size() * sizeof(s16)); |
| 110 | } | 96 | } |
| 111 | 97 | ||
| 112 | bool Decoder_DecodeInterleaved(u32& consumed, u32& sample_count, const std::vector<u8>& input, | 98 | bool DecodeOpusData(u32& consumed, u32& sample_count, const std::vector<u8>& input, |
| 113 | std::vector<opus_int16>& output, u64* out_performance_time) { | 99 | std::vector<opus_int16>& output, u64* out_performance_time) const { |
| 114 | const auto start_time = std::chrono::high_resolution_clock::now(); | 100 | const auto start_time = std::chrono::high_resolution_clock::now(); |
| 115 | const std::size_t raw_output_sz = output.size() * sizeof(opus_int16); | 101 | const std::size_t raw_output_sz = output.size() * sizeof(opus_int16); |
| 116 | if (sizeof(OpusHeader) > input.size()) { | 102 | if (sizeof(OpusPacketHeader) > input.size()) { |
| 117 | LOG_ERROR(Audio, "Input is smaller than the header size, header_sz={}, input_sz={}", | 103 | LOG_ERROR(Audio, "Input is smaller than the header size, header_sz={}, input_sz={}", |
| 118 | sizeof(OpusHeader), input.size()); | 104 | sizeof(OpusPacketHeader), input.size()); |
| 119 | return false; | 105 | return false; |
| 120 | } | 106 | } |
| 121 | 107 | ||
| 122 | OpusHeader hdr{}; | 108 | OpusPacketHeader hdr{}; |
| 123 | std::memcpy(&hdr, input.data(), sizeof(OpusHeader)); | 109 | std::memcpy(&hdr, input.data(), sizeof(OpusPacketHeader)); |
| 124 | if (sizeof(OpusHeader) + static_cast<u32>(hdr.sz) > input.size()) { | 110 | if (sizeof(OpusPacketHeader) + static_cast<u32>(hdr.size) > input.size()) { |
| 125 | LOG_ERROR(Audio, "Input does not fit in the opus header size. data_sz={}, input_sz={}", | 111 | LOG_ERROR(Audio, "Input does not fit in the opus header size. data_sz={}, input_sz={}", |
| 126 | sizeof(OpusHeader) + static_cast<u32>(hdr.sz), input.size()); | 112 | sizeof(OpusPacketHeader) + static_cast<u32>(hdr.size), input.size()); |
| 127 | return false; | 113 | return false; |
| 128 | } | 114 | } |
| 129 | 115 | ||
| 130 | const auto frame = input.data() + sizeof(OpusHeader); | 116 | const auto frame = input.data() + sizeof(OpusPacketHeader); |
| 131 | const auto decoded_sample_count = opus_packet_get_nb_samples( | 117 | const auto decoded_sample_count = opus_packet_get_nb_samples( |
| 132 | frame, static_cast<opus_int32>(input.size() - sizeof(OpusHeader)), | 118 | frame, static_cast<opus_int32>(input.size() - sizeof(OpusPacketHeader)), |
| 133 | static_cast<opus_int32>(sample_rate)); | 119 | static_cast<opus_int32>(sample_rate)); |
| 134 | if (decoded_sample_count * channel_count * sizeof(u16) > raw_output_sz) { | 120 | if (decoded_sample_count * channel_count * sizeof(u16) > raw_output_sz) { |
| 135 | LOG_ERROR( | 121 | LOG_ERROR( |
| @@ -141,18 +127,18 @@ private: | |||
| 141 | 127 | ||
| 142 | const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); | 128 | const int frame_size = (static_cast<int>(raw_output_sz / sizeof(s16) / channel_count)); |
| 143 | const auto out_sample_count = | 129 | const auto out_sample_count = |
| 144 | opus_decode(decoder.get(), frame, hdr.sz, output.data(), frame_size, 0); | 130 | opus_multistream_decode(decoder.get(), frame, hdr.size, output.data(), frame_size, 0); |
| 145 | if (out_sample_count < 0) { | 131 | if (out_sample_count < 0) { |
| 146 | LOG_ERROR(Audio, | 132 | LOG_ERROR(Audio, |
| 147 | "Incorrect sample count received from opus_decode, " | 133 | "Incorrect sample count received from opus_decode, " |
| 148 | "output_sample_count={}, frame_size={}, data_sz_from_hdr={}", | 134 | "output_sample_count={}, frame_size={}, data_sz_from_hdr={}", |
| 149 | out_sample_count, frame_size, static_cast<u32>(hdr.sz)); | 135 | out_sample_count, frame_size, static_cast<u32>(hdr.size)); |
| 150 | return false; | 136 | return false; |
| 151 | } | 137 | } |
| 152 | 138 | ||
| 153 | const auto end_time = std::chrono::high_resolution_clock::now() - start_time; | 139 | const auto end_time = std::chrono::high_resolution_clock::now() - start_time; |
| 154 | sample_count = out_sample_count; | 140 | sample_count = out_sample_count; |
| 155 | consumed = static_cast<u32>(sizeof(OpusHeader) + hdr.sz); | 141 | consumed = static_cast<u32>(sizeof(OpusPacketHeader) + hdr.size); |
| 156 | if (out_performance_time != nullptr) { | 142 | if (out_performance_time != nullptr) { |
| 157 | *out_performance_time = | 143 | *out_performance_time = |
| 158 | std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count(); | 144 | std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count(); |
| @@ -164,25 +150,86 @@ private: | |||
| 164 | void ResetDecoderContext() { | 150 | void ResetDecoderContext() { |
| 165 | ASSERT(decoder != nullptr); | 151 | ASSERT(decoder != nullptr); |
| 166 | 152 | ||
| 167 | opus_decoder_ctl(decoder.get(), OPUS_RESET_STATE); | 153 | opus_multistream_decoder_ctl(decoder.get(), OPUS_RESET_STATE); |
| 168 | } | 154 | } |
| 169 | 155 | ||
| 170 | struct OpusHeader { | 156 | OpusDecoderPtr decoder; |
| 171 | u32_be sz; // Needs to be BE for some odd reason | ||
| 172 | INSERT_PADDING_WORDS(1); | ||
| 173 | }; | ||
| 174 | static_assert(sizeof(OpusHeader) == 0x8, "OpusHeader is an invalid size"); | ||
| 175 | |||
| 176 | std::unique_ptr<OpusDecoder, OpusDeleter> decoder; | ||
| 177 | u32 sample_rate; | 157 | u32 sample_rate; |
| 178 | u32 channel_count; | 158 | u32 channel_count; |
| 179 | }; | 159 | }; |
| 180 | 160 | ||
| 181 | static std::size_t WorkerBufferSize(u32 channel_count) { | 161 | class IHardwareOpusDecoderManager final : public ServiceFramework<IHardwareOpusDecoderManager> { |
| 162 | public: | ||
| 163 | explicit IHardwareOpusDecoderManager(OpusDecoderState decoder_state) | ||
| 164 | : ServiceFramework("IHardwareOpusDecoderManager"), decoder_state{std::move(decoder_state)} { | ||
| 165 | // clang-format off | ||
| 166 | static const FunctionInfo functions[] = { | ||
| 167 | {0, &IHardwareOpusDecoderManager::DecodeInterleavedOld, "DecodeInterleavedOld"}, | ||
| 168 | {1, nullptr, "SetContext"}, | ||
| 169 | {2, nullptr, "DecodeInterleavedForMultiStreamOld"}, | ||
| 170 | {3, nullptr, "SetContextForMultiStream"}, | ||
| 171 | {4, &IHardwareOpusDecoderManager::DecodeInterleavedWithPerfOld, "DecodeInterleavedWithPerfOld"}, | ||
| 172 | {5, nullptr, "DecodeInterleavedForMultiStreamWithPerfOld"}, | ||
| 173 | {6, &IHardwareOpusDecoderManager::DecodeInterleaved, "DecodeInterleaved"}, | ||
| 174 | {7, nullptr, "DecodeInterleavedForMultiStream"}, | ||
| 175 | }; | ||
| 176 | // clang-format on | ||
| 177 | |||
| 178 | RegisterHandlers(functions); | ||
| 179 | } | ||
| 180 | |||
| 181 | private: | ||
| 182 | void DecodeInterleavedOld(Kernel::HLERequestContext& ctx) { | ||
| 183 | LOG_DEBUG(Audio, "called"); | ||
| 184 | |||
| 185 | decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Disabled, | ||
| 186 | OpusDecoderState::ExtraBehavior::None); | ||
| 187 | } | ||
| 188 | |||
| 189 | void DecodeInterleavedWithPerfOld(Kernel::HLERequestContext& ctx) { | ||
| 190 | LOG_DEBUG(Audio, "called"); | ||
| 191 | |||
| 192 | decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled, | ||
| 193 | OpusDecoderState::ExtraBehavior::None); | ||
| 194 | } | ||
| 195 | |||
| 196 | void DecodeInterleaved(Kernel::HLERequestContext& ctx) { | ||
| 197 | LOG_DEBUG(Audio, "called"); | ||
| 198 | |||
| 199 | IPC::RequestParser rp{ctx}; | ||
| 200 | const auto extra_behavior = rp.Pop<bool>() ? OpusDecoderState::ExtraBehavior::ResetContext | ||
| 201 | : OpusDecoderState::ExtraBehavior::None; | ||
| 202 | |||
| 203 | decoder_state.DecodeInterleaved(ctx, OpusDecoderState::PerfTime::Enabled, extra_behavior); | ||
| 204 | } | ||
| 205 | |||
| 206 | OpusDecoderState decoder_state; | ||
| 207 | }; | ||
| 208 | |||
| 209 | std::size_t WorkerBufferSize(u32 channel_count) { | ||
| 182 | ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); | 210 | ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); |
| 183 | return opus_decoder_get_size(static_cast<int>(channel_count)); | 211 | constexpr int num_streams = 1; |
| 212 | const int num_stereo_streams = channel_count == 2 ? 1 : 0; | ||
| 213 | return opus_multistream_decoder_get_size(num_streams, num_stereo_streams); | ||
| 184 | } | 214 | } |
| 185 | 215 | ||
| 216 | // Creates the mapping table that maps the input channels to the particular | ||
| 217 | // output channels. In the stereo case, we map the left and right input channels | ||
| 218 | // to the left and right output channels respectively. | ||
| 219 | // | ||
| 220 | // However, in the monophonic case, we only map the one available channel | ||
| 221 | // to the sole output channel. We specify 255 for the would-be right channel | ||
| 222 | // as this is a special value defined by Opus to indicate to the decoder to | ||
| 223 | // ignore that channel. | ||
| 224 | std::array<u8, 2> CreateMappingTable(u32 channel_count) { | ||
| 225 | if (channel_count == 2) { | ||
| 226 | return {{0, 1}}; | ||
| 227 | } | ||
| 228 | |||
| 229 | return {{0, 255}}; | ||
| 230 | } | ||
| 231 | } // Anonymous namespace | ||
| 232 | |||
| 186 | void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) { | 233 | void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) { |
| 187 | IPC::RequestParser rp{ctx}; | 234 | IPC::RequestParser rp{ctx}; |
| 188 | const auto sample_rate = rp.Pop<u32>(); | 235 | const auto sample_rate = rp.Pop<u32>(); |
| @@ -220,10 +267,15 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { | |||
| 220 | const std::size_t worker_sz = WorkerBufferSize(channel_count); | 267 | const std::size_t worker_sz = WorkerBufferSize(channel_count); |
| 221 | ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); | 268 | ASSERT_MSG(buffer_sz >= worker_sz, "Worker buffer too large"); |
| 222 | 269 | ||
| 223 | std::unique_ptr<OpusDecoder, OpusDeleter> decoder{ | 270 | const int num_stereo_streams = channel_count == 2 ? 1 : 0; |
| 224 | static_cast<OpusDecoder*>(operator new(worker_sz))}; | 271 | const auto mapping_table = CreateMappingTable(channel_count); |
| 225 | if (const int err = opus_decoder_init(decoder.get(), sample_rate, channel_count)) { | 272 | |
| 226 | LOG_ERROR(Audio, "Failed to init opus decoder with error={}", err); | 273 | int error = 0; |
| 274 | OpusDecoderPtr decoder{ | ||
| 275 | opus_multistream_decoder_create(sample_rate, static_cast<int>(channel_count), 1, | ||
| 276 | num_stereo_streams, mapping_table.data(), &error)}; | ||
| 277 | if (error != OPUS_OK || decoder == nullptr) { | ||
| 278 | LOG_ERROR(Audio, "Failed to create Opus decoder (error={}).", error); | ||
| 227 | IPC::ResponseBuilder rb{ctx, 2}; | 279 | IPC::ResponseBuilder rb{ctx, 2}; |
| 228 | // TODO(ogniK): Use correct error code | 280 | // TODO(ogniK): Use correct error code |
| 229 | rb.Push(ResultCode(-1)); | 281 | rb.Push(ResultCode(-1)); |
| @@ -232,8 +284,8 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { | |||
| 232 | 284 | ||
| 233 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | 285 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; |
| 234 | rb.Push(RESULT_SUCCESS); | 286 | rb.Push(RESULT_SUCCESS); |
| 235 | rb.PushIpcInterface<IHardwareOpusDecoderManager>(std::move(decoder), sample_rate, | 287 | rb.PushIpcInterface<IHardwareOpusDecoderManager>( |
| 236 | channel_count); | 288 | OpusDecoderState{std::move(decoder), sample_rate, channel_count}); |
| 237 | } | 289 | } |
| 238 | 290 | ||
| 239 | HwOpus::HwOpus() : ServiceFramework("hwopus") { | 291 | HwOpus::HwOpus() : ServiceFramework("hwopus") { |
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp index 54959edd8..f03fb629c 100644 --- a/src/core/hle/service/filesystem/fsp_srv.cpp +++ b/src/core/hle/service/filesystem/fsp_srv.cpp | |||
| @@ -733,7 +733,10 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { | |||
| 733 | FSP_SRV::~FSP_SRV() = default; | 733 | FSP_SRV::~FSP_SRV() = default; |
| 734 | 734 | ||
| 735 | void FSP_SRV::SetCurrentProcess(Kernel::HLERequestContext& ctx) { | 735 | void FSP_SRV::SetCurrentProcess(Kernel::HLERequestContext& ctx) { |
| 736 | LOG_WARNING(Service_FS, "(STUBBED) called"); | 736 | IPC::RequestParser rp{ctx}; |
| 737 | current_process_id = rp.Pop<u64>(); | ||
| 738 | |||
| 739 | LOG_DEBUG(Service_FS, "called. current_process_id=0x{:016X}", current_process_id); | ||
| 737 | 740 | ||
| 738 | IPC::ResponseBuilder rb{ctx, 2}; | 741 | IPC::ResponseBuilder rb{ctx, 2}; |
| 739 | rb.Push(RESULT_SUCCESS); | 742 | rb.Push(RESULT_SUCCESS); |
diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h index 3a5f4e200..d7572ba7a 100644 --- a/src/core/hle/service/filesystem/fsp_srv.h +++ b/src/core/hle/service/filesystem/fsp_srv.h | |||
| @@ -32,6 +32,7 @@ private: | |||
| 32 | void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); | 32 | void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); |
| 33 | 33 | ||
| 34 | FileSys::VirtualFile romfs; | 34 | FileSys::VirtualFile romfs; |
| 35 | u64 current_process_id = 0; | ||
| 35 | }; | 36 | }; |
| 36 | 37 | ||
| 37 | } // namespace Service::FileSystem | 38 | } // namespace Service::FileSystem |
diff --git a/src/core/hle/service/hid/controllers/controller_base.h b/src/core/hle/service/hid/controllers/controller_base.h index f0e092b1b..5e5097a03 100644 --- a/src/core/hle/service/hid/controllers/controller_base.h +++ b/src/core/hle/service/hid/controllers/controller_base.h | |||
| @@ -7,6 +7,10 @@ | |||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | #include "common/swap.h" | 8 | #include "common/swap.h" |
| 9 | 9 | ||
| 10 | namespace Core::Timing { | ||
| 11 | class CoreTiming; | ||
| 12 | } | ||
| 13 | |||
| 10 | namespace Service::HID { | 14 | namespace Service::HID { |
| 11 | class ControllerBase { | 15 | class ControllerBase { |
| 12 | public: | 16 | public: |
| @@ -20,7 +24,8 @@ public: | |||
| 20 | virtual void OnRelease() = 0; | 24 | virtual void OnRelease() = 0; |
| 21 | 25 | ||
| 22 | // When the controller is requesting an update for the shared memory | 26 | // When the controller is requesting an update for the shared memory |
| 23 | virtual void OnUpdate(u8* data, std::size_t size) = 0; | 27 | virtual void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 28 | std::size_t size) = 0; | ||
| 24 | 29 | ||
| 25 | // Called when input devices should be loaded | 30 | // Called when input devices should be loaded |
| 26 | virtual void OnLoadInputDevices() = 0; | 31 | virtual void OnLoadInputDevices() = 0; |
diff --git a/src/core/hle/service/hid/controllers/debug_pad.cpp b/src/core/hle/service/hid/controllers/debug_pad.cpp index c22357d8c..c5c2e032a 100644 --- a/src/core/hle/service/hid/controllers/debug_pad.cpp +++ b/src/core/hle/service/hid/controllers/debug_pad.cpp | |||
| @@ -21,8 +21,9 @@ void Controller_DebugPad::OnInit() {} | |||
| 21 | 21 | ||
| 22 | void Controller_DebugPad::OnRelease() {} | 22 | void Controller_DebugPad::OnRelease() {} |
| 23 | 23 | ||
| 24 | void Controller_DebugPad::OnUpdate(u8* data, std::size_t size) { | 24 | void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 25 | shared_memory.header.timestamp = CoreTiming::GetTicks(); | 25 | std::size_t size) { |
| 26 | shared_memory.header.timestamp = core_timing.GetTicks(); | ||
| 26 | shared_memory.header.total_entry_count = 17; | 27 | shared_memory.header.total_entry_count = 17; |
| 27 | 28 | ||
| 28 | if (!IsControllerActivated()) { | 29 | if (!IsControllerActivated()) { |
diff --git a/src/core/hle/service/hid/controllers/debug_pad.h b/src/core/hle/service/hid/controllers/debug_pad.h index 2b60ead12..e584b92ec 100644 --- a/src/core/hle/service/hid/controllers/debug_pad.h +++ b/src/core/hle/service/hid/controllers/debug_pad.h | |||
| @@ -26,7 +26,7 @@ public: | |||
| 26 | void OnRelease() override; | 26 | void OnRelease() override; |
| 27 | 27 | ||
| 28 | // When the controller is requesting an update for the shared memory | 28 | // When the controller is requesting an update for the shared memory |
| 29 | void OnUpdate(u8* data, std::size_t size) override; | 29 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, std::size_t size) override; |
| 30 | 30 | ||
| 31 | // Called when input devices should be loaded | 31 | // Called when input devices should be loaded |
| 32 | void OnLoadInputDevices() override; | 32 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/controllers/gesture.cpp b/src/core/hle/service/hid/controllers/gesture.cpp index 898572277..a179252e3 100644 --- a/src/core/hle/service/hid/controllers/gesture.cpp +++ b/src/core/hle/service/hid/controllers/gesture.cpp | |||
| @@ -17,8 +17,9 @@ void Controller_Gesture::OnInit() {} | |||
| 17 | 17 | ||
| 18 | void Controller_Gesture::OnRelease() {} | 18 | void Controller_Gesture::OnRelease() {} |
| 19 | 19 | ||
| 20 | void Controller_Gesture::OnUpdate(u8* data, std::size_t size) { | 20 | void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 21 | shared_memory.header.timestamp = CoreTiming::GetTicks(); | 21 | std::size_t size) { |
| 22 | shared_memory.header.timestamp = core_timing.GetTicks(); | ||
| 22 | shared_memory.header.total_entry_count = 17; | 23 | shared_memory.header.total_entry_count = 17; |
| 23 | 24 | ||
| 24 | if (!IsControllerActivated()) { | 25 | if (!IsControllerActivated()) { |
diff --git a/src/core/hle/service/hid/controllers/gesture.h b/src/core/hle/service/hid/controllers/gesture.h index 1056ffbcd..f305fe90f 100644 --- a/src/core/hle/service/hid/controllers/gesture.h +++ b/src/core/hle/service/hid/controllers/gesture.h | |||
| @@ -22,7 +22,7 @@ public: | |||
| 22 | void OnRelease() override; | 22 | void OnRelease() override; |
| 23 | 23 | ||
| 24 | // When the controller is requesting an update for the shared memory | 24 | // When the controller is requesting an update for the shared memory |
| 25 | void OnUpdate(u8* data, size_t size) override; | 25 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, size_t size) override; |
| 26 | 26 | ||
| 27 | // Called when input devices should be loaded | 27 | // Called when input devices should be loaded |
| 28 | void OnLoadInputDevices() override; | 28 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/controllers/keyboard.cpp b/src/core/hle/service/hid/controllers/keyboard.cpp index ca75adc2b..92d7bfb52 100644 --- a/src/core/hle/service/hid/controllers/keyboard.cpp +++ b/src/core/hle/service/hid/controllers/keyboard.cpp | |||
| @@ -19,8 +19,9 @@ void Controller_Keyboard::OnInit() {} | |||
| 19 | 19 | ||
| 20 | void Controller_Keyboard::OnRelease() {} | 20 | void Controller_Keyboard::OnRelease() {} |
| 21 | 21 | ||
| 22 | void Controller_Keyboard::OnUpdate(u8* data, std::size_t size) { | 22 | void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 23 | shared_memory.header.timestamp = CoreTiming::GetTicks(); | 23 | std::size_t size) { |
| 24 | shared_memory.header.timestamp = core_timing.GetTicks(); | ||
| 24 | shared_memory.header.total_entry_count = 17; | 25 | shared_memory.header.total_entry_count = 17; |
| 25 | 26 | ||
| 26 | if (!IsControllerActivated()) { | 27 | if (!IsControllerActivated()) { |
diff --git a/src/core/hle/service/hid/controllers/keyboard.h b/src/core/hle/service/hid/controllers/keyboard.h index f52775456..73cd2c7bb 100644 --- a/src/core/hle/service/hid/controllers/keyboard.h +++ b/src/core/hle/service/hid/controllers/keyboard.h | |||
| @@ -25,7 +25,7 @@ public: | |||
| 25 | void OnRelease() override; | 25 | void OnRelease() override; |
| 26 | 26 | ||
| 27 | // When the controller is requesting an update for the shared memory | 27 | // When the controller is requesting an update for the shared memory |
| 28 | void OnUpdate(u8* data, std::size_t size) override; | 28 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, std::size_t size) override; |
| 29 | 29 | ||
| 30 | // Called when input devices should be loaded | 30 | // Called when input devices should be loaded |
| 31 | void OnLoadInputDevices() override; | 31 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/controllers/mouse.cpp b/src/core/hle/service/hid/controllers/mouse.cpp index 63391dbe9..11ab096d9 100644 --- a/src/core/hle/service/hid/controllers/mouse.cpp +++ b/src/core/hle/service/hid/controllers/mouse.cpp | |||
| @@ -17,8 +17,9 @@ Controller_Mouse::~Controller_Mouse() = default; | |||
| 17 | void Controller_Mouse::OnInit() {} | 17 | void Controller_Mouse::OnInit() {} |
| 18 | void Controller_Mouse::OnRelease() {} | 18 | void Controller_Mouse::OnRelease() {} |
| 19 | 19 | ||
| 20 | void Controller_Mouse::OnUpdate(u8* data, std::size_t size) { | 20 | void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 21 | shared_memory.header.timestamp = CoreTiming::GetTicks(); | 21 | std::size_t size) { |
| 22 | shared_memory.header.timestamp = core_timing.GetTicks(); | ||
| 22 | shared_memory.header.total_entry_count = 17; | 23 | shared_memory.header.total_entry_count = 17; |
| 23 | 24 | ||
| 24 | if (!IsControllerActivated()) { | 25 | if (!IsControllerActivated()) { |
diff --git a/src/core/hle/service/hid/controllers/mouse.h b/src/core/hle/service/hid/controllers/mouse.h index 70b654d07..9d46eecbe 100644 --- a/src/core/hle/service/hid/controllers/mouse.h +++ b/src/core/hle/service/hid/controllers/mouse.h | |||
| @@ -24,7 +24,7 @@ public: | |||
| 24 | void OnRelease() override; | 24 | void OnRelease() override; |
| 25 | 25 | ||
| 26 | // When the controller is requesting an update for the shared memory | 26 | // When the controller is requesting an update for the shared memory |
| 27 | void OnUpdate(u8* data, std::size_t size) override; | 27 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, std::size_t size) override; |
| 28 | 28 | ||
| 29 | // Called when input devices should be loaded | 29 | // Called when input devices should be loaded |
| 30 | void OnLoadInputDevices() override; | 30 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index 04c8c35a8..e7fc7a619 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp | |||
| @@ -288,7 +288,8 @@ void Controller_NPad::RequestPadStateUpdate(u32 npad_id) { | |||
| 288 | rstick_entry.y = static_cast<s32>(stick_r_y_f * HID_JOYSTICK_MAX); | 288 | rstick_entry.y = static_cast<s32>(stick_r_y_f * HID_JOYSTICK_MAX); |
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | void Controller_NPad::OnUpdate(u8* data, std::size_t data_len) { | 291 | void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 292 | std::size_t data_len) { | ||
| 292 | if (!IsControllerActivated()) | 293 | if (!IsControllerActivated()) |
| 293 | return; | 294 | return; |
| 294 | for (std::size_t i = 0; i < shared_memory_entries.size(); i++) { | 295 | for (std::size_t i = 0; i < shared_memory_entries.size(); i++) { |
| @@ -308,7 +309,7 @@ void Controller_NPad::OnUpdate(u8* data, std::size_t data_len) { | |||
| 308 | const auto& last_entry = | 309 | const auto& last_entry = |
| 309 | main_controller->npad[main_controller->common.last_entry_index]; | 310 | main_controller->npad[main_controller->common.last_entry_index]; |
| 310 | 311 | ||
| 311 | main_controller->common.timestamp = CoreTiming::GetTicks(); | 312 | main_controller->common.timestamp = core_timing.GetTicks(); |
| 312 | main_controller->common.last_entry_index = | 313 | main_controller->common.last_entry_index = |
| 313 | (main_controller->common.last_entry_index + 1) % 17; | 314 | (main_controller->common.last_entry_index + 1) % 17; |
| 314 | 315 | ||
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h index ce057da82..4ff50b3cd 100644 --- a/src/core/hle/service/hid/controllers/npad.h +++ b/src/core/hle/service/hid/controllers/npad.h | |||
| @@ -30,7 +30,7 @@ public: | |||
| 30 | void OnRelease() override; | 30 | void OnRelease() override; |
| 31 | 31 | ||
| 32 | // When the controller is requesting an update for the shared memory | 32 | // When the controller is requesting an update for the shared memory |
| 33 | void OnUpdate(u8* data, std::size_t size) override; | 33 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, std::size_t size) override; |
| 34 | 34 | ||
| 35 | // Called when input devices should be loaded | 35 | // Called when input devices should be loaded |
| 36 | void OnLoadInputDevices() override; | 36 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/controllers/stubbed.cpp b/src/core/hle/service/hid/controllers/stubbed.cpp index 02fcfadd9..946948f5e 100644 --- a/src/core/hle/service/hid/controllers/stubbed.cpp +++ b/src/core/hle/service/hid/controllers/stubbed.cpp | |||
| @@ -16,13 +16,14 @@ void Controller_Stubbed::OnInit() {} | |||
| 16 | 16 | ||
| 17 | void Controller_Stubbed::OnRelease() {} | 17 | void Controller_Stubbed::OnRelease() {} |
| 18 | 18 | ||
| 19 | void Controller_Stubbed::OnUpdate(u8* data, std::size_t size) { | 19 | void Controller_Stubbed::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 20 | std::size_t size) { | ||
| 20 | if (!smart_update) { | 21 | if (!smart_update) { |
| 21 | return; | 22 | return; |
| 22 | } | 23 | } |
| 23 | 24 | ||
| 24 | CommonHeader header{}; | 25 | CommonHeader header{}; |
| 25 | header.timestamp = CoreTiming::GetTicks(); | 26 | header.timestamp = core_timing.GetTicks(); |
| 26 | header.total_entry_count = 17; | 27 | header.total_entry_count = 17; |
| 27 | header.entry_count = 0; | 28 | header.entry_count = 0; |
| 28 | header.last_entry_index = 0; | 29 | header.last_entry_index = 0; |
diff --git a/src/core/hle/service/hid/controllers/stubbed.h b/src/core/hle/service/hid/controllers/stubbed.h index 4a21c643e..24469f03e 100644 --- a/src/core/hle/service/hid/controllers/stubbed.h +++ b/src/core/hle/service/hid/controllers/stubbed.h | |||
| @@ -20,7 +20,7 @@ public: | |||
| 20 | void OnRelease() override; | 20 | void OnRelease() override; |
| 21 | 21 | ||
| 22 | // When the controller is requesting an update for the shared memory | 22 | // When the controller is requesting an update for the shared memory |
| 23 | void OnUpdate(u8* data, std::size_t size) override; | 23 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, std::size_t size) override; |
| 24 | 24 | ||
| 25 | // Called when input devices should be loaded | 25 | // Called when input devices should be loaded |
| 26 | void OnLoadInputDevices() override; | 26 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/controllers/touchscreen.cpp b/src/core/hle/service/hid/controllers/touchscreen.cpp index f666b1bd8..1a8445a43 100644 --- a/src/core/hle/service/hid/controllers/touchscreen.cpp +++ b/src/core/hle/service/hid/controllers/touchscreen.cpp | |||
| @@ -20,8 +20,9 @@ void Controller_Touchscreen::OnInit() {} | |||
| 20 | 20 | ||
| 21 | void Controller_Touchscreen::OnRelease() {} | 21 | void Controller_Touchscreen::OnRelease() {} |
| 22 | 22 | ||
| 23 | void Controller_Touchscreen::OnUpdate(u8* data, std::size_t size) { | 23 | void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 24 | shared_memory.header.timestamp = CoreTiming::GetTicks(); | 24 | std::size_t size) { |
| 25 | shared_memory.header.timestamp = core_timing.GetTicks(); | ||
| 25 | shared_memory.header.total_entry_count = 17; | 26 | shared_memory.header.total_entry_count = 17; |
| 26 | 27 | ||
| 27 | if (!IsControllerActivated()) { | 28 | if (!IsControllerActivated()) { |
| @@ -48,7 +49,7 @@ void Controller_Touchscreen::OnUpdate(u8* data, std::size_t size) { | |||
| 48 | touch_entry.diameter_x = Settings::values.touchscreen.diameter_x; | 49 | touch_entry.diameter_x = Settings::values.touchscreen.diameter_x; |
| 49 | touch_entry.diameter_y = Settings::values.touchscreen.diameter_y; | 50 | touch_entry.diameter_y = Settings::values.touchscreen.diameter_y; |
| 50 | touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle; | 51 | touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle; |
| 51 | const u64 tick = CoreTiming::GetTicks(); | 52 | const u64 tick = core_timing.GetTicks(); |
| 52 | touch_entry.delta_time = tick - last_touch; | 53 | touch_entry.delta_time = tick - last_touch; |
| 53 | last_touch = tick; | 54 | last_touch = tick; |
| 54 | touch_entry.finger = Settings::values.touchscreen.finger; | 55 | touch_entry.finger = Settings::values.touchscreen.finger; |
diff --git a/src/core/hle/service/hid/controllers/touchscreen.h b/src/core/hle/service/hid/controllers/touchscreen.h index be2583864..76fc340e9 100644 --- a/src/core/hle/service/hid/controllers/touchscreen.h +++ b/src/core/hle/service/hid/controllers/touchscreen.h | |||
| @@ -24,7 +24,7 @@ public: | |||
| 24 | void OnRelease() override; | 24 | void OnRelease() override; |
| 25 | 25 | ||
| 26 | // When the controller is requesting an update for the shared memory | 26 | // When the controller is requesting an update for the shared memory |
| 27 | void OnUpdate(u8* data, std::size_t size) override; | 27 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, std::size_t size) override; |
| 28 | 28 | ||
| 29 | // Called when input devices should be loaded | 29 | // Called when input devices should be loaded |
| 30 | void OnLoadInputDevices() override; | 30 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/controllers/xpad.cpp b/src/core/hle/service/hid/controllers/xpad.cpp index cd397c70b..1a9da9576 100644 --- a/src/core/hle/service/hid/controllers/xpad.cpp +++ b/src/core/hle/service/hid/controllers/xpad.cpp | |||
| @@ -17,9 +17,10 @@ void Controller_XPad::OnInit() {} | |||
| 17 | 17 | ||
| 18 | void Controller_XPad::OnRelease() {} | 18 | void Controller_XPad::OnRelease() {} |
| 19 | 19 | ||
| 20 | void Controller_XPad::OnUpdate(u8* data, std::size_t size) { | 20 | void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, |
| 21 | std::size_t size) { | ||
| 21 | for (auto& xpad_entry : shared_memory.shared_memory_entries) { | 22 | for (auto& xpad_entry : shared_memory.shared_memory_entries) { |
| 22 | xpad_entry.header.timestamp = CoreTiming::GetTicks(); | 23 | xpad_entry.header.timestamp = core_timing.GetTicks(); |
| 23 | xpad_entry.header.total_entry_count = 17; | 24 | xpad_entry.header.total_entry_count = 17; |
| 24 | 25 | ||
| 25 | if (!IsControllerActivated()) { | 26 | if (!IsControllerActivated()) { |
diff --git a/src/core/hle/service/hid/controllers/xpad.h b/src/core/hle/service/hid/controllers/xpad.h index ff836989f..2864e6617 100644 --- a/src/core/hle/service/hid/controllers/xpad.h +++ b/src/core/hle/service/hid/controllers/xpad.h | |||
| @@ -22,7 +22,7 @@ public: | |||
| 22 | void OnRelease() override; | 22 | void OnRelease() override; |
| 23 | 23 | ||
| 24 | // When the controller is requesting an update for the shared memory | 24 | // When the controller is requesting an update for the shared memory |
| 25 | void OnUpdate(u8* data, std::size_t size) override; | 25 | void OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data, std::size_t size) override; |
| 26 | 26 | ||
| 27 | // Called when input devices should be loaded | 27 | // Called when input devices should be loaded |
| 28 | void OnLoadInputDevices() override; | 28 | void OnLoadInputDevices() override; |
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index 008bf3f02..8a6de83a2 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp | |||
| @@ -36,9 +36,9 @@ namespace Service::HID { | |||
| 36 | 36 | ||
| 37 | // Updating period for each HID device. | 37 | // Updating period for each HID device. |
| 38 | // TODO(ogniK): Find actual polling rate of hid | 38 | // TODO(ogniK): Find actual polling rate of hid |
| 39 | constexpr u64 pad_update_ticks = CoreTiming::BASE_CLOCK_RATE / 66; | 39 | constexpr u64 pad_update_ticks = Core::Timing::BASE_CLOCK_RATE / 66; |
| 40 | constexpr u64 accelerometer_update_ticks = CoreTiming::BASE_CLOCK_RATE / 100; | 40 | constexpr u64 accelerometer_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; |
| 41 | constexpr u64 gyroscope_update_ticks = CoreTiming::BASE_CLOCK_RATE / 100; | 41 | constexpr u64 gyroscope_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; |
| 42 | constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000; | 42 | constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000; |
| 43 | 43 | ||
| 44 | IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") { | 44 | IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") { |
| @@ -73,14 +73,15 @@ IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") { | |||
| 73 | GetController<Controller_Stubbed>(HidController::Unknown3).SetCommonHeaderOffset(0x5000); | 73 | GetController<Controller_Stubbed>(HidController::Unknown3).SetCommonHeaderOffset(0x5000); |
| 74 | 74 | ||
| 75 | // Register update callbacks | 75 | // Register update callbacks |
| 76 | auto& core_timing = Core::System::GetInstance().CoreTiming(); | ||
| 76 | pad_update_event = | 77 | pad_update_event = |
| 77 | CoreTiming::RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, int cycles_late) { | 78 | core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, int cycles_late) { |
| 78 | UpdateControllers(userdata, cycles_late); | 79 | UpdateControllers(userdata, cycles_late); |
| 79 | }); | 80 | }); |
| 80 | 81 | ||
| 81 | // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?) | 82 | // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?) |
| 82 | 83 | ||
| 83 | CoreTiming::ScheduleEvent(pad_update_ticks, pad_update_event); | 84 | core_timing.ScheduleEvent(pad_update_ticks, pad_update_event); |
| 84 | 85 | ||
| 85 | ReloadInputDevices(); | 86 | ReloadInputDevices(); |
| 86 | } | 87 | } |
| @@ -94,7 +95,7 @@ void IAppletResource::DeactivateController(HidController controller) { | |||
| 94 | } | 95 | } |
| 95 | 96 | ||
| 96 | IAppletResource ::~IAppletResource() { | 97 | IAppletResource ::~IAppletResource() { |
| 97 | CoreTiming::UnscheduleEvent(pad_update_event, 0); | 98 | Core::System::GetInstance().CoreTiming().UnscheduleEvent(pad_update_event, 0); |
| 98 | } | 99 | } |
| 99 | 100 | ||
| 100 | void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) { | 101 | void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) { |
| @@ -106,15 +107,17 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) { | |||
| 106 | } | 107 | } |
| 107 | 108 | ||
| 108 | void IAppletResource::UpdateControllers(u64 userdata, int cycles_late) { | 109 | void IAppletResource::UpdateControllers(u64 userdata, int cycles_late) { |
| 110 | auto& core_timing = Core::System::GetInstance().CoreTiming(); | ||
| 111 | |||
| 109 | const bool should_reload = Settings::values.is_device_reload_pending.exchange(false); | 112 | const bool should_reload = Settings::values.is_device_reload_pending.exchange(false); |
| 110 | for (const auto& controller : controllers) { | 113 | for (const auto& controller : controllers) { |
| 111 | if (should_reload) { | 114 | if (should_reload) { |
| 112 | controller->OnLoadInputDevices(); | 115 | controller->OnLoadInputDevices(); |
| 113 | } | 116 | } |
| 114 | controller->OnUpdate(shared_mem->GetPointer(), SHARED_MEMORY_SIZE); | 117 | controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE); |
| 115 | } | 118 | } |
| 116 | 119 | ||
| 117 | CoreTiming::ScheduleEvent(pad_update_ticks - cycles_late, pad_update_event); | 120 | core_timing.ScheduleEvent(pad_update_ticks - cycles_late, pad_update_event); |
| 118 | } | 121 | } |
| 119 | 122 | ||
| 120 | class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> { | 123 | class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> { |
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index eca27c056..7cc58db4c 100644 --- a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | #include "controllers/controller_base.h" | 7 | #include "controllers/controller_base.h" |
| 8 | #include "core/hle/service/service.h" | 8 | #include "core/hle/service/service.h" |
| 9 | 9 | ||
| 10 | namespace CoreTiming { | 10 | namespace Core::Timing { |
| 11 | struct EventType; | 11 | struct EventType; |
| 12 | } | 12 | } |
| 13 | 13 | ||
| @@ -15,7 +15,7 @@ namespace Kernel { | |||
| 15 | class SharedMemory; | 15 | class SharedMemory; |
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | namespace SM { | 18 | namespace Service::SM { |
| 19 | class ServiceManager; | 19 | class ServiceManager; |
| 20 | } | 20 | } |
| 21 | 21 | ||
| @@ -66,7 +66,7 @@ private: | |||
| 66 | 66 | ||
| 67 | Kernel::SharedPtr<Kernel::SharedMemory> shared_mem; | 67 | Kernel::SharedPtr<Kernel::SharedMemory> shared_mem; |
| 68 | 68 | ||
| 69 | CoreTiming::EventType* pad_update_event; | 69 | Core::Timing::EventType* pad_update_event; |
| 70 | 70 | ||
| 71 | std::array<std::unique_ptr<ControllerBase>, static_cast<size_t>(HidController::MaxControllers)> | 71 | std::array<std::unique_ptr<ControllerBase>, static_cast<size_t>(HidController::MaxControllers)> |
| 72 | controllers{}; | 72 | controllers{}; |
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp index 3c7f8b1ee..2c4625c99 100644 --- a/src/core/hle/service/hid/irs.cpp +++ b/src/core/hle/service/hid/irs.cpp | |||
| @@ -98,7 +98,7 @@ void IRS::GetImageTransferProcessorState(Kernel::HLERequestContext& ctx) { | |||
| 98 | 98 | ||
| 99 | IPC::ResponseBuilder rb{ctx, 5}; | 99 | IPC::ResponseBuilder rb{ctx, 5}; |
| 100 | rb.Push(RESULT_SUCCESS); | 100 | rb.Push(RESULT_SUCCESS); |
| 101 | rb.PushRaw<u64>(CoreTiming::GetTicks()); | 101 | rb.PushRaw<u64>(Core::System::GetInstance().CoreTiming().GetTicks()); |
| 102 | rb.PushRaw<u32>(0); | 102 | rb.PushRaw<u32>(0); |
| 103 | } | 103 | } |
| 104 | 104 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 92acc57b1..20c7c39aa 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp | |||
| @@ -23,11 +23,11 @@ u32 nvdisp_disp0::ioctl(Ioctl command, const std::vector<u8>& input, std::vector | |||
| 23 | 23 | ||
| 24 | void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, | 24 | void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, |
| 25 | u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform, | 25 | u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform, |
| 26 | const MathUtil::Rectangle<int>& crop_rect) { | 26 | const Common::Rectangle<int>& crop_rect) { |
| 27 | VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); | 27 | VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); |
| 28 | LOG_WARNING(Service, | 28 | LOG_TRACE(Service, |
| 29 | "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", | 29 | "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", |
| 30 | addr, offset, width, height, stride, format); | 30 | addr, offset, width, height, stride, format); |
| 31 | 31 | ||
| 32 | using PixelFormat = Tegra::FramebufferConfig::PixelFormat; | 32 | using PixelFormat = Tegra::FramebufferConfig::PixelFormat; |
| 33 | const Tegra::FramebufferConfig framebuffer{ | 33 | const Tegra::FramebufferConfig framebuffer{ |
| @@ -36,7 +36,7 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3 | |||
| 36 | 36 | ||
| 37 | auto& instance = Core::System::GetInstance(); | 37 | auto& instance = Core::System::GetInstance(); |
| 38 | instance.GetPerfStats().EndGameFrame(); | 38 | instance.GetPerfStats().EndGameFrame(); |
| 39 | instance.Renderer().SwapBuffers(framebuffer); | 39 | instance.GPU().SwapBuffers(framebuffer); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | } // namespace Service::Nvidia::Devices | 42 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index a45086e45..ace71169f 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | |||
| @@ -25,7 +25,7 @@ public: | |||
| 25 | /// Performs a screen flip, drawing the buffer pointed to by the handle. | 25 | /// Performs a screen flip, drawing the buffer pointed to by the handle. |
| 26 | void flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride, | 26 | void flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride, |
| 27 | NVFlinger::BufferQueue::BufferTransformFlags transform, | 27 | NVFlinger::BufferQueue::BufferTransformFlags transform, |
| 28 | const MathUtil::Rectangle<int>& crop_rect); | 28 | const Common::Rectangle<int>& crop_rect); |
| 29 | 29 | ||
| 30 | private: | 30 | private: |
| 31 | std::shared_ptr<nvmap> nvmap_dev; | 31 | std::shared_ptr<nvmap> nvmap_dev; |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 466db7ccd..b031ebc66 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "core/core.h" | 10 | #include "core/core.h" |
| 11 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" | 11 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" |
| 12 | #include "core/hle/service/nvdrv/devices/nvmap.h" | 12 | #include "core/hle/service/nvdrv/devices/nvmap.h" |
| 13 | #include "core/memory.h" | ||
| 13 | #include "video_core/memory_manager.h" | 14 | #include "video_core/memory_manager.h" |
| 14 | #include "video_core/rasterizer_interface.h" | 15 | #include "video_core/rasterizer_interface.h" |
| 15 | #include "video_core/renderer_base.h" | 16 | #include "video_core/renderer_base.h" |
| @@ -178,7 +179,7 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou | |||
| 178 | auto& gpu = system_instance.GPU(); | 179 | auto& gpu = system_instance.GPU(); |
| 179 | auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset); | 180 | auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset); |
| 180 | ASSERT(cpu_addr); | 181 | ASSERT(cpu_addr); |
| 181 | system_instance.Renderer().Rasterizer().FlushAndInvalidateRegion(*cpu_addr, itr->second.size); | 182 | gpu.FlushAndInvalidateRegion(ToCacheAddr(Memory::GetPointer(*cpu_addr)), itr->second.size); |
| 182 | 183 | ||
| 183 | params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size); | 184 | params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size); |
| 184 | 185 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index d57a54ee8..45812d238 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <cstring> | 5 | #include <cstring> |
| 6 | #include "common/assert.h" | 6 | #include "common/assert.h" |
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "core/core.h" | ||
| 8 | #include "core/core_timing.h" | 9 | #include "core/core_timing.h" |
| 9 | #include "core/core_timing_util.h" | 10 | #include "core/core_timing_util.h" |
| 10 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" | 11 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" |
| @@ -184,7 +185,7 @@ u32 nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& o | |||
| 184 | 185 | ||
| 185 | IoctlGetGpuTime params{}; | 186 | IoctlGetGpuTime params{}; |
| 186 | std::memcpy(¶ms, input.data(), input.size()); | 187 | std::memcpy(¶ms, input.data(), input.size()); |
| 187 | params.gpu_time = CoreTiming::cyclesToNs(CoreTiming::GetTicks()); | 188 | params.gpu_time = Core::Timing::cyclesToNs(Core::System::GetInstance().CoreTiming().GetTicks()); |
| 188 | std::memcpy(output.data(), ¶ms, output.size()); | 189 | std::memcpy(output.data(), ¶ms, output.size()); |
| 189 | return 0; | 190 | return 0; |
| 190 | } | 191 | } |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 0a650f36c..8ce7bc7a5 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | |||
| @@ -136,16 +136,6 @@ u32 nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::vector< | |||
| 136 | return 0; | 136 | return 0; |
| 137 | } | 137 | } |
| 138 | 138 | ||
| 139 | static void PushGPUEntries(Tegra::CommandList&& entries) { | ||
| 140 | if (entries.empty()) { | ||
| 141 | return; | ||
| 142 | } | ||
| 143 | |||
| 144 | auto& dma_pusher{Core::System::GetInstance().GPU().DmaPusher()}; | ||
| 145 | dma_pusher.Push(std::move(entries)); | ||
| 146 | dma_pusher.DispatchCalls(); | ||
| 147 | } | ||
| 148 | |||
| 149 | u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& output) { | 139 | u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& output) { |
| 150 | if (input.size() < sizeof(IoctlSubmitGpfifo)) { | 140 | if (input.size() < sizeof(IoctlSubmitGpfifo)) { |
| 151 | UNIMPLEMENTED(); | 141 | UNIMPLEMENTED(); |
| @@ -163,7 +153,7 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp | |||
| 163 | std::memcpy(entries.data(), &input[sizeof(IoctlSubmitGpfifo)], | 153 | std::memcpy(entries.data(), &input[sizeof(IoctlSubmitGpfifo)], |
| 164 | params.num_entries * sizeof(Tegra::CommandListHeader)); | 154 | params.num_entries * sizeof(Tegra::CommandListHeader)); |
| 165 | 155 | ||
| 166 | PushGPUEntries(std::move(entries)); | 156 | Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries)); |
| 167 | 157 | ||
| 168 | params.fence_out.id = 0; | 158 | params.fence_out.id = 0; |
| 169 | params.fence_out.value = 0; | 159 | params.fence_out.value = 0; |
| @@ -184,7 +174,7 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 184 | Memory::ReadBlock(params.address, entries.data(), | 174 | Memory::ReadBlock(params.address, entries.data(), |
| 185 | params.num_entries * sizeof(Tegra::CommandListHeader)); | 175 | params.num_entries * sizeof(Tegra::CommandListHeader)); |
| 186 | 176 | ||
| 187 | PushGPUEntries(std::move(entries)); | 177 | Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries)); |
| 188 | 178 | ||
| 189 | params.fence_out.id = 0; | 179 | params.fence_out.id = 0; |
| 190 | params.fence_out.value = 0; | 180 | params.fence_out.value = 0; |
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp index fc07d9bb8..4d150fc71 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue.cpp | |||
| @@ -63,7 +63,7 @@ const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const { | |||
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform, | 65 | void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform, |
| 66 | const MathUtil::Rectangle<int>& crop_rect) { | 66 | const Common::Rectangle<int>& crop_rect) { |
| 67 | auto itr = std::find_if(queue.begin(), queue.end(), | 67 | auto itr = std::find_if(queue.begin(), queue.end(), |
| 68 | [&](const Buffer& buffer) { return buffer.slot == slot; }); | 68 | [&](const Buffer& buffer) { return buffer.slot == slot; }); |
| 69 | ASSERT(itr != queue.end()); | 69 | ASSERT(itr != queue.end()); |
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h index b171f256c..e1ccb6171 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.h +++ b/src/core/hle/service/nvflinger/buffer_queue.h | |||
| @@ -13,10 +13,6 @@ | |||
| 13 | #include "core/hle/kernel/object.h" | 13 | #include "core/hle/kernel/object.h" |
| 14 | #include "core/hle/kernel/writable_event.h" | 14 | #include "core/hle/kernel/writable_event.h" |
| 15 | 15 | ||
| 16 | namespace CoreTiming { | ||
| 17 | struct EventType; | ||
| 18 | } | ||
| 19 | |||
| 20 | namespace Service::NVFlinger { | 16 | namespace Service::NVFlinger { |
| 21 | 17 | ||
| 22 | struct IGBPBuffer { | 18 | struct IGBPBuffer { |
| @@ -71,14 +67,14 @@ public: | |||
| 71 | Status status = Status::Free; | 67 | Status status = Status::Free; |
| 72 | IGBPBuffer igbp_buffer; | 68 | IGBPBuffer igbp_buffer; |
| 73 | BufferTransformFlags transform; | 69 | BufferTransformFlags transform; |
| 74 | MathUtil::Rectangle<int> crop_rect; | 70 | Common::Rectangle<int> crop_rect; |
| 75 | }; | 71 | }; |
| 76 | 72 | ||
| 77 | void SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer); | 73 | void SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer); |
| 78 | std::optional<u32> DequeueBuffer(u32 width, u32 height); | 74 | std::optional<u32> DequeueBuffer(u32 width, u32 height); |
| 79 | const IGBPBuffer& RequestBuffer(u32 slot) const; | 75 | const IGBPBuffer& RequestBuffer(u32 slot) const; |
| 80 | void QueueBuffer(u32 slot, BufferTransformFlags transform, | 76 | void QueueBuffer(u32 slot, BufferTransformFlags transform, |
| 81 | const MathUtil::Rectangle<int>& crop_rect); | 77 | const Common::Rectangle<int>& crop_rect); |
| 82 | std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer(); | 78 | std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer(); |
| 83 | void ReleaseBuffer(u32 slot); | 79 | void ReleaseBuffer(u32 slot); |
| 84 | u32 Query(QueryType type); | 80 | u32 Query(QueryType type); |
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 8dfc0df03..fc496b654 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp | |||
| @@ -14,135 +14,170 @@ | |||
| 14 | #include "core/core_timing_util.h" | 14 | #include "core/core_timing_util.h" |
| 15 | #include "core/hle/kernel/kernel.h" | 15 | #include "core/hle/kernel/kernel.h" |
| 16 | #include "core/hle/kernel/readable_event.h" | 16 | #include "core/hle/kernel/readable_event.h" |
| 17 | #include "core/hle/kernel/writable_event.h" | ||
| 18 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" | 17 | #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" |
| 19 | #include "core/hle/service/nvdrv/nvdrv.h" | 18 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 20 | #include "core/hle/service/nvflinger/buffer_queue.h" | 19 | #include "core/hle/service/nvflinger/buffer_queue.h" |
| 21 | #include "core/hle/service/nvflinger/nvflinger.h" | 20 | #include "core/hle/service/nvflinger/nvflinger.h" |
| 21 | #include "core/hle/service/vi/display/vi_display.h" | ||
| 22 | #include "core/hle/service/vi/layer/vi_layer.h" | ||
| 22 | #include "core/perf_stats.h" | 23 | #include "core/perf_stats.h" |
| 23 | #include "video_core/renderer_base.h" | 24 | #include "video_core/renderer_base.h" |
| 24 | 25 | ||
| 25 | namespace Service::NVFlinger { | 26 | namespace Service::NVFlinger { |
| 26 | 27 | ||
| 27 | constexpr std::size_t SCREEN_REFRESH_RATE = 60; | 28 | constexpr std::size_t SCREEN_REFRESH_RATE = 60; |
| 28 | constexpr u64 frame_ticks = static_cast<u64>(CoreTiming::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); | 29 | constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); |
| 30 | |||
| 31 | NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} { | ||
| 32 | displays.emplace_back(0, "Default"); | ||
| 33 | displays.emplace_back(1, "External"); | ||
| 34 | displays.emplace_back(2, "Edid"); | ||
| 35 | displays.emplace_back(3, "Internal"); | ||
| 36 | displays.emplace_back(4, "Null"); | ||
| 29 | 37 | ||
| 30 | NVFlinger::NVFlinger() { | ||
| 31 | // Schedule the screen composition events | 38 | // Schedule the screen composition events |
| 32 | composition_event = | 39 | composition_event = |
| 33 | CoreTiming::RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) { | 40 | core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) { |
| 34 | Compose(); | 41 | Compose(); |
| 35 | CoreTiming::ScheduleEvent(frame_ticks - cycles_late, composition_event); | 42 | this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event); |
| 36 | }); | 43 | }); |
| 37 | 44 | ||
| 38 | CoreTiming::ScheduleEvent(frame_ticks, composition_event); | 45 | core_timing.ScheduleEvent(frame_ticks, composition_event); |
| 39 | } | 46 | } |
| 40 | 47 | ||
| 41 | NVFlinger::~NVFlinger() { | 48 | NVFlinger::~NVFlinger() { |
| 42 | CoreTiming::UnscheduleEvent(composition_event, 0); | 49 | core_timing.UnscheduleEvent(composition_event, 0); |
| 43 | } | 50 | } |
| 44 | 51 | ||
| 45 | void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { | 52 | void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { |
| 46 | nvdrv = std::move(instance); | 53 | nvdrv = std::move(instance); |
| 47 | } | 54 | } |
| 48 | 55 | ||
| 49 | u64 NVFlinger::OpenDisplay(std::string_view name) { | 56 | std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { |
| 50 | LOG_DEBUG(Service, "Opening \"{}\" display", name); | 57 | LOG_DEBUG(Service, "Opening \"{}\" display", name); |
| 51 | 58 | ||
| 52 | // TODO(Subv): Currently we only support the Default display. | 59 | // TODO(Subv): Currently we only support the Default display. |
| 53 | ASSERT(name == "Default"); | 60 | ASSERT(name == "Default"); |
| 54 | 61 | ||
| 55 | const auto itr = std::find_if(displays.begin(), displays.end(), | 62 | const auto itr = |
| 56 | [&](const Display& display) { return display.name == name; }); | 63 | std::find_if(displays.begin(), displays.end(), |
| 57 | 64 | [&](const VI::Display& display) { return display.GetName() == name; }); | |
| 58 | ASSERT(itr != displays.end()); | 65 | if (itr == displays.end()) { |
| 66 | return {}; | ||
| 67 | } | ||
| 59 | 68 | ||
| 60 | return itr->id; | 69 | return itr->GetID(); |
| 61 | } | 70 | } |
| 62 | 71 | ||
| 63 | u64 NVFlinger::CreateLayer(u64 display_id) { | 72 | std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { |
| 64 | auto& display = FindDisplay(display_id); | 73 | auto* const display = FindDisplay(display_id); |
| 65 | 74 | ||
| 66 | ASSERT_MSG(display.layers.empty(), "Only one layer is supported per display at the moment"); | 75 | if (display == nullptr) { |
| 76 | return {}; | ||
| 77 | } | ||
| 67 | 78 | ||
| 68 | const u64 layer_id = next_layer_id++; | 79 | const u64 layer_id = next_layer_id++; |
| 69 | const u32 buffer_queue_id = next_buffer_queue_id++; | 80 | const u32 buffer_queue_id = next_buffer_queue_id++; |
| 70 | auto buffer_queue = std::make_shared<BufferQueue>(buffer_queue_id, layer_id); | 81 | buffer_queues.emplace_back(buffer_queue_id, layer_id); |
| 71 | display.layers.emplace_back(layer_id, buffer_queue); | 82 | display->CreateLayer(layer_id, buffer_queues.back()); |
| 72 | buffer_queues.emplace_back(std::move(buffer_queue)); | ||
| 73 | return layer_id; | 83 | return layer_id; |
| 74 | } | 84 | } |
| 75 | 85 | ||
| 76 | u32 NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) const { | 86 | std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) const { |
| 77 | const auto& layer = FindLayer(display_id, layer_id); | 87 | const auto* const layer = FindLayer(display_id, layer_id); |
| 78 | return layer.buffer_queue->GetId(); | 88 | |
| 89 | if (layer == nullptr) { | ||
| 90 | return {}; | ||
| 91 | } | ||
| 92 | |||
| 93 | return layer->GetBufferQueue().GetId(); | ||
| 79 | } | 94 | } |
| 80 | 95 | ||
| 81 | Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::GetVsyncEvent(u64 display_id) { | 96 | Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const { |
| 82 | return FindDisplay(display_id).vsync_event.readable; | 97 | auto* const display = FindDisplay(display_id); |
| 98 | |||
| 99 | if (display == nullptr) { | ||
| 100 | return nullptr; | ||
| 101 | } | ||
| 102 | |||
| 103 | return display->GetVSyncEvent(); | ||
| 83 | } | 104 | } |
| 84 | 105 | ||
| 85 | std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const { | 106 | BufferQueue& NVFlinger::FindBufferQueue(u32 id) { |
| 86 | const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), | 107 | const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), |
| 87 | [&](const auto& queue) { return queue->GetId() == id; }); | 108 | [id](const auto& queue) { return queue.GetId() == id; }); |
| 88 | 109 | ||
| 89 | ASSERT(itr != buffer_queues.end()); | 110 | ASSERT(itr != buffer_queues.end()); |
| 90 | return *itr; | 111 | return *itr; |
| 91 | } | 112 | } |
| 92 | 113 | ||
| 93 | Display& NVFlinger::FindDisplay(u64 display_id) { | 114 | const BufferQueue& NVFlinger::FindBufferQueue(u32 id) const { |
| 94 | const auto itr = std::find_if(displays.begin(), displays.end(), | 115 | const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(), |
| 95 | [&](const Display& display) { return display.id == display_id; }); | 116 | [id](const auto& queue) { return queue.GetId() == id; }); |
| 96 | 117 | ||
| 97 | ASSERT(itr != displays.end()); | 118 | ASSERT(itr != buffer_queues.end()); |
| 98 | return *itr; | 119 | return *itr; |
| 99 | } | 120 | } |
| 100 | 121 | ||
| 101 | const Display& NVFlinger::FindDisplay(u64 display_id) const { | 122 | VI::Display* NVFlinger::FindDisplay(u64 display_id) { |
| 102 | const auto itr = std::find_if(displays.begin(), displays.end(), | 123 | const auto itr = |
| 103 | [&](const Display& display) { return display.id == display_id; }); | 124 | std::find_if(displays.begin(), displays.end(), |
| 125 | [&](const VI::Display& display) { return display.GetID() == display_id; }); | ||
| 104 | 126 | ||
| 105 | ASSERT(itr != displays.end()); | 127 | if (itr == displays.end()) { |
| 106 | return *itr; | 128 | return nullptr; |
| 129 | } | ||
| 130 | |||
| 131 | return &*itr; | ||
| 107 | } | 132 | } |
| 108 | 133 | ||
| 109 | Layer& NVFlinger::FindLayer(u64 display_id, u64 layer_id) { | 134 | const VI::Display* NVFlinger::FindDisplay(u64 display_id) const { |
| 110 | auto& display = FindDisplay(display_id); | 135 | const auto itr = |
| 136 | std::find_if(displays.begin(), displays.end(), | ||
| 137 | [&](const VI::Display& display) { return display.GetID() == display_id; }); | ||
| 111 | 138 | ||
| 112 | const auto itr = std::find_if(display.layers.begin(), display.layers.end(), | 139 | if (itr == displays.end()) { |
| 113 | [&](const Layer& layer) { return layer.id == layer_id; }); | 140 | return nullptr; |
| 141 | } | ||
| 114 | 142 | ||
| 115 | ASSERT(itr != display.layers.end()); | 143 | return &*itr; |
| 116 | return *itr; | ||
| 117 | } | 144 | } |
| 118 | 145 | ||
| 119 | const Layer& NVFlinger::FindLayer(u64 display_id, u64 layer_id) const { | 146 | VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) { |
| 120 | const auto& display = FindDisplay(display_id); | 147 | auto* const display = FindDisplay(display_id); |
| 121 | 148 | ||
| 122 | const auto itr = std::find_if(display.layers.begin(), display.layers.end(), | 149 | if (display == nullptr) { |
| 123 | [&](const Layer& layer) { return layer.id == layer_id; }); | 150 | return nullptr; |
| 151 | } | ||
| 124 | 152 | ||
| 125 | ASSERT(itr != display.layers.end()); | 153 | return display->FindLayer(layer_id); |
| 126 | return *itr; | 154 | } |
| 155 | |||
| 156 | const VI::Layer* NVFlinger::FindLayer(u64 display_id, u64 layer_id) const { | ||
| 157 | const auto* const display = FindDisplay(display_id); | ||
| 158 | |||
| 159 | if (display == nullptr) { | ||
| 160 | return nullptr; | ||
| 161 | } | ||
| 162 | |||
| 163 | return display->FindLayer(layer_id); | ||
| 127 | } | 164 | } |
| 128 | 165 | ||
| 129 | void NVFlinger::Compose() { | 166 | void NVFlinger::Compose() { |
| 130 | for (auto& display : displays) { | 167 | for (auto& display : displays) { |
| 131 | // Trigger vsync for this display at the end of drawing | 168 | // Trigger vsync for this display at the end of drawing |
| 132 | SCOPE_EXIT({ display.vsync_event.writable->Signal(); }); | 169 | SCOPE_EXIT({ display.SignalVSyncEvent(); }); |
| 133 | 170 | ||
| 134 | // Don't do anything for displays without layers. | 171 | // Don't do anything for displays without layers. |
| 135 | if (display.layers.empty()) | 172 | if (!display.HasLayers()) |
| 136 | continue; | 173 | continue; |
| 137 | 174 | ||
| 138 | // TODO(Subv): Support more than 1 layer. | 175 | // TODO(Subv): Support more than 1 layer. |
| 139 | ASSERT_MSG(display.layers.size() == 1, "Max 1 layer per display is supported"); | 176 | VI::Layer& layer = display.GetLayer(0); |
| 140 | 177 | auto& buffer_queue = layer.GetBufferQueue(); | |
| 141 | Layer& layer = display.layers[0]; | ||
| 142 | auto& buffer_queue = layer.buffer_queue; | ||
| 143 | 178 | ||
| 144 | // Search for a queued buffer and acquire it | 179 | // Search for a queued buffer and acquire it |
| 145 | auto buffer = buffer_queue->AcquireBuffer(); | 180 | auto buffer = buffer_queue.AcquireBuffer(); |
| 146 | 181 | ||
| 147 | MicroProfileFlip(); | 182 | MicroProfileFlip(); |
| 148 | 183 | ||
| @@ -151,7 +186,7 @@ void NVFlinger::Compose() { | |||
| 151 | 186 | ||
| 152 | // There was no queued buffer to draw, render previous frame | 187 | // There was no queued buffer to draw, render previous frame |
| 153 | system_instance.GetPerfStats().EndGameFrame(); | 188 | system_instance.GetPerfStats().EndGameFrame(); |
| 154 | system_instance.Renderer().SwapBuffers({}); | 189 | system_instance.GPU().SwapBuffers({}); |
| 155 | continue; | 190 | continue; |
| 156 | } | 191 | } |
| 157 | 192 | ||
| @@ -167,19 +202,8 @@ void NVFlinger::Compose() { | |||
| 167 | igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride, | 202 | igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride, |
| 168 | buffer->get().transform, buffer->get().crop_rect); | 203 | buffer->get().transform, buffer->get().crop_rect); |
| 169 | 204 | ||
| 170 | buffer_queue->ReleaseBuffer(buffer->get().slot); | 205 | buffer_queue.ReleaseBuffer(buffer->get().slot); |
| 171 | } | 206 | } |
| 172 | } | 207 | } |
| 173 | 208 | ||
| 174 | Layer::Layer(u64 id, std::shared_ptr<BufferQueue> queue) : id(id), buffer_queue(std::move(queue)) {} | ||
| 175 | Layer::~Layer() = default; | ||
| 176 | |||
| 177 | Display::Display(u64 id, std::string name) : id(id), name(std::move(name)) { | ||
| 178 | auto& kernel = Core::System::GetInstance().Kernel(); | ||
| 179 | vsync_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky, | ||
| 180 | fmt::format("Display VSync Event {}", id)); | ||
| 181 | } | ||
| 182 | |||
| 183 | Display::~Display() = default; | ||
| 184 | |||
| 185 | } // namespace Service::NVFlinger | 209 | } // namespace Service::NVFlinger |
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 83e974ed3..c0a83fffb 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h | |||
| @@ -4,8 +4,8 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | ||
| 8 | #include <memory> | 7 | #include <memory> |
| 8 | #include <optional> | ||
| 9 | #include <string> | 9 | #include <string> |
| 10 | #include <string_view> | 10 | #include <string_view> |
| 11 | #include <vector> | 11 | #include <vector> |
| @@ -13,9 +13,10 @@ | |||
| 13 | #include "common/common_types.h" | 13 | #include "common/common_types.h" |
| 14 | #include "core/hle/kernel/object.h" | 14 | #include "core/hle/kernel/object.h" |
| 15 | 15 | ||
| 16 | namespace CoreTiming { | 16 | namespace Core::Timing { |
| 17 | class CoreTiming; | ||
| 17 | struct EventType; | 18 | struct EventType; |
| 18 | } | 19 | } // namespace Core::Timing |
| 19 | 20 | ||
| 20 | namespace Kernel { | 21 | namespace Kernel { |
| 21 | class ReadableEvent; | 22 | class ReadableEvent; |
| @@ -24,53 +25,50 @@ class WritableEvent; | |||
| 24 | 25 | ||
| 25 | namespace Service::Nvidia { | 26 | namespace Service::Nvidia { |
| 26 | class Module; | 27 | class Module; |
| 27 | } | 28 | } // namespace Service::Nvidia |
| 29 | |||
| 30 | namespace Service::VI { | ||
| 31 | class Display; | ||
| 32 | class Layer; | ||
| 33 | } // namespace Service::VI | ||
| 28 | 34 | ||
| 29 | namespace Service::NVFlinger { | 35 | namespace Service::NVFlinger { |
| 30 | 36 | ||
| 31 | class BufferQueue; | 37 | class BufferQueue; |
| 32 | 38 | ||
| 33 | struct Layer { | ||
| 34 | Layer(u64 id, std::shared_ptr<BufferQueue> queue); | ||
| 35 | ~Layer(); | ||
| 36 | |||
| 37 | u64 id; | ||
| 38 | std::shared_ptr<BufferQueue> buffer_queue; | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct Display { | ||
| 42 | Display(u64 id, std::string name); | ||
| 43 | ~Display(); | ||
| 44 | |||
| 45 | u64 id; | ||
| 46 | std::string name; | ||
| 47 | |||
| 48 | std::vector<Layer> layers; | ||
| 49 | Kernel::EventPair vsync_event; | ||
| 50 | }; | ||
| 51 | |||
| 52 | class NVFlinger final { | 39 | class NVFlinger final { |
| 53 | public: | 40 | public: |
| 54 | NVFlinger(); | 41 | explicit NVFlinger(Core::Timing::CoreTiming& core_timing); |
| 55 | ~NVFlinger(); | 42 | ~NVFlinger(); |
| 56 | 43 | ||
| 57 | /// Sets the NVDrv module instance to use to send buffers to the GPU. | 44 | /// Sets the NVDrv module instance to use to send buffers to the GPU. |
| 58 | void SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance); | 45 | void SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance); |
| 59 | 46 | ||
| 60 | /// Opens the specified display and returns the ID. | 47 | /// Opens the specified display and returns the ID. |
| 61 | u64 OpenDisplay(std::string_view name); | 48 | /// |
| 49 | /// If an invalid display name is provided, then an empty optional is returned. | ||
| 50 | std::optional<u64> OpenDisplay(std::string_view name); | ||
| 62 | 51 | ||
| 63 | /// Creates a layer on the specified display and returns the layer ID. | 52 | /// Creates a layer on the specified display and returns the layer ID. |
| 64 | u64 CreateLayer(u64 display_id); | 53 | /// |
| 54 | /// If an invalid display ID is specified, then an empty optional is returned. | ||
| 55 | std::optional<u64> CreateLayer(u64 display_id); | ||
| 65 | 56 | ||
| 66 | /// Finds the buffer queue ID of the specified layer in the specified display. | 57 | /// Finds the buffer queue ID of the specified layer in the specified display. |
| 67 | u32 FindBufferQueueId(u64 display_id, u64 layer_id) const; | 58 | /// |
| 59 | /// If an invalid display ID or layer ID is provided, then an empty optional is returned. | ||
| 60 | std::optional<u32> FindBufferQueueId(u64 display_id, u64 layer_id) const; | ||
| 68 | 61 | ||
| 69 | /// Gets the vsync event for the specified display. | 62 | /// Gets the vsync event for the specified display. |
| 70 | Kernel::SharedPtr<Kernel::ReadableEvent> GetVsyncEvent(u64 display_id); | 63 | /// |
| 64 | /// If an invalid display ID is provided, then nullptr is returned. | ||
| 65 | Kernel::SharedPtr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const; | ||
| 71 | 66 | ||
| 72 | /// Obtains a buffer queue identified by the ID. | 67 | /// Obtains a buffer queue identified by the ID. |
| 73 | std::shared_ptr<BufferQueue> FindBufferQueue(u32 id) const; | 68 | BufferQueue& FindBufferQueue(u32 id); |
| 69 | |||
| 70 | /// Obtains a buffer queue identified by the ID. | ||
| 71 | const BufferQueue& FindBufferQueue(u32 id) const; | ||
| 74 | 72 | ||
| 75 | /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when | 73 | /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when |
| 76 | /// finished. | 74 | /// finished. |
| @@ -78,27 +76,21 @@ public: | |||
| 78 | 76 | ||
| 79 | private: | 77 | private: |
| 80 | /// Finds the display identified by the specified ID. | 78 | /// Finds the display identified by the specified ID. |
| 81 | Display& FindDisplay(u64 display_id); | 79 | VI::Display* FindDisplay(u64 display_id); |
| 82 | 80 | ||
| 83 | /// Finds the display identified by the specified ID. | 81 | /// Finds the display identified by the specified ID. |
| 84 | const Display& FindDisplay(u64 display_id) const; | 82 | const VI::Display* FindDisplay(u64 display_id) const; |
| 85 | 83 | ||
| 86 | /// Finds the layer identified by the specified ID in the desired display. | 84 | /// Finds the layer identified by the specified ID in the desired display. |
| 87 | Layer& FindLayer(u64 display_id, u64 layer_id); | 85 | VI::Layer* FindLayer(u64 display_id, u64 layer_id); |
| 88 | 86 | ||
| 89 | /// Finds the layer identified by the specified ID in the desired display. | 87 | /// Finds the layer identified by the specified ID in the desired display. |
| 90 | const Layer& FindLayer(u64 display_id, u64 layer_id) const; | 88 | const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const; |
| 91 | 89 | ||
| 92 | std::shared_ptr<Nvidia::Module> nvdrv; | 90 | std::shared_ptr<Nvidia::Module> nvdrv; |
| 93 | 91 | ||
| 94 | std::array<Display, 5> displays{{ | 92 | std::vector<VI::Display> displays; |
| 95 | {0, "Default"}, | 93 | std::vector<BufferQueue> buffer_queues; |
| 96 | {1, "External"}, | ||
| 97 | {2, "Edid"}, | ||
| 98 | {3, "Internal"}, | ||
| 99 | {4, "Null"}, | ||
| 100 | }}; | ||
| 101 | std::vector<std::shared_ptr<BufferQueue>> buffer_queues; | ||
| 102 | 94 | ||
| 103 | /// Id to use for the next layer that is created, this counter is shared among all displays. | 95 | /// Id to use for the next layer that is created, this counter is shared among all displays. |
| 104 | u64 next_layer_id = 1; | 96 | u64 next_layer_id = 1; |
| @@ -106,8 +98,11 @@ private: | |||
| 106 | /// layers. | 98 | /// layers. |
| 107 | u32 next_buffer_queue_id = 1; | 99 | u32 next_buffer_queue_id = 1; |
| 108 | 100 | ||
| 109 | /// CoreTiming event that handles screen composition. | 101 | /// Event that handles screen composition. |
| 110 | CoreTiming::EventType* composition_event; | 102 | Core::Timing::EventType* composition_event; |
| 103 | |||
| 104 | /// Core timing instance for registering/unregistering the composition event. | ||
| 105 | Core::Timing::CoreTiming& core_timing; | ||
| 111 | }; | 106 | }; |
| 112 | 107 | ||
| 113 | } // namespace Service::NVFlinger | 108 | } // namespace Service::NVFlinger |
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index d25b80ab0..00806b0ed 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | #include "core/hle/ipc.h" | 11 | #include "core/hle/ipc.h" |
| 12 | #include "core/hle/ipc_helpers.h" | 12 | #include "core/hle/ipc_helpers.h" |
| 13 | #include "core/hle/kernel/client_port.h" | 13 | #include "core/hle/kernel/client_port.h" |
| 14 | #include "core/hle/kernel/handle_table.h" | ||
| 15 | #include "core/hle/kernel/kernel.h" | 14 | #include "core/hle/kernel/kernel.h" |
| 16 | #include "core/hle/kernel/process.h" | 15 | #include "core/hle/kernel/process.h" |
| 17 | #include "core/hle/kernel/server_port.h" | 16 | #include "core/hle/kernel/server_port.h" |
| @@ -76,7 +75,8 @@ namespace Service { | |||
| 76 | * Creates a function string for logging, complete with the name (or header code, depending | 75 | * Creates a function string for logging, complete with the name (or header code, depending |
| 77 | * on what's passed in) the port name, and all the cmd_buff arguments. | 76 | * on what's passed in) the port name, and all the cmd_buff arguments. |
| 78 | */ | 77 | */ |
| 79 | [[maybe_unused]] static std::string MakeFunctionString(const char* name, const char* port_name, | 78 | [[maybe_unused]] static std::string MakeFunctionString(std::string_view name, |
| 79 | std::string_view port_name, | ||
| 80 | const u32* cmd_buff) { | 80 | const u32* cmd_buff) { |
| 81 | // Number of params == bits 0-5 + bits 6-11 | 81 | // Number of params == bits 0-5 + bits 6-11 |
| 82 | int num_params = (cmd_buff[0] & 0x3F) + ((cmd_buff[0] >> 6) & 0x3F); | 82 | int num_params = (cmd_buff[0] & 0x3F) + ((cmd_buff[0] >> 6) & 0x3F); |
| @@ -158,9 +158,7 @@ void ServiceFrameworkBase::InvokeRequest(Kernel::HLERequestContext& ctx) { | |||
| 158 | return ReportUnimplementedFunction(ctx, info); | 158 | return ReportUnimplementedFunction(ctx, info); |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | LOG_TRACE( | 161 | LOG_TRACE(Service, "{}", MakeFunctionString(info->name, GetServiceName(), ctx.CommandBuffer())); |
| 162 | Service, "{}", | ||
| 163 | MakeFunctionString(info->name, GetServiceName().c_str(), ctx.CommandBuffer()).c_str()); | ||
| 164 | handler_invoker(this, info->handler_callback, ctx); | 162 | handler_invoker(this, info->handler_callback, ctx); |
| 165 | } | 163 | } |
| 166 | 164 | ||
| @@ -169,7 +167,7 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co | |||
| 169 | case IPC::CommandType::Close: { | 167 | case IPC::CommandType::Close: { |
| 170 | IPC::ResponseBuilder rb{context, 2}; | 168 | IPC::ResponseBuilder rb{context, 2}; |
| 171 | rb.Push(RESULT_SUCCESS); | 169 | rb.Push(RESULT_SUCCESS); |
| 172 | return ResultCode(ErrorModule::HIPC, ErrorDescription::RemoteProcessDead); | 170 | return IPC::ERR_REMOTE_PROCESS_DEAD; |
| 173 | } | 171 | } |
| 174 | case IPC::CommandType::ControlWithContext: | 172 | case IPC::CommandType::ControlWithContext: |
| 175 | case IPC::CommandType::Control: { | 173 | case IPC::CommandType::Control: { |
| @@ -194,10 +192,11 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co | |||
| 194 | // Module interface | 192 | // Module interface |
| 195 | 193 | ||
| 196 | /// Initialize ServiceManager | 194 | /// Initialize ServiceManager |
| 197 | void Init(std::shared_ptr<SM::ServiceManager>& sm, FileSys::VfsFilesystem& vfs) { | 195 | void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system, |
| 196 | FileSys::VfsFilesystem& vfs) { | ||
| 198 | // NVFlinger needs to be accessed by several services like Vi and AppletOE so we instantiate it | 197 | // NVFlinger needs to be accessed by several services like Vi and AppletOE so we instantiate it |
| 199 | // here and pass it into the respective InstallInterfaces functions. | 198 | // here and pass it into the respective InstallInterfaces functions. |
| 200 | auto nv_flinger = std::make_shared<NVFlinger::NVFlinger>(); | 199 | auto nv_flinger = std::make_shared<NVFlinger::NVFlinger>(system.CoreTiming()); |
| 201 | 200 | ||
| 202 | SM::ServiceManager::InstallInterfaces(sm); | 201 | SM::ServiceManager::InstallInterfaces(sm); |
| 203 | 202 | ||
diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h index 029533628..830790269 100644 --- a/src/core/hle/service/service.h +++ b/src/core/hle/service/service.h | |||
| @@ -14,6 +14,14 @@ | |||
| 14 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 14 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 15 | // Namespace Service | 15 | // Namespace Service |
| 16 | 16 | ||
| 17 | namespace Core { | ||
| 18 | class System; | ||
| 19 | } | ||
| 20 | |||
| 21 | namespace FileSys { | ||
| 22 | class VfsFilesystem; | ||
| 23 | } | ||
| 24 | |||
| 17 | namespace Kernel { | 25 | namespace Kernel { |
| 18 | class ClientPort; | 26 | class ClientPort; |
| 19 | class ServerPort; | 27 | class ServerPort; |
| @@ -21,10 +29,6 @@ class ServerSession; | |||
| 21 | class HLERequestContext; | 29 | class HLERequestContext; |
| 22 | } // namespace Kernel | 30 | } // namespace Kernel |
| 23 | 31 | ||
| 24 | namespace FileSys { | ||
| 25 | class VfsFilesystem; | ||
| 26 | } | ||
| 27 | |||
| 28 | namespace Service { | 32 | namespace Service { |
| 29 | 33 | ||
| 30 | namespace SM { | 34 | namespace SM { |
| @@ -178,7 +182,8 @@ private: | |||
| 178 | }; | 182 | }; |
| 179 | 183 | ||
| 180 | /// Initialize ServiceManager | 184 | /// Initialize ServiceManager |
| 181 | void Init(std::shared_ptr<SM::ServiceManager>& sm, FileSys::VfsFilesystem& vfs); | 185 | void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system, |
| 186 | FileSys::VfsFilesystem& vfs); | ||
| 182 | 187 | ||
| 183 | /// Shutdown ServiceManager | 188 | /// Shutdown ServiceManager |
| 184 | void Shutdown(); | 189 | void Shutdown(); |
diff --git a/src/core/hle/service/sm/controller.cpp b/src/core/hle/service/sm/controller.cpp index 74da4d5e6..e9ee73710 100644 --- a/src/core/hle/service/sm/controller.cpp +++ b/src/core/hle/service/sm/controller.cpp | |||
| @@ -30,7 +30,7 @@ void Controller::DuplicateSession(Kernel::HLERequestContext& ctx) { | |||
| 30 | 30 | ||
| 31 | IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; | 31 | IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; |
| 32 | rb.Push(RESULT_SUCCESS); | 32 | rb.Push(RESULT_SUCCESS); |
| 33 | Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->parent->client}; | 33 | Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->GetParent()->client}; |
| 34 | rb.PushMoveObjects(session); | 34 | rb.PushMoveObjects(session); |
| 35 | 35 | ||
| 36 | LOG_DEBUG(Service, "session={}", session->GetObjectId()); | 36 | LOG_DEBUG(Service, "session={}", session->GetObjectId()); |
diff --git a/src/core/hle/service/sm/sm.h b/src/core/hle/service/sm/sm.h index bef25433e..b9d6381b4 100644 --- a/src/core/hle/service/sm/sm.h +++ b/src/core/hle/service/sm/sm.h | |||
| @@ -67,7 +67,7 @@ public: | |||
| 67 | if (port == nullptr) { | 67 | if (port == nullptr) { |
| 68 | return nullptr; | 68 | return nullptr; |
| 69 | } | 69 | } |
| 70 | return std::static_pointer_cast<T>(port->hle_handler); | 70 | return std::static_pointer_cast<T>(port->GetHLEHandler()); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | void InvokeControlRequest(Kernel::HLERequestContext& context); | 73 | void InvokeControlRequest(Kernel::HLERequestContext& context); |
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp index c13640ad8..aa115935d 100644 --- a/src/core/hle/service/time/time.cpp +++ b/src/core/hle/service/time/time.cpp | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <chrono> | 5 | #include <chrono> |
| 6 | #include <ctime> | 6 | #include <ctime> |
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "core/core.h" | ||
| 8 | #include "core/core_timing.h" | 9 | #include "core/core_timing.h" |
| 9 | #include "core/core_timing_util.h" | 10 | #include "core/core_timing_util.h" |
| 10 | #include "core/hle/ipc_helpers.h" | 11 | #include "core/hle/ipc_helpers.h" |
| @@ -106,8 +107,9 @@ private: | |||
| 106 | void GetCurrentTimePoint(Kernel::HLERequestContext& ctx) { | 107 | void GetCurrentTimePoint(Kernel::HLERequestContext& ctx) { |
| 107 | LOG_DEBUG(Service_Time, "called"); | 108 | LOG_DEBUG(Service_Time, "called"); |
| 108 | 109 | ||
| 109 | SteadyClockTimePoint steady_clock_time_point{ | 110 | const auto& core_timing = Core::System::GetInstance().CoreTiming(); |
| 110 | CoreTiming::cyclesToMs(CoreTiming::GetTicks()) / 1000}; | 111 | const SteadyClockTimePoint steady_clock_time_point{ |
| 112 | Core::Timing::cyclesToMs(core_timing.GetTicks()) / 1000}; | ||
| 111 | IPC::ResponseBuilder rb{ctx, (sizeof(SteadyClockTimePoint) / 4) + 2}; | 113 | IPC::ResponseBuilder rb{ctx, (sizeof(SteadyClockTimePoint) / 4) + 2}; |
| 112 | rb.Push(RESULT_SUCCESS); | 114 | rb.Push(RESULT_SUCCESS); |
| 113 | rb.PushRaw(steady_clock_time_point); | 115 | rb.PushRaw(steady_clock_time_point); |
| @@ -281,8 +283,9 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) { | |||
| 281 | return; | 283 | return; |
| 282 | } | 284 | } |
| 283 | 285 | ||
| 286 | const auto& core_timing = Core::System::GetInstance().CoreTiming(); | ||
| 284 | const SteadyClockTimePoint steady_clock_time_point{ | 287 | const SteadyClockTimePoint steady_clock_time_point{ |
| 285 | CoreTiming::cyclesToMs(CoreTiming::GetTicks()) / 1000, {}}; | 288 | Core::Timing::cyclesToMs(core_timing.GetTicks()) / 1000, {}}; |
| 286 | 289 | ||
| 287 | CalendarTime calendar_time{}; | 290 | CalendarTime calendar_time{}; |
| 288 | calendar_time.year = tm->tm_year + 1900; | 291 | calendar_time.year = tm->tm_year + 1900; |
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp new file mode 100644 index 000000000..01d80311b --- /dev/null +++ b/src/core/hle/service/vi/display/vi_display.cpp | |||
| @@ -0,0 +1,71 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <algorithm> | ||
| 6 | #include <utility> | ||
| 7 | |||
| 8 | #include <fmt/format.h> | ||
| 9 | |||
| 10 | #include "common/assert.h" | ||
| 11 | #include "core/core.h" | ||
| 12 | #include "core/hle/kernel/readable_event.h" | ||
| 13 | #include "core/hle/service/vi/display/vi_display.h" | ||
| 14 | #include "core/hle/service/vi/layer/vi_layer.h" | ||
| 15 | |||
| 16 | namespace Service::VI { | ||
| 17 | |||
| 18 | Display::Display(u64 id, std::string name) : id{id}, name{std::move(name)} { | ||
| 19 | auto& kernel = Core::System::GetInstance().Kernel(); | ||
| 20 | vsync_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Sticky, | ||
| 21 | fmt::format("Display VSync Event {}", id)); | ||
| 22 | } | ||
| 23 | |||
| 24 | Display::~Display() = default; | ||
| 25 | |||
| 26 | Layer& Display::GetLayer(std::size_t index) { | ||
| 27 | return layers.at(index); | ||
| 28 | } | ||
| 29 | |||
| 30 | const Layer& Display::GetLayer(std::size_t index) const { | ||
| 31 | return layers.at(index); | ||
| 32 | } | ||
| 33 | |||
| 34 | Kernel::SharedPtr<Kernel::ReadableEvent> Display::GetVSyncEvent() const { | ||
| 35 | return vsync_event.readable; | ||
| 36 | } | ||
| 37 | |||
| 38 | void Display::SignalVSyncEvent() { | ||
| 39 | vsync_event.writable->Signal(); | ||
| 40 | } | ||
| 41 | |||
| 42 | void Display::CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue) { | ||
| 43 | // TODO(Subv): Support more than 1 layer. | ||
| 44 | ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment"); | ||
| 45 | |||
| 46 | layers.emplace_back(id, buffer_queue); | ||
| 47 | } | ||
| 48 | |||
| 49 | Layer* Display::FindLayer(u64 id) { | ||
| 50 | const auto itr = std::find_if(layers.begin(), layers.end(), | ||
| 51 | [id](const VI::Layer& layer) { return layer.GetID() == id; }); | ||
| 52 | |||
| 53 | if (itr == layers.end()) { | ||
| 54 | return nullptr; | ||
| 55 | } | ||
| 56 | |||
| 57 | return &*itr; | ||
| 58 | } | ||
| 59 | |||
| 60 | const Layer* Display::FindLayer(u64 id) const { | ||
| 61 | const auto itr = std::find_if(layers.begin(), layers.end(), | ||
| 62 | [id](const VI::Layer& layer) { return layer.GetID() == id; }); | ||
| 63 | |||
| 64 | if (itr == layers.end()) { | ||
| 65 | return nullptr; | ||
| 66 | } | ||
| 67 | |||
| 68 | return &*itr; | ||
| 69 | } | ||
| 70 | |||
| 71 | } // namespace Service::VI | ||
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h new file mode 100644 index 000000000..2acd46ff8 --- /dev/null +++ b/src/core/hle/service/vi/display/vi_display.h | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <string> | ||
| 8 | #include <vector> | ||
| 9 | |||
| 10 | #include "common/common_types.h" | ||
| 11 | #include "core/hle/kernel/writable_event.h" | ||
| 12 | |||
| 13 | namespace Service::NVFlinger { | ||
| 14 | class BufferQueue; | ||
| 15 | } | ||
| 16 | |||
| 17 | namespace Service::VI { | ||
| 18 | |||
| 19 | class Layer; | ||
| 20 | |||
| 21 | /// Represents a single display type | ||
| 22 | class Display { | ||
| 23 | public: | ||
| 24 | /// Constructs a display with a given unique ID and name. | ||
| 25 | /// | ||
| 26 | /// @param id The unique ID for this display. | ||
| 27 | /// @param name The name for this display. | ||
| 28 | /// | ||
| 29 | Display(u64 id, std::string name); | ||
| 30 | ~Display(); | ||
| 31 | |||
| 32 | Display(const Display&) = delete; | ||
| 33 | Display& operator=(const Display&) = delete; | ||
| 34 | |||
| 35 | Display(Display&&) = default; | ||
| 36 | Display& operator=(Display&&) = default; | ||
| 37 | |||
| 38 | /// Gets the unique ID assigned to this display. | ||
| 39 | u64 GetID() const { | ||
| 40 | return id; | ||
| 41 | } | ||
| 42 | |||
| 43 | /// Gets the name of this display | ||
| 44 | const std::string& GetName() const { | ||
| 45 | return name; | ||
| 46 | } | ||
| 47 | |||
| 48 | /// Whether or not this display has any layers added to it. | ||
| 49 | bool HasLayers() const { | ||
| 50 | return !layers.empty(); | ||
| 51 | } | ||
| 52 | |||
| 53 | /// Gets a layer for this display based off an index. | ||
| 54 | Layer& GetLayer(std::size_t index); | ||
| 55 | |||
| 56 | /// Gets a layer for this display based off an index. | ||
| 57 | const Layer& GetLayer(std::size_t index) const; | ||
| 58 | |||
| 59 | /// Gets the readable vsync event. | ||
| 60 | Kernel::SharedPtr<Kernel::ReadableEvent> GetVSyncEvent() const; | ||
| 61 | |||
| 62 | /// Signals the internal vsync event. | ||
| 63 | void SignalVSyncEvent(); | ||
| 64 | |||
| 65 | /// Creates and adds a layer to this display with the given ID. | ||
| 66 | /// | ||
| 67 | /// @param id The ID to assign to the created layer. | ||
| 68 | /// @param buffer_queue The buffer queue for the layer instance to use. | ||
| 69 | /// | ||
| 70 | void CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue); | ||
| 71 | |||
| 72 | /// Attempts to find a layer with the given ID. | ||
| 73 | /// | ||
| 74 | /// @param id The layer ID. | ||
| 75 | /// | ||
| 76 | /// @returns If found, the Layer instance with the given ID. | ||
| 77 | /// If not found, then nullptr is returned. | ||
| 78 | /// | ||
| 79 | Layer* FindLayer(u64 id); | ||
| 80 | |||
| 81 | /// Attempts to find a layer with the given ID. | ||
| 82 | /// | ||
| 83 | /// @param id The layer ID. | ||
| 84 | /// | ||
| 85 | /// @returns If found, the Layer instance with the given ID. | ||
| 86 | /// If not found, then nullptr is returned. | ||
| 87 | /// | ||
| 88 | const Layer* FindLayer(u64 id) const; | ||
| 89 | |||
| 90 | private: | ||
| 91 | u64 id; | ||
| 92 | std::string name; | ||
| 93 | |||
| 94 | std::vector<Layer> layers; | ||
| 95 | Kernel::EventPair vsync_event; | ||
| 96 | }; | ||
| 97 | |||
| 98 | } // namespace Service::VI | ||
diff --git a/src/core/hle/service/vi/layer/vi_layer.cpp b/src/core/hle/service/vi/layer/vi_layer.cpp new file mode 100644 index 000000000..954225c26 --- /dev/null +++ b/src/core/hle/service/vi/layer/vi_layer.cpp | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "core/hle/service/vi/layer/vi_layer.h" | ||
| 6 | |||
| 7 | namespace Service::VI { | ||
| 8 | |||
| 9 | Layer::Layer(u64 id, NVFlinger::BufferQueue& queue) : id{id}, buffer_queue{queue} {} | ||
| 10 | |||
| 11 | Layer::~Layer() = default; | ||
| 12 | |||
| 13 | } // namespace Service::VI | ||
diff --git a/src/core/hle/service/vi/layer/vi_layer.h b/src/core/hle/service/vi/layer/vi_layer.h new file mode 100644 index 000000000..c6bfd01f6 --- /dev/null +++ b/src/core/hle/service/vi/layer/vi_layer.h | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | // Copyright 2019 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/common_types.h" | ||
| 8 | |||
| 9 | namespace Service::NVFlinger { | ||
| 10 | class BufferQueue; | ||
| 11 | } | ||
| 12 | |||
| 13 | namespace Service::VI { | ||
| 14 | |||
| 15 | /// Represents a single display layer. | ||
| 16 | class Layer { | ||
| 17 | public: | ||
| 18 | /// Constructs a layer with a given ID and buffer queue. | ||
| 19 | /// | ||
| 20 | /// @param id The ID to assign to this layer. | ||
| 21 | /// @param queue The buffer queue for this layer to use. | ||
| 22 | /// | ||
| 23 | Layer(u64 id, NVFlinger::BufferQueue& queue); | ||
| 24 | ~Layer(); | ||
| 25 | |||
| 26 | Layer(const Layer&) = delete; | ||
| 27 | Layer& operator=(const Layer&) = delete; | ||
| 28 | |||
| 29 | Layer(Layer&&) = default; | ||
| 30 | Layer& operator=(Layer&&) = delete; | ||
| 31 | |||
| 32 | /// Gets the ID for this layer. | ||
| 33 | u64 GetID() const { | ||
| 34 | return id; | ||
| 35 | } | ||
| 36 | |||
| 37 | /// Gets a reference to the buffer queue this layer is using. | ||
| 38 | NVFlinger::BufferQueue& GetBufferQueue() { | ||
| 39 | return buffer_queue; | ||
| 40 | } | ||
| 41 | |||
| 42 | /// Gets a const reference to the buffer queue this layer is using. | ||
| 43 | const NVFlinger::BufferQueue& GetBufferQueue() const { | ||
| 44 | return buffer_queue; | ||
| 45 | } | ||
| 46 | |||
| 47 | private: | ||
| 48 | u64 id; | ||
| 49 | NVFlinger::BufferQueue& buffer_queue; | ||
| 50 | }; | ||
| 51 | |||
| 52 | } // namespace Service::VI | ||
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index fe08c38f2..566cd6006 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include "core/hle/service/nvdrv/nvdrv.h" | 24 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 25 | #include "core/hle/service/nvflinger/buffer_queue.h" | 25 | #include "core/hle/service/nvflinger/buffer_queue.h" |
| 26 | #include "core/hle/service/nvflinger/nvflinger.h" | 26 | #include "core/hle/service/nvflinger/nvflinger.h" |
| 27 | #include "core/hle/service/service.h" | ||
| 27 | #include "core/hle/service/vi/vi.h" | 28 | #include "core/hle/service/vi/vi.h" |
| 28 | #include "core/hle/service/vi/vi_m.h" | 29 | #include "core/hle/service/vi/vi_m.h" |
| 29 | #include "core/hle/service/vi/vi_s.h" | 30 | #include "core/hle/service/vi/vi_s.h" |
| @@ -33,7 +34,9 @@ | |||
| 33 | namespace Service::VI { | 34 | namespace Service::VI { |
| 34 | 35 | ||
| 35 | constexpr ResultCode ERR_OPERATION_FAILED{ErrorModule::VI, 1}; | 36 | constexpr ResultCode ERR_OPERATION_FAILED{ErrorModule::VI, 1}; |
| 37 | constexpr ResultCode ERR_PERMISSION_DENIED{ErrorModule::VI, 5}; | ||
| 36 | constexpr ResultCode ERR_UNSUPPORTED{ErrorModule::VI, 6}; | 38 | constexpr ResultCode ERR_UNSUPPORTED{ErrorModule::VI, 6}; |
| 39 | constexpr ResultCode ERR_NOT_FOUND{ErrorModule::VI, 7}; | ||
| 37 | 40 | ||
| 38 | struct DisplayInfo { | 41 | struct DisplayInfo { |
| 39 | /// The name of this particular display. | 42 | /// The name of this particular display. |
| @@ -419,7 +422,7 @@ public: | |||
| 419 | u32_le fence_is_valid; | 422 | u32_le fence_is_valid; |
| 420 | std::array<Fence, 2> fences; | 423 | std::array<Fence, 2> fences; |
| 421 | 424 | ||
| 422 | MathUtil::Rectangle<int> GetCropRect() const { | 425 | Common::Rectangle<int> GetCropRect() const { |
| 423 | return {crop_left, crop_top, crop_right, crop_bottom}; | 426 | return {crop_left, crop_top, crop_right, crop_bottom}; |
| 424 | } | 427 | } |
| 425 | }; | 428 | }; |
| @@ -524,7 +527,7 @@ private: | |||
| 524 | LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id, | 527 | LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id, |
| 525 | static_cast<u32>(transaction), flags); | 528 | static_cast<u32>(transaction), flags); |
| 526 | 529 | ||
| 527 | auto buffer_queue = nv_flinger->FindBufferQueue(id); | 530 | auto& buffer_queue = nv_flinger->FindBufferQueue(id); |
| 528 | 531 | ||
| 529 | if (transaction == TransactionId::Connect) { | 532 | if (transaction == TransactionId::Connect) { |
| 530 | IGBPConnectRequestParcel request{ctx.ReadBuffer()}; | 533 | IGBPConnectRequestParcel request{ctx.ReadBuffer()}; |
| @@ -537,7 +540,7 @@ private: | |||
| 537 | } else if (transaction == TransactionId::SetPreallocatedBuffer) { | 540 | } else if (transaction == TransactionId::SetPreallocatedBuffer) { |
| 538 | IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()}; | 541 | IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()}; |
| 539 | 542 | ||
| 540 | buffer_queue->SetPreallocatedBuffer(request.data.slot, request.buffer); | 543 | buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer); |
| 541 | 544 | ||
| 542 | IGBPSetPreallocatedBufferResponseParcel response{}; | 545 | IGBPSetPreallocatedBufferResponseParcel response{}; |
| 543 | ctx.WriteBuffer(response.Serialize()); | 546 | ctx.WriteBuffer(response.Serialize()); |
| @@ -545,7 +548,7 @@ private: | |||
| 545 | IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()}; | 548 | IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()}; |
| 546 | const u32 width{request.data.width}; | 549 | const u32 width{request.data.width}; |
| 547 | const u32 height{request.data.height}; | 550 | const u32 height{request.data.height}; |
| 548 | std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height); | 551 | std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height); |
| 549 | 552 | ||
| 550 | if (slot) { | 553 | if (slot) { |
| 551 | // Buffer is available | 554 | // Buffer is available |
| @@ -558,8 +561,8 @@ private: | |||
| 558 | [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, | 561 | [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, |
| 559 | Kernel::ThreadWakeupReason reason) { | 562 | Kernel::ThreadWakeupReason reason) { |
| 560 | // Repeat TransactParcel DequeueBuffer when a buffer is available | 563 | // Repeat TransactParcel DequeueBuffer when a buffer is available |
| 561 | auto buffer_queue = nv_flinger->FindBufferQueue(id); | 564 | auto& buffer_queue = nv_flinger->FindBufferQueue(id); |
| 562 | std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height); | 565 | std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height); |
| 563 | ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer."); | 566 | ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer."); |
| 564 | 567 | ||
| 565 | IGBPDequeueBufferResponseParcel response{*slot}; | 568 | IGBPDequeueBufferResponseParcel response{*slot}; |
| @@ -567,28 +570,28 @@ private: | |||
| 567 | IPC::ResponseBuilder rb{ctx, 2}; | 570 | IPC::ResponseBuilder rb{ctx, 2}; |
| 568 | rb.Push(RESULT_SUCCESS); | 571 | rb.Push(RESULT_SUCCESS); |
| 569 | }, | 572 | }, |
| 570 | buffer_queue->GetWritableBufferWaitEvent()); | 573 | buffer_queue.GetWritableBufferWaitEvent()); |
| 571 | } | 574 | } |
| 572 | } else if (transaction == TransactionId::RequestBuffer) { | 575 | } else if (transaction == TransactionId::RequestBuffer) { |
| 573 | IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()}; | 576 | IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()}; |
| 574 | 577 | ||
| 575 | auto& buffer = buffer_queue->RequestBuffer(request.slot); | 578 | auto& buffer = buffer_queue.RequestBuffer(request.slot); |
| 576 | 579 | ||
| 577 | IGBPRequestBufferResponseParcel response{buffer}; | 580 | IGBPRequestBufferResponseParcel response{buffer}; |
| 578 | ctx.WriteBuffer(response.Serialize()); | 581 | ctx.WriteBuffer(response.Serialize()); |
| 579 | } else if (transaction == TransactionId::QueueBuffer) { | 582 | } else if (transaction == TransactionId::QueueBuffer) { |
| 580 | IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()}; | 583 | IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()}; |
| 581 | 584 | ||
| 582 | buffer_queue->QueueBuffer(request.data.slot, request.data.transform, | 585 | buffer_queue.QueueBuffer(request.data.slot, request.data.transform, |
| 583 | request.data.GetCropRect()); | 586 | request.data.GetCropRect()); |
| 584 | 587 | ||
| 585 | IGBPQueueBufferResponseParcel response{1280, 720}; | 588 | IGBPQueueBufferResponseParcel response{1280, 720}; |
| 586 | ctx.WriteBuffer(response.Serialize()); | 589 | ctx.WriteBuffer(response.Serialize()); |
| 587 | } else if (transaction == TransactionId::Query) { | 590 | } else if (transaction == TransactionId::Query) { |
| 588 | IGBPQueryRequestParcel request{ctx.ReadBuffer()}; | 591 | IGBPQueryRequestParcel request{ctx.ReadBuffer()}; |
| 589 | 592 | ||
| 590 | u32 value = | 593 | const u32 value = |
| 591 | buffer_queue->Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type)); | 594 | buffer_queue.Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type)); |
| 592 | 595 | ||
| 593 | IGBPQueryResponseParcel response{value}; | 596 | IGBPQueryResponseParcel response{value}; |
| 594 | ctx.WriteBuffer(response.Serialize()); | 597 | ctx.WriteBuffer(response.Serialize()); |
| @@ -628,12 +631,12 @@ private: | |||
| 628 | 631 | ||
| 629 | LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown); | 632 | LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown); |
| 630 | 633 | ||
| 631 | const auto buffer_queue = nv_flinger->FindBufferQueue(id); | 634 | const auto& buffer_queue = nv_flinger->FindBufferQueue(id); |
| 632 | 635 | ||
| 633 | // TODO(Subv): Find out what this actually is. | 636 | // TODO(Subv): Find out what this actually is. |
| 634 | IPC::ResponseBuilder rb{ctx, 2, 1}; | 637 | IPC::ResponseBuilder rb{ctx, 2, 1}; |
| 635 | rb.Push(RESULT_SUCCESS); | 638 | rb.Push(RESULT_SUCCESS); |
| 636 | rb.PushCopyObjects(buffer_queue->GetBufferWaitEvent()); | 639 | rb.PushCopyObjects(buffer_queue.GetBufferWaitEvent()); |
| 637 | } | 640 | } |
| 638 | 641 | ||
| 639 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | 642 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; |
| @@ -751,6 +754,7 @@ public: | |||
| 751 | {1102, nullptr, "GetDisplayResolution"}, | 754 | {1102, nullptr, "GetDisplayResolution"}, |
| 752 | {2010, &IManagerDisplayService::CreateManagedLayer, "CreateManagedLayer"}, | 755 | {2010, &IManagerDisplayService::CreateManagedLayer, "CreateManagedLayer"}, |
| 753 | {2011, nullptr, "DestroyManagedLayer"}, | 756 | {2011, nullptr, "DestroyManagedLayer"}, |
| 757 | {2012, nullptr, "CreateStrayLayer"}, | ||
| 754 | {2050, nullptr, "CreateIndirectLayer"}, | 758 | {2050, nullptr, "CreateIndirectLayer"}, |
| 755 | {2051, nullptr, "DestroyIndirectLayer"}, | 759 | {2051, nullptr, "DestroyIndirectLayer"}, |
| 756 | {2052, nullptr, "CreateIndirectProducerEndPoint"}, | 760 | {2052, nullptr, "CreateIndirectProducerEndPoint"}, |
| @@ -838,11 +842,16 @@ private: | |||
| 838 | "(STUBBED) called. unknown=0x{:08X}, display=0x{:016X}, aruid=0x{:016X}", | 842 | "(STUBBED) called. unknown=0x{:08X}, display=0x{:016X}, aruid=0x{:016X}", |
| 839 | unknown, display, aruid); | 843 | unknown, display, aruid); |
| 840 | 844 | ||
| 841 | const u64 layer_id = nv_flinger->CreateLayer(display); | 845 | const auto layer_id = nv_flinger->CreateLayer(display); |
| 846 | if (!layer_id) { | ||
| 847 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 848 | rb.Push(ERR_NOT_FOUND); | ||
| 849 | return; | ||
| 850 | } | ||
| 842 | 851 | ||
| 843 | IPC::ResponseBuilder rb{ctx, 4}; | 852 | IPC::ResponseBuilder rb{ctx, 4}; |
| 844 | rb.Push(RESULT_SUCCESS); | 853 | rb.Push(RESULT_SUCCESS); |
| 845 | rb.Push(layer_id); | 854 | rb.Push(*layer_id); |
| 846 | } | 855 | } |
| 847 | 856 | ||
| 848 | void AddToLayerStack(Kernel::HLERequestContext& ctx) { | 857 | void AddToLayerStack(Kernel::HLERequestContext& ctx) { |
| @@ -950,9 +959,16 @@ private: | |||
| 950 | 959 | ||
| 951 | ASSERT_MSG(name == "Default", "Non-default displays aren't supported yet"); | 960 | ASSERT_MSG(name == "Default", "Non-default displays aren't supported yet"); |
| 952 | 961 | ||
| 962 | const auto display_id = nv_flinger->OpenDisplay(name); | ||
| 963 | if (!display_id) { | ||
| 964 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 965 | rb.Push(ERR_NOT_FOUND); | ||
| 966 | return; | ||
| 967 | } | ||
| 968 | |||
| 953 | IPC::ResponseBuilder rb{ctx, 4}; | 969 | IPC::ResponseBuilder rb{ctx, 4}; |
| 954 | rb.Push(RESULT_SUCCESS); | 970 | rb.Push(RESULT_SUCCESS); |
| 955 | rb.Push<u64>(nv_flinger->OpenDisplay(name)); | 971 | rb.Push<u64>(*display_id); |
| 956 | } | 972 | } |
| 957 | 973 | ||
| 958 | void CloseDisplay(Kernel::HLERequestContext& ctx) { | 974 | void CloseDisplay(Kernel::HLERequestContext& ctx) { |
| @@ -1043,10 +1059,21 @@ private: | |||
| 1043 | 1059 | ||
| 1044 | LOG_DEBUG(Service_VI, "called. layer_id=0x{:016X}, aruid=0x{:016X}", layer_id, aruid); | 1060 | LOG_DEBUG(Service_VI, "called. layer_id=0x{:016X}, aruid=0x{:016X}", layer_id, aruid); |
| 1045 | 1061 | ||
| 1046 | const u64 display_id = nv_flinger->OpenDisplay(display_name); | 1062 | const auto display_id = nv_flinger->OpenDisplay(display_name); |
| 1047 | const u32 buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, layer_id); | 1063 | if (!display_id) { |
| 1064 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1065 | rb.Push(ERR_NOT_FOUND); | ||
| 1066 | return; | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | const auto buffer_queue_id = nv_flinger->FindBufferQueueId(*display_id, layer_id); | ||
| 1070 | if (!buffer_queue_id) { | ||
| 1071 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1072 | rb.Push(ERR_NOT_FOUND); | ||
| 1073 | return; | ||
| 1074 | } | ||
| 1048 | 1075 | ||
| 1049 | NativeWindow native_window{buffer_queue_id}; | 1076 | NativeWindow native_window{*buffer_queue_id}; |
| 1050 | IPC::ResponseBuilder rb{ctx, 4}; | 1077 | IPC::ResponseBuilder rb{ctx, 4}; |
| 1051 | rb.Push(RESULT_SUCCESS); | 1078 | rb.Push(RESULT_SUCCESS); |
| 1052 | rb.Push<u64>(ctx.WriteBuffer(native_window.Serialize())); | 1079 | rb.Push<u64>(ctx.WriteBuffer(native_window.Serialize())); |
| @@ -1062,13 +1089,24 @@ private: | |||
| 1062 | 1089 | ||
| 1063 | // TODO(Subv): What's the difference between a Stray and a Managed layer? | 1090 | // TODO(Subv): What's the difference between a Stray and a Managed layer? |
| 1064 | 1091 | ||
| 1065 | const u64 layer_id = nv_flinger->CreateLayer(display_id); | 1092 | const auto layer_id = nv_flinger->CreateLayer(display_id); |
| 1066 | const u32 buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, layer_id); | 1093 | if (!layer_id) { |
| 1094 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1095 | rb.Push(ERR_NOT_FOUND); | ||
| 1096 | return; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | const auto buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, *layer_id); | ||
| 1100 | if (!buffer_queue_id) { | ||
| 1101 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1102 | rb.Push(ERR_NOT_FOUND); | ||
| 1103 | return; | ||
| 1104 | } | ||
| 1067 | 1105 | ||
| 1068 | NativeWindow native_window{buffer_queue_id}; | 1106 | NativeWindow native_window{*buffer_queue_id}; |
| 1069 | IPC::ResponseBuilder rb{ctx, 6}; | 1107 | IPC::ResponseBuilder rb{ctx, 6}; |
| 1070 | rb.Push(RESULT_SUCCESS); | 1108 | rb.Push(RESULT_SUCCESS); |
| 1071 | rb.Push(layer_id); | 1109 | rb.Push(*layer_id); |
| 1072 | rb.Push<u64>(ctx.WriteBuffer(native_window.Serialize())); | 1110 | rb.Push<u64>(ctx.WriteBuffer(native_window.Serialize())); |
| 1073 | } | 1111 | } |
| 1074 | 1112 | ||
| @@ -1088,7 +1126,12 @@ private: | |||
| 1088 | 1126 | ||
| 1089 | LOG_WARNING(Service_VI, "(STUBBED) called. display_id=0x{:016X}", display_id); | 1127 | LOG_WARNING(Service_VI, "(STUBBED) called. display_id=0x{:016X}", display_id); |
| 1090 | 1128 | ||
| 1091 | const auto vsync_event = nv_flinger->GetVsyncEvent(display_id); | 1129 | const auto vsync_event = nv_flinger->FindVsyncEvent(display_id); |
| 1130 | if (!vsync_event) { | ||
| 1131 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1132 | rb.Push(ERR_NOT_FOUND); | ||
| 1133 | return; | ||
| 1134 | } | ||
| 1092 | 1135 | ||
| 1093 | IPC::ResponseBuilder rb{ctx, 2, 1}; | 1136 | IPC::ResponseBuilder rb{ctx, 2, 1}; |
| 1094 | rb.Push(RESULT_SUCCESS); | 1137 | rb.Push(RESULT_SUCCESS); |
| @@ -1162,26 +1205,40 @@ IApplicationDisplayService::IApplicationDisplayService( | |||
| 1162 | RegisterHandlers(functions); | 1205 | RegisterHandlers(functions); |
| 1163 | } | 1206 | } |
| 1164 | 1207 | ||
| 1165 | Module::Interface::Interface(std::shared_ptr<Module> module, const char* name, | 1208 | static bool IsValidServiceAccess(Permission permission, Policy policy) { |
| 1166 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 1209 | if (permission == Permission::User) { |
| 1167 | : ServiceFramework(name), module(std::move(module)), nv_flinger(std::move(nv_flinger)) {} | 1210 | return policy == Policy::User; |
| 1211 | } | ||
| 1212 | |||
| 1213 | if (permission == Permission::System || permission == Permission::Manager) { | ||
| 1214 | return policy == Policy::User || policy == Policy::Compositor; | ||
| 1215 | } | ||
| 1168 | 1216 | ||
| 1169 | Module::Interface::~Interface() = default; | 1217 | return false; |
| 1218 | } | ||
| 1170 | 1219 | ||
| 1171 | void Module::Interface::GetDisplayService(Kernel::HLERequestContext& ctx) { | 1220 | void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, |
| 1172 | LOG_WARNING(Service_VI, "(STUBBED) called"); | 1221 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger, |
| 1222 | Permission permission) { | ||
| 1223 | IPC::RequestParser rp{ctx}; | ||
| 1224 | const auto policy = rp.PopEnum<Policy>(); | ||
| 1225 | |||
| 1226 | if (!IsValidServiceAccess(permission, policy)) { | ||
| 1227 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 1228 | rb.Push(ERR_PERMISSION_DENIED); | ||
| 1229 | return; | ||
| 1230 | } | ||
| 1173 | 1231 | ||
| 1174 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | 1232 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; |
| 1175 | rb.Push(RESULT_SUCCESS); | 1233 | rb.Push(RESULT_SUCCESS); |
| 1176 | rb.PushIpcInterface<IApplicationDisplayService>(nv_flinger); | 1234 | rb.PushIpcInterface<IApplicationDisplayService>(std::move(nv_flinger)); |
| 1177 | } | 1235 | } |
| 1178 | 1236 | ||
| 1179 | void InstallInterfaces(SM::ServiceManager& service_manager, | 1237 | void InstallInterfaces(SM::ServiceManager& service_manager, |
| 1180 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) { | 1238 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) { |
| 1181 | auto module = std::make_shared<Module>(); | 1239 | std::make_shared<VI_M>(nv_flinger)->InstallAsService(service_manager); |
| 1182 | std::make_shared<VI_M>(module, nv_flinger)->InstallAsService(service_manager); | 1240 | std::make_shared<VI_S>(nv_flinger)->InstallAsService(service_manager); |
| 1183 | std::make_shared<VI_S>(module, nv_flinger)->InstallAsService(service_manager); | 1241 | std::make_shared<VI_U>(nv_flinger)->InstallAsService(service_manager); |
| 1184 | std::make_shared<VI_U>(module, nv_flinger)->InstallAsService(service_manager); | ||
| 1185 | } | 1242 | } |
| 1186 | 1243 | ||
| 1187 | } // namespace Service::VI | 1244 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi.h b/src/core/hle/service/vi/vi.h index e3963502a..6b66f8b81 100644 --- a/src/core/hle/service/vi/vi.h +++ b/src/core/hle/service/vi/vi.h | |||
| @@ -4,12 +4,21 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/service.h" | 7 | #include <memory> |
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | namespace Kernel { | ||
| 11 | class HLERequestContext; | ||
| 12 | } | ||
| 8 | 13 | ||
| 9 | namespace Service::NVFlinger { | 14 | namespace Service::NVFlinger { |
| 10 | class NVFlinger; | 15 | class NVFlinger; |
| 11 | } | 16 | } |
| 12 | 17 | ||
| 18 | namespace Service::SM { | ||
| 19 | class ServiceManager; | ||
| 20 | } | ||
| 21 | |||
| 13 | namespace Service::VI { | 22 | namespace Service::VI { |
| 14 | 23 | ||
| 15 | enum class DisplayResolution : u32 { | 24 | enum class DisplayResolution : u32 { |
| @@ -19,22 +28,25 @@ enum class DisplayResolution : u32 { | |||
| 19 | UndockedHeight = 720, | 28 | UndockedHeight = 720, |
| 20 | }; | 29 | }; |
| 21 | 30 | ||
| 22 | class Module final { | 31 | /// Permission level for a particular VI service instance |
| 23 | public: | 32 | enum class Permission { |
| 24 | class Interface : public ServiceFramework<Interface> { | 33 | User, |
| 25 | public: | 34 | System, |
| 26 | explicit Interface(std::shared_ptr<Module> module, const char* name, | 35 | Manager, |
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 36 | }; |
| 28 | ~Interface() override; | ||
| 29 | |||
| 30 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 31 | 37 | ||
| 32 | protected: | 38 | /// A policy type that may be requested via GetDisplayService and |
| 33 | std::shared_ptr<Module> module; | 39 | /// GetDisplayServiceWithProxyNameExchange |
| 34 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | 40 | enum class Policy { |
| 35 | }; | 41 | User, |
| 42 | Compositor, | ||
| 36 | }; | 43 | }; |
| 37 | 44 | ||
| 45 | namespace detail { | ||
| 46 | void GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, | ||
| 47 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger, Permission permission); | ||
| 48 | } // namespace detail | ||
| 49 | |||
| 38 | /// Registers all VI services with the specified service manager. | 50 | /// Registers all VI services with the specified service manager. |
| 39 | void InstallInterfaces(SM::ServiceManager& service_manager, | 51 | void InstallInterfaces(SM::ServiceManager& service_manager, |
| 40 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 52 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
diff --git a/src/core/hle/service/vi/vi_m.cpp b/src/core/hle/service/vi/vi_m.cpp index 207c06b16..06070087f 100644 --- a/src/core/hle/service/vi/vi_m.cpp +++ b/src/core/hle/service/vi/vi_m.cpp | |||
| @@ -2,12 +2,14 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/logging/log.h" | ||
| 6 | #include "core/hle/service/vi/vi.h" | ||
| 5 | #include "core/hle/service/vi/vi_m.h" | 7 | #include "core/hle/service/vi/vi_m.h" |
| 6 | 8 | ||
| 7 | namespace Service::VI { | 9 | namespace Service::VI { |
| 8 | 10 | ||
| 9 | VI_M::VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 11 | VI_M::VI_M(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) |
| 10 | : Module::Interface(std::move(module), "vi:m", std::move(nv_flinger)) { | 12 | : ServiceFramework{"vi:m"}, nv_flinger{std::move(nv_flinger)} { |
| 11 | static const FunctionInfo functions[] = { | 13 | static const FunctionInfo functions[] = { |
| 12 | {2, &VI_M::GetDisplayService, "GetDisplayService"}, | 14 | {2, &VI_M::GetDisplayService, "GetDisplayService"}, |
| 13 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, | 15 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, |
| @@ -17,4 +19,10 @@ VI_M::VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> | |||
| 17 | 19 | ||
| 18 | VI_M::~VI_M() = default; | 20 | VI_M::~VI_M() = default; |
| 19 | 21 | ||
| 22 | void VI_M::GetDisplayService(Kernel::HLERequestContext& ctx) { | ||
| 23 | LOG_DEBUG(Service_VI, "called"); | ||
| 24 | |||
| 25 | detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::Manager); | ||
| 26 | } | ||
| 27 | |||
| 20 | } // namespace Service::VI | 28 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_m.h b/src/core/hle/service/vi/vi_m.h index 487d58d50..290e06689 100644 --- a/src/core/hle/service/vi/vi_m.h +++ b/src/core/hle/service/vi/vi_m.h | |||
| @@ -4,14 +4,27 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/vi/vi.h" | 7 | #include "core/hle/service/service.h" |
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | class HLERequestContext; | ||
| 11 | } | ||
| 12 | |||
| 13 | namespace Service::NVFlinger { | ||
| 14 | class NVFlinger; | ||
| 15 | } | ||
| 8 | 16 | ||
| 9 | namespace Service::VI { | 17 | namespace Service::VI { |
| 10 | 18 | ||
| 11 | class VI_M final : public Module::Interface { | 19 | class VI_M final : public ServiceFramework<VI_M> { |
| 12 | public: | 20 | public: |
| 13 | explicit VI_M(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 21 | explicit VI_M(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
| 14 | ~VI_M() override; | 22 | ~VI_M() override; |
| 23 | |||
| 24 | private: | ||
| 25 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 26 | |||
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | ||
| 15 | }; | 28 | }; |
| 16 | 29 | ||
| 17 | } // namespace Service::VI | 30 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_s.cpp b/src/core/hle/service/vi/vi_s.cpp index 920e6a1f6..57c596cc4 100644 --- a/src/core/hle/service/vi/vi_s.cpp +++ b/src/core/hle/service/vi/vi_s.cpp | |||
| @@ -2,12 +2,14 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/logging/log.h" | ||
| 6 | #include "core/hle/service/vi/vi.h" | ||
| 5 | #include "core/hle/service/vi/vi_s.h" | 7 | #include "core/hle/service/vi/vi_s.h" |
| 6 | 8 | ||
| 7 | namespace Service::VI { | 9 | namespace Service::VI { |
| 8 | 10 | ||
| 9 | VI_S::VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 11 | VI_S::VI_S(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) |
| 10 | : Module::Interface(std::move(module), "vi:s", std::move(nv_flinger)) { | 12 | : ServiceFramework{"vi:s"}, nv_flinger{std::move(nv_flinger)} { |
| 11 | static const FunctionInfo functions[] = { | 13 | static const FunctionInfo functions[] = { |
| 12 | {1, &VI_S::GetDisplayService, "GetDisplayService"}, | 14 | {1, &VI_S::GetDisplayService, "GetDisplayService"}, |
| 13 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, | 15 | {3, nullptr, "GetDisplayServiceWithProxyNameExchange"}, |
| @@ -17,4 +19,10 @@ VI_S::VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> | |||
| 17 | 19 | ||
| 18 | VI_S::~VI_S() = default; | 20 | VI_S::~VI_S() = default; |
| 19 | 21 | ||
| 22 | void VI_S::GetDisplayService(Kernel::HLERequestContext& ctx) { | ||
| 23 | LOG_DEBUG(Service_VI, "called"); | ||
| 24 | |||
| 25 | detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::System); | ||
| 26 | } | ||
| 27 | |||
| 20 | } // namespace Service::VI | 28 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_s.h b/src/core/hle/service/vi/vi_s.h index bbc31148f..47804dc0b 100644 --- a/src/core/hle/service/vi/vi_s.h +++ b/src/core/hle/service/vi/vi_s.h | |||
| @@ -4,14 +4,27 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/vi/vi.h" | 7 | #include "core/hle/service/service.h" |
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | class HLERequestContext; | ||
| 11 | } | ||
| 12 | |||
| 13 | namespace Service::NVFlinger { | ||
| 14 | class NVFlinger; | ||
| 15 | } | ||
| 8 | 16 | ||
| 9 | namespace Service::VI { | 17 | namespace Service::VI { |
| 10 | 18 | ||
| 11 | class VI_S final : public Module::Interface { | 19 | class VI_S final : public ServiceFramework<VI_S> { |
| 12 | public: | 20 | public: |
| 13 | explicit VI_S(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 21 | explicit VI_S(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
| 14 | ~VI_S() override; | 22 | ~VI_S() override; |
| 23 | |||
| 24 | private: | ||
| 25 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 26 | |||
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | ||
| 15 | }; | 28 | }; |
| 16 | 29 | ||
| 17 | } // namespace Service::VI | 30 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_u.cpp b/src/core/hle/service/vi/vi_u.cpp index d81e410d6..9d5ceb608 100644 --- a/src/core/hle/service/vi/vi_u.cpp +++ b/src/core/hle/service/vi/vi_u.cpp | |||
| @@ -2,12 +2,14 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/logging/log.h" | ||
| 6 | #include "core/hle/service/vi/vi.h" | ||
| 5 | #include "core/hle/service/vi/vi_u.h" | 7 | #include "core/hle/service/vi/vi_u.h" |
| 6 | 8 | ||
| 7 | namespace Service::VI { | 9 | namespace Service::VI { |
| 8 | 10 | ||
| 9 | VI_U::VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) | 11 | VI_U::VI_U(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) |
| 10 | : Module::Interface(std::move(module), "vi:u", std::move(nv_flinger)) { | 12 | : ServiceFramework{"vi:u"}, nv_flinger{std::move(nv_flinger)} { |
| 11 | static const FunctionInfo functions[] = { | 13 | static const FunctionInfo functions[] = { |
| 12 | {0, &VI_U::GetDisplayService, "GetDisplayService"}, | 14 | {0, &VI_U::GetDisplayService, "GetDisplayService"}, |
| 13 | }; | 15 | }; |
| @@ -16,4 +18,10 @@ VI_U::VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> | |||
| 16 | 18 | ||
| 17 | VI_U::~VI_U() = default; | 19 | VI_U::~VI_U() = default; |
| 18 | 20 | ||
| 21 | void VI_U::GetDisplayService(Kernel::HLERequestContext& ctx) { | ||
| 22 | LOG_DEBUG(Service_VI, "called"); | ||
| 23 | |||
| 24 | detail::GetDisplayServiceImpl(ctx, nv_flinger, Permission::User); | ||
| 25 | } | ||
| 26 | |||
| 19 | } // namespace Service::VI | 27 | } // namespace Service::VI |
diff --git a/src/core/hle/service/vi/vi_u.h b/src/core/hle/service/vi/vi_u.h index b92f28c92..19bdb73b0 100644 --- a/src/core/hle/service/vi/vi_u.h +++ b/src/core/hle/service/vi/vi_u.h | |||
| @@ -4,14 +4,27 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "core/hle/service/vi/vi.h" | 7 | #include "core/hle/service/service.h" |
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | class HLERequestContext; | ||
| 11 | } | ||
| 12 | |||
| 13 | namespace Service::NVFlinger { | ||
| 14 | class NVFlinger; | ||
| 15 | } | ||
| 8 | 16 | ||
| 9 | namespace Service::VI { | 17 | namespace Service::VI { |
| 10 | 18 | ||
| 11 | class VI_U final : public Module::Interface { | 19 | class VI_U final : public ServiceFramework<VI_U> { |
| 12 | public: | 20 | public: |
| 13 | explicit VI_U(std::shared_ptr<Module> module, std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); | 21 | explicit VI_U(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger); |
| 14 | ~VI_U() override; | 22 | ~VI_U() override; |
| 23 | |||
| 24 | private: | ||
| 25 | void GetDisplayService(Kernel::HLERequestContext& ctx); | ||
| 26 | |||
| 27 | std::shared_ptr<NVFlinger::NVFlinger> nv_flinger; | ||
| 15 | }; | 28 | }; |
| 16 | 29 | ||
| 17 | } // namespace Service::VI | 30 | } // namespace Service::VI |
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp index 6057c7f26..8b1920f22 100644 --- a/src/core/loader/elf.cpp +++ b/src/core/loader/elf.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "common/file_util.h" | 10 | #include "common/file_util.h" |
| 11 | #include "common/logging/log.h" | 11 | #include "common/logging/log.h" |
| 12 | #include "core/hle/kernel/code_set.h" | ||
| 12 | #include "core/hle/kernel/process.h" | 13 | #include "core/hle/kernel/process.h" |
| 13 | #include "core/hle/kernel/vm_manager.h" | 14 | #include "core/hle/kernel/vm_manager.h" |
| 14 | #include "core/loader/elf.h" | 15 | #include "core/loader/elf.h" |
diff --git a/src/core/loader/linker.cpp b/src/core/loader/linker.cpp deleted file mode 100644 index 57ca8c3ee..000000000 --- a/src/core/loader/linker.cpp +++ /dev/null | |||
| @@ -1,147 +0,0 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <vector> | ||
| 6 | |||
| 7 | #include "common/common_funcs.h" | ||
| 8 | #include "common/logging/log.h" | ||
| 9 | #include "common/swap.h" | ||
| 10 | #include "core/loader/linker.h" | ||
| 11 | #include "core/memory.h" | ||
| 12 | |||
| 13 | namespace Loader { | ||
| 14 | |||
| 15 | enum class RelocationType : u32 { ABS64 = 257, GLOB_DAT = 1025, JUMP_SLOT = 1026, RELATIVE = 1027 }; | ||
| 16 | |||
| 17 | enum DynamicType : u32 { | ||
| 18 | DT_NULL = 0, | ||
| 19 | DT_PLTRELSZ = 2, | ||
| 20 | DT_STRTAB = 5, | ||
| 21 | DT_SYMTAB = 6, | ||
| 22 | DT_RELA = 7, | ||
| 23 | DT_RELASZ = 8, | ||
| 24 | DT_STRSZ = 10, | ||
| 25 | DT_JMPREL = 23, | ||
| 26 | }; | ||
| 27 | |||
| 28 | struct Elf64_Rela { | ||
| 29 | u64_le offset; | ||
| 30 | RelocationType type; | ||
| 31 | u32_le symbol; | ||
| 32 | s64_le addend; | ||
| 33 | }; | ||
| 34 | static_assert(sizeof(Elf64_Rela) == 0x18, "Elf64_Rela has incorrect size."); | ||
| 35 | |||
| 36 | struct Elf64_Dyn { | ||
| 37 | u64_le tag; | ||
| 38 | u64_le value; | ||
| 39 | }; | ||
| 40 | static_assert(sizeof(Elf64_Dyn) == 0x10, "Elf64_Dyn has incorrect size."); | ||
| 41 | |||
| 42 | struct Elf64_Sym { | ||
| 43 | u32_le name; | ||
| 44 | INSERT_PADDING_BYTES(0x2); | ||
| 45 | u16_le shndx; | ||
| 46 | u64_le value; | ||
| 47 | u64_le size; | ||
| 48 | }; | ||
| 49 | static_assert(sizeof(Elf64_Sym) == 0x18, "Elf64_Sym has incorrect size."); | ||
| 50 | |||
| 51 | void Linker::WriteRelocations(std::vector<u8>& program_image, const std::vector<Symbol>& symbols, | ||
| 52 | u64 relocation_offset, u64 size, VAddr load_base) { | ||
| 53 | for (u64 i = 0; i < size; i += sizeof(Elf64_Rela)) { | ||
| 54 | Elf64_Rela rela; | ||
| 55 | std::memcpy(&rela, &program_image[relocation_offset + i], sizeof(Elf64_Rela)); | ||
| 56 | |||
| 57 | const Symbol& symbol = symbols[rela.symbol]; | ||
| 58 | switch (rela.type) { | ||
| 59 | case RelocationType::RELATIVE: { | ||
| 60 | const u64 value = load_base + rela.addend; | ||
| 61 | if (!symbol.name.empty()) { | ||
| 62 | exports[symbol.name] = value; | ||
| 63 | } | ||
| 64 | std::memcpy(&program_image[rela.offset], &value, sizeof(u64)); | ||
| 65 | break; | ||
| 66 | } | ||
| 67 | case RelocationType::JUMP_SLOT: | ||
| 68 | case RelocationType::GLOB_DAT: | ||
| 69 | if (!symbol.value) { | ||
| 70 | imports[symbol.name] = {rela.offset + load_base, 0}; | ||
| 71 | } else { | ||
| 72 | exports[symbol.name] = symbol.value; | ||
| 73 | std::memcpy(&program_image[rela.offset], &symbol.value, sizeof(u64)); | ||
| 74 | } | ||
| 75 | break; | ||
| 76 | case RelocationType::ABS64: | ||
| 77 | if (!symbol.value) { | ||
| 78 | imports[symbol.name] = {rela.offset + load_base, rela.addend}; | ||
| 79 | } else { | ||
| 80 | const u64 value = symbol.value + rela.addend; | ||
| 81 | exports[symbol.name] = value; | ||
| 82 | std::memcpy(&program_image[rela.offset], &value, sizeof(u64)); | ||
| 83 | } | ||
| 84 | break; | ||
| 85 | default: | ||
| 86 | LOG_CRITICAL(Loader, "Unknown relocation type: {}", static_cast<int>(rela.type)); | ||
| 87 | break; | ||
| 88 | } | ||
| 89 | } | ||
| 90 | } | ||
| 91 | |||
| 92 | void Linker::Relocate(std::vector<u8>& program_image, u32 dynamic_section_offset, VAddr load_base) { | ||
| 93 | std::map<u64, u64> dynamic; | ||
| 94 | while (dynamic_section_offset < program_image.size()) { | ||
| 95 | Elf64_Dyn dyn; | ||
| 96 | std::memcpy(&dyn, &program_image[dynamic_section_offset], sizeof(Elf64_Dyn)); | ||
| 97 | dynamic_section_offset += sizeof(Elf64_Dyn); | ||
| 98 | |||
| 99 | if (dyn.tag == DT_NULL) { | ||
| 100 | break; | ||
| 101 | } | ||
| 102 | dynamic[dyn.tag] = dyn.value; | ||
| 103 | } | ||
| 104 | |||
| 105 | u64 offset = dynamic[DT_SYMTAB]; | ||
| 106 | std::vector<Symbol> symbols; | ||
| 107 | while (offset < program_image.size()) { | ||
| 108 | Elf64_Sym sym; | ||
| 109 | std::memcpy(&sym, &program_image[offset], sizeof(Elf64_Sym)); | ||
| 110 | offset += sizeof(Elf64_Sym); | ||
| 111 | |||
| 112 | if (sym.name >= dynamic[DT_STRSZ]) { | ||
| 113 | break; | ||
| 114 | } | ||
| 115 | |||
| 116 | std::string name = reinterpret_cast<char*>(&program_image[dynamic[DT_STRTAB] + sym.name]); | ||
| 117 | if (sym.value) { | ||
| 118 | exports[name] = load_base + sym.value; | ||
| 119 | symbols.emplace_back(std::move(name), load_base + sym.value); | ||
| 120 | } else { | ||
| 121 | symbols.emplace_back(std::move(name), 0); | ||
| 122 | } | ||
| 123 | } | ||
| 124 | |||
| 125 | if (dynamic.find(DT_RELA) != dynamic.end()) { | ||
| 126 | WriteRelocations(program_image, symbols, dynamic[DT_RELA], dynamic[DT_RELASZ], load_base); | ||
| 127 | } | ||
| 128 | |||
| 129 | if (dynamic.find(DT_JMPREL) != dynamic.end()) { | ||
| 130 | WriteRelocations(program_image, symbols, dynamic[DT_JMPREL], dynamic[DT_PLTRELSZ], | ||
| 131 | load_base); | ||
| 132 | } | ||
| 133 | } | ||
| 134 | |||
| 135 | void Linker::ResolveImports() { | ||
| 136 | // Resolve imports | ||
| 137 | for (const auto& import : imports) { | ||
| 138 | const auto& search = exports.find(import.first); | ||
| 139 | if (search != exports.end()) { | ||
| 140 | Memory::Write64(import.second.ea, search->second + import.second.addend); | ||
| 141 | } else { | ||
| 142 | LOG_ERROR(Loader, "Unresolved import: {}", import.first); | ||
| 143 | } | ||
| 144 | } | ||
| 145 | } | ||
| 146 | |||
| 147 | } // namespace Loader | ||
diff --git a/src/core/loader/linker.h b/src/core/loader/linker.h deleted file mode 100644 index 107625837..000000000 --- a/src/core/loader/linker.h +++ /dev/null | |||
| @@ -1,36 +0,0 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <map> | ||
| 8 | #include <string> | ||
| 9 | #include "common/common_types.h" | ||
| 10 | |||
| 11 | namespace Loader { | ||
| 12 | |||
| 13 | class Linker { | ||
| 14 | protected: | ||
| 15 | struct Symbol { | ||
| 16 | Symbol(std::string&& name, u64 value) : name(std::move(name)), value(value) {} | ||
| 17 | std::string name; | ||
| 18 | u64 value; | ||
| 19 | }; | ||
| 20 | |||
| 21 | struct Import { | ||
| 22 | VAddr ea; | ||
| 23 | s64 addend; | ||
| 24 | }; | ||
| 25 | |||
| 26 | void WriteRelocations(std::vector<u8>& program_image, const std::vector<Symbol>& symbols, | ||
| 27 | u64 relocation_offset, u64 size, VAddr load_base); | ||
| 28 | void Relocate(std::vector<u8>& program_image, u32 dynamic_section_offset, VAddr load_base); | ||
| 29 | |||
| 30 | void ResolveImports(); | ||
| 31 | |||
| 32 | std::map<std::string, Import> imports; | ||
| 33 | std::map<std::string, VAddr> exports; | ||
| 34 | }; | ||
| 35 | |||
| 36 | } // namespace Loader | ||
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp index 4fad0c0dd..5de02a94b 100644 --- a/src/core/loader/nro.cpp +++ b/src/core/loader/nro.cpp | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include "core/file_sys/romfs_factory.h" | 14 | #include "core/file_sys/romfs_factory.h" |
| 15 | #include "core/file_sys/vfs_offset.h" | 15 | #include "core/file_sys/vfs_offset.h" |
| 16 | #include "core/gdbstub/gdbstub.h" | 16 | #include "core/gdbstub/gdbstub.h" |
| 17 | #include "core/hle/kernel/code_set.h" | ||
| 17 | #include "core/hle/kernel/process.h" | 18 | #include "core/hle/kernel/process.h" |
| 18 | #include "core/hle/kernel/vm_manager.h" | 19 | #include "core/hle/kernel/vm_manager.h" |
| 19 | #include "core/hle/service/filesystem/filesystem.h" | 20 | #include "core/hle/service/filesystem/filesystem.h" |
diff --git a/src/core/loader/nro.h b/src/core/loader/nro.h index 013d629c0..85b0ed644 100644 --- a/src/core/loader/nro.h +++ b/src/core/loader/nro.h | |||
| @@ -4,10 +4,10 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <memory> | ||
| 7 | #include <string> | 8 | #include <string> |
| 8 | #include <vector> | 9 | #include <vector> |
| 9 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 10 | #include "core/loader/linker.h" | ||
| 11 | #include "core/loader/loader.h" | 11 | #include "core/loader/loader.h" |
| 12 | 12 | ||
| 13 | namespace FileSys { | 13 | namespace FileSys { |
| @@ -21,7 +21,7 @@ class Process; | |||
| 21 | namespace Loader { | 21 | namespace Loader { |
| 22 | 22 | ||
| 23 | /// Loads an NRO file | 23 | /// Loads an NRO file |
| 24 | class AppLoader_NRO final : public AppLoader, Linker { | 24 | class AppLoader_NRO final : public AppLoader { |
| 25 | public: | 25 | public: |
| 26 | explicit AppLoader_NRO(FileSys::VirtualFile file); | 26 | explicit AppLoader_NRO(FileSys::VirtualFile file); |
| 27 | ~AppLoader_NRO() override; | 27 | ~AppLoader_NRO() override; |
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 6ded0b707..e1c8908a1 100644 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "common/swap.h" | 11 | #include "common/swap.h" |
| 12 | #include "core/file_sys/patch_manager.h" | 12 | #include "core/file_sys/patch_manager.h" |
| 13 | #include "core/gdbstub/gdbstub.h" | 13 | #include "core/gdbstub/gdbstub.h" |
| 14 | #include "core/hle/kernel/code_set.h" | ||
| 14 | #include "core/hle/kernel/process.h" | 15 | #include "core/hle/kernel/process.h" |
| 15 | #include "core/hle/kernel/vm_manager.h" | 16 | #include "core/hle/kernel/vm_manager.h" |
| 16 | #include "core/loader/nso.h" | 17 | #include "core/loader/nso.h" |
diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h index 135b6ea5a..167c8a694 100644 --- a/src/core/loader/nso.h +++ b/src/core/loader/nso.h | |||
| @@ -6,8 +6,8 @@ | |||
| 6 | 6 | ||
| 7 | #include <optional> | 7 | #include <optional> |
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "common/swap.h" | ||
| 9 | #include "core/file_sys/patch_manager.h" | 10 | #include "core/file_sys/patch_manager.h" |
| 10 | #include "core/loader/linker.h" | ||
| 11 | #include "core/loader/loader.h" | 11 | #include "core/loader/loader.h" |
| 12 | 12 | ||
| 13 | namespace Kernel { | 13 | namespace Kernel { |
| @@ -26,7 +26,7 @@ struct NSOArgumentHeader { | |||
| 26 | static_assert(sizeof(NSOArgumentHeader) == 0x20, "NSOArgumentHeader has incorrect size."); | 26 | static_assert(sizeof(NSOArgumentHeader) == 0x20, "NSOArgumentHeader has incorrect size."); |
| 27 | 27 | ||
| 28 | /// Loads an NSO file | 28 | /// Loads an NSO file |
| 29 | class AppLoader_NSO final : public AppLoader, Linker { | 29 | class AppLoader_NSO final : public AppLoader { |
| 30 | public: | 30 | public: |
| 31 | explicit AppLoader_NSO(FileSys::VirtualFile file); | 31 | explicit AppLoader_NSO(FileSys::VirtualFile file); |
| 32 | 32 | ||
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index e9166dbd9..365ac82b4 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/logging/log.h" | 12 | #include "common/logging/log.h" |
| 13 | #include "common/page_table.h" | ||
| 13 | #include "common/swap.h" | 14 | #include "common/swap.h" |
| 14 | #include "core/arm/arm_interface.h" | 15 | #include "core/arm/arm_interface.h" |
| 15 | #include "core/core.h" | 16 | #include "core/core.h" |
| @@ -18,13 +19,14 @@ | |||
| 18 | #include "core/hle/lock.h" | 19 | #include "core/hle/lock.h" |
| 19 | #include "core/memory.h" | 20 | #include "core/memory.h" |
| 20 | #include "core/memory_setup.h" | 21 | #include "core/memory_setup.h" |
| 22 | #include "video_core/gpu.h" | ||
| 21 | #include "video_core/renderer_base.h" | 23 | #include "video_core/renderer_base.h" |
| 22 | 24 | ||
| 23 | namespace Memory { | 25 | namespace Memory { |
| 24 | 26 | ||
| 25 | static PageTable* current_page_table = nullptr; | 27 | static Common::PageTable* current_page_table = nullptr; |
| 26 | 28 | ||
| 27 | void SetCurrentPageTable(PageTable* page_table) { | 29 | void SetCurrentPageTable(Common::PageTable* page_table) { |
| 28 | current_page_table = page_table; | 30 | current_page_table = page_table; |
| 29 | 31 | ||
| 30 | auto& system = Core::System::GetInstance(); | 32 | auto& system = Core::System::GetInstance(); |
| @@ -36,88 +38,80 @@ void SetCurrentPageTable(PageTable* page_table) { | |||
| 36 | } | 38 | } |
| 37 | } | 39 | } |
| 38 | 40 | ||
| 39 | PageTable* GetCurrentPageTable() { | 41 | Common::PageTable* GetCurrentPageTable() { |
| 40 | return current_page_table; | 42 | return current_page_table; |
| 41 | } | 43 | } |
| 42 | 44 | ||
| 43 | PageTable::PageTable() = default; | 45 | static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, |
| 44 | 46 | Common::PageType type) { | |
| 45 | PageTable::PageTable(std::size_t address_space_width_in_bits) { | ||
| 46 | Resize(address_space_width_in_bits); | ||
| 47 | } | ||
| 48 | |||
| 49 | PageTable::~PageTable() = default; | ||
| 50 | |||
| 51 | void PageTable::Resize(std::size_t address_space_width_in_bits) { | ||
| 52 | const std::size_t num_page_table_entries = 1ULL << (address_space_width_in_bits - PAGE_BITS); | ||
| 53 | |||
| 54 | pointers.resize(num_page_table_entries); | ||
| 55 | attributes.resize(num_page_table_entries); | ||
| 56 | |||
| 57 | // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the | ||
| 58 | // vector size is subsequently decreased (via resize), the vector might not automatically | ||
| 59 | // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for | ||
| 60 | // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use. | ||
| 61 | |||
| 62 | pointers.shrink_to_fit(); | ||
| 63 | attributes.shrink_to_fit(); | ||
| 64 | } | ||
| 65 | |||
| 66 | static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) { | ||
| 67 | LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, | 47 | LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, |
| 68 | (base + size) * PAGE_SIZE); | 48 | (base + size) * PAGE_SIZE); |
| 69 | 49 | ||
| 70 | RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE, | 50 | // During boot, current_page_table might not be set yet, in which case we need not flush |
| 71 | FlushMode::FlushAndInvalidate); | 51 | if (current_page_table) { |
| 52 | Core::System::GetInstance().GPU().FlushAndInvalidateRegion(base << PAGE_BITS, | ||
| 53 | size * PAGE_SIZE); | ||
| 54 | } | ||
| 72 | 55 | ||
| 73 | VAddr end = base + size; | 56 | VAddr end = base + size; |
| 74 | while (base != end) { | 57 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", |
| 75 | ASSERT_MSG(base < page_table.pointers.size(), "out of range mapping at {:016X}", base); | 58 | base + page_table.pointers.size()); |
| 76 | 59 | ||
| 77 | page_table.attributes[base] = type; | 60 | std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); |
| 78 | page_table.pointers[base] = memory; | ||
| 79 | 61 | ||
| 80 | base += 1; | 62 | if (memory == nullptr) { |
| 81 | if (memory != nullptr) | 63 | std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory); |
| 64 | } else { | ||
| 65 | while (base != end) { | ||
| 66 | page_table.pointers[base] = memory; | ||
| 67 | |||
| 68 | base += 1; | ||
| 82 | memory += PAGE_SIZE; | 69 | memory += PAGE_SIZE; |
| 70 | } | ||
| 83 | } | 71 | } |
| 84 | } | 72 | } |
| 85 | 73 | ||
| 86 | void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) { | 74 | void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { |
| 87 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 75 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 88 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 76 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 89 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory); | 77 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); |
| 90 | } | 78 | } |
| 91 | 79 | ||
| 92 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler) { | 80 | void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, |
| 81 | Common::MemoryHookPointer mmio_handler) { | ||
| 93 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 82 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 94 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 83 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 95 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special); | 84 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special); |
| 96 | 85 | ||
| 97 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 86 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 98 | SpecialRegion region{SpecialRegion::Type::IODevice, std::move(mmio_handler)}; | 87 | Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)}; |
| 99 | page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); | 88 | page_table.special_regions.add( |
| 89 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 100 | } | 90 | } |
| 101 | 91 | ||
| 102 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size) { | 92 | void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { |
| 103 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 93 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 104 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 94 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 105 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped); | 95 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped); |
| 106 | 96 | ||
| 107 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 97 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 108 | page_table.special_regions.erase(interval); | 98 | page_table.special_regions.erase(interval); |
| 109 | } | 99 | } |
| 110 | 100 | ||
| 111 | void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { | 101 | void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 102 | Common::MemoryHookPointer hook) { | ||
| 112 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 103 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 113 | SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)}; | 104 | Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; |
| 114 | page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); | 105 | page_table.special_regions.add( |
| 106 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 115 | } | 107 | } |
| 116 | 108 | ||
| 117 | void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { | 109 | void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 110 | Common::MemoryHookPointer hook) { | ||
| 118 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 111 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 119 | SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)}; | 112 | Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; |
| 120 | page_table.special_regions.subtract(std::make_pair(interval, std::set<SpecialRegion>{region})); | 113 | page_table.special_regions.subtract( |
| 114 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 121 | } | 115 | } |
| 122 | 116 | ||
| 123 | /** | 117 | /** |
| @@ -166,22 +160,19 @@ T Read(const VAddr vaddr) { | |||
| 166 | return value; | 160 | return value; |
| 167 | } | 161 | } |
| 168 | 162 | ||
| 169 | // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state | 163 | Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 170 | std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); | ||
| 171 | |||
| 172 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; | ||
| 173 | switch (type) { | 164 | switch (type) { |
| 174 | case PageType::Unmapped: | 165 | case Common::PageType::Unmapped: |
| 175 | LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); | 166 | LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); |
| 176 | return 0; | 167 | return 0; |
| 177 | case PageType::Memory: | 168 | case Common::PageType::Memory: |
| 178 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | 169 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); |
| 179 | break; | 170 | break; |
| 180 | case PageType::RasterizerCachedMemory: { | 171 | case Common::PageType::RasterizerCachedMemory: { |
| 181 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush); | 172 | auto host_ptr{GetPointerFromVMA(vaddr)}; |
| 182 | 173 | Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); | |
| 183 | T value; | 174 | T value; |
| 184 | std::memcpy(&value, GetPointerFromVMA(vaddr), sizeof(T)); | 175 | std::memcpy(&value, host_ptr, sizeof(T)); |
| 185 | return value; | 176 | return value; |
| 186 | } | 177 | } |
| 187 | default: | 178 | default: |
| @@ -199,21 +190,19 @@ void Write(const VAddr vaddr, const T data) { | |||
| 199 | return; | 190 | return; |
| 200 | } | 191 | } |
| 201 | 192 | ||
| 202 | // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state | 193 | Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 203 | std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); | ||
| 204 | |||
| 205 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; | ||
| 206 | switch (type) { | 194 | switch (type) { |
| 207 | case PageType::Unmapped: | 195 | case Common::PageType::Unmapped: |
| 208 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, | 196 | LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, |
| 209 | static_cast<u32>(data), vaddr); | 197 | static_cast<u32>(data), vaddr); |
| 210 | return; | 198 | return; |
| 211 | case PageType::Memory: | 199 | case Common::PageType::Memory: |
| 212 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | 200 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); |
| 213 | break; | 201 | break; |
| 214 | case PageType::RasterizerCachedMemory: { | 202 | case Common::PageType::RasterizerCachedMemory: { |
| 215 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Invalidate); | 203 | auto host_ptr{GetPointerFromVMA(vaddr)}; |
| 216 | std::memcpy(GetPointerFromVMA(vaddr), &data, sizeof(T)); | 204 | Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); |
| 205 | std::memcpy(host_ptr, &data, sizeof(T)); | ||
| 217 | break; | 206 | break; |
| 218 | } | 207 | } |
| 219 | default: | 208 | default: |
| @@ -228,10 +217,10 @@ bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) { | |||
| 228 | if (page_pointer) | 217 | if (page_pointer) |
| 229 | return true; | 218 | return true; |
| 230 | 219 | ||
| 231 | if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) | 220 | if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) |
| 232 | return true; | 221 | return true; |
| 233 | 222 | ||
| 234 | if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special) | 223 | if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) |
| 235 | return false; | 224 | return false; |
| 236 | 225 | ||
| 237 | return false; | 226 | return false; |
| @@ -251,7 +240,8 @@ u8* GetPointer(const VAddr vaddr) { | |||
| 251 | return page_pointer + (vaddr & PAGE_MASK); | 240 | return page_pointer + (vaddr & PAGE_MASK); |
| 252 | } | 241 | } |
| 253 | 242 | ||
| 254 | if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) { | 243 | if (current_page_table->attributes[vaddr >> PAGE_BITS] == |
| 244 | Common::PageType::RasterizerCachedMemory) { | ||
| 255 | return GetPointerFromVMA(vaddr); | 245 | return GetPointerFromVMA(vaddr); |
| 256 | } | 246 | } |
| 257 | 247 | ||
| @@ -285,20 +275,20 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | |||
| 285 | 275 | ||
| 286 | u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; | 276 | u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; |
| 287 | for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { | 277 | for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { |
| 288 | PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; | 278 | Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 289 | 279 | ||
| 290 | if (cached) { | 280 | if (cached) { |
| 291 | // Switch page type to cached if now cached | 281 | // Switch page type to cached if now cached |
| 292 | switch (page_type) { | 282 | switch (page_type) { |
| 293 | case PageType::Unmapped: | 283 | case Common::PageType::Unmapped: |
| 294 | // It is not necessary for a process to have this region mapped into its address | 284 | // It is not necessary for a process to have this region mapped into its address |
| 295 | // space, for example, a system module need not have a VRAM mapping. | 285 | // space, for example, a system module need not have a VRAM mapping. |
| 296 | break; | 286 | break; |
| 297 | case PageType::Memory: | 287 | case Common::PageType::Memory: |
| 298 | page_type = PageType::RasterizerCachedMemory; | 288 | page_type = Common::PageType::RasterizerCachedMemory; |
| 299 | current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; | 289 | current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; |
| 300 | break; | 290 | break; |
| 301 | case PageType::RasterizerCachedMemory: | 291 | case Common::PageType::RasterizerCachedMemory: |
| 302 | // There can be more than one GPU region mapped per CPU region, so it's common that | 292 | // There can be more than one GPU region mapped per CPU region, so it's common that |
| 303 | // this area is already marked as cached. | 293 | // this area is already marked as cached. |
| 304 | break; | 294 | break; |
| @@ -308,23 +298,23 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | |||
| 308 | } else { | 298 | } else { |
| 309 | // Switch page type to uncached if now uncached | 299 | // Switch page type to uncached if now uncached |
| 310 | switch (page_type) { | 300 | switch (page_type) { |
| 311 | case PageType::Unmapped: | 301 | case Common::PageType::Unmapped: |
| 312 | // It is not necessary for a process to have this region mapped into its address | 302 | // It is not necessary for a process to have this region mapped into its address |
| 313 | // space, for example, a system module need not have a VRAM mapping. | 303 | // space, for example, a system module need not have a VRAM mapping. |
| 314 | break; | 304 | break; |
| 315 | case PageType::Memory: | 305 | case Common::PageType::Memory: |
| 316 | // There can be more than one GPU region mapped per CPU region, so it's common that | 306 | // There can be more than one GPU region mapped per CPU region, so it's common that |
| 317 | // this area is already unmarked as cached. | 307 | // this area is already unmarked as cached. |
| 318 | break; | 308 | break; |
| 319 | case PageType::RasterizerCachedMemory: { | 309 | case Common::PageType::RasterizerCachedMemory: { |
| 320 | u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); | 310 | u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); |
| 321 | if (pointer == nullptr) { | 311 | if (pointer == nullptr) { |
| 322 | // It's possible that this function has been called while updating the pagetable | 312 | // It's possible that this function has been called while updating the pagetable |
| 323 | // after unmapping a VMA. In that case the underlying VMA will no longer exist, | 313 | // after unmapping a VMA. In that case the underlying VMA will no longer exist, |
| 324 | // and we should just leave the pagetable entry blank. | 314 | // and we should just leave the pagetable entry blank. |
| 325 | page_type = PageType::Unmapped; | 315 | page_type = Common::PageType::Unmapped; |
| 326 | } else { | 316 | } else { |
| 327 | page_type = PageType::Memory; | 317 | page_type = Common::PageType::Memory; |
| 328 | current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; | 318 | current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; |
| 329 | } | 319 | } |
| 330 | break; | 320 | break; |
| @@ -336,47 +326,6 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { | |||
| 336 | } | 326 | } |
| 337 | } | 327 | } |
| 338 | 328 | ||
| 339 | void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) { | ||
| 340 | auto& system_instance = Core::System::GetInstance(); | ||
| 341 | |||
| 342 | // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be | ||
| 343 | // null here | ||
| 344 | if (!system_instance.IsPoweredOn()) { | ||
| 345 | return; | ||
| 346 | } | ||
| 347 | |||
| 348 | const VAddr end = start + size; | ||
| 349 | |||
| 350 | const auto CheckRegion = [&](VAddr region_start, VAddr region_end) { | ||
| 351 | if (start >= region_end || end <= region_start) { | ||
| 352 | // No overlap with region | ||
| 353 | return; | ||
| 354 | } | ||
| 355 | |||
| 356 | const VAddr overlap_start = std::max(start, region_start); | ||
| 357 | const VAddr overlap_end = std::min(end, region_end); | ||
| 358 | const VAddr overlap_size = overlap_end - overlap_start; | ||
| 359 | |||
| 360 | auto& rasterizer = system_instance.Renderer().Rasterizer(); | ||
| 361 | switch (mode) { | ||
| 362 | case FlushMode::Flush: | ||
| 363 | rasterizer.FlushRegion(overlap_start, overlap_size); | ||
| 364 | break; | ||
| 365 | case FlushMode::Invalidate: | ||
| 366 | rasterizer.InvalidateRegion(overlap_start, overlap_size); | ||
| 367 | break; | ||
| 368 | case FlushMode::FlushAndInvalidate: | ||
| 369 | rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size); | ||
| 370 | break; | ||
| 371 | } | ||
| 372 | }; | ||
| 373 | |||
| 374 | const auto& vm_manager = Core::CurrentProcess()->VMManager(); | ||
| 375 | |||
| 376 | CheckRegion(vm_manager.GetCodeRegionBaseAddress(), vm_manager.GetCodeRegionEndAddress()); | ||
| 377 | CheckRegion(vm_manager.GetHeapRegionBaseAddress(), vm_manager.GetHeapRegionEndAddress()); | ||
| 378 | } | ||
| 379 | |||
| 380 | u8 Read8(const VAddr addr) { | 329 | u8 Read8(const VAddr addr) { |
| 381 | return Read<u8>(addr); | 330 | return Read<u8>(addr); |
| 382 | } | 331 | } |
| @@ -407,24 +356,24 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_ | |||
| 407 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 356 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 408 | 357 | ||
| 409 | switch (page_table.attributes[page_index]) { | 358 | switch (page_table.attributes[page_index]) { |
| 410 | case PageType::Unmapped: { | 359 | case Common::PageType::Unmapped: { |
| 411 | LOG_ERROR(HW_Memory, | 360 | LOG_ERROR(HW_Memory, |
| 412 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 361 | "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 413 | current_vaddr, src_addr, size); | 362 | current_vaddr, src_addr, size); |
| 414 | std::memset(dest_buffer, 0, copy_amount); | 363 | std::memset(dest_buffer, 0, copy_amount); |
| 415 | break; | 364 | break; |
| 416 | } | 365 | } |
| 417 | case PageType::Memory: { | 366 | case Common::PageType::Memory: { |
| 418 | DEBUG_ASSERT(page_table.pointers[page_index]); | 367 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 419 | 368 | ||
| 420 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; | 369 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; |
| 421 | std::memcpy(dest_buffer, src_ptr, copy_amount); | 370 | std::memcpy(dest_buffer, src_ptr, copy_amount); |
| 422 | break; | 371 | break; |
| 423 | } | 372 | } |
| 424 | case PageType::RasterizerCachedMemory: { | 373 | case Common::PageType::RasterizerCachedMemory: { |
| 425 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 374 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 426 | FlushMode::Flush); | 375 | Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); |
| 427 | std::memcpy(dest_buffer, GetPointerFromVMA(process, current_vaddr), copy_amount); | 376 | std::memcpy(dest_buffer, host_ptr, copy_amount); |
| 428 | break; | 377 | break; |
| 429 | } | 378 | } |
| 430 | default: | 379 | default: |
| @@ -471,23 +420,23 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi | |||
| 471 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 420 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 472 | 421 | ||
| 473 | switch (page_table.attributes[page_index]) { | 422 | switch (page_table.attributes[page_index]) { |
| 474 | case PageType::Unmapped: { | 423 | case Common::PageType::Unmapped: { |
| 475 | LOG_ERROR(HW_Memory, | 424 | LOG_ERROR(HW_Memory, |
| 476 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 425 | "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 477 | current_vaddr, dest_addr, size); | 426 | current_vaddr, dest_addr, size); |
| 478 | break; | 427 | break; |
| 479 | } | 428 | } |
| 480 | case PageType::Memory: { | 429 | case Common::PageType::Memory: { |
| 481 | DEBUG_ASSERT(page_table.pointers[page_index]); | 430 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 482 | 431 | ||
| 483 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; | 432 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; |
| 484 | std::memcpy(dest_ptr, src_buffer, copy_amount); | 433 | std::memcpy(dest_ptr, src_buffer, copy_amount); |
| 485 | break; | 434 | break; |
| 486 | } | 435 | } |
| 487 | case PageType::RasterizerCachedMemory: { | 436 | case Common::PageType::RasterizerCachedMemory: { |
| 488 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 437 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 489 | FlushMode::Invalidate); | 438 | Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); |
| 490 | std::memcpy(GetPointerFromVMA(process, current_vaddr), src_buffer, copy_amount); | 439 | std::memcpy(host_ptr, src_buffer, copy_amount); |
| 491 | break; | 440 | break; |
| 492 | } | 441 | } |
| 493 | default: | 442 | default: |
| @@ -517,23 +466,23 @@ void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std: | |||
| 517 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 466 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 518 | 467 | ||
| 519 | switch (page_table.attributes[page_index]) { | 468 | switch (page_table.attributes[page_index]) { |
| 520 | case PageType::Unmapped: { | 469 | case Common::PageType::Unmapped: { |
| 521 | LOG_ERROR(HW_Memory, | 470 | LOG_ERROR(HW_Memory, |
| 522 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 471 | "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 523 | current_vaddr, dest_addr, size); | 472 | current_vaddr, dest_addr, size); |
| 524 | break; | 473 | break; |
| 525 | } | 474 | } |
| 526 | case PageType::Memory: { | 475 | case Common::PageType::Memory: { |
| 527 | DEBUG_ASSERT(page_table.pointers[page_index]); | 476 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 528 | 477 | ||
| 529 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; | 478 | u8* dest_ptr = page_table.pointers[page_index] + page_offset; |
| 530 | std::memset(dest_ptr, 0, copy_amount); | 479 | std::memset(dest_ptr, 0, copy_amount); |
| 531 | break; | 480 | break; |
| 532 | } | 481 | } |
| 533 | case PageType::RasterizerCachedMemory: { | 482 | case Common::PageType::RasterizerCachedMemory: { |
| 534 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 483 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 535 | FlushMode::Invalidate); | 484 | Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); |
| 536 | std::memset(GetPointerFromVMA(process, current_vaddr), 0, copy_amount); | 485 | std::memset(host_ptr, 0, copy_amount); |
| 537 | break; | 486 | break; |
| 538 | } | 487 | } |
| 539 | default: | 488 | default: |
| @@ -559,23 +508,23 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, | |||
| 559 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 508 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 560 | 509 | ||
| 561 | switch (page_table.attributes[page_index]) { | 510 | switch (page_table.attributes[page_index]) { |
| 562 | case PageType::Unmapped: { | 511 | case Common::PageType::Unmapped: { |
| 563 | LOG_ERROR(HW_Memory, | 512 | LOG_ERROR(HW_Memory, |
| 564 | "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", | 513 | "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", |
| 565 | current_vaddr, src_addr, size); | 514 | current_vaddr, src_addr, size); |
| 566 | ZeroBlock(process, dest_addr, copy_amount); | 515 | ZeroBlock(process, dest_addr, copy_amount); |
| 567 | break; | 516 | break; |
| 568 | } | 517 | } |
| 569 | case PageType::Memory: { | 518 | case Common::PageType::Memory: { |
| 570 | DEBUG_ASSERT(page_table.pointers[page_index]); | 519 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 571 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; | 520 | const u8* src_ptr = page_table.pointers[page_index] + page_offset; |
| 572 | WriteBlock(process, dest_addr, src_ptr, copy_amount); | 521 | WriteBlock(process, dest_addr, src_ptr, copy_amount); |
| 573 | break; | 522 | break; |
| 574 | } | 523 | } |
| 575 | case PageType::RasterizerCachedMemory: { | 524 | case Common::PageType::RasterizerCachedMemory: { |
| 576 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | 525 | const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; |
| 577 | FlushMode::Flush); | 526 | Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); |
| 578 | WriteBlock(process, dest_addr, GetPointerFromVMA(process, current_vaddr), copy_amount); | 527 | WriteBlock(process, dest_addr, host_ptr, copy_amount); |
| 579 | break; | 528 | break; |
| 580 | } | 529 | } |
| 581 | default: | 530 | default: |
diff --git a/src/core/memory.h b/src/core/memory.h index 1acf5ce8c..3f60d868c 100644 --- a/src/core/memory.h +++ b/src/core/memory.h | |||
| @@ -10,7 +10,10 @@ | |||
| 10 | #include <vector> | 10 | #include <vector> |
| 11 | #include <boost/icl/interval_map.hpp> | 11 | #include <boost/icl/interval_map.hpp> |
| 12 | #include "common/common_types.h" | 12 | #include "common/common_types.h" |
| 13 | #include "core/memory_hook.h" | 13 | |
| 14 | namespace Common { | ||
| 15 | struct PageTable; | ||
| 16 | } | ||
| 14 | 17 | ||
| 15 | namespace Kernel { | 18 | namespace Kernel { |
| 16 | class Process; | 19 | class Process; |
| @@ -26,71 +29,6 @@ constexpr std::size_t PAGE_BITS = 12; | |||
| 26 | constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS; | 29 | constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS; |
| 27 | constexpr u64 PAGE_MASK = PAGE_SIZE - 1; | 30 | constexpr u64 PAGE_MASK = PAGE_SIZE - 1; |
| 28 | 31 | ||
| 29 | enum class PageType : u8 { | ||
| 30 | /// Page is unmapped and should cause an access error. | ||
| 31 | Unmapped, | ||
| 32 | /// Page is mapped to regular memory. This is the only type you can get pointers to. | ||
| 33 | Memory, | ||
| 34 | /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and | ||
| 35 | /// invalidation | ||
| 36 | RasterizerCachedMemory, | ||
| 37 | /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions. | ||
| 38 | Special, | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct SpecialRegion { | ||
| 42 | enum class Type { | ||
| 43 | DebugHook, | ||
| 44 | IODevice, | ||
| 45 | } type; | ||
| 46 | |||
| 47 | MemoryHookPointer handler; | ||
| 48 | |||
| 49 | bool operator<(const SpecialRegion& other) const { | ||
| 50 | return std::tie(type, handler) < std::tie(other.type, other.handler); | ||
| 51 | } | ||
| 52 | |||
| 53 | bool operator==(const SpecialRegion& other) const { | ||
| 54 | return std::tie(type, handler) == std::tie(other.type, other.handler); | ||
| 55 | } | ||
| 56 | }; | ||
| 57 | |||
| 58 | /** | ||
| 59 | * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely | ||
| 60 | * mimics the way a real CPU page table works. | ||
| 61 | */ | ||
| 62 | struct PageTable { | ||
| 63 | explicit PageTable(); | ||
| 64 | explicit PageTable(std::size_t address_space_width_in_bits); | ||
| 65 | ~PageTable(); | ||
| 66 | |||
| 67 | /** | ||
| 68 | * Resizes the page table to be able to accomodate enough pages within | ||
| 69 | * a given address space. | ||
| 70 | * | ||
| 71 | * @param address_space_width_in_bits The address size width in bits. | ||
| 72 | */ | ||
| 73 | void Resize(std::size_t address_space_width_in_bits); | ||
| 74 | |||
| 75 | /** | ||
| 76 | * Vector of memory pointers backing each page. An entry can only be non-null if the | ||
| 77 | * corresponding entry in the `attributes` vector is of type `Memory`. | ||
| 78 | */ | ||
| 79 | std::vector<u8*> pointers; | ||
| 80 | |||
| 81 | /** | ||
| 82 | * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is | ||
| 83 | * of type `Special`. | ||
| 84 | */ | ||
| 85 | boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions; | ||
| 86 | |||
| 87 | /** | ||
| 88 | * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then | ||
| 89 | * the corresponding entry in `pointers` MUST be set to null. | ||
| 90 | */ | ||
| 91 | std::vector<PageType> attributes; | ||
| 92 | }; | ||
| 93 | |||
| 94 | /// Virtual user-space memory regions | 32 | /// Virtual user-space memory regions |
| 95 | enum : VAddr { | 33 | enum : VAddr { |
| 96 | /// Read-only page containing kernel and system configuration values. | 34 | /// Read-only page containing kernel and system configuration values. |
| @@ -116,8 +54,8 @@ enum : VAddr { | |||
| 116 | }; | 54 | }; |
| 117 | 55 | ||
| 118 | /// Currently active page table | 56 | /// Currently active page table |
| 119 | void SetCurrentPageTable(PageTable* page_table); | 57 | void SetCurrentPageTable(Common::PageTable* page_table); |
| 120 | PageTable* GetCurrentPageTable(); | 58 | Common::PageTable* GetCurrentPageTable(); |
| 121 | 59 | ||
| 122 | /// Determines if the given VAddr is valid for the specified process. | 60 | /// Determines if the given VAddr is valid for the specified process. |
| 123 | bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr); | 61 | bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr); |
| @@ -161,10 +99,4 @@ enum class FlushMode { | |||
| 161 | */ | 99 | */ |
| 162 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached); | 100 | void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached); |
| 163 | 101 | ||
| 164 | /** | ||
| 165 | * Flushes and invalidates any externally cached rasterizer resources touching the given virtual | ||
| 166 | * address region. | ||
| 167 | */ | ||
| 168 | void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode); | ||
| 169 | |||
| 170 | } // namespace Memory | 102 | } // namespace Memory |
diff --git a/src/core/memory_hook.cpp b/src/core/memory_hook.cpp deleted file mode 100644 index c61c6c1fb..000000000 --- a/src/core/memory_hook.cpp +++ /dev/null | |||
| @@ -1,11 +0,0 @@ | |||
| 1 | // Copyright 2018 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "core/memory_hook.h" | ||
| 6 | |||
| 7 | namespace Memory { | ||
| 8 | |||
| 9 | MemoryHook::~MemoryHook() = default; | ||
| 10 | |||
| 11 | } // namespace Memory | ||
diff --git a/src/core/memory_hook.h b/src/core/memory_hook.h deleted file mode 100644 index 940777107..000000000 --- a/src/core/memory_hook.h +++ /dev/null | |||
| @@ -1,47 +0,0 @@ | |||
| 1 | // Copyright 2016 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <memory> | ||
| 8 | #include <optional> | ||
| 9 | |||
| 10 | #include "common/common_types.h" | ||
| 11 | |||
| 12 | namespace Memory { | ||
| 13 | |||
| 14 | /** | ||
| 15 | * Memory hooks have two purposes: | ||
| 16 | * 1. To allow reads and writes to a region of memory to be intercepted. This is used to implement | ||
| 17 | * texture forwarding and memory breakpoints for debugging. | ||
| 18 | * 2. To allow for the implementation of MMIO devices. | ||
| 19 | * | ||
| 20 | * A hook may be mapped to multiple regions of memory. | ||
| 21 | * | ||
| 22 | * If a std::nullopt or false is returned from a function, the read/write request is passed through | ||
| 23 | * to the underlying memory region. | ||
| 24 | */ | ||
| 25 | class MemoryHook { | ||
| 26 | public: | ||
| 27 | virtual ~MemoryHook(); | ||
| 28 | |||
| 29 | virtual std::optional<bool> IsValidAddress(VAddr addr) = 0; | ||
| 30 | |||
| 31 | virtual std::optional<u8> Read8(VAddr addr) = 0; | ||
| 32 | virtual std::optional<u16> Read16(VAddr addr) = 0; | ||
| 33 | virtual std::optional<u32> Read32(VAddr addr) = 0; | ||
| 34 | virtual std::optional<u64> Read64(VAddr addr) = 0; | ||
| 35 | |||
| 36 | virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size) = 0; | ||
| 37 | |||
| 38 | virtual bool Write8(VAddr addr, u8 data) = 0; | ||
| 39 | virtual bool Write16(VAddr addr, u16 data) = 0; | ||
| 40 | virtual bool Write32(VAddr addr, u32 data) = 0; | ||
| 41 | virtual bool Write64(VAddr addr, u64 data) = 0; | ||
| 42 | |||
| 43 | virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size) = 0; | ||
| 44 | }; | ||
| 45 | |||
| 46 | using MemoryHookPointer = std::shared_ptr<MemoryHook>; | ||
| 47 | } // namespace Memory | ||
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h index 9a1a4f4be..5225ee8e2 100644 --- a/src/core/memory_setup.h +++ b/src/core/memory_setup.h | |||
| @@ -5,7 +5,11 @@ | |||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | #include "core/memory_hook.h" | 8 | #include "common/memory_hook.h" |
| 9 | |||
| 10 | namespace Common { | ||
| 11 | struct PageTable; | ||
| 12 | } | ||
| 9 | 13 | ||
| 10 | namespace Memory { | 14 | namespace Memory { |
| 11 | 15 | ||
| @@ -17,7 +21,7 @@ namespace Memory { | |||
| 17 | * @param size The amount of bytes to map. Must be page-aligned. | 21 | * @param size The amount of bytes to map. Must be page-aligned. |
| 18 | * @param target Buffer with the memory backing the mapping. Must be of length at least `size`. | 22 | * @param target Buffer with the memory backing the mapping. Must be of length at least `size`. |
| 19 | */ | 23 | */ |
| 20 | void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target); | 24 | void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target); |
| 21 | 25 | ||
| 22 | /** | 26 | /** |
| 23 | * Maps a region of the emulated process address space as a IO region. | 27 | * Maps a region of the emulated process address space as a IO region. |
| @@ -26,11 +30,14 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target); | |||
| 26 | * @param size The amount of bytes to map. Must be page-aligned. | 30 | * @param size The amount of bytes to map. Must be page-aligned. |
| 27 | * @param mmio_handler The handler that backs the mapping. | 31 | * @param mmio_handler The handler that backs the mapping. |
| 28 | */ | 32 | */ |
| 29 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler); | 33 | void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, |
| 34 | Common::MemoryHookPointer mmio_handler); | ||
| 30 | 35 | ||
| 31 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size); | 36 | void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size); |
| 32 | 37 | ||
| 33 | void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook); | 38 | void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 34 | void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook); | 39 | Common::MemoryHookPointer hook); |
| 40 | void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, | ||
| 41 | Common::MemoryHookPointer hook); | ||
| 35 | 42 | ||
| 36 | } // namespace Memory | 43 | } // namespace Memory |
diff --git a/src/core/settings.cpp b/src/core/settings.cpp index 2e232e1e7..6dd3139cc 100644 --- a/src/core/settings.cpp +++ b/src/core/settings.cpp | |||
| @@ -91,7 +91,10 @@ void LogSettings() { | |||
| 91 | LogSetting("Renderer_UseResolutionFactor", Settings::values.resolution_factor); | 91 | LogSetting("Renderer_UseResolutionFactor", Settings::values.resolution_factor); |
| 92 | LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit); | 92 | LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit); |
| 93 | LogSetting("Renderer_FrameLimit", Settings::values.frame_limit); | 93 | LogSetting("Renderer_FrameLimit", Settings::values.frame_limit); |
| 94 | LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); | ||
| 94 | LogSetting("Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation); | 95 | LogSetting("Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation); |
| 96 | LogSetting("Renderer_UseAsynchronousGpuEmulation", | ||
| 97 | Settings::values.use_asynchronous_gpu_emulation); | ||
| 95 | LogSetting("Audio_OutputEngine", Settings::values.sink_id); | 98 | LogSetting("Audio_OutputEngine", Settings::values.sink_id); |
| 96 | LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); | 99 | LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); |
| 97 | LogSetting("Audio_OutputDevice", Settings::values.audio_device_id); | 100 | LogSetting("Audio_OutputDevice", Settings::values.audio_device_id); |
diff --git a/src/core/settings.h b/src/core/settings.h index c97387fc7..cdfb2f742 100644 --- a/src/core/settings.h +++ b/src/core/settings.h | |||
| @@ -391,7 +391,9 @@ struct Values { | |||
| 391 | float resolution_factor; | 391 | float resolution_factor; |
| 392 | bool use_frame_limit; | 392 | bool use_frame_limit; |
| 393 | u16 frame_limit; | 393 | u16 frame_limit; |
| 394 | bool use_disk_shader_cache; | ||
| 394 | bool use_accurate_gpu_emulation; | 395 | bool use_accurate_gpu_emulation; |
| 396 | bool use_asynchronous_gpu_emulation; | ||
| 395 | 397 | ||
| 396 | float bg_red; | 398 | float bg_red; |
| 397 | float bg_green; | 399 | float bg_green; |
diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp index 09ed74d78..e1db06811 100644 --- a/src/core/telemetry_session.cpp +++ b/src/core/telemetry_session.cpp | |||
| @@ -158,8 +158,12 @@ TelemetrySession::TelemetrySession() { | |||
| 158 | AddField(Telemetry::FieldType::UserConfig, "Renderer_UseFrameLimit", | 158 | AddField(Telemetry::FieldType::UserConfig, "Renderer_UseFrameLimit", |
| 159 | Settings::values.use_frame_limit); | 159 | Settings::values.use_frame_limit); |
| 160 | AddField(Telemetry::FieldType::UserConfig, "Renderer_FrameLimit", Settings::values.frame_limit); | 160 | AddField(Telemetry::FieldType::UserConfig, "Renderer_FrameLimit", Settings::values.frame_limit); |
| 161 | AddField(Telemetry::FieldType::UserConfig, "Renderer_UseDiskShaderCache", | ||
| 162 | Settings::values.use_disk_shader_cache); | ||
| 161 | AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAccurateGpuEmulation", | 163 | AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAccurateGpuEmulation", |
| 162 | Settings::values.use_accurate_gpu_emulation); | 164 | Settings::values.use_accurate_gpu_emulation); |
| 165 | AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAsynchronousGpuEmulation", | ||
| 166 | Settings::values.use_asynchronous_gpu_emulation); | ||
| 163 | AddField(Telemetry::FieldType::UserConfig, "System_UseDockedMode", | 167 | AddField(Telemetry::FieldType::UserConfig, "System_UseDockedMode", |
| 164 | Settings::values.use_docked_mode); | 168 | Settings::values.use_docked_mode); |
| 165 | } | 169 | } |