Diffstat (limited to 'src')
197 files changed, 5324 insertions, 2122 deletions
diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt
index 0a1f3bf18..8e3a8f5a8 100644
--- a/src/audio_core/CMakeLists.txt
+++ b/src/audio_core/CMakeLists.txt
@@ -217,7 +217,7 @@ else()
 endif()
 
 target_link_libraries(audio_core PUBLIC common core)
-if (ARCHITECTURE_x86_64)
+if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_link_libraries(audio_core PRIVATE dynarmic)
 endif()
 
diff --git a/src/audio_core/in/audio_in_system.cpp b/src/audio_core/in/audio_in_system.cpp
index 6b7e6715c..4324cafd8 100644
--- a/src/audio_core/in/audio_in_system.cpp
+++ b/src/audio_core/in/audio_in_system.cpp
@@ -56,7 +56,7 @@ Result System::IsConfigValid(const std::string_view device_name,
     return ResultSuccess;
 }
 
-Result System::Initialize(std::string& device_name, const AudioInParameter& in_params,
+Result System::Initialize(std::string device_name, const AudioInParameter& in_params,
                           const u32 handle_, const u64 applet_resource_user_id_) {
     auto result{IsConfigValid(device_name, in_params)};
     if (result.IsError()) {
diff --git a/src/audio_core/in/audio_in_system.h b/src/audio_core/in/audio_in_system.h
index b9dc0e60f..1c5154638 100644
--- a/src/audio_core/in/audio_in_system.h
+++ b/src/audio_core/in/audio_in_system.h
@@ -97,7 +97,7 @@ public:
      * @param applet_resource_user_id - Unused.
      * @return Result code.
      */
-    Result Initialize(std::string& device_name, const AudioInParameter& in_params, u32 handle,
+    Result Initialize(std::string device_name, const AudioInParameter& in_params, u32 handle,
                       u64 applet_resource_user_id);
 
     /**
diff --git a/src/audio_core/out/audio_out_system.cpp b/src/audio_core/out/audio_out_system.cpp
index 48a801923..a66208ed9 100644
--- a/src/audio_core/out/audio_out_system.cpp
+++ b/src/audio_core/out/audio_out_system.cpp
@@ -49,8 +49,8 @@ Result System::IsConfigValid(std::string_view device_name,
     return Service::Audio::ERR_INVALID_CHANNEL_COUNT;
 }
 
-Result System::Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle_,
-                          u64& applet_resource_user_id_) {
+Result System::Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle_,
+                          u64 applet_resource_user_id_) {
     auto result = IsConfigValid(device_name, in_params);
     if (result.IsError()) {
         return result;
diff --git a/src/audio_core/out/audio_out_system.h b/src/audio_core/out/audio_out_system.h
index 0817b2f37..b95cb91be 100644
--- a/src/audio_core/out/audio_out_system.h
+++ b/src/audio_core/out/audio_out_system.h
@@ -88,8 +88,8 @@ public:
      * @param applet_resource_user_id - Unused.
      * @return Result code.
      */
-    Result Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle,
-                      u64& applet_resource_user_id);
+    Result Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle,
+                      u64 applet_resource_user_id);
 
     /**
      * Start this system.
diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index e1e2a90fc..0dad9338a 100644
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -31,8 +31,10 @@
 
 #ifndef _MSC_VER
 
-#ifdef ARCHITECTURE_x86_64
+#if defined(ARCHITECTURE_x86_64)
 #define Crash() __asm__ __volatile__("int $3")
+#elif defined(ARCHITECTURE_arm64)
+#define Crash() __asm__ __volatile__("brk #0")
 #else
 #define Crash() exit(1)
 #endif
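`brk #0` is the AArch64 breakpoint instruction, the direct analogue of x86's `int $3`, so Crash() now traps into an attached debugger on both host architectures. A minimal standalone sketch of the same dispatch, using compiler-defined macros rather than the project's build-system ARCHITECTURE_* defines:

```cpp
// Illustrative sketch, not part of the commit: architecture is detected via
// compiler-defined macros instead of the build system's ARCHITECTURE_* flags.
#include <cstdlib>

#if defined(__x86_64__)
#define Crash() __asm__ __volatile__("int $3") // x86 software breakpoint
#elif defined(__aarch64__)
#define Crash() __asm__ __volatile__("brk #0") // AArch64 breakpoint instruction
#else
#define Crash() std::exit(1) // no known trap instruction; bail out instead
#endif

int main() {
    Crash(); // raises SIGTRAP on x86_64/arm64, handy under a debugger
}
```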
diff --git a/src/common/concepts.h b/src/common/concepts.h
index e8ce30dfe..a9acff3e7 100644
--- a/src/common/concepts.h
+++ b/src/common/concepts.h
@@ -3,24 +3,14 @@
 
 #pragma once
 
+#include <iterator>
 #include <type_traits>
 
 namespace Common {
 
-// Check if type is like an STL container
+// Check if type satisfies the ContiguousContainer named requirement.
 template <typename T>
-concept IsSTLContainer = requires(T t) {
-    typename T::value_type;
-    typename T::iterator;
-    typename T::const_iterator;
-    // TODO(ogniK): Replace below is std::same_as<void> when MSVC supports it.
-    t.begin();
-    t.end();
-    t.cbegin();
-    t.cend();
-    t.data();
-    t.size();
-};
+concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;
 
 // TODO: Replace with std::derived_from when the <concepts> header
 // is available on all supported platforms.
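The hand-rolled `requires` block is replaced by the standard `std::contiguous_iterator` concept from `<iterator>`, which is both stricter and shorter. A standalone sketch of how the new concept classifies common containers (the asserts are illustrative, not from the commit):

```cpp
// std::vector and std::string iterate over contiguous storage; std::list
// does not, so it no longer satisfies the concept.
#include <iterator>
#include <list>
#include <string>
#include <vector>

template <typename T>
concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;

static_assert(IsContiguousContainer<std::vector<int>>);
static_assert(IsContiguousContainer<std::string>);
static_assert(!IsContiguousContainer<std::list<int>>);

int main() {}
```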
diff --git a/src/common/fs/file.h b/src/common/fs/file.h
index 69b53384c..167c4d826 100644
--- a/src/common/fs/file.h
+++ b/src/common/fs/file.h
@@ -209,8 +209,8 @@ public:
 
     /**
      * Helper function which deduces the value type of a contiguous STL container used in ReadSpan.
-     * If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
-     * ReadObject and T must be a trivially copyable object.
+     * If T is not a contiguous container as defined by the concept IsContiguousContainer, this
+     * calls ReadObject and T must be a trivially copyable object.
      *
      * See ReadSpan for more details if T is a contiguous container.
      * See ReadObject for more details if T is a trivially copyable object.
@@ -223,7 +223,7 @@ public:
      */
     template <typename T>
    [[nodiscard]] size_t Read(T& data) const {
-        if constexpr (IsSTLContainer<T>) {
+        if constexpr (IsContiguousContainer<T>) {
             using ContiguousType = typename T::value_type;
             static_assert(std::is_trivially_copyable_v<ContiguousType>,
                           "Data type must be trivially copyable.");
@@ -235,8 +235,8 @@ public:
 
     /**
      * Helper function which deduces the value type of a contiguous STL container used in WriteSpan.
-     * If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
-     * WriteObject and T must be a trivially copyable object.
+     * If T is not a contiguous STL container as defined by the concept IsContiguousContainer, this
+     * calls WriteObject and T must be a trivially copyable object.
      *
      * See WriteSpan for more details if T is a contiguous container.
      * See WriteObject for more details if T is a trivially copyable object.
@@ -249,7 +249,7 @@ public:
      */
     template <typename T>
    [[nodiscard]] size_t Write(const T& data) const {
-        if constexpr (IsSTLContainer<T>) {
+        if constexpr (IsContiguousContainer<T>) {
             using ContiguousType = typename T::value_type;
             static_assert(std::is_trivially_copyable_v<ContiguousType>,
                           "Data type must be trivially copyable.");
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index 7f9659612..909f6cf3f 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -359,6 +359,12 @@ public:
         }
     });
 
+    long page_size = sysconf(_SC_PAGESIZE);
+    if (page_size != 0x1000) {
+        LOG_CRITICAL(HW_Memory, "page size {:#x} is incompatible with 4K paging", page_size);
+        throw std::bad_alloc{};
+    }
+
     // Backing memory initialization
 #if defined(__FreeBSD__) && __FreeBSD__ < 13
     // XXX Drop after FreeBSD 12.* reaches EOL on 2024-06-30
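The new guard refuses to run on hosts whose native page size is not 4 KiB (for example, arm64 kernels configured for 16K or 64K pages), because the backing-memory code assumes 4K granularity. A standalone POSIX sketch of the same check:

```cpp
// Illustrative check only; the real code logs via LOG_CRITICAL and throws
// std::bad_alloc instead of returning an exit status.
#include <cstdio>
#include <unistd.h>

int main() {
    const long page_size = sysconf(_SC_PAGESIZE);
    std::printf("host page size: %#lx\n", page_size);
    if (page_size != 0x1000) {
        std::printf("incompatible with 4K paging\n");
        return 1;
    }
    return 0;
}
```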
diff --git a/src/common/input.h b/src/common/input.h
index b533f3844..cb30b7254 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -100,7 +100,6 @@ enum class CameraError {
 enum class VibrationAmplificationType {
     Linear,
     Exponential,
-    Test,
 };
 
 // Analog properties for calibration
@@ -325,6 +324,10 @@ public:
         return VibrationError::NotSupported;
     }
 
+    virtual bool IsVibrationEnabled() {
+        return false;
+    }
+
     virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) {
         return PollingError::NotSupported;
     }
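Rumble support is now reported through an overridable query whose base implementation returns false, replacing the removed Test amplification pulse. A self-contained sketch of the pattern (class names are illustrative, not from the diff):

```cpp
// A backend that knows its hardware can vibrate overrides the query; callers
// no longer need to send a throwaway vibration to probe for support.
class OutputDevice {
public:
    virtual ~OutputDevice() = default;
    virtual bool IsVibrationEnabled() {
        return false; // default: no rumble support
    }
};

class RumbleDevice : public OutputDevice {
public:
    bool IsVibrationEnabled() override {
        return true; // capability known up front
    }
};

int main() {
    RumbleDevice device;
    OutputDevice& base = device;
    return base.IsVibrationEnabled() ? 0 : 1; // virtual dispatch picks the override
}
```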
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 0a560ebb7..8173462cb 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -151,6 +151,7 @@ void UpdateRescalingInfo() {
         ASSERT(false);
         info.up_scale = 1;
         info.down_shift = 0;
+        break;
     }
     info.up_factor = static_cast<f32>(info.up_scale) / (1U << info.down_shift);
     info.down_factor = static_cast<f32>(1U << info.down_shift) / info.up_scale;
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 113e663b5..f67f1ce92 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -190,11 +190,13 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_debug.h
     hle/kernel/k_dynamic_page_manager.h
     hle/kernel/k_dynamic_resource_manager.h
     hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
+    hle/kernel/k_event_info.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
     hle/kernel/k_interrupt_manager.cpp
@@ -222,6 +224,8 @@ add_library(core STATIC
     hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
+    hle/kernel/k_page_table_manager.h
+    hle/kernel/k_page_table_slab_heap.h
     hle/kernel/k_port.cpp
     hle/kernel/k_port.h
     hle/kernel/k_priority_queue.h
@@ -254,6 +258,8 @@ add_library(core STATIC
     hle/kernel/k_synchronization_object.cpp
     hle/kernel/k_synchronization_object.h
     hle/kernel/k_system_control.h
+    hle/kernel/k_system_resource.cpp
+    hle/kernel/k_system_resource.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
     hle/kernel/k_thread_local_page.cpp
@@ -491,10 +497,6 @@ add_library(core STATIC
     hle/service/hid/irsensor/processor_base.h
     hle/service/hid/irsensor/tera_plugin_processor.cpp
     hle/service/hid/irsensor/tera_plugin_processor.h
-    hle/service/jit/jit_context.cpp
-    hle/service/jit/jit_context.h
-    hle/service/jit/jit.cpp
-    hle/service/jit/jit.h
     hle/service/lbl/lbl.cpp
     hle/service/lbl/lbl.h
     hle/service/ldn/lan_discovery.cpp
@@ -799,14 +801,18 @@ if (ENABLE_WEB_SERVICE)
     target_link_libraries(core PRIVATE web_service)
 endif()
 
-if (ARCHITECTURE_x86_64)
+if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_sources(core PRIVATE
-        arm/dynarmic/arm_dynarmic_32.cpp
-        arm/dynarmic/arm_dynarmic_32.h
         arm/dynarmic/arm_dynarmic_64.cpp
         arm/dynarmic/arm_dynarmic_64.h
+        arm/dynarmic/arm_dynarmic_32.cpp
+        arm/dynarmic/arm_dynarmic_32.h
         arm/dynarmic/arm_dynarmic_cp15.cpp
         arm/dynarmic/arm_dynarmic_cp15.h
+        hle/service/jit/jit_context.cpp
+        hle/service/jit/jit_context.h
+        hle/service/jit/jit.cpp
+        hle/service/jit/jit.h
     )
     target_link_libraries(core PRIVATE dynarmic)
 endif()
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index d1e70f19d..227e06ea1 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -301,6 +301,11 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
         }
     }
 
+#ifdef ARCHITECTURE_arm64
+    // TODO: remove when fixed in dynarmic
+    config.optimizations &= ~Dynarmic::OptimizationFlag::BlockLinking;
+#endif
+
     return std::make_unique<Dynarmic::A32::Jit>(config);
 }
 
@@ -450,7 +455,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace(Core::S
     // Frame records are two words long:
     // fp+0 : pointer to previous frame record
     // fp+4 : value of lr for frame
-    while (true) {
+    for (size_t i = 0; i < 256; i++) {
         out.push_back({"", 0, lr, 0, ""});
         if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 8)) {
             break;
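Capping the walk at 256 frames guarantees termination when a corrupted or malicious guest builds a cyclic frame-record chain, which the old `while (true)` would follow forever. A standalone sketch of the bounded walk, with a toy map standing in for guest memory:

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

struct FrameRecord {
    uint64_t prev_fp; // fp+0: pointer to previous frame record
    uint64_t lr;      // fp+8: saved link register for the frame
};

using GuestStack = std::unordered_map<uint64_t, FrameRecord>;

std::vector<uint64_t> Backtrace(const GuestStack& stack, uint64_t fp, uint64_t lr) {
    std::vector<uint64_t> out;
    // The cap mirrors the diff: a frame-pointer cycle can no longer hang the host.
    for (size_t i = 0; i < 256; i++) {
        out.push_back(lr);
        const auto it = stack.find(fp);
        if (fp == 0 || (fp % 4) != 0 || it == stack.end()) {
            break; // null, misaligned, or unmapped frame pointer ends the walk
        }
        lr = it->second.lr;
        fp = it->second.prev_fp;
    }
    return out;
}

int main() {
    const GuestStack stack{{0x1000, {0x1000, 0xdead}}}; // frame points at itself: a cycle
    return Backtrace(stack, 0x1000, 0xbeef).size() == 256 ? 0 : 1;
}
```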
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 22b5d5656..afb7fb3a0 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -517,7 +517,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace(Core::S
     // Frame records are two words long:
     // fp+0 : pointer to previous frame record
     // fp+8 : value of lr for frame
-    while (true) {
+    for (size_t i = 0; i < 256; i++) {
         out.push_back({"", 0, lr, 0, ""});
         if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) {
             break;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
index 200efe4db..5a4eba3eb 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_cp15.cpp
@@ -52,12 +52,16 @@ CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1
     case 4:
         // CP15_DATA_SYNC_BARRIER
         return Callback{
-            [](Dynarmic::A32::Jit*, void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
-#ifdef _MSC_VER
+            [](void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
+#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
                 _mm_mfence();
                 _mm_lfence();
-#else
+#elif defined(ARCHITECTURE_x86_64)
                 asm volatile("mfence\n\tlfence\n\t" : : : "memory");
+#elif defined(ARCHITECTURE_arm64)
+                asm volatile("dsb sy\n\t" : : : "memory");
+#else
+#error Unsupported architecture
 #endif
                 return 0;
             },
@@ -66,11 +70,15 @@ CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1
     case 5:
         // CP15_DATA_MEMORY_BARRIER
         return Callback{
-            [](Dynarmic::A32::Jit*, void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
-#ifdef _MSC_VER
+            [](void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
+#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
                 _mm_mfence();
-#else
+#elif defined(ARCHITECTURE_x86_64)
                 asm volatile("mfence\n\t" : : : "memory");
+#elif defined(ARCHITECTURE_arm64)
+                asm volatile("dmb sy\n\t" : : : "memory");
+#else
+#error Unsupported architecture
 #endif
                 return 0;
             },
@@ -115,7 +123,7 @@ CallbackOrAccessOneWord DynarmicCP15::CompileGetOneWord(bool two, unsigned opc1,
 CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) {
     if (!two && opc == 0 && CRm == CoprocReg::C14) {
         // CNTPCT
-        const auto callback = [](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 {
+        const auto callback = [](void* arg, u32, u32) -> u64 {
             const auto& parent_arg = *static_cast<ARM_Dynarmic_32*>(arg);
             return parent_arg.system.CoreTiming().GetClockTicks();
         };
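The guest's CP15 barrier writes now lower to the matching native barrier on the host: `mfence`/`lfence` on x86_64, `dsb sy`/`dmb sy` on arm64. A standalone sketch of the mapping, detecting the architecture via compiler macros rather than the project's ARCHITECTURE_* defines:

```cpp
#if defined(__x86_64__)
inline void DataSyncBarrier() {
    asm volatile("mfence\n\tlfence\n\t" : : : "memory"); // full fence plus load serialization
}
inline void DataMemoryBarrier() {
    asm volatile("mfence\n\t" : : : "memory"); // full memory fence
}
#elif defined(__aarch64__)
inline void DataSyncBarrier() {
    asm volatile("dsb sy\n\t" : : : "memory"); // data synchronization barrier
}
inline void DataMemoryBarrier() {
    asm volatile("dmb sy\n\t" : : : "memory"); // data memory barrier
}
#else
#error Unsupported architecture
#endif

int main() {
    DataMemoryBarrier(); // what a guest CP15_DATA_MEMORY_BARRIER write executes
    DataSyncBarrier();   // what a guest CP15_DATA_SYNC_BARRIER write executes
}
```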
diff --git a/src/core/arm/exclusive_monitor.cpp b/src/core/arm/exclusive_monitor.cpp
index 2db0b035d..20550faeb 100644
--- a/src/core/arm/exclusive_monitor.cpp
+++ b/src/core/arm/exclusive_monitor.cpp
@@ -1,7 +1,7 @@
 // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
-#ifdef ARCHITECTURE_x86_64
+#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
 #include "core/arm/dynarmic/arm_exclusive_monitor.h"
 #endif
 #include "core/arm/exclusive_monitor.h"
@@ -13,7 +13,7 @@ ExclusiveMonitor::~ExclusiveMonitor() = default;
 
 std::unique_ptr<Core::ExclusiveMonitor> MakeExclusiveMonitor(Memory::Memory& memory,
                                                              std::size_t num_cores) {
-#ifdef ARCHITECTURE_x86_64
+#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
     return std::make_unique<Core::DynarmicExclusiveMonitor>(memory, num_cores);
 #else
     // TODO(merry): Passthrough exclusive monitor
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 40a610435..d8934be52 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -137,6 +137,7 @@ struct System::Impl {
         device_memory = std::make_unique<Core::DeviceMemory>();
 
         is_multicore = Settings::values.use_multi_core.GetValue();
+        extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();
 
         core_timing.SetMulticore(is_multicore);
         core_timing.Initialize([&system]() { system.RegisterHostThread(); });
@@ -166,13 +167,18 @@ struct System::Impl {
     }
 
     void ReinitializeIfNecessary(System& system) {
-        if (is_multicore == Settings::values.use_multi_core.GetValue()) {
+        const bool must_reinitialize =
+            is_multicore != Settings::values.use_multi_core.GetValue() ||
+            extended_memory_layout != Settings::values.use_extended_memory_layout.GetValue();
+
+        if (!must_reinitialize) {
             return;
         }
 
         LOG_DEBUG(Kernel, "Re-initializing");
 
         is_multicore = Settings::values.use_multi_core.GetValue();
+        extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();
 
         Initialize(system);
     }
@@ -521,6 +527,7 @@ struct System::Impl {
 
     bool is_multicore{};
     bool is_async_gpu{};
+    bool extended_memory_layout{};
 
     ExecuteProgramCallback execute_program_callback;
     ExitCallback exit_callback;
diff --git a/src/core/file_sys/control_metadata.cpp b/src/core/file_sys/control_metadata.cpp
index be25da2f6..50f44f598 100644
--- a/src/core/file_sys/control_metadata.cpp
+++ b/src/core/file_sys/control_metadata.cpp
@@ -1,6 +1,7 @@
 // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include "common/settings.h"
 #include "common/string_util.h"
 #include "common/swap.h"
 #include "core/file_sys/control_metadata.h"
@@ -37,6 +38,27 @@ std::string LanguageEntry::GetDeveloperName() const {
                                                        developer_name.size());
 }
 
+constexpr std::array<Language, 18> language_to_codes = {{
+    Language::Japanese,
+    Language::AmericanEnglish,
+    Language::French,
+    Language::German,
+    Language::Italian,
+    Language::Spanish,
+    Language::Chinese,
+    Language::Korean,
+    Language::Dutch,
+    Language::Portuguese,
+    Language::Russian,
+    Language::Taiwanese,
+    Language::BritishEnglish,
+    Language::CanadianFrench,
+    Language::LatinAmericanSpanish,
+    Language::Chinese,
+    Language::Taiwanese,
+    Language::BrazilianPortuguese,
+}};
+
 NACP::NACP() = default;
 
 NACP::NACP(VirtualFile file) {
@@ -45,9 +67,13 @@ NACP::NACP(VirtualFile file) {
 
 NACP::~NACP() = default;
 
-const LanguageEntry& NACP::GetLanguageEntry(Language language) const {
-    if (language != Language::Default) {
-        return raw.language_entries.at(static_cast<u8>(language));
+const LanguageEntry& NACP::GetLanguageEntry() const {
+    Language language = language_to_codes[Settings::values.language_index.GetValue()];
+
+    {
+        const auto& language_entry = raw.language_entries.at(static_cast<u8>(language));
+        if (!language_entry.GetApplicationName().empty())
+            return language_entry;
     }
 
     for (const auto& language_entry : raw.language_entries) {
@@ -55,16 +81,15 @@ const LanguageEntry& NACP::GetLanguageEntry(Language language) const {
             return language_entry;
     }
 
-    // Fallback to English
-    return GetLanguageEntry(Language::AmericanEnglish);
+    return raw.language_entries.at(static_cast<u8>(Language::AmericanEnglish));
 }
 
-std::string NACP::GetApplicationName(Language language) const {
-    return GetLanguageEntry(language).GetApplicationName();
+std::string NACP::GetApplicationName() const {
+    return GetLanguageEntry().GetApplicationName();
 }
 
-std::string NACP::GetDeveloperName(Language language) const {
-    return GetLanguageEntry(language).GetDeveloperName();
+std::string NACP::GetDeveloperName() const {
+    return GetLanguageEntry().GetDeveloperName();
 }
 
 u64 NACP::GetTitleId() const {
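GetLanguageEntry() now resolves in three steps: the entry matching the configured UI language, then the first non-empty entry, then American English. A standalone sketch of that fallback chain (the index constant and array size are illustrative):

```cpp
#include <array>
#include <string>

constexpr size_t AmericanEnglish = 1; // illustrative NACP entry index

const std::string& PickName(const std::array<std::string, 16>& names, size_t preferred) {
    if (!names.at(preferred).empty()) {
        return names.at(preferred); // 1) entry for the configured language
    }
    for (const auto& name : names) {
        if (!name.empty()) {
            return name; // 2) any language the title actually ships
        }
    }
    return names.at(AmericanEnglish); // 3) last resort, as in the diff
}

int main() {
    std::array<std::string, 16> names{};
    names[1] = "Example Title";
    return PickName(names, 0) == "Example Title" ? 0 : 1; // falls through to step 2
}
```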
diff --git a/src/core/file_sys/control_metadata.h b/src/core/file_sys/control_metadata.h
index 75295519c..6a81873b1 100644
--- a/src/core/file_sys/control_metadata.h
+++ b/src/core/file_sys/control_metadata.h
@@ -101,9 +101,9 @@ public:
     explicit NACP(VirtualFile file);
     ~NACP();
 
-    const LanguageEntry& GetLanguageEntry(Language language = Language::Default) const;
-    std::string GetApplicationName(Language language = Language::Default) const;
-    std::string GetDeveloperName(Language language = Language::Default) const;
+    const LanguageEntry& GetLanguageEntry() const;
+    std::string GetApplicationName() const;
+    std::string GetDeveloperName() const;
     u64 GetTitleId() const;
     u64 GetDLCBaseTitleId() const;
     std::string GetVersionString() const;
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 57eff72fe..ec1364452 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -970,14 +970,7 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v
            Common::Input::VibrationError::None;
 }
 
-bool EmulatedController::TestVibration(std::size_t device_index) {
-    if (device_index >= output_devices.size()) {
-        return false;
-    }
-    if (!output_devices[device_index]) {
-        return false;
-    }
-
+bool EmulatedController::IsVibrationEnabled(std::size_t device_index) {
     const auto player_index = NpadIdTypeToIndex(npad_id_type);
     const auto& player = Settings::values.players.GetValue()[player_index];
 
@@ -985,31 +978,15 @@ bool EmulatedController::TestVibration(std::size_t device_index) {
         return false;
     }
 
-    const Common::Input::VibrationStatus test_vibration = {
-        .low_amplitude = 0.001f,
-        .low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
-        .high_amplitude = 0.001f,
-        .high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
-        .type = Common::Input::VibrationAmplificationType::Test,
-    };
-
-    const Common::Input::VibrationStatus zero_vibration = {
-        .low_amplitude = DEFAULT_VIBRATION_VALUE.low_amplitude,
-        .low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
-        .high_amplitude = DEFAULT_VIBRATION_VALUE.high_amplitude,
-        .high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
-        .type = Common::Input::VibrationAmplificationType::Test,
-    };
-
-    // Send a slight vibration to test for rumble support
-    output_devices[device_index]->SetVibration(test_vibration);
+    if (device_index >= output_devices.size()) {
+        return false;
+    }
 
-    // Wait for about 15ms to ensure the controller is ready for the stop command
-    std::this_thread::sleep_for(std::chrono::milliseconds(15));
+    if (!output_devices[device_index]) {
+        return false;
+    }
 
-    // Stop any vibration and return the result
-    return output_devices[device_index]->SetVibration(zero_vibration) ==
-           Common::Input::VibrationError::None;
+    return output_devices[device_index]->IsVibrationEnabled();
 }
 
 bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) {
@@ -1048,6 +1025,7 @@ bool EmulatedController::HasNfc() const {
     case NpadStyleIndex::JoyconRight:
     case NpadStyleIndex::JoyconDual:
     case NpadStyleIndex::ProController:
+    case NpadStyleIndex::Handheld:
         break;
     default:
         return false;
@@ -1234,12 +1212,6 @@ bool EmulatedController::IsConnected(bool get_temporary_value) const {
     return is_connected;
 }
 
-bool EmulatedController::IsVibrationEnabled() const {
-    const auto player_index = NpadIdTypeToIndex(npad_id_type);
-    const auto& player = Settings::values.players.GetValue()[player_index];
-    return player.vibration_enabled;
-}
-
 NpadIdType EmulatedController::GetNpadIdType() const {
     std::scoped_lock lock{mutex};
     return npad_id_type;
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index 319226bf8..d004ca56a 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -206,9 +206,6 @@ public:
      */
     bool IsConnected(bool get_temporary_value = false) const;
 
-    /// Returns true if vibration is enabled
-    bool IsVibrationEnabled() const;
-
     /// Removes all callbacks created from input devices
     void UnloadInput();
 
@@ -339,7 +336,7 @@ public:
      * Sends a small vibration to the output device
      * @return true if SetVibration was successfull
      */
-    bool TestVibration(std::size_t device_index);
+    bool IsVibrationEnabled(std::size_t device_index);
 
     /**
      * Sets the desired data to be polled from a controller
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h
index 18fde8bd6..a86bec252 100644
--- a/src/core/hle/ipc_helpers.h
+++ b/src/core/hle/ipc_helpers.h
@@ -86,13 +86,13 @@ public:
         u32 num_domain_objects{};
         const bool always_move_handles{
             (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
-        if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) {
+        if (!ctx.GetManager()->IsDomain() || always_move_handles) {
             num_handles_to_move = num_objects_to_move;
         } else {
             num_domain_objects = num_objects_to_move;
         }
 
-        if (ctx.Session()->GetSessionRequestManager()->IsDomain()) {
+        if (ctx.GetManager()->IsDomain()) {
             raw_data_size +=
                 static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
             ctx.write_size += num_domain_objects;
@@ -125,8 +125,7 @@ public:
         if (!ctx.IsTipc()) {
             AlignWithPadding();
 
-            if (ctx.Session()->GetSessionRequestManager()->IsDomain() &&
-                ctx.HasDomainMessageHeader()) {
+            if (ctx.GetManager()->IsDomain() && ctx.HasDomainMessageHeader()) {
                 IPC::DomainMessageHeader domain_header{};
                 domain_header.num_objects = num_domain_objects;
                 PushRaw(domain_header);
@@ -146,18 +145,18 @@ public:
 
     template <class T>
     void PushIpcInterface(std::shared_ptr<T> iface) {
-        if (context->Session()->GetSessionRequestManager()->IsDomain()) {
+        if (context->GetManager()->IsDomain()) {
             context->AddDomainObject(std::move(iface));
         } else {
             kernel.CurrentProcess()->GetResourceLimit()->Reserve(
-                Kernel::LimitableResource::Sessions, 1);
+                Kernel::LimitableResource::SessionCountMax, 1);
 
             auto* session = Kernel::KSession::Create(kernel);
-            session->Initialize(nullptr, iface->GetServiceName(),
-                                std::make_shared<Kernel::SessionRequestManager>(kernel));
+            session->Initialize(nullptr, iface->GetServiceName());
+            iface->RegisterSession(&session->GetServerSession(),
+                                   std::make_shared<Kernel::SessionRequestManager>(kernel));
 
             context->AddMoveObject(&session->GetClientSession());
-            iface->ClientConnected(&session->GetServerSession());
         }
     }
 
@@ -387,7 +386,7 @@ public:
 
     template <class T>
     std::weak_ptr<T> PopIpcInterface() {
-        ASSERT(context->Session()->GetSessionRequestManager()->IsDomain());
+        ASSERT(context->GetManager()->IsDomain());
         ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
         return context->GetDomainHandler<T>(Pop<u32>() - 1);
     }
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index fe375769e..4b717d091 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -9,6 +9,10 @@ namespace Kernel::Board::Nintendo::Nx {
 
 class KSystemControl {
 public:
+    // This can be overridden as needed.
+    static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB
+
+public:
     class Init {
     public:
         // Initialization.
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 65576b8c4..fd911a3a5 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -49,4 +49,26 @@ bool GlobalSchedulerContext::IsLocked() const {
     return scheduler_lock.IsLockedByCurrentThread();
 }
 
+void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
+    ASSERT(IsLocked());
+
+    woken_dummy_threads.insert(thread);
+}
+
+void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
+    ASSERT(IsLocked());
+
+    woken_dummy_threads.erase(thread);
+}
+
+void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
+    ASSERT(IsLocked());
+
+    for (auto* thread : woken_dummy_threads) {
+        thread->DummyThreadEndWait();
+    }
+
+    woken_dummy_threads.clear();
+}
+
 } // namespace Kernel
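Dummy (host-backed) threads signaled while the scheduler lock is held are collected in `woken_dummy_threads` and woken together when the lock is released. A standalone sketch of this defer-and-flush pattern, with toy types in place of the kernel's:

```cpp
#include <cassert>
#include <set>

struct Thread {
    bool awake{};
    void EndWait() { awake = true; }
};

class Scheduler {
public:
    void Lock() { locked = true; }
    void Unlock() {
        // Mirrors WakeupWaitingDummyThreads(): flush the pending set on release.
        for (auto* t : pending) {
            t->EndWait();
        }
        pending.clear();
        locked = false;
    }
    void RegisterWakeup(Thread* t) {
        assert(locked); // mirrors ASSERT(IsLocked()) in the diff
        pending.insert(t);
    }

private:
    std::set<Thread*> pending;
    bool locked{};
};

int main() {
    Scheduler sched;
    Thread t;
    sched.Lock();
    sched.RegisterWakeup(&t); // wakeup deferred while the lock is held
    assert(!t.awake);
    sched.Unlock(); // flush: the dummy thread is woken exactly once
    assert(t.awake);
}
```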
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 67bb9852d..220ed6192 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -4,6 +4,7 @@
 #pragma once
 
 #include <atomic>
+#include <set>
 #include <vector>
 
 #include "common/common_types.h"
@@ -58,6 +59,10 @@ public:
     /// Returns true if the global scheduler lock is acquired
     bool IsLocked() const;
 
+    void UnregisterDummyThreadForWakeup(KThread* thread);
+    void RegisterDummyThreadForWakeup(KThread* thread);
+    void WakeupWaitingDummyThreads();
+
     [[nodiscard]] LockType& SchedulerLock() {
         return scheduler_lock;
     }
@@ -76,6 +81,9 @@ private:
     KSchedulerPriorityQueue priority_queue;
     LockType scheduler_lock;
 
+    /// Lists dummy threads pending wakeup on lock release
+    std::set<KThread*> woken_dummy_threads;
+
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<KThread*> thread_list;
     std::mutex global_list_guard;
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index e4f43a053..06010b8d1 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -16,6 +16,7 @@
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_server_port.h"
 #include "core/hle/kernel/k_server_session.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
@@ -26,18 +27,28 @@ namespace Kernel {
 
 SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
                                              ServiceThreadType thread_type)
-    : kernel{kernel_} {
-    if (thread_type == ServiceThreadType::CreateNew) {
-        service_thread = kernel.CreateServiceThread(service_name_);
-    } else {
-        service_thread = kernel.GetDefaultServiceThread();
-    }
-}
+    : kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew
+                                          ? kernel.CreateServiceThread(service_name_)
+                                          : kernel.GetDefaultServiceThread()} {}
 
 SessionRequestHandler::~SessionRequestHandler() {
     kernel.ReleaseServiceThread(service_thread);
 }
 
+void SessionRequestHandler::AcceptSession(KServerPort* server_port) {
+    auto* server_session = server_port->AcceptSession();
+    ASSERT(server_session != nullptr);
+
+    RegisterSession(server_session, std::make_shared<SessionRequestManager>(kernel));
+}
+
+void SessionRequestHandler::RegisterSession(KServerSession* server_session,
+                                            std::shared_ptr<SessionRequestManager> manager) {
+    manager->SetSessionHandler(shared_from_this());
+    service_thread.RegisterServerSession(server_session, manager);
+    server_session->Close();
+}
+
 SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kernel_} {}
 
 SessionRequestManager::~SessionRequestManager() = default;
@@ -92,7 +103,7 @@ Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_ses
     }
 
     // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
-    context.SetSessionRequestManager(server_session->GetSessionRequestManager());
+    ASSERT(context.GetManager().get() == this);
 
     // If there is a DomainMessageHeader, then this is CommandType "Request"
     const auto& domain_message_header = context.GetDomainMessageHeader();
@@ -130,31 +141,6 @@ Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_ses
     return ResultSuccess;
 }
 
-Result SessionRequestManager::QueueSyncRequest(KSession* parent,
-                                               std::shared_ptr<HLERequestContext>&& context) {
-    // Ensure we have a session request handler
-    if (this->HasSessionRequestHandler(*context)) {
-        if (auto strong_ptr = this->GetServiceThread().lock()) {
-            strong_ptr->QueueSyncRequest(*parent, std::move(context));
-        } else {
-            ASSERT_MSG(false, "strong_ptr is nullptr!");
-        }
-    } else {
-        ASSERT_MSG(false, "handler is invalid!");
-    }
-
-    return ResultSuccess;
-}
-
-void SessionRequestHandler::ClientConnected(KServerSession* session) {
-    session->GetSessionRequestManager()->SetSessionHandler(shared_from_this());
-
-    // Ensure our server session is tracked globally.
-    kernel.RegisterServerObject(session);
-}
-
-void SessionRequestHandler::ClientDisconnected(KServerSession* session) {}
-
 HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
                                      KServerSession* server_session_, KThread* thread_)
     : server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} {
@@ -214,7 +200,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
     // Padding to align to 16 bytes
     rp.AlignWithPadding();
 
-    if (Session()->GetSessionRequestManager()->IsDomain() &&
+    if (GetManager()->IsDomain() &&
         ((command_header->type == IPC::CommandType::Request ||
           command_header->type == IPC::CommandType::RequestWithContext) ||
          !incoming)) {
@@ -223,7 +209,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
         if (incoming || domain_message_header) {
             domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
         } else {
-            if (Session()->GetSessionRequestManager()->IsDomain()) {
+            if (GetManager()->IsDomain()) {
                 LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
             }
         }
@@ -316,12 +302,11 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
     // Write the domain objects to the command buffer, these go after the raw untranslated data.
     // TODO(Subv): This completely ignores C buffers.
 
-    if (server_session->GetSessionRequestManager()->IsDomain()) {
+    if (GetManager()->IsDomain()) {
         current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
         for (auto& object : outgoing_domain_objects) {
-            server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object));
-            cmd_buf[current_offset++] = static_cast<u32_le>(
-                server_session->GetSessionRequestManager()->DomainHandlerCount());
+            GetManager()->AppendDomainHandler(std::move(object));
+            cmd_buf[current_offset++] = static_cast<u32_le>(GetManager()->DomainHandlerCount());
         }
     }
 
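One reason the constructor above collapses into a member initializer: `service_thread` is now a plain `ServiceThread&` reference (see the header change below), and a reference member must be bound in the initializer list, so the old if/else assignment becomes a conditional expression. A standalone sketch with toy types:

```cpp
struct ServiceThread {};

struct Kernel {
    ServiceThread& CreateServiceThread() { return dedicated; }
    ServiceThread& GetDefaultServiceThread() { return shared; }
    ServiceThread dedicated, shared;
};

struct Handler {
    // A reference member cannot be default-initialized and reassigned, so the
    // choice between the two threads must happen in the initializer list.
    Handler(Kernel& kernel, bool create_new)
        : service_thread{create_new ? kernel.CreateServiceThread()
                                    : kernel.GetDefaultServiceThread()} {}
    ServiceThread& service_thread;
};

int main() {
    Kernel kernel;
    Handler dedicated{kernel, true};
    Handler shared{kernel, false};
    return &dedicated.service_thread != &shared.service_thread ? 0 : 1;
}
```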
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index a0522bca0..d87be72d6 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h | |||
| @@ -45,11 +45,13 @@ class KAutoObject; | |||
| 45 | class KernelCore; | 45 | class KernelCore; |
| 46 | class KEvent; | 46 | class KEvent; |
| 47 | class KHandleTable; | 47 | class KHandleTable; |
| 48 | class KServerPort; | ||
| 48 | class KProcess; | 49 | class KProcess; |
| 49 | class KServerSession; | 50 | class KServerSession; |
| 50 | class KThread; | 51 | class KThread; |
| 51 | class KReadableEvent; | 52 | class KReadableEvent; |
| 52 | class KSession; | 53 | class KSession; |
| 54 | class SessionRequestManager; | ||
| 53 | class ServiceThread; | 55 | class ServiceThread; |
| 54 | 56 | ||
| 55 | enum class ThreadWakeupReason; | 57 | enum class ThreadWakeupReason; |
| @@ -76,27 +78,17 @@ public: | |||
| 76 | virtual Result HandleSyncRequest(Kernel::KServerSession& session, | 78 | virtual Result HandleSyncRequest(Kernel::KServerSession& session, |
| 77 | Kernel::HLERequestContext& context) = 0; | 79 | Kernel::HLERequestContext& context) = 0; |
| 78 | 80 | ||
| 79 | /** | 81 | void AcceptSession(KServerPort* server_port); |
| 80 | * Signals that a client has just connected to this HLE handler and keeps the | 82 | void RegisterSession(KServerSession* server_session, |
| 81 | * associated ServerSession alive for the duration of the connection. | 83 | std::shared_ptr<SessionRequestManager> manager); |
| 82 | * @param server_session Owning pointer to the ServerSession associated with the connection. | ||
| 83 | */ | ||
| 84 | void ClientConnected(KServerSession* session); | ||
| 85 | 84 | ||
| 86 | /** | 85 | ServiceThread& GetServiceThread() const { |
| 87 | * Signals that a client has just disconnected from this HLE handler and releases the | ||
| 88 | * associated ServerSession. | ||
| 89 | * @param server_session ServerSession associated with the connection. | ||
| 90 | */ | ||
| 91 | void ClientDisconnected(KServerSession* session); | ||
| 92 | |||
| 93 | std::weak_ptr<ServiceThread> GetServiceThread() const { | ||
| 94 | return service_thread; | 86 | return service_thread; |
| 95 | } | 87 | } |
| 96 | 88 | ||
| 97 | protected: | 89 | protected: |
| 98 | KernelCore& kernel; | 90 | KernelCore& kernel; |
| 99 | std::weak_ptr<ServiceThread> service_thread; | 91 | ServiceThread& service_thread; |
| 100 | }; | 92 | }; |
| 101 | 93 | ||
| 102 | using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; | 94 | using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; |
| @@ -162,7 +154,7 @@ public: | |||
| 162 | session_handler = std::move(handler); | 154 | session_handler = std::move(handler); |
| 163 | } | 155 | } |
| 164 | 156 | ||
| 165 | std::weak_ptr<ServiceThread> GetServiceThread() const { | 157 | ServiceThread& GetServiceThread() const { |
| 166 | return session_handler->GetServiceThread(); | 158 | return session_handler->GetServiceThread(); |
| 167 | } | 159 | } |
| 168 | 160 | ||
| @@ -170,7 +162,6 @@ public: | |||
| 170 | 162 | ||
| 171 | Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context); | 163 | Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context); |
| 172 | Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context); | 164 | Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context); |
| 173 | Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context); | ||
| 174 | 165 | ||
| 175 | private: | 166 | private: |
| 176 | bool convert_to_domain{}; | 167 | bool convert_to_domain{}; |
| @@ -304,7 +295,7 @@ public: | |||
| 304 | */ | 295 | */ |
| 305 | template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>> | 296 | template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>> |
| 306 | std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const { | 297 | std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const { |
| 307 | if constexpr (Common::IsSTLContainer<T>) { | 298 | if constexpr (Common::IsContiguousContainer<T>) { |
| 308 | using ContiguousType = typename T::value_type; | 299 | using ContiguousType = typename T::value_type; |
| 309 | static_assert(std::is_trivially_copyable_v<ContiguousType>, | 300 | static_assert(std::is_trivially_copyable_v<ContiguousType>, |
| 310 | "Container to WriteBuffer must contain trivially copyable objects"); | 301 | "Container to WriteBuffer must contain trivially copyable objects"); |
| @@ -350,11 +341,11 @@ public: | |||
| 350 | 341 | ||
| 351 | template <typename T> | 342 | template <typename T> |
| 352 | std::shared_ptr<T> GetDomainHandler(std::size_t index) const { | 343 | std::shared_ptr<T> GetDomainHandler(std::size_t index) const { |
| 353 | return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock()); | 344 | return std::static_pointer_cast<T>(GetManager()->DomainHandler(index).lock()); |
| 354 | } | 345 | } |
| 355 | 346 | ||
| 356 | void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) { | 347 | void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) { |
| 357 | manager = std::move(manager_); | 348 | manager = manager_; |
| 358 | } | 349 | } |
| 359 | 350 | ||
| 360 | std::string Description() const; | 351 | std::string Description() const; |
| @@ -363,6 +354,10 @@ public: | |||
| 363 | return *thread; | 354 | return *thread; |
| 364 | } | 355 | } |
| 365 | 356 | ||
| 357 | std::shared_ptr<SessionRequestManager> GetManager() const { | ||
| 358 | return manager.lock(); | ||
| 359 | } | ||
| 360 | |||
| 366 | private: | 361 | private: |
| 367 | friend class IPC::ResponseBuilder; | 362 | friend class IPC::ResponseBuilder; |
| 368 | 363 | ||
| @@ -396,7 +391,7 @@ private: | |||
| 396 | u32 handles_offset{}; | 391 | u32 handles_offset{}; |
| 397 | u32 domain_offset{}; | 392 | u32 domain_offset{}; |
| 398 | 393 | ||
| 399 | std::weak_ptr<SessionRequestManager> manager; | 394 | std::weak_ptr<SessionRequestManager> manager{}; |
| 400 | 395 | ||
| 401 | KernelCore& kernel; | 396 | KernelCore& kernel; |
| 402 | Core::Memory::Memory& memory; | 397 | Core::Memory::Memory& memory; |
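The hle_ipc.h hunk above replaces the std::weak_ptr<ServiceThread> member with a plain ServiceThread&, which removes a lock-and-check at every call site. A minimal sketch of the difference, using a hypothetical QueueSyncRequest method as a stand-in for the real queuing API (not taken from this diff):

    #include <memory>

    struct ServiceThread {
        void QueueSyncRequest() {}  // stand-in for the real queuing API
    };

    // Before: every caller had to lock the weak_ptr and handle expiry.
    void QueueOld(std::weak_ptr<ServiceThread> service_thread) {
        if (auto thread = service_thread.lock()) {
            thread->QueueSyncRequest();
        }  // request silently dropped if the thread was already destroyed
    }

    // After: the kernel owns the thread for the handler's whole lifetime,
    // so the reference is always valid and the call is unconditional.
    void QueueNew(ServiceThread& service_thread) {
        service_thread.QueueSyncRequest();
    }
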
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index 477e4e407..bda098511 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp | |||
| @@ -10,7 +10,9 @@ | |||
| 10 | #include "core/hardware_properties.h" | 10 | #include "core/hardware_properties.h" |
| 11 | #include "core/hle/kernel/init/init_slab_setup.h" | 11 | #include "core/hle/kernel/init/init_slab_setup.h" |
| 12 | #include "core/hle/kernel/k_code_memory.h" | 12 | #include "core/hle/kernel/k_code_memory.h" |
| 13 | #include "core/hle/kernel/k_debug.h" | ||
| 13 | #include "core/hle/kernel/k_event.h" | 14 | #include "core/hle/kernel/k_event.h" |
| 15 | #include "core/hle/kernel/k_event_info.h" | ||
| 14 | #include "core/hle/kernel/k_memory_layout.h" | 16 | #include "core/hle/kernel/k_memory_layout.h" |
| 15 | #include "core/hle/kernel/k_memory_manager.h" | 17 | #include "core/hle/kernel/k_memory_manager.h" |
| 16 | #include "core/hle/kernel/k_page_buffer.h" | 18 | #include "core/hle/kernel/k_page_buffer.h" |
| @@ -22,6 +24,7 @@ | |||
| 22 | #include "core/hle/kernel/k_shared_memory.h" | 24 | #include "core/hle/kernel/k_shared_memory.h" |
| 23 | #include "core/hle/kernel/k_shared_memory_info.h" | 25 | #include "core/hle/kernel/k_shared_memory_info.h" |
| 24 | #include "core/hle/kernel/k_system_control.h" | 26 | #include "core/hle/kernel/k_system_control.h" |
| 27 | #include "core/hle/kernel/k_system_resource.h" | ||
| 25 | #include "core/hle/kernel/k_thread.h" | 28 | #include "core/hle/kernel/k_thread.h" |
| 26 | #include "core/hle/kernel/k_thread_local_page.h" | 29 | #include "core/hle/kernel/k_thread_local_page.h" |
| 27 | #include "core/hle/kernel/k_transfer_memory.h" | 30 | #include "core/hle/kernel/k_transfer_memory.h" |
| @@ -44,7 +47,10 @@ namespace Kernel::Init { | |||
| 44 | HANDLER(KThreadLocalPage, \ | 47 | HANDLER(KThreadLocalPage, \ |
| 45 | (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ | 48 | (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ |
| 46 | ##__VA_ARGS__) \ | 49 | ##__VA_ARGS__) \ |
| 47 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) | 50 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \ |
| 51 | HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ | ||
| 52 | HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ | ||
| 53 | HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) | ||
| 48 | 54 | ||
| 49 | namespace { | 55 | namespace { |
| 50 | 56 | ||
| @@ -73,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5; | |||
| 73 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; | 79 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; |
| 74 | constexpr size_t SlabCountKIoPool = 1; | 80 | constexpr size_t SlabCountKIoPool = 1; |
| 75 | constexpr size_t SlabCountKIoRegion = 6; | 81 | constexpr size_t SlabCountKIoRegion = 6; |
| 82 | constexpr size_t SlabCountKSessionRequestMappings = 40; | ||
| 76 | 83 | ||
| 77 | constexpr size_t SlabCountExtraKThread = 160; | 84 | constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread; |
| 85 | |||
| 86 | namespace test { | ||
| 87 | |||
| 88 | static_assert(KernelPageBufferHeapSize == | ||
| 89 | 2 * PageSize + (SlabCountKProcess + SlabCountKThread + | ||
| 90 | (SlabCountKProcess + SlabCountKThread) / 8) * | ||
| 91 | PageSize); | ||
| 92 | static_assert(KernelPageBufferAdditionalSize == | ||
| 93 | (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize); | ||
| 94 | |||
| 95 | } // namespace test | ||
| 78 | 96 | ||
| 79 | /// Helper function to translate from the slab virtual address to the reserved location in physical | 97 | /// Helper function to translate from the slab virtual address to the reserved location in physical |
| 80 | /// memory. | 98 | /// memory. |
| @@ -109,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd | |||
| 109 | } | 127 | } |
| 110 | 128 | ||
| 111 | size_t CalculateSlabHeapGapSize() { | 129 | size_t CalculateSlabHeapGapSize() { |
| 112 | constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB; | 130 | constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB; |
| 113 | static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); | 131 | static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); |
| 114 | return KernelSlabHeapGapSize; | 132 | return KernelSlabHeapGapSize; |
| 115 | } | 133 | } |
| @@ -134,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() { | |||
| 134 | .num_KDebug = SlabCountKDebug, | 152 | .num_KDebug = SlabCountKDebug, |
| 135 | .num_KIoPool = SlabCountKIoPool, | 153 | .num_KIoPool = SlabCountKIoPool, |
| 136 | .num_KIoRegion = SlabCountKIoRegion, | 154 | .num_KIoRegion = SlabCountKIoRegion, |
| 155 | .num_KSessionRequestMappings = SlabCountKSessionRequestMappings, | ||
| 137 | }; | 156 | }; |
| 138 | } | 157 | } |
| 139 | 158 | ||
| @@ -164,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) { | |||
| 164 | return size; | 183 | return size; |
| 165 | } | 184 | } |
| 166 | 185 | ||
| 167 | void InitializeKPageBufferSlabHeap(Core::System& system) { | ||
| 168 | auto& kernel = system.Kernel(); | ||
| 169 | |||
| 170 | const auto& counts = kernel.SlabResourceCounts(); | ||
| 171 | const size_t num_pages = | ||
| 172 | counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; | ||
| 173 | const size_t slab_size = num_pages * PageSize; | ||
| 174 | |||
| 175 | // Reserve memory from the system resource limit. | ||
| 176 | ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size)); | ||
| 177 | |||
| 178 | // Allocate memory for the slab. | ||
| 179 | constexpr auto AllocateOption = KMemoryManager::EncodeOption( | ||
| 180 | KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); | ||
| 181 | const PAddr slab_address = | ||
| 182 | kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); | ||
| 183 | ASSERT(slab_address != 0); | ||
| 184 | |||
| 185 | // Initialize the slabheap. | ||
| 186 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), | ||
| 187 | slab_size); | ||
| 188 | } | ||
| 189 | |||
| 190 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | 186 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { |
| 191 | auto& kernel = system.Kernel(); | 187 | auto& kernel = system.Kernel(); |
| 192 | 188 | ||
| @@ -258,3 +254,30 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | |||
| 258 | } | 254 | } |
| 259 | 255 | ||
| 260 | } // namespace Kernel::Init | 256 | } // namespace Kernel::Init |
| 257 | |||
| 258 | namespace Kernel { | ||
| 259 | |||
| 260 | void KPageBufferSlabHeap::Initialize(Core::System& system) { | ||
| 261 | auto& kernel = system.Kernel(); | ||
| 262 | const auto& counts = kernel.SlabResourceCounts(); | ||
| 263 | const size_t num_pages = | ||
| 264 | counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; | ||
| 265 | const size_t slab_size = num_pages * PageSize; | ||
| 266 | |||
| 267 | // Reserve memory from the system resource limit. | ||
| 268 | ASSERT( | ||
| 269 | kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemoryMax, slab_size)); | ||
| 270 | |||
| 271 | // Allocate memory for the slab. | ||
| 272 | constexpr auto AllocateOption = KMemoryManager::EncodeOption( | ||
| 273 | KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); | ||
| 274 | const PAddr slab_address = | ||
| 275 | kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); | ||
| 276 | ASSERT(slab_address != 0); | ||
| 277 | |||
| 278 | // Initialize the slabheap. | ||
| 279 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), | ||
| 280 | slab_size); | ||
| 281 | } | ||
| 282 | |||
| 283 | } // namespace Kernel | ||
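The relocated KPageBufferSlabHeap::Initialize sizes the slab from the process and thread slab counts plus a one-eighth safety margin, exactly the formula the new static_asserts in namespace test encode. A self-contained sketch of the same arithmetic, using illustrative counts (SlabCountKProcess = 80 and SlabCountKThread = 800 are assumptions for the example, not values quoted from this diff):

    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t PageSize = 0x1000;

    // Illustrative counts; the real values live elsewhere in init_slab_setup.cpp.
    constexpr std::size_t NumKProcess = 80;
    constexpr std::size_t NumKThread = 800;

    // One KPageBuffer page per process and thread, plus 1/8 headroom.
    constexpr std::size_t NumPages =
        NumKProcess + NumKThread + (NumKProcess + NumKThread) / 8;
    constexpr std::size_t SlabSize = NumPages * PageSize;

    int main() {
        // 80 + 800 + 110 = 990 pages -> 990 * 4 KiB reserved for the slab.
        std::printf("pages=%zu bytes=%zu\n", NumPages, SlabSize);
    }
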
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h index 13be63c87..5e22821bc 100644 --- a/src/core/hle/kernel/init/init_slab_setup.h +++ b/src/core/hle/kernel/init/init_slab_setup.h | |||
| @@ -33,11 +33,11 @@ struct KSlabResourceCounts { | |||
| 33 | size_t num_KDebug; | 33 | size_t num_KDebug; |
| 34 | size_t num_KIoPool; | 34 | size_t num_KIoPool; |
| 35 | size_t num_KIoRegion; | 35 | size_t num_KIoRegion; |
| 36 | size_t num_KSessionRequestMappings; | ||
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| 38 | void InitializeSlabResourceCounts(KernelCore& kernel); | 39 | void InitializeSlabResourceCounts(KernelCore& kernel); |
| 39 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); | 40 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); |
| 40 | void InitializeKPageBufferSlabHeap(Core::System& system); | ||
| 41 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); | 41 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); |
| 42 | 42 | ||
| 43 | } // namespace Kernel::Init | 43 | } // namespace Kernel::Init |
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp index 10265c23c..a850db3c4 100644 --- a/src/core/hle/kernel/k_class_token.cpp +++ b/src/core/hle/kernel/k_class_token.cpp | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include "core/hle/kernel/k_session.h" | 16 | #include "core/hle/kernel/k_session.h" |
| 17 | #include "core/hle/kernel/k_shared_memory.h" | 17 | #include "core/hle/kernel/k_shared_memory.h" |
| 18 | #include "core/hle/kernel/k_synchronization_object.h" | 18 | #include "core/hle/kernel/k_synchronization_object.h" |
| 19 | #include "core/hle/kernel/k_system_resource.h" | ||
| 19 | #include "core/hle/kernel/k_thread.h" | 20 | #include "core/hle/kernel/k_thread.h" |
| 20 | #include "core/hle/kernel/k_transfer_memory.h" | 21 | #include "core/hle/kernel/k_transfer_memory.h" |
| 21 | 22 | ||
| @@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject, | |||
| 119 | // static_assert(std::is_final_v<KCodeMemory> && | 120 | // static_assert(std::is_final_v<KCodeMemory> && |
| 120 | // std::is_base_of_v<KAutoObject, KCodeMemory>); | 121 | // std::is_base_of_v<KAutoObject, KCodeMemory>); |
| 121 | 122 | ||
| 123 | static_assert(std::is_base_of_v<KAutoObject, KSystemResource>); | ||
| 124 | |||
| 122 | } // namespace Kernel | 125 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h index ab20e00ff..e75b1c035 100644 --- a/src/core/hle/kernel/k_class_token.h +++ b/src/core/hle/kernel/k_class_token.h | |||
| @@ -10,6 +10,8 @@ namespace Kernel { | |||
| 10 | 10 | ||
| 11 | class KAutoObject; | 11 | class KAutoObject; |
| 12 | 12 | ||
| 13 | class KSystemResource; | ||
| 14 | |||
| 13 | class KClassTokenGenerator { | 15 | class KClassTokenGenerator { |
| 14 | public: | 16 | public: |
| 15 | using TokenBaseType = u16; | 17 | using TokenBaseType = u16; |
| @@ -58,7 +60,7 @@ private: | |||
| 58 | if constexpr (std::is_same<T, KAutoObject>::value) { | 60 | if constexpr (std::is_same<T, KAutoObject>::value) { |
| 59 | static_assert(T::ObjectType == ObjectType::KAutoObject); | 61 | static_assert(T::ObjectType == ObjectType::KAutoObject); |
| 60 | return 0; | 62 | return 0; |
| 61 | } else if constexpr (!std::is_final<T>::value) { | 63 | } else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) { |
| 62 | static_assert(ObjectType::BaseClassesStart <= T::ObjectType && | 64 | static_assert(ObjectType::BaseClassesStart <= T::ObjectType && |
| 63 | T::ObjectType < ObjectType::BaseClassesEnd); | 65 | T::ObjectType < ObjectType::BaseClassesEnd); |
| 64 | constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - | 66 | constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - |
| @@ -108,6 +110,8 @@ public: | |||
| 108 | KSessionRequest, | 110 | KSessionRequest, |
| 109 | KCodeMemory, | 111 | KCodeMemory, |
| 110 | 112 | ||
| 113 | KSystemResource, | ||
| 114 | |||
| 111 | // NOTE: True order for these has not been determined yet. | 115 | // NOTE: True order for these has not been determined yet. |
| 112 | KAlpha, | 116 | KAlpha, |
| 113 | KBeta, | 117 | KBeta, |
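KSystemResource is the odd case here: it is a non-final class, yet it is listed among the final derived types, so the generator's non-final branch has to exclude it by name. A reduced sketch of that dispatch shape (the class names and token values are simplified placeholders, not the real generator):

    #include <type_traits>

    struct Base {};                  // stands in for KAutoObject
    struct MidBase : Base {};        // an ordinary non-final base class
    struct Special : Base {};        // stands in for KSystemResource
    struct Leaf final : MidBase {};  // a final derived class

    template <typename T>
    constexpr int ClassToken() {
        if constexpr (std::is_same_v<T, Base>) {
            return 0;  // root of the hierarchy
        } else if constexpr (!std::is_final_v<T> && !std::is_same_v<T, Special>) {
            return 1;  // branch reserved for ordinary base classes
        } else {
            return 2;  // final classes and Special both take this path
        }
    }

    static_assert(ClassToken<Base>() == 0);
    static_assert(ClassToken<MidBase>() == 1);
    static_assert(ClassToken<Special>() == 2);  // excluded from the base branch
    static_assert(ClassToken<Leaf>() == 2);
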
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp index 3cb22ff4d..2ec623a58 100644 --- a/src/core/hle/kernel/k_client_port.cpp +++ b/src/core/hle/kernel/k_client_port.cpp | |||
| @@ -58,11 +58,10 @@ bool KClientPort::IsSignaled() const { | |||
| 58 | return num_sessions < max_sessions; | 58 | return num_sessions < max_sessions; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | Result KClientPort::CreateSession(KClientSession** out, | 61 | Result KClientPort::CreateSession(KClientSession** out) { |
| 62 | std::shared_ptr<SessionRequestManager> session_manager) { | ||
| 63 | // Reserve a new session from the resource limit. | 62 | // Reserve a new session from the resource limit. |
| 64 | KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(), | 63 | KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(), |
| 65 | LimitableResource::Sessions); | 64 | LimitableResource::SessionCountMax); |
| 66 | R_UNLESS(session_reservation.Succeeded(), ResultLimitReached); | 65 | R_UNLESS(session_reservation.Succeeded(), ResultLimitReached); |
| 67 | 66 | ||
| 68 | // Update the session counts. | 67 | // Update the session counts. |
| @@ -104,7 +103,7 @@ Result KClientPort::CreateSession(KClientSession** out, | |||
| 104 | } | 103 | } |
| 105 | 104 | ||
| 106 | // Initialize the session. | 105 | // Initialize the session. |
| 107 | session->Initialize(this, parent->GetName(), session_manager); | 106 | session->Initialize(this, parent->GetName()); |
| 108 | 107 | ||
| 109 | // Commit the session reservation. | 108 | // Commit the session reservation. |
| 110 | session_reservation.Commit(); | 109 | session_reservation.Commit(); |
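CreateSession keeps the scoped-reservation pattern: the reservation rolls back automatically on any early return unless Commit() is called after every fallible step succeeds. A minimal sketch of that RAII shape, assuming a plain counter in place of the real KResourceLimit:

    #include <cstddef>

    class ScopedReservation {
    public:
        explicit ScopedReservation(std::size_t& counter, std::size_t limit)
            : m_counter{counter}, m_succeeded{counter < limit} {
            if (m_succeeded) {
                ++m_counter;  // take the resource up front
            }
        }
        ~ScopedReservation() {
            if (m_succeeded && !m_committed) {
                --m_counter;  // roll back on any early return
            }
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; }

    private:
        std::size_t& m_counter;
        bool m_succeeded;
        bool m_committed{false};
    };

    bool CreateSessionLike(std::size_t& sessions, std::size_t max) {
        ScopedReservation reservation(sessions, max);
        if (!reservation.Succeeded()) {
            return false;  // ResultLimitReached in the real code
        }
        // ... allocate and initialize the session; failures roll back ...
        reservation.Commit();  // keep the reservation once everything worked
        return true;
    }
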
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h index e17eff28f..81046fb86 100644 --- a/src/core/hle/kernel/k_client_port.h +++ b/src/core/hle/kernel/k_client_port.h | |||
| @@ -52,8 +52,7 @@ public: | |||
| 52 | void Destroy() override; | 52 | void Destroy() override; |
| 53 | bool IsSignaled() const override; | 53 | bool IsSignaled() const override; |
| 54 | 54 | ||
| 55 | Result CreateSession(KClientSession** out, | 55 | Result CreateSession(KClientSession** out); |
| 56 | std::shared_ptr<SessionRequestManager> session_manager = nullptr); | ||
| 57 | 56 | ||
| 58 | private: | 57 | private: |
| 59 | std::atomic<s32> num_sessions{}; | 58 | std::atomic<s32> num_sessions{}; |
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h new file mode 100644 index 000000000..e3a0689c8 --- /dev/null +++ b/src/core/hle/kernel/k_debug.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "core/hle/kernel/k_auto_object.h" | ||
| 7 | #include "core/hle/kernel/slab_helpers.h" | ||
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | |||
| 11 | class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> { | ||
| 12 | KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject); | ||
| 13 | |||
| 14 | public: | ||
| 15 | explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | ||
| 16 | |||
| 17 | static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | ||
| 18 | }; | ||
| 19 | |||
| 20 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h index 9076c8fa3..ac80d60a1 100644 --- a/src/core/hle/kernel/k_dynamic_page_manager.h +++ b/src/core/hle/kernel/k_dynamic_page_manager.h | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <vector> | ||
| 7 | |||
| 6 | #include "common/alignment.h" | 8 | #include "common/alignment.h" |
| 7 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 8 | #include "core/hle/kernel/k_page_bitmap.h" | 10 | #include "core/hle/kernel/k_page_bitmap.h" |
| @@ -33,28 +35,36 @@ public: | |||
| 33 | return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); | 35 | return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); |
| 34 | } | 36 | } |
| 35 | 37 | ||
| 36 | Result Initialize(VAddr addr, size_t sz) { | 38 | Result Initialize(VAddr memory, size_t size, size_t align) { |
| 37 | // We need to have positive size. | 39 | // We need to have positive size. |
| 38 | R_UNLESS(sz > 0, ResultOutOfMemory); | 40 | R_UNLESS(size > 0, ResultOutOfMemory); |
| 39 | m_backing_memory.resize(sz); | 41 | m_backing_memory.resize(size); |
| 42 | |||
| 43 | // Set addresses. | ||
| 44 | m_address = memory; | ||
| 45 | m_aligned_address = Common::AlignDown(memory, align); | ||
| 40 | 46 | ||
| 41 | // Calculate management overhead. | 47 | // Calculate extents. |
| 42 | const size_t management_size = | 48 | const size_t managed_size = m_address + size - m_aligned_address; |
| 43 | KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer)); | 49 | const size_t overhead_size = Common::AlignUp( |
| 44 | const size_t allocatable_size = sz - management_size; | 50 | KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)), |
| 51 | sizeof(PageBuffer)); | ||
| 52 | R_UNLESS(overhead_size < size, ResultOutOfMemory); | ||
| 45 | 53 | ||
| 46 | // Set tracking fields. | 54 | // Set tracking fields. |
| 47 | m_address = addr; | 55 | m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer)); |
| 48 | m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer)); | 56 | m_count = m_size / sizeof(PageBuffer); |
| 49 | m_count = allocatable_size / sizeof(PageBuffer); | ||
| 50 | R_UNLESS(m_count > 0, ResultOutOfMemory); | ||
| 51 | 57 | ||
| 52 | // Clear the management region. | 58 | // Clear the management region. |
| 53 | u64* management_ptr = GetPointer<u64>(m_address + allocatable_size); | 59 | u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size); |
| 54 | std::memset(management_ptr, 0, management_size); | 60 | std::memset(management_ptr, 0, overhead_size); |
| 55 | 61 | ||
| 56 | // Initialize the bitmap. | 62 | // Initialize the bitmap. |
| 57 | m_page_bitmap.Initialize(management_ptr, m_count); | 63 | const size_t allocatable_region_size = |
| 64 | (m_address + size - overhead_size) - m_aligned_address; | ||
| 65 | ASSERT(allocatable_region_size >= sizeof(PageBuffer)); | ||
| 66 | |||
| 67 | m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer)); | ||
| 58 | 68 | ||
| 59 | // Free the pages to the bitmap. | 69 | // Free the pages to the bitmap. |
| 60 | for (size_t i = 0; i < m_count; i++) { | 70 | for (size_t i = 0; i < m_count; i++) { |
| @@ -62,7 +72,8 @@ public: | |||
| 62 | std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); | 72 | std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); |
| 63 | 73 | ||
| 64 | // Set the bit for the free page. | 74 | // Set the bit for the free page. |
| 65 | m_page_bitmap.SetBit(i); | 75 | m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) / |
| 76 | sizeof(PageBuffer)); | ||
| 66 | } | 77 | } |
| 67 | 78 | ||
| 68 | R_SUCCEED(); | 79 | R_SUCCEED(); |
| @@ -101,7 +112,28 @@ public: | |||
| 101 | m_page_bitmap.ClearBit(offset); | 112 | m_page_bitmap.ClearBit(offset); |
| 102 | m_peak = std::max(m_peak, (++m_used)); | 113 | m_peak = std::max(m_peak, (++m_used)); |
| 103 | 114 | ||
| 104 | return GetPointer<PageBuffer>(m_address) + offset; | 115 | return GetPointer<PageBuffer>(m_aligned_address) + offset; |
| 116 | } | ||
| 117 | |||
| 118 | PageBuffer* Allocate(size_t count) { | ||
| 119 | // Take the lock. | ||
| 120 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | ||
| 121 | KScopedSpinLock lk(m_lock); | ||
| 122 | |||
| 123 | // Find a free range of count pages. | ||
| 124 | s64 soffset = m_page_bitmap.FindFreeRange(count); | ||
| 125 | if (soffset < 0) [[unlikely]] { | ||
| 126 | return nullptr; | ||
| 127 | } | ||
| 128 | |||
| 129 | const size_t offset = static_cast<size_t>(soffset); | ||
| 130 | |||
| 131 | // Update our tracking. | ||
| 132 | m_page_bitmap.ClearRange(offset, count); | ||
| 133 | m_used += count; | ||
| 134 | m_peak = std::max(m_peak, m_used); | ||
| 135 | |||
| 136 | return GetPointer<PageBuffer>(m_aligned_address) + offset; | ||
| 105 | } | 137 | } |
| 106 | 138 | ||
| 107 | void Free(PageBuffer* pb) { | 139 | void Free(PageBuffer* pb) { |
| @@ -113,7 +145,7 @@ public: | |||
| 113 | KScopedSpinLock lk(m_lock); | 145 | KScopedSpinLock lk(m_lock); |
| 114 | 146 | ||
| 115 | // Set the bit for the free page. | 147 | // Set the bit for the free page. |
| 116 | size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer); | 148 | size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer); |
| 117 | m_page_bitmap.SetBit(offset); | 149 | m_page_bitmap.SetBit(offset); |
| 118 | 150 | ||
| 119 | // Decrement our used count. | 151 | // Decrement our used count. |
| @@ -127,6 +159,7 @@ private: | |||
| 127 | size_t m_peak{}; | 159 | size_t m_peak{}; |
| 128 | size_t m_count{}; | 160 | size_t m_count{}; |
| 129 | VAddr m_address{}; | 161 | VAddr m_address{}; |
| 162 | VAddr m_aligned_address{}; | ||
| 130 | size_t m_size{}; | 163 | size_t m_size{}; |
| 131 | 164 | ||
| 132 | // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. | 165 | // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. |
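With the new m_aligned_address, bitmap bit i no longer corresponds to m_address + i * PageSize but to m_aligned_address + i * PageSize, so Allocate and Free must translate in matching directions. A toy illustration of that index math (the PageSize value and addresses are made up for the example):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::uintptr_t PageSize = 0x1000;

    constexpr std::size_t AddrToBit(std::uintptr_t addr, std::uintptr_t aligned_base) {
        return (addr - aligned_base) / PageSize;  // direction used by Free()
    }

    constexpr std::uintptr_t BitToAddr(std::size_t bit, std::uintptr_t aligned_base) {
        return aligned_base + bit * PageSize;     // direction used by Allocate()
    }

    int main() {
        // Region starts at 0x5000 but is aligned down to 0x4000, so the
        // first real page lands on bit 1 of the bitmap, not bit 0.
        constexpr std::uintptr_t base = 0x5000;
        constexpr std::uintptr_t aligned = 0x4000;
        static_assert(AddrToBit(base, aligned) == 1);
        assert(BitToAddr(AddrToBit(base + 2 * PageSize, aligned), aligned) ==
               base + 2 * PageSize);
    }
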
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h index 1ce517e8e..b6a27d648 100644 --- a/src/core/hle/kernel/k_dynamic_resource_manager.h +++ b/src/core/hle/kernel/k_dynamic_resource_manager.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "common/common_funcs.h" | 6 | #include "common/common_funcs.h" |
| 7 | #include "core/hle/kernel/k_dynamic_slab_heap.h" | 7 | #include "core/hle/kernel/k_dynamic_slab_heap.h" |
| 8 | #include "core/hle/kernel/k_memory_block.h" | 8 | #include "core/hle/kernel/k_memory_block.h" |
| 9 | #include "core/hle/kernel/k_page_group.h" | ||
| 9 | 10 | ||
| 10 | namespace Kernel { | 11 | namespace Kernel { |
| 11 | 12 | ||
| @@ -51,8 +52,10 @@ private: | |||
| 51 | DynamicSlabType* m_slab_heap{}; | 52 | DynamicSlabType* m_slab_heap{}; |
| 52 | }; | 53 | }; |
| 53 | 54 | ||
| 55 | class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {}; | ||
| 54 | class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; | 56 | class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; |
| 55 | 57 | ||
| 58 | using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType; | ||
| 56 | using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; | 59 | using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; |
| 57 | 60 | ||
| 58 | } // namespace Kernel | 61 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index 78ca59463..27f70e5c5 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp | |||
| @@ -50,7 +50,7 @@ Result KEvent::Clear() { | |||
| 50 | void KEvent::PostDestroy(uintptr_t arg) { | 50 | void KEvent::PostDestroy(uintptr_t arg) { |
| 51 | // Release the event count resource the owner process holds. | 51 | // Release the event count resource the owner process holds. |
| 52 | KProcess* owner = reinterpret_cast<KProcess*>(arg); | 52 | KProcess* owner = reinterpret_cast<KProcess*>(arg); |
| 53 | owner->GetResourceLimit()->Release(LimitableResource::Events, 1); | 53 | owner->GetResourceLimit()->Release(LimitableResource::EventCountMax, 1); |
| 54 | owner->Close(); | 54 | owner->Close(); |
| 55 | } | 55 | } |
| 56 | 56 | ||
diff --git a/src/core/hle/kernel/k_event_info.h b/src/core/hle/kernel/k_event_info.h new file mode 100644 index 000000000..25b3ff594 --- /dev/null +++ b/src/core/hle/kernel/k_event_info.h | |||
| @@ -0,0 +1,64 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | |||
| 8 | #include <boost/intrusive/list.hpp> | ||
| 9 | |||
| 10 | #include "core/hle/kernel/slab_helpers.h" | ||
| 11 | #include "core/hle/kernel/svc_types.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> { | ||
| 16 | public: | ||
| 17 | struct InfoCreateThread { | ||
| 18 | u32 thread_id{}; | ||
| 19 | uintptr_t tls_address{}; | ||
| 20 | }; | ||
| 21 | |||
| 22 | struct InfoExitProcess { | ||
| 23 | Svc::ProcessExitReason reason{}; | ||
| 24 | }; | ||
| 25 | |||
| 26 | struct InfoExitThread { | ||
| 27 | Svc::ThreadExitReason reason{}; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct InfoException { | ||
| 31 | Svc::DebugException exception_type{}; | ||
| 32 | s32 exception_data_count{}; | ||
| 33 | uintptr_t exception_address{}; | ||
| 34 | std::array<uintptr_t, 4> exception_data{}; | ||
| 35 | }; | ||
| 36 | |||
| 37 | struct InfoSystemCall { | ||
| 38 | s64 tick{}; | ||
| 39 | s32 id{}; | ||
| 40 | }; | ||
| 41 | |||
| 42 | public: | ||
| 43 | KEventInfo() = default; | ||
| 44 | ~KEventInfo() = default; | ||
| 45 | |||
| 46 | public: | ||
| 47 | Svc::DebugEvent event{}; | ||
| 48 | u32 thread_id{}; | ||
| 49 | u32 flags{}; | ||
| 50 | bool is_attached{}; | ||
| 51 | bool continue_flag{}; | ||
| 52 | bool ignore_continue{}; | ||
| 53 | bool close_once{}; | ||
| 54 | union { | ||
| 55 | InfoCreateThread create_thread; | ||
| 56 | InfoExitProcess exit_process; | ||
| 57 | InfoExitThread exit_thread; | ||
| 58 | InfoException exception; | ||
| 59 | InfoSystemCall system_call; | ||
| 60 | } info{}; | ||
| 61 | KThread* debug_thread{}; | ||
| 62 | }; | ||
| 63 | |||
| 64 | } // namespace Kernel | ||
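KEventInfo is effectively a tagged union: the Svc::DebugEvent value selects which member of info is meaningful, and readers must consult the tag before touching the payload. A hedged usage sketch with simplified stand-in types (these are not the real Svc definitions):

    #include <cstdint>

    enum class DebugEvent : std::uint32_t { CreateThread, Exception };  // simplified

    struct EventInfoLike {
        DebugEvent event{};
        union {
            struct { std::uint32_t thread_id; std::uintptr_t tls_address; } create_thread;
            struct { std::uintptr_t exception_address; } exception;
        } info{};
    };

    EventInfoLike MakeCreateThread(std::uint32_t tid, std::uintptr_t tls) {
        EventInfoLike e{};
        e.event = DebugEvent::CreateThread;  // set the tag first...
        e.info.create_thread = {tid, tls};   // ...then fill the matching member
        return e;
    }
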
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp index e830ca46e..1c7a766c8 100644 --- a/src/core/hle/kernel/k_handle_table.cpp +++ b/src/core/hle/kernel/k_handle_table.cpp | |||
| @@ -5,14 +5,11 @@ | |||
| 5 | 5 | ||
| 6 | namespace Kernel { | 6 | namespace Kernel { |
| 7 | 7 | ||
| 8 | KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {} | ||
| 9 | KHandleTable::~KHandleTable() = default; | ||
| 10 | |||
| 11 | Result KHandleTable::Finalize() { | 8 | Result KHandleTable::Finalize() { |
| 12 | // Get the table and clear our record of it. | 9 | // Get the table and clear our record of it. |
| 13 | u16 saved_table_size = 0; | 10 | u16 saved_table_size = 0; |
| 14 | { | 11 | { |
| 15 | KScopedDisableDispatch dd(kernel); | 12 | KScopedDisableDispatch dd{m_kernel}; |
| 16 | KScopedSpinLock lk(m_lock); | 13 | KScopedSpinLock lk(m_lock); |
| 17 | 14 | ||
| 18 | std::swap(m_table_size, saved_table_size); | 15 | std::swap(m_table_size, saved_table_size); |
| @@ -25,28 +22,28 @@ Result KHandleTable::Finalize() { | |||
| 25 | } | 22 | } |
| 26 | } | 23 | } |
| 27 | 24 | ||
| 28 | return ResultSuccess; | 25 | R_SUCCEED(); |
| 29 | } | 26 | } |
| 30 | 27 | ||
| 31 | bool KHandleTable::Remove(Handle handle) { | 28 | bool KHandleTable::Remove(Handle handle) { |
| 32 | // Don't allow removal of a pseudo-handle. | 29 | // Don't allow removal of a pseudo-handle. |
| 33 | if (Svc::IsPseudoHandle(handle)) { | 30 | if (Svc::IsPseudoHandle(handle)) [[unlikely]] { |
| 34 | return false; | 31 | return false; |
| 35 | } | 32 | } |
| 36 | 33 | ||
| 37 | // Handles must not have reserved bits set. | 34 | // Handles must not have reserved bits set. |
| 38 | const auto handle_pack = HandlePack(handle); | 35 | const auto handle_pack = HandlePack(handle); |
| 39 | if (handle_pack.reserved != 0) { | 36 | if (handle_pack.reserved != 0) [[unlikely]] { |
| 40 | return false; | 37 | return false; |
| 41 | } | 38 | } |
| 42 | 39 | ||
| 43 | // Find the object and free the entry. | 40 | // Find the object and free the entry. |
| 44 | KAutoObject* obj = nullptr; | 41 | KAutoObject* obj = nullptr; |
| 45 | { | 42 | { |
| 46 | KScopedDisableDispatch dd(kernel); | 43 | KScopedDisableDispatch dd{m_kernel}; |
| 47 | KScopedSpinLock lk(m_lock); | 44 | KScopedSpinLock lk(m_lock); |
| 48 | 45 | ||
| 49 | if (this->IsValidHandle(handle)) { | 46 | if (this->IsValidHandle(handle)) [[likely]] { |
| 50 | const auto index = handle_pack.index; | 47 | const auto index = handle_pack.index; |
| 51 | 48 | ||
| 52 | obj = m_objects[index]; | 49 | obj = m_objects[index]; |
| @@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) { | |||
| 57 | } | 54 | } |
| 58 | 55 | ||
| 59 | // Close the object. | 56 | // Close the object. |
| 60 | kernel.UnregisterInUseObject(obj); | 57 | m_kernel.UnregisterInUseObject(obj); |
| 61 | obj->Close(); | 58 | obj->Close(); |
| 62 | return true; | 59 | return true; |
| 63 | } | 60 | } |
| 64 | 61 | ||
| 65 | Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { | 62 | Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { |
| 66 | KScopedDisableDispatch dd(kernel); | 63 | KScopedDisableDispatch dd{m_kernel}; |
| 67 | KScopedSpinLock lk(m_lock); | 64 | KScopedSpinLock lk(m_lock); |
| 68 | 65 | ||
| 69 | // Never exceed our capacity. | 66 | // Never exceed our capacity. |
| @@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { | |||
| 82 | *out_handle = EncodeHandle(static_cast<u16>(index), linear_id); | 79 | *out_handle = EncodeHandle(static_cast<u16>(index), linear_id); |
| 83 | } | 80 | } |
| 84 | 81 | ||
| 85 | return ResultSuccess; | 82 | R_SUCCEED(); |
| 86 | } | 83 | } |
| 87 | 84 | ||
| 88 | Result KHandleTable::Reserve(Handle* out_handle) { | 85 | Result KHandleTable::Reserve(Handle* out_handle) { |
| 89 | KScopedDisableDispatch dd(kernel); | 86 | KScopedDisableDispatch dd{m_kernel}; |
| 90 | KScopedSpinLock lk(m_lock); | 87 | KScopedSpinLock lk(m_lock); |
| 91 | 88 | ||
| 92 | // Never exceed our capacity. | 89 | // Never exceed our capacity. |
| 93 | R_UNLESS(m_count < m_table_size, ResultOutOfHandles); | 90 | R_UNLESS(m_count < m_table_size, ResultOutOfHandles); |
| 94 | 91 | ||
| 95 | *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); | 92 | *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); |
| 96 | return ResultSuccess; | 93 | R_SUCCEED(); |
| 97 | } | 94 | } |
| 98 | 95 | ||
| 99 | void KHandleTable::Unreserve(Handle handle) { | 96 | void KHandleTable::Unreserve(Handle handle) { |
| 100 | KScopedDisableDispatch dd(kernel); | 97 | KScopedDisableDispatch dd{m_kernel}; |
| 101 | KScopedSpinLock lk(m_lock); | 98 | KScopedSpinLock lk(m_lock); |
| 102 | 99 | ||
| 103 | // Unpack the handle. | 100 | // Unpack the handle. |
| @@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) { | |||
| 108 | ASSERT(reserved == 0); | 105 | ASSERT(reserved == 0); |
| 109 | ASSERT(linear_id != 0); | 106 | ASSERT(linear_id != 0); |
| 110 | 107 | ||
| 111 | if (index < m_table_size) { | 108 | if (index < m_table_size) [[likely]] { |
| 112 | // NOTE: This code does not check the linear id. | 109 | // NOTE: This code does not check the linear id. |
| 113 | ASSERT(m_objects[index] == nullptr); | 110 | ASSERT(m_objects[index] == nullptr); |
| 114 | this->FreeEntry(index); | 111 | this->FreeEntry(index); |
| @@ -116,7 +113,7 @@ void KHandleTable::Unreserve(Handle handle) { | |||
| 116 | } | 113 | } |
| 117 | 114 | ||
| 118 | void KHandleTable::Register(Handle handle, KAutoObject* obj) { | 115 | void KHandleTable::Register(Handle handle, KAutoObject* obj) { |
| 119 | KScopedDisableDispatch dd(kernel); | 116 | KScopedDisableDispatch dd{m_kernel}; |
| 120 | KScopedSpinLock lk(m_lock); | 117 | KScopedSpinLock lk(m_lock); |
| 121 | 118 | ||
| 122 | // Unpack the handle. | 119 | // Unpack the handle. |
| @@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) { | |||
| 127 | ASSERT(reserved == 0); | 124 | ASSERT(reserved == 0); |
| 128 | ASSERT(linear_id != 0); | 125 | ASSERT(linear_id != 0); |
| 129 | 126 | ||
| 130 | if (index < m_table_size) { | 127 | if (index < m_table_size) [[likely]] { |
| 131 | // Set the entry. | 128 | // Set the entry. |
| 132 | ASSERT(m_objects[index] == nullptr); | 129 | ASSERT(m_objects[index] == nullptr); |
| 133 | 130 | ||
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h index 0864a737c..65cae3b27 100644 --- a/src/core/hle/kernel/k_handle_table.h +++ b/src/core/hle/kernel/k_handle_table.h | |||
| @@ -21,33 +21,38 @@ namespace Kernel { | |||
| 21 | class KernelCore; | 21 | class KernelCore; |
| 22 | 22 | ||
| 23 | class KHandleTable { | 23 | class KHandleTable { |
| 24 | public: | ||
| 25 | YUZU_NON_COPYABLE(KHandleTable); | 24 | YUZU_NON_COPYABLE(KHandleTable); |
| 26 | YUZU_NON_MOVEABLE(KHandleTable); | 25 | YUZU_NON_MOVEABLE(KHandleTable); |
| 27 | 26 | ||
| 27 | public: | ||
| 28 | static constexpr size_t MaxTableSize = 1024; | 28 | static constexpr size_t MaxTableSize = 1024; |
| 29 | 29 | ||
| 30 | explicit KHandleTable(KernelCore& kernel_); | 30 | public: |
| 31 | ~KHandleTable(); | 31 | explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {} |
| 32 | 32 | ||
| 33 | Result Initialize(s32 size) { | 33 | Result Initialize(s32 size) { |
| 34 | // Check that the table size is valid. | ||
| 34 | R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); | 35 | R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); |
| 35 | 36 | ||
| 37 | // Lock. | ||
| 38 | KScopedDisableDispatch dd{m_kernel}; | ||
| 39 | KScopedSpinLock lk(m_lock); | ||
| 40 | |||
| 36 | // Initialize all fields. | 41 | // Initialize all fields. |
| 37 | m_max_count = 0; | 42 | m_max_count = 0; |
| 38 | m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size); | 43 | m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size); |
| 39 | m_next_linear_id = MinLinearId; | 44 | m_next_linear_id = MinLinearId; |
| 40 | m_count = 0; | 45 | m_count = 0; |
| 41 | m_free_head_index = -1; | 46 | m_free_head_index = -1; |
| 42 | 47 | ||
| 43 | // Free all entries. | 48 | // Free all entries. |
| 44 | for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) { | 49 | for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) { |
| 45 | m_objects[i] = nullptr; | 50 | m_objects[i] = nullptr; |
| 46 | m_entry_infos[i].next_free_index = i - 1; | 51 | m_entry_infos[i].next_free_index = static_cast<s16>(i - 1); |
| 47 | m_free_head_index = i; | 52 | m_free_head_index = i; |
| 48 | } | 53 | } |
| 49 | 54 | ||
| 50 | return ResultSuccess; | 55 | R_SUCCEED(); |
| 51 | } | 56 | } |
| 52 | 57 | ||
| 53 | size_t GetTableSize() const { | 58 | size_t GetTableSize() const { |
| @@ -66,13 +71,13 @@ public: | |||
| 66 | template <typename T = KAutoObject> | 71 | template <typename T = KAutoObject> |
| 67 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { | 72 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { |
| 68 | // Lock and look up in table. | 73 | // Lock and look up in table. |
| 69 | KScopedDisableDispatch dd(kernel); | 74 | KScopedDisableDispatch dd{m_kernel}; |
| 70 | KScopedSpinLock lk(m_lock); | 75 | KScopedSpinLock lk(m_lock); |
| 71 | 76 | ||
| 72 | if constexpr (std::is_same_v<T, KAutoObject>) { | 77 | if constexpr (std::is_same_v<T, KAutoObject>) { |
| 73 | return this->GetObjectImpl(handle); | 78 | return this->GetObjectImpl(handle); |
| 74 | } else { | 79 | } else { |
| 75 | if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) { | 80 | if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] { |
| 76 | return obj->DynamicCast<T*>(); | 81 | return obj->DynamicCast<T*>(); |
| 77 | } else { | 82 | } else { |
| 78 | return nullptr; | 83 | return nullptr; |
| @@ -85,13 +90,13 @@ public: | |||
| 85 | // Handle pseudo-handles. | 90 | // Handle pseudo-handles. |
| 86 | if constexpr (std::derived_from<KProcess, T>) { | 91 | if constexpr (std::derived_from<KProcess, T>) { |
| 87 | if (handle == Svc::PseudoHandle::CurrentProcess) { | 92 | if (handle == Svc::PseudoHandle::CurrentProcess) { |
| 88 | auto* const cur_process = kernel.CurrentProcess(); | 93 | auto* const cur_process = m_kernel.CurrentProcess(); |
| 89 | ASSERT(cur_process != nullptr); | 94 | ASSERT(cur_process != nullptr); |
| 90 | return cur_process; | 95 | return cur_process; |
| 91 | } | 96 | } |
| 92 | } else if constexpr (std::derived_from<KThread, T>) { | 97 | } else if constexpr (std::derived_from<KThread, T>) { |
| 93 | if (handle == Svc::PseudoHandle::CurrentThread) { | 98 | if (handle == Svc::PseudoHandle::CurrentThread) { |
| 94 | auto* const cur_thread = GetCurrentThreadPointer(kernel); | 99 | auto* const cur_thread = GetCurrentThreadPointer(m_kernel); |
| 95 | ASSERT(cur_thread != nullptr); | 100 | ASSERT(cur_thread != nullptr); |
| 96 | return cur_thread; | 101 | return cur_thread; |
| 97 | } | 102 | } |
| @@ -100,6 +105,37 @@ public: | |||
| 100 | return this->template GetObjectWithoutPseudoHandle<T>(handle); | 105 | return this->template GetObjectWithoutPseudoHandle<T>(handle); |
| 101 | } | 106 | } |
| 102 | 107 | ||
| 108 | KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const { | ||
| 109 | // Lock and look up in table. | ||
| 110 | KScopedDisableDispatch dd{m_kernel}; | ||
| 111 | KScopedSpinLock lk(m_lock); | ||
| 112 | |||
| 113 | return this->GetObjectImpl(handle); | ||
| 114 | } | ||
| 115 | |||
| 116 | KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const { | ||
| 117 | // Handle pseudo-handles. | ||
| 118 | ASSERT(cur_thread != nullptr); | ||
| 119 | if (handle == Svc::PseudoHandle::CurrentProcess) { | ||
| 120 | auto* const cur_process = | ||
| 121 | static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess())); | ||
| 122 | ASSERT(cur_process != nullptr); | ||
| 123 | return cur_process; | ||
| 124 | } | ||
| 125 | if (handle == Svc::PseudoHandle::CurrentThread) { | ||
| 126 | return static_cast<KAutoObject*>(cur_thread); | ||
| 127 | } | ||
| 128 | |||
| 129 | return GetObjectForIpcWithoutPseudoHandle(handle); | ||
| 130 | } | ||
| 131 | |||
| 132 | KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const { | ||
| 133 | KScopedDisableDispatch dd{m_kernel}; | ||
| 134 | KScopedSpinLock lk(m_lock); | ||
| 135 | |||
| 136 | return this->GetObjectByIndexImpl(out_handle, index); | ||
| 137 | } | ||
| 138 | |||
| 103 | Result Reserve(Handle* out_handle); | 139 | Result Reserve(Handle* out_handle); |
| 104 | void Unreserve(Handle handle); | 140 | void Unreserve(Handle handle); |
| 105 | 141 | ||
| @@ -112,7 +148,7 @@ public: | |||
| 112 | size_t num_opened; | 148 | size_t num_opened; |
| 113 | { | 149 | { |
| 114 | // Lock the table. | 150 | // Lock the table. |
| 115 | KScopedDisableDispatch dd(kernel); | 151 | KScopedDisableDispatch dd{m_kernel}; |
| 116 | KScopedSpinLock lk(m_lock); | 152 | KScopedSpinLock lk(m_lock); |
| 117 | for (num_opened = 0; num_opened < num_handles; num_opened++) { | 153 | for (num_opened = 0; num_opened < num_handles; num_opened++) { |
| 118 | // Get the current handle. | 154 | // Get the current handle. |
| @@ -120,13 +156,13 @@ public: | |||
| 120 | 156 | ||
| 121 | // Get the object for the current handle. | 157 | // Get the object for the current handle. |
| 122 | KAutoObject* cur_object = this->GetObjectImpl(cur_handle); | 158 | KAutoObject* cur_object = this->GetObjectImpl(cur_handle); |
| 123 | if (cur_object == nullptr) { | 159 | if (cur_object == nullptr) [[unlikely]] { |
| 124 | break; | 160 | break; |
| 125 | } | 161 | } |
| 126 | 162 | ||
| 127 | // Cast the current object to the desired type. | 163 | // Cast the current object to the desired type. |
| 128 | T* cur_t = cur_object->DynamicCast<T*>(); | 164 | T* cur_t = cur_object->DynamicCast<T*>(); |
| 129 | if (cur_t == nullptr) { | 165 | if (cur_t == nullptr) [[unlikely]] { |
| 130 | break; | 166 | break; |
| 131 | } | 167 | } |
| 132 | 168 | ||
| @@ -137,7 +173,7 @@ public: | |||
| 137 | } | 173 | } |
| 138 | 174 | ||
| 139 | // If we converted every object, succeed. | 175 | // If we converted every object, succeed. |
| 140 | if (num_opened == num_handles) { | 176 | if (num_opened == num_handles) [[likely]] { |
| 141 | return true; | 177 | return true; |
| 142 | } | 178 | } |
| 143 | 179 | ||
| @@ -191,21 +227,21 @@ private: | |||
| 191 | ASSERT(reserved == 0); | 227 | ASSERT(reserved == 0); |
| 192 | 228 | ||
| 193 | // Validate our indexing information. | 229 | // Validate our indexing information. |
| 194 | if (raw_value == 0) { | 230 | if (raw_value == 0) [[unlikely]] { |
| 195 | return false; | 231 | return false; |
| 196 | } | 232 | } |
| 197 | if (linear_id == 0) { | 233 | if (linear_id == 0) [[unlikely]] { |
| 198 | return false; | 234 | return false; |
| 199 | } | 235 | } |
| 200 | if (index >= m_table_size) { | 236 | if (index >= m_table_size) [[unlikely]] { |
| 201 | return false; | 237 | return false; |
| 202 | } | 238 | } |
| 203 | 239 | ||
| 204 | // Check that there's an object, and our serial id is correct. | 240 | // Check that there's an object, and our serial id is correct. |
| 205 | if (m_objects[index] == nullptr) { | 241 | if (m_objects[index] == nullptr) [[unlikely]] { |
| 206 | return false; | 242 | return false; |
| 207 | } | 243 | } |
| 208 | if (m_entry_infos[index].GetLinearId() != linear_id) { | 244 | if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] { |
| 209 | return false; | 245 | return false; |
| 210 | } | 246 | } |
| 211 | 247 | ||
| @@ -215,11 +251,11 @@ private: | |||
| 215 | KAutoObject* GetObjectImpl(Handle handle) const { | 251 | KAutoObject* GetObjectImpl(Handle handle) const { |
| 216 | // Handles must not have reserved bits set. | 252 | // Handles must not have reserved bits set. |
| 217 | const auto handle_pack = HandlePack(handle); | 253 | const auto handle_pack = HandlePack(handle); |
| 218 | if (handle_pack.reserved != 0) { | 254 | if (handle_pack.reserved != 0) [[unlikely]] { |
| 219 | return nullptr; | 255 | return nullptr; |
| 220 | } | 256 | } |
| 221 | 257 | ||
| 222 | if (this->IsValidHandle(handle)) { | 258 | if (this->IsValidHandle(handle)) [[likely]] { |
| 223 | return m_objects[handle_pack.index]; | 259 | return m_objects[handle_pack.index]; |
| 224 | } else { | 260 | } else { |
| 225 | return nullptr; | 261 | return nullptr; |
| @@ -227,9 +263,8 @@ private: | |||
| 227 | } | 263 | } |
| 228 | 264 | ||
| 229 | KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { | 265 | KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { |
| 230 | |||
| 231 | // Index must be in bounds. | 266 | // Index must be in bounds. |
| 232 | if (index >= m_table_size) { | 267 | if (index >= m_table_size) [[unlikely]] { |
| 233 | return nullptr; | 268 | return nullptr; |
| 234 | } | 269 | } |
| 235 | 270 | ||
| @@ -244,18 +279,15 @@ private: | |||
| 244 | 279 | ||
| 245 | private: | 280 | private: |
| 246 | union HandlePack { | 281 | union HandlePack { |
| 247 | HandlePack() = default; | 282 | constexpr HandlePack() = default; |
| 248 | HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} | 283 | constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} |
| 249 | 284 | ||
| 250 | u32 raw; | 285 | u32 raw{}; |
| 251 | BitField<0, 15, u32> index; | 286 | BitField<0, 15, u32> index; |
| 252 | BitField<15, 15, u32> linear_id; | 287 | BitField<15, 15, u32> linear_id; |
| 253 | BitField<30, 2, u32> reserved; | 288 | BitField<30, 2, u32> reserved; |
| 254 | }; | 289 | }; |
| 255 | 290 | ||
| 256 | static constexpr u16 MinLinearId = 1; | ||
| 257 | static constexpr u16 MaxLinearId = 0x7FFF; | ||
| 258 | |||
| 259 | static constexpr Handle EncodeHandle(u16 index, u16 linear_id) { | 291 | static constexpr Handle EncodeHandle(u16 index, u16 linear_id) { |
| 260 | HandlePack handle{}; | 292 | HandlePack handle{}; |
| 261 | handle.index.Assign(index); | 293 | handle.index.Assign(index); |
| @@ -264,6 +296,10 @@ private: | |||
| 264 | return handle.raw; | 296 | return handle.raw; |
| 265 | } | 297 | } |
| 266 | 298 | ||
| 299 | private: | ||
| 300 | static constexpr u16 MinLinearId = 1; | ||
| 301 | static constexpr u16 MaxLinearId = 0x7FFF; | ||
| 302 | |||
| 267 | union EntryInfo { | 303 | union EntryInfo { |
| 268 | u16 linear_id; | 304 | u16 linear_id; |
| 269 | s16 next_free_index; | 305 | s16 next_free_index; |
| @@ -271,21 +307,21 @@ private: | |||
| 271 | constexpr u16 GetLinearId() const { | 307 | constexpr u16 GetLinearId() const { |
| 272 | return linear_id; | 308 | return linear_id; |
| 273 | } | 309 | } |
| 274 | constexpr s16 GetNextFreeIndex() const { | 310 | constexpr s32 GetNextFreeIndex() const { |
| 275 | return next_free_index; | 311 | return next_free_index; |
| 276 | } | 312 | } |
| 277 | }; | 313 | }; |
| 278 | 314 | ||
| 279 | private: | 315 | private: |
| 316 | KernelCore& m_kernel; | ||
| 280 | std::array<EntryInfo, MaxTableSize> m_entry_infos{}; | 317 | std::array<EntryInfo, MaxTableSize> m_entry_infos{}; |
| 281 | std::array<KAutoObject*, MaxTableSize> m_objects{}; | 318 | std::array<KAutoObject*, MaxTableSize> m_objects{}; |
| 282 | s32 m_free_head_index{-1}; | 319 | mutable KSpinLock m_lock; |
| 320 | s32 m_free_head_index{}; | ||
| 283 | u16 m_table_size{}; | 321 | u16 m_table_size{}; |
| 284 | u16 m_max_count{}; | 322 | u16 m_max_count{}; |
| 285 | u16 m_next_linear_id{MinLinearId}; | 323 | u16 m_next_linear_id{}; |
| 286 | u16 m_count{}; | 324 | u16 m_count{}; |
| 287 | mutable KSpinLock m_lock; | ||
| 288 | KernelCore& kernel; | ||
| 289 | }; | 325 | }; |
| 290 | 326 | ||
| 291 | } // namespace Kernel | 327 | } // namespace Kernel |
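EncodeHandle packs a 15-bit table index and a 15-bit linear id into one 32-bit handle, leaving the top two bits reserved, which is why IsValidHandle rejects nonzero reserved bits and a zero linear id. The same packing written with plain shifts, as a sanity check on the BitField layout above:

    #include <cstdint>

    constexpr std::uint32_t Encode(std::uint16_t index, std::uint16_t linear_id) {
        // bits 0..14: index, bits 15..29: linear id, bits 30..31: reserved (zero)
        return (static_cast<std::uint32_t>(index) & 0x7FFF) |
               ((static_cast<std::uint32_t>(linear_id) & 0x7FFF) << 15);
    }

    constexpr std::uint16_t DecodeIndex(std::uint32_t handle) {
        return static_cast<std::uint16_t>(handle & 0x7FFF);
    }

    constexpr std::uint16_t DecodeLinearId(std::uint32_t handle) {
        return static_cast<std::uint16_t>((handle >> 15) & 0x7FFF);
    }

    static_assert(DecodeIndex(Encode(42, 7)) == 42);
    static_assert(DecodeLinearId(Encode(42, 7)) == 7);
    static_assert((Encode(42, 7) >> 30) == 0);  // reserved bits stay clear
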
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index 9444f6bd2..3b6e7baff 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h | |||
| @@ -35,26 +35,32 @@ enum class KMemoryState : u32 { | |||
| 35 | FlagCanMapProcess = (1 << 23), | 35 | FlagCanMapProcess = (1 << 23), |
| 36 | FlagCanChangeAttribute = (1 << 24), | 36 | FlagCanChangeAttribute = (1 << 24), |
| 37 | FlagCanCodeMemory = (1 << 25), | 37 | FlagCanCodeMemory = (1 << 25), |
| 38 | FlagLinearMapped = (1 << 26), | ||
| 38 | 39 | ||
| 39 | FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | | 40 | FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | |
| 40 | FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | | 41 | FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | |
| 41 | FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | | 42 | FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | |
| 42 | FlagReferenceCounted | FlagCanChangeAttribute, | 43 | FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped, |
| 43 | 44 | ||
| 44 | FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | | 45 | FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | |
| 45 | FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | | 46 | FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | |
| 46 | FlagCanAlignedDeviceMap | FlagReferenceCounted, | 47 | FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped, |
| 47 | 48 | ||
| 48 | FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap, | 49 | FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap | |
| 50 | FlagLinearMapped, | ||
| 49 | 51 | ||
| 50 | Free = static_cast<u32>(Svc::MemoryState::Free), | 52 | Free = static_cast<u32>(Svc::MemoryState::Free), |
| 51 | Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped, | 53 | Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap | |
| 54 | FlagCanAlignedDeviceMap, | ||
| 52 | Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, | 55 | Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, |
| 53 | Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, | 56 | Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, |
| 54 | CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | | 57 | CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | |
| 55 | FlagCanCodeMemory, | 58 | FlagCanCodeMemory, |
| 56 | Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted, | ||
| 57 | Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, | 59 | Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, |
| 60 | Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted | | ||
| 61 | FlagLinearMapped, | ||
| 62 | |||
| 63 | // Alias was removed after 1.0.0. | ||
| 58 | 64 | ||
| 59 | AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | | 65 | AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | |
| 60 | FlagCanCodeAlias, | 66 | FlagCanCodeAlias, |
| @@ -67,18 +73,18 @@ enum class KMemoryState : u32 { | |||
| 67 | Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | | 73 | Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | |
| 68 | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 74 | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 69 | 75 | ||
| 70 | ThreadLocal = | 76 | ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped, |
| 71 | static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, | ||
| 72 | 77 | ||
| 73 | Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc | | 78 | Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | |
| 74 | FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | | 79 | FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | |
| 75 | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 80 | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 76 | 81 | ||
| 77 | SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc | | 82 | SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc | |
| 78 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 83 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 79 | 84 | ||
| 80 | SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | | 85 | SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | |
| 81 | FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 86 | FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc | |
| 87 | FlagCanUseNonDeviceIpc, | ||
| 82 | 88 | ||
| 83 | Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), | 89 | Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), |
| 84 | 90 | ||
| @@ -91,69 +97,69 @@ enum class KMemoryState : u32 { | |||
| 91 | Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, | 97 | Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, |
| 92 | 98 | ||
| 93 | GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | | 99 | GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | |
| 94 | FlagReferenceCounted | FlagCanDebug, | 100 | FlagReferenceCounted | FlagCanDebug | FlagLinearMapped, |
| 95 | CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted, | 101 | CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted | |
| 102 | FlagLinearMapped, | ||
| 96 | 103 | ||
| 97 | Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped, | 104 | Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped, |
| 105 | |||
| 106 | Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted | | ||
| 107 | FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap | | ||
| 108 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | ||
| 98 | }; | 109 | }; |
| 99 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryState); | 110 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryState); |
| 100 | 111 | ||
| 101 | static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); | 112 | static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); |
| 102 | static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001); | 113 | static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001); |
| 103 | static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); | 114 | static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); |
| 104 | static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03); | 115 | static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03); |
| 105 | static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04); | 116 | static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04); |
| 106 | static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05); | 117 | static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05); |
| 107 | static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006); | 118 | static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006); |
| 108 | static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08); | 119 | |
| 109 | static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09); | 120 | static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08); |
| 110 | static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A); | 121 | static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09); |
| 111 | static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B); | 122 | static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A); |
| 112 | static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C); | 123 | static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B); |
| 113 | static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D); | 124 | static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C); |
| 114 | static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E); | 125 | static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D); |
| 115 | static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F); | 126 | static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E); |
| 127 | static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F); | ||
| 116 | static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); | 128 | static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); |
| 117 | static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811); | 129 | static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811); |
| 118 | static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812); | 130 | static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812); |
| 119 | static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); | 131 | static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); |
| 120 | static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214); | 132 | static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214); |
| 121 | static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015); | 133 | static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015); |
| 122 | static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); | 134 | static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); |
| 135 | static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817); | ||
| 123 | 136 | ||
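Worth spelling out what those static_assert updates encode: each KMemoryState value is the raw Svc::MemoryState id in the low byte with capability flags OR'd in above it, so inserting FlagLinearMapped into a state's flag set changes its composed constant (Code: 0x00DC7E03 → 0x04DC7E03, and so on down the list). A compile-time sketch of the pattern, with invented flag positions rather than the kernel's real ones:

```cpp
#include <cstdint>

// Illustrative only: two SVC state ids and two flag bits at assumed positions.
enum class SvcState : std::uint32_t { Free = 0x00, Normal = 0x05 };

enum class State : std::uint32_t {
    Mask = 0xFF,  // low byte holds the raw SVC state id

    FlagMapped = 1u << 12,
    FlagReferenceCounted = 1u << 13,

    Normal = static_cast<std::uint32_t>(SvcState::Normal) | FlagMapped | FlagReferenceCounted,
};

// Any flag reshuffle changes the composed constant and breaks the build,
// which is exactly what the block of static_asserts above guards against.
static_assert(static_cast<std::uint32_t>(State::Normal) == 0x00003005);

// The SVC-visible state is recovered by masking, as GetSvcMemoryInfo() does
// with KMemoryState::Mask.
constexpr SvcState ToSvc(State s) {
    return static_cast<SvcState>(static_cast<std::uint32_t>(s) &
                                 static_cast<std::uint32_t>(State::Mask));
}
static_assert(ToSvc(State::Normal) == SvcState::Normal);
```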
| 124 | enum class KMemoryPermission : u8 { | 137 | enum class KMemoryPermission : u8 { |
| 125 | None = 0, | 138 | None = 0, |
| 126 | All = static_cast<u8>(~None), | 139 | All = static_cast<u8>(~None), |
| 127 | 140 | ||
| 128 | Read = 1 << 0, | ||
| 129 | Write = 1 << 1, | ||
| 130 | Execute = 1 << 2, | ||
| 131 | |||
| 132 | ReadAndWrite = Read | Write, | ||
| 133 | ReadAndExecute = Read | Execute, | ||
| 134 | |||
| 135 | UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | | ||
| 136 | Svc::MemoryPermission::Execute), | ||
| 137 | |||
| 138 | KernelShift = 3, | 141 | KernelShift = 3, |
| 139 | 142 | ||
| 140 | KernelRead = Read << KernelShift, | 143 | KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift, |
| 141 | KernelWrite = Write << KernelShift, | 144 | KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift, |
| 142 | KernelExecute = Execute << KernelShift, | 145 | KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift, |
| 143 | 146 | ||
| 144 | NotMapped = (1 << (2 * KernelShift)), | 147 | NotMapped = (1 << (2 * KernelShift)), |
| 145 | 148 | ||
| 146 | KernelReadWrite = KernelRead | KernelWrite, | 149 | KernelReadWrite = KernelRead | KernelWrite, |
| 147 | KernelReadExecute = KernelRead | KernelExecute, | 150 | KernelReadExecute = KernelRead | KernelExecute, |
| 148 | 151 | ||
| 149 | UserRead = Read | KernelRead, | 152 | UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead, |
| 150 | UserWrite = Write | KernelWrite, | 153 | UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite, |
| 151 | UserExecute = Execute, | 154 | UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute), |
| 152 | 155 | ||
| 153 | UserReadWrite = UserRead | UserWrite, | 156 | UserReadWrite = UserRead | UserWrite, |
| 154 | UserReadExecute = UserRead | UserExecute, | 157 | UserReadExecute = UserRead | UserExecute, |
| 155 | 158 | ||
| 156 | IpcLockChangeMask = NotMapped | UserReadWrite | 159 | UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | |
| 160 | Svc::MemoryPermission::Execute), | ||
| 161 | |||
| 162 | IpcLockChangeMask = NotMapped | UserReadWrite, | ||
| 157 | }; | 163 | }; |
| 158 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); | 164 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); |
| 159 | 165 | ||
| @@ -210,13 +216,15 @@ struct KMemoryInfo { | |||
| 210 | 216 | ||
| 211 | constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { | 217 | constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { |
| 212 | return { | 218 | return { |
| 213 | .addr = m_address, | 219 | .base_address = m_address, |
| 214 | .size = m_size, | 220 | .size = m_size, |
| 215 | .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), | 221 | .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), |
| 216 | .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), | 222 | .attribute = |
| 217 | .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), | 223 | static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), |
| 218 | .ipc_refcount = m_ipc_lock_count, | 224 | .permission = |
| 219 | .device_refcount = m_device_use_count, | 225 | static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), |
| 226 | .ipc_count = m_ipc_lock_count, | ||
| 227 | .device_count = m_device_use_count, | ||
| 220 | .padding = {}, | 228 | .padding = {}, |
| 221 | }; | 229 | }; |
| 222 | } | 230 | } |
| @@ -468,6 +476,7 @@ public: | |||
| 468 | 476 | ||
| 469 | constexpr void UpdateDeviceDisableMergeStateForShareLeft( | 477 | constexpr void UpdateDeviceDisableMergeStateForShareLeft( |
| 470 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { | 478 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 479 | // New permission/right aren't used. | ||
| 471 | if (left) { | 480 | if (left) { |
| 472 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | 481 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( |
| 473 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); | 482 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); |
| @@ -478,6 +487,7 @@ public: | |||
| 478 | 487 | ||
| 479 | constexpr void UpdateDeviceDisableMergeStateForShareRight( | 488 | constexpr void UpdateDeviceDisableMergeStateForShareRight( |
| 480 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { | 489 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 490 | // New permission/left aren't used. | ||
| 481 | if (right) { | 491 | if (right) { |
| 482 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | 492 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( |
| 483 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); | 493 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); |
| @@ -494,6 +504,8 @@ public: | |||
| 494 | 504 | ||
| 495 | constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, | 505 | constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 496 | bool right) { | 506 | bool right) { |
| 507 | // New permission isn't used. | ||
| 508 | |||
| 497 | // We must either be shared or have a zero lock count. | 509 | // We must either be shared or have a zero lock count. |
| 498 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || | 510 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || |
| 499 | m_device_use_count == 0); | 511 | m_device_use_count == 0); |
| @@ -509,6 +521,7 @@ public: | |||
| 509 | 521 | ||
| 510 | constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( | 522 | constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( |
| 511 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { | 523 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 524 | // New permission/right aren't used. | ||
| 512 | 525 | ||
| 513 | if (left) { | 526 | if (left) { |
| 514 | if (!m_device_disable_merge_left_count) { | 527 | if (!m_device_disable_merge_left_count) { |
| @@ -528,6 +541,8 @@ public: | |||
| 528 | 541 | ||
| 529 | constexpr void UpdateDeviceDisableMergeStateForUnshareRight( | 542 | constexpr void UpdateDeviceDisableMergeStateForUnshareRight( |
| 530 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { | 543 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 544 | // New permission/left aren't used. | ||
| 545 | |||
| 531 | if (right) { | 546 | if (right) { |
| 532 | const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; | 547 | const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; |
| 533 | ASSERT(old_device_disable_merge_right_count > 0); | 548 | ASSERT(old_device_disable_merge_right_count > 0); |
| @@ -546,6 +561,8 @@ public: | |||
| 546 | 561 | ||
| 547 | constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, | 562 | constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 548 | bool right) { | 563 | bool right) { |
| 564 | // New permission isn't used. | ||
| 565 | |||
| 549 | // We must be shared. | 566 | // We must be shared. |
| 550 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | 567 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); |
| 551 | 568 | ||
| @@ -563,6 +580,7 @@ public: | |||
| 563 | 580 | ||
| 564 | constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, | 581 | constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 565 | bool right) { | 582 | bool right) { |
| 583 | // New permission isn't used. | ||
| 566 | 584 | ||
| 567 | // We must be shared. | 585 | // We must be shared. |
| 568 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | 586 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); |
| @@ -613,6 +631,8 @@ public: | |||
| 613 | 631 | ||
| 614 | constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, | 632 | constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 615 | [[maybe_unused]] bool right) { | 633 | [[maybe_unused]] bool right) { |
| 634 | // New permission isn't used. | ||
| 635 | |||
| 616 | // We must be locked. | 636 | // We must be locked. |
| 617 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); | 637 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); |
| 618 | 638 | ||
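The comments added through these hunks all restate one contract: the helpers deliberately ignore the [[maybe_unused]] permission argument and whichever edge they are not responsible for. What they do maintain is a per-edge share counter plus a disable-merge attribute bit that stays set while any device share pins that edge. A reduced sketch of that bookkeeping, with invented names standing in for the K* types:

```cpp
#include <cassert>
#include <cstdint>

struct MergeState {
    std::uint16_t left_count = 0;  // outstanding device shares pinning the left edge
    bool disable_merge_left = false;

    void ShareToDeviceLeft() {
        ++left_count;
        disable_merge_left = true;  // block may no longer merge with its left neighbor
    }

    void UnshareToDeviceLeft() {
        const std::uint16_t old_count = left_count--;
        assert(old_count > 0);  // mirrors the ASSERT on the old counter value
        if (old_count == 1) {
            disable_merge_left = false;  // last sharer gone; merging is legal again
        }
    }
};

int main() {
    MergeState m;
    m.ShareToDeviceLeft();
    m.ShareToDeviceLeft();
    m.UnshareToDeviceLeft();
    assert(m.disable_merge_left);   // one share still outstanding
    m.UnshareToDeviceLeft();
    assert(!m.disable_merge_left);  // counter drained, attribute cleared
    return 0;
}
```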
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp index 55dc296d0..72c3ee4b7 100644 --- a/src/core/hle/kernel/k_memory_layout.cpp +++ b/src/core/hle/kernel/k_memory_layout.cpp | |||
| @@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_ | |||
| 153 | } | 153 | } |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | size_t KMemoryLayout::GetResourceRegionSizeForInit() { | 156 | size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) { |
| 157 | // Calculate resource region size based on whether we allow extra threads. | 157 | return KernelResourceSize + KSystemControl::SecureAppletMemorySize + |
| 158 | const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | 158 | (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0); |
| 159 | size_t resource_region_size = | ||
| 160 | KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0); | ||
| 161 | |||
| 162 | return resource_region_size; | ||
| 163 | } | 159 | } |
| 164 | 160 | ||
| 165 | } // namespace Kernel | 161 | } // namespace Kernel |
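GetResourceRegionSizeForInit() no longer decides for itself whether extra thread resources are wanted; the caller passes use_extra_resource in, the secure-applet carveout is always reserved, and the extra slab-heap and page-buffer sizes ride on the same flag. With the constants from the header hunk below, and placeholders where a value isn't shown in this diff, the arithmetic is roughly:

```cpp
#include <cstddef>

// Shown in the k_memory_layout.h hunk below:
constexpr std::size_t KernelSlabHeapAdditionalSize = 0x148000;
constexpr std::size_t KernelPageBufferAdditionalSize = 0x33C000;

// Placeholders -- the real values come from headers not in this diff:
constexpr std::size_t KernelResourceSize = 0x1000000;
constexpr std::size_t SecureAppletMemorySize = 0x400000;

constexpr std::size_t GetResourceRegionSizeForInit(bool use_extra_resource) {
    return KernelResourceSize + SecureAppletMemorySize +
           (use_extra_resource
                ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize
                : 0);
}

// The flag buys exactly the two additional carveouts, nothing else.
static_assert(GetResourceRegionSizeForInit(true) - GetResourceRegionSizeForInit(false) ==
              KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize);
```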
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 884fc623a..fd6e1d3e6 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h | |||
| @@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB; | |||
| 60 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; | 60 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; |
| 61 | 61 | ||
| 62 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. | 62 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. |
| 63 | constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000; | 63 | constexpr size_t KernelPageBufferHeapSize = 0x3E0000; |
| 64 | constexpr size_t KernelSlabHeapAdditionalSize = 0x148000; | ||
| 65 | constexpr size_t KernelPageBufferAdditionalSize = 0x33C000; | ||
| 64 | 66 | ||
| 65 | constexpr std::size_t KernelResourceSize = | 67 | constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + |
| 66 | KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; | 68 | KernelSlabHeapSize + KernelPageBufferHeapSize; |
| 67 | 69 | ||
| 68 | constexpr bool IsKernelAddressKey(VAddr key) { | 70 | constexpr bool IsKernelAddressKey(VAddr key) { |
| 69 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; | 71 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; |
| @@ -168,6 +170,11 @@ public: | |||
| 168 | KMemoryRegionType_VirtualDramKernelTraceBuffer)); | 170 | KMemoryRegionType_VirtualDramKernelTraceBuffer)); |
| 169 | } | 171 | } |
| 170 | 172 | ||
| 173 | const KMemoryRegion& GetSecureAppletMemoryRegion() { | ||
| 174 | return Dereference(GetVirtualMemoryRegionTree().FindByType( | ||
| 175 | KMemoryRegionType_VirtualDramKernelSecureAppletMemory)); | ||
| 176 | } | ||
| 177 | |||
| 171 | const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { | 178 | const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { |
| 172 | return Dereference(FindVirtualLinear(address)); | 179 | return Dereference(FindVirtualLinear(address)); |
| 173 | } | 180 | } |
| @@ -229,7 +236,7 @@ public: | |||
| 229 | 236 | ||
| 230 | void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, | 237 | void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, |
| 231 | VAddr linear_virtual_start); | 238 | VAddr linear_virtual_start); |
| 232 | static size_t GetResourceRegionSizeForInit(); | 239 | static size_t GetResourceRegionSizeForInit(bool use_extra_resource); |
| 233 | 240 | ||
| 234 | auto GetKernelRegionExtents() const { | 241 | auto GetKernelRegionExtents() const { |
| 235 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); | 242 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); |
| @@ -279,6 +286,10 @@ public: | |||
| 279 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | 286 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 280 | KMemoryRegionType_DramKernelSlab); | 287 | KMemoryRegionType_DramKernelSlab); |
| 281 | } | 288 | } |
| 289 | auto GetKernelSecureAppletMemoryRegionPhysicalExtents() { | ||
| 290 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 291 | KMemoryRegionType_DramKernelSecureAppletMemory); | ||
| 292 | } | ||
| 282 | auto GetKernelPageTableHeapRegionPhysicalExtents() const { | 293 | auto GetKernelPageTableHeapRegionPhysicalExtents() const { |
| 283 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | 294 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 284 | KMemoryRegionType_DramKernelPtHeap); | 295 | KMemoryRegionType_DramKernelPtHeap); |
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 646711505..c4bf306e8 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp | |||
| @@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) { | |||
| 29 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { | 29 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { |
| 30 | return KMemoryManager::Pool::SystemNonSecure; | 30 | return KMemoryManager::Pool::SystemNonSecure; |
| 31 | } else { | 31 | } else { |
| 32 | ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool"); | 32 | UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool"); |
| 33 | return {}; | ||
| 34 | } | 33 | } |
| 35 | } | 34 | } |
| 36 | 35 | ||
| 37 | } // namespace | 36 | } // namespace |
| 38 | 37 | ||
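Two things to note in the hunk above: the fallback now uses UNREACHABLE_MSG, which documents that no return value exists on that path, and the dispatch itself relies on the (type | flag) == type idiom, a branch-free test that type already contains every bit of flag. A sketch of the idiom with invented region bits:

```cpp
#include <cstdint>

constexpr std::uint32_t Flag_UserPool = 1u << 8;
constexpr std::uint32_t Flag_NonSecure = 1u << 9;

// ORing flags into type changes nothing iff all of them were already set.
constexpr bool ContainsAll(std::uint32_t type, std::uint32_t flags) {
    return (type | flags) == type;
}

static_assert(ContainsAll(Flag_UserPool | Flag_NonSecure, Flag_NonSecure));
static_assert(!ContainsAll(Flag_UserPool, Flag_NonSecure));
```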
| 39 | KMemoryManager::KMemoryManager(Core::System& system_) | 38 | KMemoryManager::KMemoryManager(Core::System& system) |
| 40 | : system{system_}, pool_locks{ | 39 | : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()}, |
| 41 | KLightLock{system_.Kernel()}, | 40 | m_pool_locks{ |
| 42 | KLightLock{system_.Kernel()}, | 41 | KLightLock{system.Kernel()}, |
| 43 | KLightLock{system_.Kernel()}, | 42 | KLightLock{system.Kernel()}, |
| 44 | KLightLock{system_.Kernel()}, | 43 | KLightLock{system.Kernel()}, |
| 45 | } {} | 44 | KLightLock{system.Kernel()}, |
| 45 | } {} | ||
| 46 | 46 | ||
| 47 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { | 47 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { |
| 48 | 48 | ||
| 49 | // Clear the management region to zero. | 49 | // Clear the management region to zero. |
| 50 | const VAddr management_region_end = management_region + management_region_size; | 50 | const VAddr management_region_end = management_region + management_region_size; |
| 51 | // std::memset(GetVoidPointer(management_region), 0, management_region_size); | ||
| 51 | 52 | ||
| 52 | // Reset our manager count. | 53 | // Reset our manager count. |
| 53 | num_managers = 0; | 54 | m_num_managers = 0; |
| 54 | 55 | ||
| 55 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. | 56 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. |
| 56 | while (num_managers != MaxManagerCount) { | 57 | while (m_num_managers != MaxManagerCount) { |
| 57 | // Locate the region that should initialize the current manager. | 58 | // Locate the region that should initialize the current manager. |
| 58 | PAddr region_address = 0; | 59 | PAddr region_address = 0; |
| 59 | size_t region_size = 0; | 60 | size_t region_size = 0; |
| 60 | Pool region_pool = Pool::Count; | 61 | Pool region_pool = Pool::Count; |
| 61 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | 62 | for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { |
| 62 | // We only care about regions that we need to create managers for. | 63 | // We only care about regions that we need to create managers for. |
| 63 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | 64 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { |
| 64 | continue; | 65 | continue; |
| 65 | } | 66 | } |
| 66 | 67 | ||
| 67 | // We want to initialize the managers in order. | 68 | // We want to initialize the managers in order. |
| 68 | if (it.GetAttributes() != num_managers) { | 69 | if (it.GetAttributes() != m_num_managers) { |
| 69 | continue; | 70 | continue; |
| 70 | } | 71 | } |
| 71 | 72 | ||
| @@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 97 | } | 98 | } |
| 98 | 99 | ||
| 99 | // Initialize a new manager for the region. | 100 | // Initialize a new manager for the region. |
| 100 | Impl* manager = std::addressof(managers[num_managers++]); | 101 | Impl* manager = std::addressof(m_managers[m_num_managers++]); |
| 101 | ASSERT(num_managers <= managers.size()); | 102 | ASSERT(m_num_managers <= m_managers.size()); |
| 102 | 103 | ||
| 103 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, | 104 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, |
| 104 | management_region_end, region_pool); | 105 | management_region_end, region_pool); |
| @@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 107 | 108 | ||
| 108 | // Insert the manager into the pool list. | 109 | // Insert the manager into the pool list. |
| 109 | const auto region_pool_index = static_cast<u32>(region_pool); | 110 | const auto region_pool_index = static_cast<u32>(region_pool); |
| 110 | if (pool_managers_tail[region_pool_index] == nullptr) { | 111 | if (m_pool_managers_tail[region_pool_index] == nullptr) { |
| 111 | pool_managers_head[region_pool_index] = manager; | 112 | m_pool_managers_head[region_pool_index] = manager; |
| 112 | } else { | 113 | } else { |
| 113 | pool_managers_tail[region_pool_index]->SetNext(manager); | 114 | m_pool_managers_tail[region_pool_index]->SetNext(manager); |
| 114 | manager->SetPrev(pool_managers_tail[region_pool_index]); | 115 | manager->SetPrev(m_pool_managers_tail[region_pool_index]); |
| 115 | } | 116 | } |
| 116 | pool_managers_tail[region_pool_index] = manager; | 117 | m_pool_managers_tail[region_pool_index] = manager; |
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | // Free each region to its corresponding heap. | 120 | // Free each region to its corresponding heap. |
| @@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 121 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); | 122 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); |
| 122 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; | 123 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; |
| 123 | const PAddr ini_last = ini_end - 1; | 124 | const PAddr ini_last = ini_end - 1; |
| 124 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | 125 | for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { |
| 125 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | 126 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { |
| 126 | // Get the manager for the region. | 127 | // Get the manager for the region. |
| 127 | auto index = it.GetAttributes(); | 128 | auto& manager = m_managers[it.GetAttributes()]; |
| 128 | auto& manager = managers[index]; | ||
| 129 | 129 | ||
| 130 | const PAddr cur_start = it.GetAddress(); | 130 | const PAddr cur_start = it.GetAddress(); |
| 131 | const PAddr cur_last = it.GetLastAddress(); | 131 | const PAddr cur_last = it.GetLastAddress(); |
| @@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | // Update the used size for all managers. | 164 | // Update the used size for all managers. |
| 165 | for (size_t i = 0; i < num_managers; ++i) { | 165 | for (size_t i = 0; i < m_num_managers; ++i) { |
| 166 | managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); | 166 | m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); |
| 167 | } | 167 | } |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { | ||
| 171 | UNREACHABLE(); | ||
| 172 | } | ||
| 173 | |||
| 174 | void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) { | ||
| 175 | UNREACHABLE(); | ||
| 176 | } | ||
| 177 | |||
| 170 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { | 178 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { |
| 171 | // Early return if we're allocating no pages. | 179 | // Early return if we're allocating no pages. |
| 172 | if (num_pages == 0) { | 180 | if (num_pages == 0) { |
| @@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 175 | 183 | ||
| 176 | // Lock the pool that we're allocating from. | 184 | // Lock the pool that we're allocating from. |
| 177 | const auto [pool, dir] = DecodeOption(option); | 185 | const auto [pool, dir] = DecodeOption(option); |
| 178 | KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]); | 186 | KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]); |
| 179 | 187 | ||
| 180 | // Choose a heap based on our page size request. | 188 | // Choose a heap based on our page size request. |
| 181 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); | 189 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); |
| @@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 185 | PAddr allocated_block = 0; | 193 | PAddr allocated_block = 0; |
| 186 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; | 194 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; |
| 187 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { | 195 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { |
| 188 | allocated_block = chosen_manager->AllocateBlock(heap_index, true); | 196 | allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages); |
| 189 | if (allocated_block != 0) { | 197 | if (allocated_block != 0) { |
| 190 | break; | 198 | break; |
| 191 | } | 199 | } |
| @@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 196 | return 0; | 204 | return 0; |
| 197 | } | 205 | } |
| 198 | 206 | ||
| 199 | // If we allocated more than we need, free some. | 207 | // Maintain the optimized memory bitmap, if we should. |
| 200 | const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index); | 208 | if (m_has_optimized_process[static_cast<size_t>(pool)]) { |
| 201 | if (allocated_pages > num_pages) { | 209 | UNIMPLEMENTED(); |
| 202 | chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); | ||
| 203 | } | 210 | } |
| 204 | 211 | ||
| 205 | // Open the first reference to the pages. | 212 | // Open the first reference to the pages. |
| @@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 209 | } | 216 | } |
| 210 | 217 | ||
| 211 | Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, | 218 | Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, |
| 212 | Direction dir, bool random) { | 219 | Direction dir, bool unoptimized, bool random) { |
| 213 | // Choose a heap based on our page size request. | 220 | // Choose a heap based on our page size request. |
| 214 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); | 221 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); |
| 215 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); | 222 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); |
| 216 | 223 | ||
| 217 | // Ensure that we don't leave anything un-freed. | 224 | // Ensure that we don't leave anything un-freed. |
| 218 | auto group_guard = SCOPE_GUARD({ | 225 | ON_RESULT_FAILURE { |
| 219 | for (const auto& it : out->Nodes()) { | 226 | for (const auto& it : out->Nodes()) { |
| 220 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress()); | 227 | auto& manager = this->GetManager(it.GetAddress()); |
| 221 | const size_t num_pages_to_free = | 228 | const size_t node_num_pages = |
| 222 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); | 229 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); |
| 223 | manager.Free(it.GetAddress(), num_pages_to_free); | 230 | manager.Free(it.GetAddress(), node_num_pages); |
| 224 | } | 231 | } |
| 225 | }); | 232 | out->Finalize(); |
| 233 | }; | ||
| 226 | 234 | ||
| 227 | // Keep allocating until we've allocated all our pages. | 235 | // Keep allocating until we've allocated all our pages. |
| 228 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { | 236 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { |
| @@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, | |||
| 236 | break; | 244 | break; |
| 237 | } | 245 | } |
| 238 | 246 | ||
| 239 | // Safely add it to our group. | 247 | // Ensure we don't leak the block if we fail. |
| 240 | { | 248 | ON_RESULT_FAILURE_2 { |
| 241 | auto block_guard = | 249 | cur_manager->Free(allocated_block, pages_per_alloc); |
| 242 | SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); }); | 250 | }; |
| 243 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | 251 | |
| 244 | block_guard.Cancel(); | 252 | // Add the block to our group. |
| 253 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | ||
| 254 | |||
| 255 | // Maintain the optimized memory bitmap, if we should. | ||
| 256 | if (unoptimized) { | ||
| 257 | UNIMPLEMENTED(); | ||
| 245 | } | 258 | } |
| 246 | 259 | ||
| 247 | num_pages -= pages_per_alloc; | 260 | num_pages -= pages_per_alloc; |
| @@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, | |||
| 253 | R_UNLESS(num_pages == 0, ResultOutOfMemory); | 266 | R_UNLESS(num_pages == 0, ResultOutOfMemory); |
| 254 | 267 | ||
| 255 | // We succeeded! | 268 | // We succeeded! |
| 256 | group_guard.Cancel(); | 269 | R_SUCCEED(); |
| 257 | return ResultSuccess; | ||
| 258 | } | 270 | } |
| 259 | 271 | ||
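The SCOPE_GUARD/Cancel() pairs become ON_RESULT_FAILURE (and ON_RESULT_FAILURE_2 for a second guard in the same scope), so cleanup reads declaratively and the success path no longer has to remember to cancel anything. yuzu's actual macro is not shown in this diff; a minimal sketch of the underlying idea, using an explicit flag where the real macro hooks the function's pending Result:

```cpp
#include <cstdio>
#include <utility>

// Destructor-based guard that fires only while `failed` is still true.
template <typename F>
class ResultFailureGuard {
public:
    ResultFailureGuard(F f, const bool& failed) : m_f(std::move(f)), m_failed(failed) {}
    ~ResultFailureGuard() {
        if (m_failed) {
            m_f();
        }
    }

private:
    F m_f;
    const bool& m_failed;
};

bool AllocateSomething(bool simulate_error) {
    bool failed = true;  // assume failure until the success path flips it
    ResultFailureGuard guard([] { std::puts("cleanup: freeing partial allocation"); }, failed);

    if (simulate_error) {
        return false;  // early failure return: the guard undoes partial work
    }

    failed = false;  // success: the guard becomes a no-op
    return true;
}

int main() {
    AllocateSomething(true);   // prints the cleanup message
    AllocateSomething(false);  // prints nothing
    return 0;
}
```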
| 260 | Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) { | 272 | Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) { |
| @@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 266 | 278 | ||
| 267 | // Lock the pool that we're allocating from. | 279 | // Lock the pool that we're allocating from. |
| 268 | const auto [pool, dir] = DecodeOption(option); | 280 | const auto [pool, dir] = DecodeOption(option); |
| 269 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); | 281 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 270 | 282 | ||
| 271 | // Allocate the page group. | 283 | // Allocate the page group. |
| 272 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | 284 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, |
| 285 | m_has_optimized_process[static_cast<size_t>(pool)], true)); | ||
| 273 | 286 | ||
| 274 | // Open the first reference to the pages. | 287 | // Open the first reference to the pages. |
| 275 | for (const auto& block : out->Nodes()) { | 288 | for (const auto& block : out->Nodes()) { |
| @@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 277 | size_t remaining_pages = block.GetNumPages(); | 290 | size_t remaining_pages = block.GetNumPages(); |
| 278 | while (remaining_pages > 0) { | 291 | while (remaining_pages > 0) { |
| 279 | // Get the manager for the current address. | 292 | // Get the manager for the current address. |
| 280 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | 293 | auto& manager = this->GetManager(cur_address); |
| 281 | 294 | ||
| 282 | // Process part or all of the block. | 295 | // Process part or all of the block. |
| 283 | const size_t cur_pages = | 296 | const size_t cur_pages = |
| @@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 290 | } | 303 | } |
| 291 | } | 304 | } |
| 292 | 305 | ||
| 293 | return ResultSuccess; | 306 | R_SUCCEED(); |
| 294 | } | 307 | } |
| 295 | 308 | ||
| 296 | Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, | 309 | Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, |
| 297 | u64 process_id, u8 fill_pattern) { | 310 | u64 process_id, u8 fill_pattern) { |
| 298 | ASSERT(out != nullptr); | 311 | ASSERT(out != nullptr); |
| 299 | ASSERT(out->GetNumPages() == 0); | 312 | ASSERT(out->GetNumPages() == 0); |
| 300 | 313 | ||
| @@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag | |||
| 302 | const auto [pool, dir] = DecodeOption(option); | 315 | const auto [pool, dir] = DecodeOption(option); |
| 303 | 316 | ||
| 304 | // Allocate the memory. | 317 | // Allocate the memory. |
| 318 | bool optimized; | ||
| 305 | { | 319 | { |
| 306 | // Lock the pool that we're allocating from. | 320 | // Lock the pool that we're allocating from. |
| 307 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); | 321 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 322 | |||
| 323 | // Check if we have an optimized process. | ||
| 324 | const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)]; | ||
| 325 | const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id; | ||
| 308 | 326 | ||
| 309 | // Allocate the page group. | 327 | // Allocate the page group. |
| 310 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | 328 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, |
| 329 | false)); | ||
| 311 | 330 | ||
| 312 | // Open the first reference to the pages. | 331 | // Set whether we should optimize. |
| 313 | for (const auto& block : out->Nodes()) { | 332 | optimized = has_optimized && is_optimized; |
| 314 | PAddr cur_address = block.GetAddress(); | ||
| 315 | size_t remaining_pages = block.GetNumPages(); | ||
| 316 | while (remaining_pages > 0) { | ||
| 317 | // Get the manager for the current address. | ||
| 318 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | ||
| 319 | |||
| 320 | // Process part or all of the block. | ||
| 321 | const size_t cur_pages = | ||
| 322 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 323 | manager.OpenFirst(cur_address, cur_pages); | ||
| 324 | |||
| 325 | // Advance. | ||
| 326 | cur_address += cur_pages * PageSize; | ||
| 327 | remaining_pages -= cur_pages; | ||
| 328 | } | ||
| 329 | } | ||
| 330 | } | 333 | } |
| 331 | 334 | ||
| 332 | // Set all the allocated memory. | 335 | // Perform optimized memory tracking, if we should. |
| 333 | for (const auto& block : out->Nodes()) { | 336 | if (optimized) { |
| 334 | std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, | 337 | // Iterate over the allocated blocks. |
| 335 | block.GetSize()); | 338 | for (const auto& block : out->Nodes()) { |
| 336 | } | 339 | // Get the block extents. |
| 340 | const PAddr block_address = block.GetAddress(); | ||
| 341 | const size_t block_pages = block.GetNumPages(); | ||
| 337 | 342 | ||
| 338 | return ResultSuccess; | 343 | // If it has no pages, we don't need to do anything. |
| 339 | } | 344 | if (block_pages == 0) { |
| 345 | continue; | ||
| 346 | } | ||
| 340 | 347 | ||
| 341 | void KMemoryManager::Open(PAddr address, size_t num_pages) { | 348 | // Fill all the pages that we need to fill. |
| 342 | // Repeatedly open references until we've done so for all pages. | 349 | bool any_new = false; |
| 343 | while (num_pages) { | 350 | { |
| 344 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | 351 | PAddr cur_address = block_address; |
| 345 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | 352 | size_t remaining_pages = block_pages; |
| 353 | while (remaining_pages > 0) { | ||
| 354 | // Get the manager for the current address. | ||
| 355 | auto& manager = this->GetManager(cur_address); | ||
| 356 | |||
| 357 | // Process part or all of the block. | ||
| 358 | const size_t cur_pages = | ||
| 359 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 360 | any_new = | ||
| 361 | manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern); | ||
| 362 | |||
| 363 | // Advance. | ||
| 364 | cur_address += cur_pages * PageSize; | ||
| 365 | remaining_pages -= cur_pages; | ||
| 366 | } | ||
| 367 | } | ||
| 346 | 368 | ||
| 347 | { | 369 | // If there are new pages, update tracking for the allocation. |
| 348 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | 370 | if (any_new) { |
| 349 | manager.Open(address, cur_pages); | 371 | // Update tracking for the allocation. |
| 372 | PAddr cur_address = block_address; | ||
| 373 | size_t remaining_pages = block_pages; | ||
| 374 | while (remaining_pages > 0) { | ||
| 375 | // Get the manager for the current address. | ||
| 376 | auto& manager = this->GetManager(cur_address); | ||
| 377 | |||
| 378 | // Lock the pool for the manager. | ||
| 379 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 380 | |||
| 381 | // Track some or all of the current pages. | ||
| 382 | const size_t cur_pages = | ||
| 383 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 384 | manager.TrackOptimizedAllocation(cur_address, cur_pages); | ||
| 385 | |||
| 386 | // Advance. | ||
| 387 | cur_address += cur_pages * PageSize; | ||
| 388 | remaining_pages -= cur_pages; | ||
| 389 | } | ||
| 390 | } | ||
| 350 | } | 391 | } |
| 351 | 392 | } else { | |
| 352 | num_pages -= cur_pages; | 393 | // Set all the allocated memory. |
| 353 | address += cur_pages * PageSize; | 394 | for (const auto& block : out->Nodes()) { |
| 354 | } | 395 | std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, |
| 355 | } | 396 | block.GetSize()); |
| 356 | |||
| 357 | void KMemoryManager::Close(PAddr address, size_t num_pages) { | ||
| 358 | // Repeatedly close references until we've done so for all pages. | ||
| 359 | while (num_pages) { | ||
| 360 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | ||
| 361 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 362 | |||
| 363 | { | ||
| 364 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 365 | manager.Close(address, cur_pages); | ||
| 366 | } | 397 | } |
| 367 | |||
| 368 | num_pages -= cur_pages; | ||
| 369 | address += cur_pages * PageSize; | ||
| 370 | } | 398 | } |
| 371 | } | ||
| 372 | 399 | ||
| 373 | void KMemoryManager::Close(const KPageGroup& pg) { | 400 | R_SUCCEED(); |
| 374 | for (const auto& node : pg.Nodes()) { | ||
| 375 | Close(node.GetAddress(), node.GetNumPages()); | ||
| 376 | } | ||
| 377 | } | ||
| 378 | void KMemoryManager::Open(const KPageGroup& pg) { | ||
| 379 | for (const auto& node : pg.Nodes()) { | ||
| 380 | Open(node.GetAddress(), node.GetNumPages()); | ||
| 381 | } | ||
| 382 | } | 401 | } |
| 383 | 402 | ||
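The rewritten AllocateForProcess repeats one loop shape three times (filling, tracking, and reference-opening): walk a physical range, clamp each step to the pages the current per-region manager owns via GetPageOffsetToEnd, hand that slice over, advance. A sketch of the chunking pattern with invented stand-in types:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

using PAddr = std::uint64_t;
constexpr std::size_t PageSize = 0x1000;

struct Manager {
    PAddr end;
    std::size_t GetPageOffsetToEnd(PAddr addr) const { return (end - addr) / PageSize; }
    void Process(PAddr addr, std::size_t pages) {
        std::cout << "manager slice: 0x" << std::hex << addr << std::dec
                  << " +" << pages << " pages\n";
    }
};

// Stand-in for GetManager(address): two managers splitting the space at 1 MiB.
Manager g_managers[2]{{0x100000}, {0x200000}};
Manager& GetManager(PAddr addr) {
    return g_managers[addr < 0x100000 ? 0 : 1];
}

void ForEachManagerSlice(PAddr address, std::size_t num_pages) {
    while (num_pages > 0) {
        Manager& manager = GetManager(address);
        // Clamp to what this manager owns, exactly like the loops in the hunk.
        const std::size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
        manager.Process(address, cur_pages);
        address += cur_pages * PageSize;
        num_pages -= cur_pages;
    }
}

int main() {
    ForEachManagerSlice(0xFE000, 4);  // 2 pages in the first manager, 2 in the second
    return 0;
}
```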
| 384 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, | 403 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, |
| @@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage | |||
| 394 | ASSERT(Common::IsAligned(total_management_size, PageSize)); | 413 | ASSERT(Common::IsAligned(total_management_size, PageSize)); |
| 395 | 414 | ||
| 396 | // Setup region. | 415 | // Setup region. |
| 397 | pool = p; | 416 | m_pool = p; |
| 398 | management_region = management; | 417 | m_management_region = management; |
| 399 | page_reference_counts.resize( | 418 | m_page_reference_counts.resize( |
| 400 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); | 419 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); |
| 401 | ASSERT(Common::IsAligned(management_region, PageSize)); | 420 | ASSERT(Common::IsAligned(m_management_region, PageSize)); |
| 402 | 421 | ||
| 403 | // Initialize the manager's KPageHeap. | 422 | // Initialize the manager's KPageHeap. |
| 404 | heap.Initialize(address, size, management + manager_size, page_heap_size); | 423 | m_heap.Initialize(address, size, management + manager_size, page_heap_size); |
| 405 | 424 | ||
| 406 | return total_management_size; | 425 | return total_management_size; |
| 407 | } | 426 | } |
| 408 | 427 | ||
| 428 | void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) { | ||
| 429 | UNREACHABLE(); | ||
| 430 | } | ||
| 431 | |||
| 432 | void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) { | ||
| 433 | UNREACHABLE(); | ||
| 434 | } | ||
| 435 | |||
| 436 | bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages, | ||
| 437 | u8 fill_pattern) { | ||
| 438 | UNREACHABLE(); | ||
| 439 | } | ||
| 440 | |||
| 409 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { | 441 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { |
| 410 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); | 442 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); |
| 411 | const size_t optimize_map_size = | 443 | const size_t optimize_map_size = |
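The context cuts off mid-expression here, but both components of the management overhead are visible: a u16 reference count per page (ref_count_size), plus the optimize map, which CalculateOptimizedProcessOverheadSize in the header below sizes at one bit per page rounded up to whole u64 words. A compile-time check of that arithmetic:

```cpp
#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t BitsPerWord = 64;  // Common::BitSize<u64>()

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align;
}

constexpr std::size_t RefCountSize(std::size_t region_size) {
    return (region_size / PageSize) * sizeof(std::uint16_t);
}

constexpr std::size_t OptimizeMapSize(std::size_t region_size) {
    return (AlignUp(region_size / PageSize, BitsPerWord) / BitsPerWord) * sizeof(std::uint64_t);
}

// 1 MiB region: 256 pages -> 512 bytes of refcounts, 4 words (32 bytes) of bitmap.
static_assert(RefCountSize(0x100000) == 512);
static_assert(OptimizeMapSize(0x100000) == 32);
// Even a single page costs one full bitmap word.
static_assert(OptimizeMapSize(PageSize) == 8);
```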
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index dcb9b6348..401d4e644 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h | |||
| @@ -21,11 +21,8 @@ namespace Kernel { | |||
| 21 | 21 | ||
| 22 | class KPageGroup; | 22 | class KPageGroup; |
| 23 | 23 | ||
| 24 | class KMemoryManager final { | 24 | class KMemoryManager { |
| 25 | public: | 25 | public: |
| 26 | YUZU_NON_COPYABLE(KMemoryManager); | ||
| 27 | YUZU_NON_MOVEABLE(KMemoryManager); | ||
| 28 | |||
| 29 | enum class Pool : u32 { | 26 | enum class Pool : u32 { |
| 30 | Application = 0, | 27 | Application = 0, |
| 31 | Applet = 1, | 28 | Applet = 1, |
| @@ -45,16 +42,85 @@ public: | |||
| 45 | enum class Direction : u32 { | 42 | enum class Direction : u32 { |
| 46 | FromFront = 0, | 43 | FromFront = 0, |
| 47 | FromBack = 1, | 44 | FromBack = 1, |
| 48 | |||
| 49 | Shift = 0, | 45 | Shift = 0, |
| 50 | Mask = (0xF << Shift), | 46 | Mask = (0xF << Shift), |
| 51 | }; | 47 | }; |
| 52 | 48 | ||
| 53 | explicit KMemoryManager(Core::System& system_); | 49 | static constexpr size_t MaxManagerCount = 10; |
| 50 | |||
| 51 | explicit KMemoryManager(Core::System& system); | ||
| 54 | 52 | ||
| 55 | void Initialize(VAddr management_region, size_t management_region_size); | 53 | void Initialize(VAddr management_region, size_t management_region_size); |
| 56 | 54 | ||
| 57 | constexpr size_t GetSize(Pool pool) const { | 55 | Result InitializeOptimizedMemory(u64 process_id, Pool pool); |
| 56 | void FinalizeOptimizedMemory(u64 process_id, Pool pool); | ||
| 57 | |||
| 58 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | ||
| 59 | Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); | ||
| 60 | Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, | ||
| 61 | u8 fill_pattern); | ||
| 62 | |||
| 63 | Pool GetPool(PAddr address) const { | ||
| 64 | return this->GetManager(address).GetPool(); | ||
| 65 | } | ||
| 66 | |||
| 67 | void Open(PAddr address, size_t num_pages) { | ||
| 68 | // Repeatedly open references until we've done so for all pages. | ||
| 69 | while (num_pages) { | ||
| 70 | auto& manager = this->GetManager(address); | ||
| 71 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 72 | |||
| 73 | { | ||
| 74 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 75 | manager.Open(address, cur_pages); | ||
| 76 | } | ||
| 77 | |||
| 78 | num_pages -= cur_pages; | ||
| 79 | address += cur_pages * PageSize; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 83 | void OpenFirst(PAddr address, size_t num_pages) { | ||
| 84 | // Repeatedly open references until we've done so for all pages. | ||
| 85 | while (num_pages) { | ||
| 86 | auto& manager = this->GetManager(address); | ||
| 87 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 88 | |||
| 89 | { | ||
| 90 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 91 | manager.OpenFirst(address, cur_pages); | ||
| 92 | } | ||
| 93 | |||
| 94 | num_pages -= cur_pages; | ||
| 95 | address += cur_pages * PageSize; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 99 | void Close(PAddr address, size_t num_pages) { | ||
| 100 | // Repeatedly close references until we've done so for all pages. | ||
| 101 | while (num_pages) { | ||
| 102 | auto& manager = this->GetManager(address); | ||
| 103 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 104 | |||
| 105 | { | ||
| 106 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 107 | manager.Close(address, cur_pages); | ||
| 108 | } | ||
| 109 | |||
| 110 | num_pages -= cur_pages; | ||
| 111 | address += cur_pages * PageSize; | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | size_t GetSize() { | ||
| 116 | size_t total = 0; | ||
| 117 | for (size_t i = 0; i < m_num_managers; i++) { | ||
| 118 | total += m_managers[i].GetSize(); | ||
| 119 | } | ||
| 120 | return total; | ||
| 121 | } | ||
| 122 | |||
| 123 | size_t GetSize(Pool pool) { | ||
| 58 | constexpr Direction GetSizeDirection = Direction::FromFront; | 124 | constexpr Direction GetSizeDirection = Direction::FromFront; |
| 59 | size_t total = 0; | 125 | size_t total = 0; |
| 60 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | 126 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; |
| @@ -64,18 +130,36 @@ public: | |||
| 64 | return total; | 130 | return total; |
| 65 | } | 131 | } |
| 66 | 132 | ||
| 67 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | 133 | size_t GetFreeSize() { |
| 68 | Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); | 134 | size_t total = 0; |
| 69 | Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, | 135 | for (size_t i = 0; i < m_num_managers; i++) { |
| 70 | u8 fill_pattern); | 136 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]); |
| 137 | total += m_managers[i].GetFreeSize(); | ||
| 138 | } | ||
| 139 | return total; | ||
| 140 | } | ||
| 71 | 141 | ||
| 72 | static constexpr size_t MaxManagerCount = 10; | 142 | size_t GetFreeSize(Pool pool) { |
| 143 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); | ||
| 144 | |||
| 145 | constexpr Direction GetSizeDirection = Direction::FromFront; | ||
| 146 | size_t total = 0; | ||
| 147 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | ||
| 148 | manager = this->GetNextManager(manager, GetSizeDirection)) { | ||
| 149 | total += manager->GetFreeSize(); | ||
| 150 | } | ||
| 151 | return total; | ||
| 152 | } | ||
| 73 | 153 | ||
| 74 | void Close(PAddr address, size_t num_pages); | 154 | void DumpFreeList(Pool pool) { |
| 75 | void Close(const KPageGroup& pg); | 155 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 76 | 156 | ||
| 77 | void Open(PAddr address, size_t num_pages); | 157 | constexpr Direction DumpDirection = Direction::FromFront; |
| 78 | void Open(const KPageGroup& pg); | 158 | for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr; |
| 159 | manager = this->GetNextManager(manager, DumpDirection)) { | ||
| 160 | manager->DumpFreeList(); | ||
| 161 | } | ||
| 162 | } | ||
| 79 | 163 | ||
| 80 | public: | 164 | public: |
| 81 | static size_t CalculateManagementOverheadSize(size_t region_size) { | 165 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| @@ -88,14 +172,13 @@ public: | |||
| 88 | } | 172 | } |
| 89 | 173 | ||
| 90 | static constexpr Pool GetPool(u32 option) { | 174 | static constexpr Pool GetPool(u32 option) { |
| 91 | return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >> | 175 | return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >> |
| 92 | static_cast<u32>(Pool::Shift)); | 176 | static_cast<u32>(Pool::Shift)); |
| 93 | } | 177 | } |
| 94 | 178 | ||
| 95 | static constexpr Direction GetDirection(u32 option) { | 179 | static constexpr Direction GetDirection(u32 option) { |
| 96 | return static_cast<Direction>( | 180 | return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >> |
| 97 | (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >> | 181 | static_cast<u32>(Direction::Shift)); |
| 98 | static_cast<u32>(Direction::Shift)); | ||
| 99 | } | 182 | } |
| 100 | 183 | ||
| 101 | static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) { | 184 | static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) { |
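GetPool and GetDirection lose a redundant static_cast on option (it is already a u32), and DecodeOption simply pairs them up. Direction's Shift = 0 and Mask = 0xF << Shift are visible in the diff; Pool's Shift is assumed to be 4 in this sketch so the two fields occupy separate nibbles:

```cpp
#include <cstdint>
#include <tuple>

// Direction's Shift/Mask come from the diff; Pool::Shift = 4 is an assumption.
enum class Pool : std::uint32_t { Application = 0, Applet = 1, Shift = 4, Mask = 0xFu << Shift };
enum class Direction : std::uint32_t { FromFront = 0, FromBack = 1, Shift = 0, Mask = 0xFu << Shift };

constexpr std::uint32_t EncodeOption(Pool pool, Direction dir) {
    return (static_cast<std::uint32_t>(pool) << static_cast<std::uint32_t>(Pool::Shift)) |
           (static_cast<std::uint32_t>(dir) << static_cast<std::uint32_t>(Direction::Shift));
}

constexpr std::tuple<Pool, Direction> DecodeOption(std::uint32_t option) {
    const auto pool = static_cast<Pool>((option & static_cast<std::uint32_t>(Pool::Mask)) >>
                                        static_cast<std::uint32_t>(Pool::Shift));
    const auto dir = static_cast<Direction>((option & static_cast<std::uint32_t>(Direction::Mask)) >>
                                            static_cast<std::uint32_t>(Direction::Shift));
    return {pool, dir};
}

// Round-tripping an option word recovers both fields.
static_assert(DecodeOption(EncodeOption(Pool::Applet, Direction::FromBack)) ==
              std::make_tuple(Pool::Applet, Direction::FromBack));
```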
| @@ -103,74 +186,88 @@ public: | |||
| 103 | } | 186 | } |
| 104 | 187 | ||
| 105 | private: | 188 | private: |
| 106 | class Impl final { | 189 | class Impl { |
| 107 | public: | 190 | public: |
| 108 | YUZU_NON_COPYABLE(Impl); | 191 | static size_t CalculateManagementOverheadSize(size_t region_size); |
| 109 | YUZU_NON_MOVEABLE(Impl); | 192 | |
| 193 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 194 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 195 | Common::BitSize<u64>()) * | ||
| 196 | sizeof(u64); | ||
| 197 | } | ||
| 110 | 198 | ||
| 199 | public: | ||
| 111 | Impl() = default; | 200 | Impl() = default; |
| 112 | ~Impl() = default; | ||
| 113 | 201 | ||
| 114 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, | 202 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, |
| 115 | Pool p); | 203 | Pool p); |
| 116 | 204 | ||
| 117 | VAddr AllocateBlock(s32 index, bool random) { | 205 | PAddr AllocateBlock(s32 index, bool random) { |
| 118 | return heap.AllocateBlock(index, random); | 206 | return m_heap.AllocateBlock(index, random); |
| 119 | } | 207 | } |
| 120 | 208 | PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { | |
| 121 | void Free(VAddr addr, size_t num_pages) { | 209 | return m_heap.AllocateAligned(index, num_pages, align_pages); |
| 122 | heap.Free(addr, num_pages); | 210 | } |
| 211 | void Free(PAddr addr, size_t num_pages) { | ||
| 212 | m_heap.Free(addr, num_pages); | ||
| 123 | } | 213 | } |
| 124 | 214 | ||
| 125 | void SetInitialUsedHeapSize(size_t reserved_size) { | 215 | void SetInitialUsedHeapSize(size_t reserved_size) { |
| 126 | heap.SetInitialUsedSize(reserved_size); | 216 | m_heap.SetInitialUsedSize(reserved_size); |
| 127 | } | 217 | } |
| 128 | 218 | ||
| 129 | constexpr Pool GetPool() const { | 219 | void InitializeOptimizedMemory() { |
| 130 | return pool; | 220 | UNIMPLEMENTED(); |
| 131 | } | 221 | } |
| 132 | 222 | ||
| 223 | void TrackUnoptimizedAllocation(PAddr block, size_t num_pages); | ||
| 224 | void TrackOptimizedAllocation(PAddr block, size_t num_pages); | ||
| 225 | |||
| 226 | bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern); | ||
| 227 | |||
| 228 | constexpr Pool GetPool() const { | ||
| 229 | return m_pool; | ||
| 230 | } | ||
| 133 | constexpr size_t GetSize() const { | 231 | constexpr size_t GetSize() const { |
| 134 | return heap.GetSize(); | 232 | return m_heap.GetSize(); |
| 233 | } | ||
| 234 | constexpr PAddr GetEndAddress() const { | ||
| 235 | return m_heap.GetEndAddress(); | ||
| 135 | } | 236 | } |
| 136 | 237 | ||
| 137 | constexpr VAddr GetAddress() const { | 238 | size_t GetFreeSize() const { |
| 138 | return heap.GetAddress(); | 239 | return m_heap.GetFreeSize(); |
| 139 | } | 240 | } |
| 140 | 241 | ||
| 141 | constexpr VAddr GetEndAddress() const { | 242 | void DumpFreeList() const { |
| 142 | return heap.GetEndAddress(); | 243 | UNIMPLEMENTED(); |
| 143 | } | 244 | } |
| 144 | 245 | ||
| 145 | constexpr size_t GetPageOffset(PAddr address) const { | 246 | constexpr size_t GetPageOffset(PAddr address) const { |
| 146 | return heap.GetPageOffset(address); | 247 | return m_heap.GetPageOffset(address); |
| 147 | } | 248 | } |
| 148 | |||
| 149 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { | 249 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { |
| 150 | return heap.GetPageOffsetToEnd(address); | 250 | return m_heap.GetPageOffsetToEnd(address); |
| 151 | } | 251 | } |
| 152 | 252 | ||
| 153 | constexpr void SetNext(Impl* n) { | 253 | constexpr void SetNext(Impl* n) { |
| 154 | next = n; | 254 | m_next = n; |
| 155 | } | 255 | } |
| 156 | |||
| 157 | constexpr void SetPrev(Impl* n) { | 256 | constexpr void SetPrev(Impl* n) { |
| 158 | prev = n; | 257 | m_prev = n; |
| 159 | } | 258 | } |
| 160 | |||
| 161 | constexpr Impl* GetNext() const { | 259 | constexpr Impl* GetNext() const { |
| 162 | return next; | 260 | return m_next; |
| 163 | } | 261 | } |
| 164 | |||
| 165 | constexpr Impl* GetPrev() const { | 262 | constexpr Impl* GetPrev() const { |
| 166 | return prev; | 263 | return m_prev; |
| 167 | } | 264 | } |
| 168 | 265 | ||
| 169 | void OpenFirst(PAddr address, size_t num_pages) { | 266 | void OpenFirst(PAddr address, size_t num_pages) { |
| 170 | size_t index = this->GetPageOffset(address); | 267 | size_t index = this->GetPageOffset(address); |
| 171 | const size_t end = index + num_pages; | 268 | const size_t end = index + num_pages; |
| 172 | while (index < end) { | 269 | while (index < end) { |
| 173 | const RefCount ref_count = (++page_reference_counts[index]); | 270 | const RefCount ref_count = (++m_page_reference_counts[index]); |
| 174 | ASSERT(ref_count == 1); | 271 | ASSERT(ref_count == 1); |
| 175 | 272 | ||
| 176 | index++; | 273 | index++; |
| @@ -181,7 +278,7 @@ private: | |||
| 181 | size_t index = this->GetPageOffset(address); | 278 | size_t index = this->GetPageOffset(address); |
| 182 | const size_t end = index + num_pages; | 279 | const size_t end = index + num_pages; |
| 183 | while (index < end) { | 280 | while (index < end) { |
| 184 | const RefCount ref_count = (++page_reference_counts[index]); | 281 | const RefCount ref_count = (++m_page_reference_counts[index]); |
| 185 | ASSERT(ref_count > 1); | 282 | ASSERT(ref_count > 1); |
| 186 | 283 | ||
| 187 | index++; | 284 | index++; |
| @@ -195,8 +292,8 @@ private: | |||
| 195 | size_t free_start = 0; | 292 | size_t free_start = 0; |
| 196 | size_t free_count = 0; | 293 | size_t free_count = 0; |
| 197 | while (index < end) { | 294 | while (index < end) { |
| 198 | ASSERT(page_reference_counts[index] > 0); | 295 | ASSERT(m_page_reference_counts[index] > 0); |
| 199 | const RefCount ref_count = (--page_reference_counts[index]); | 296 | const RefCount ref_count = (--m_page_reference_counts[index]); |
| 200 | 297 | ||
| 201 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. | 298 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. |
| 202 | if (ref_count == 0) { | 299 | if (ref_count == 0) { |
| @@ -208,7 +305,7 @@ private: | |||
| 208 | } | 305 | } |
| 209 | } else { | 306 | } else { |
| 210 | if (free_count > 0) { | 307 | if (free_count > 0) { |
| 211 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | 308 | this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); |
| 212 | free_count = 0; | 309 | free_count = 0; |
| 213 | } | 310 | } |
| 214 | } | 311 | } |
| @@ -217,44 +314,36 @@ private: | |||
| 217 | } | 314 | } |
| 218 | 315 | ||
| 219 | if (free_count > 0) { | 316 | if (free_count > 0) { |
| 220 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | 317 | this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); |
| 221 | } | 318 | } |
| 222 | } | 319 | } |
| 223 | 320 | ||
| 224 | static size_t CalculateManagementOverheadSize(size_t region_size); | ||
| 225 | |||
| 226 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 227 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 228 | Common::BitSize<u64>()) * | ||
| 229 | sizeof(u64); | ||
| 230 | } | ||
| 231 | |||
| 232 | private: | 321 | private: |
| 233 | using RefCount = u16; | 322 | using RefCount = u16; |
| 234 | 323 | ||
| 235 | KPageHeap heap; | 324 | KPageHeap m_heap; |
| 236 | std::vector<RefCount> page_reference_counts; | 325 | std::vector<RefCount> m_page_reference_counts; |
| 237 | VAddr management_region{}; | 326 | VAddr m_management_region{}; |
| 238 | Pool pool{}; | 327 | Pool m_pool{}; |
| 239 | Impl* next{}; | 328 | Impl* m_next{}; |
| 240 | Impl* prev{}; | 329 | Impl* m_prev{}; |
| 241 | }; | 330 | }; |
| 242 | 331 | ||
| 243 | private: | 332 | private: |
| 244 | Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) { | 333 | Impl& GetManager(PAddr address) { |
| 245 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | 334 | return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; |
| 246 | } | 335 | } |
| 247 | 336 | ||
| 248 | const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const { | 337 | const Impl& GetManager(PAddr address) const { |
| 249 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | 338 | return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; |
| 250 | } | 339 | } |
| 251 | 340 | ||
| 252 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) const { | 341 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) { |
| 253 | return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)] | 342 | return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)] |
| 254 | : pool_managers_head[static_cast<size_t>(pool)]; | 343 | : m_pool_managers_head[static_cast<size_t>(pool)]; |
| 255 | } | 344 | } |
| 256 | 345 | ||
| 257 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) const { | 346 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) { |
| 258 | if (dir == Direction::FromBack) { | 347 | if (dir == Direction::FromBack) { |
| 259 | return cur->GetPrev(); | 348 | return cur->GetPrev(); |
| 260 | } else { | 349 | } else { |
| @@ -263,15 +352,21 @@ private: | |||
| 263 | } | 352 | } |
| 264 | 353 | ||
| 265 | Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir, | 354 | Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir, |
| 266 | bool random); | 355 | bool unoptimized, bool random); |
| 267 | 356 | ||
| 268 | private: | 357 | private: |
| 269 | Core::System& system; | 358 | template <typename T> |
| 270 | std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks; | 359 | using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>; |
| 271 | std::array<Impl*, MaxManagerCount> pool_managers_head{}; | 360 | |
| 272 | std::array<Impl*, MaxManagerCount> pool_managers_tail{}; | 361 | Core::System& m_system; |
| 273 | std::array<Impl, MaxManagerCount> managers; | 362 | const KMemoryLayout& m_memory_layout; |
| 274 | size_t num_managers{}; | 363 | PoolArray<KLightLock> m_pool_locks; |
| 364 | std::array<Impl*, MaxManagerCount> m_pool_managers_head{}; | ||
| 365 | std::array<Impl*, MaxManagerCount> m_pool_managers_tail{}; | ||
| 366 | std::array<Impl, MaxManagerCount> m_managers; | ||
| 367 | size_t m_num_managers{}; | ||
| 368 | PoolArray<u64> m_optimized_process_ids{}; | ||
| 369 | PoolArray<bool> m_has_optimized_process{}; | ||
| 275 | }; | 370 | }; |
| 276 | 371 | ||
| 277 | } // namespace Kernel | 372 | } // namespace Kernel |
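The Close() path in KMemoryManager::Impl above batches consecutive zero refcounts so that each maximal run of newly-unreferenced pages is returned to the heap with a single Free call. A minimal standalone sketch of that run-length batching, with illustrative names rather than yuzu's real API:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch: decrement per-page refcounts and free maximal runs that hit zero,
// mirroring the batching idea in KMemoryManager::Impl::Close. Names and the
// printf stand-in for Free() are illustrative only.
constexpr std::size_t PageSize = 0x1000;

void CloseRange(std::vector<std::uint16_t>& ref_counts, std::uint64_t heap_base,
                std::size_t index, std::size_t num_pages) {
    const std::size_t end = index + num_pages;
    std::size_t free_start = 0;
    std::size_t free_count = 0;
    while (index < end) {
        const std::uint16_t ref_count = --ref_counts[index];
        if (ref_count == 0) {
            if (free_count > 0) {
                ++free_count;       // Extend the run currently in progress.
            } else {
                free_start = index; // Start a new run of zero refcounts.
                free_count = 1;
            }
        } else if (free_count > 0) {
            // Run ended: emit one free for the whole run.
            std::printf("free %#llx (%zu pages)\n",
                        (unsigned long long)(heap_base + free_start * PageSize), free_count);
            free_count = 0;
        }
        ++index;
    }
    if (free_count > 0) {
        std::printf("free %#llx (%zu pages)\n",
                    (unsigned long long)(heap_base + free_start * PageSize), free_count);
    }
}

int main() {
    std::vector<std::uint16_t> refs(8, 1);
    refs[3] = 2;                           // One page stays referenced.
    CloseRange(refs, 0x80000000ULL, 0, 8); // Emits two frees: pages [0,3) and [4,8).
}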
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index 7e2fcccdc..e5630c1ac 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h | |||
| @@ -142,32 +142,38 @@ private: | |||
| 142 | 142 | ||
| 143 | } // namespace impl | 143 | } // namespace impl |
| 144 | 144 | ||
| 145 | constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); | 145 | constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); |
| 146 | constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); | 146 | |
| 147 | constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); | 147 | constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); |
| 148 | constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); | ||
| 148 | static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); | 149 | static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); |
| 149 | static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); | 150 | static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); |
| 150 | 151 | ||
| 151 | constexpr auto KMemoryRegionType_DramKernelBase = | 152 | // constexpr inline auto KMemoryRegionType_CoreLocalRegion = |
| 153 | // KMemoryRegionType_None.DeriveInitial(2).Finalize(); | ||
| 154 | // static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4); | ||
| 155 | |||
| 156 | constexpr inline auto KMemoryRegionType_DramKernelBase = | ||
| 152 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) | 157 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) |
| 153 | .SetAttribute(KMemoryRegionAttr_NoUserMap) | 158 | .SetAttribute(KMemoryRegionAttr_NoUserMap) |
| 154 | .SetAttribute(KMemoryRegionAttr_CarveoutProtected); | 159 | .SetAttribute(KMemoryRegionAttr_CarveoutProtected); |
| 155 | constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); | 160 | constexpr inline auto KMemoryRegionType_DramReservedBase = |
| 156 | constexpr auto KMemoryRegionType_DramHeapBase = | 161 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); |
| 162 | constexpr inline auto KMemoryRegionType_DramHeapBase = | ||
| 157 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); | 163 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); |
| 158 | static_assert(KMemoryRegionType_DramKernelBase.GetValue() == | 164 | static_assert(KMemoryRegionType_DramKernelBase.GetValue() == |
| 159 | (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); | 165 | (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); |
| 160 | static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); | 166 | static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); |
| 161 | static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); | 167 | static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); |
| 162 | 168 | ||
| 163 | constexpr auto KMemoryRegionType_DramKernelCode = | 169 | constexpr inline auto KMemoryRegionType_DramKernelCode = |
| 164 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); | 170 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); |
| 165 | constexpr auto KMemoryRegionType_DramKernelSlab = | 171 | constexpr inline auto KMemoryRegionType_DramKernelSlab = |
| 166 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); | 172 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); |
| 167 | constexpr auto KMemoryRegionType_DramKernelPtHeap = | 173 | constexpr inline auto KMemoryRegionType_DramKernelPtHeap = |
| 168 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( | 174 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( |
| 169 | KMemoryRegionAttr_LinearMapped); | 175 | KMemoryRegionAttr_LinearMapped); |
| 170 | constexpr auto KMemoryRegionType_DramKernelInitPt = | 176 | constexpr inline auto KMemoryRegionType_DramKernelInitPt = |
| 171 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( | 177 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( |
| 172 | KMemoryRegionAttr_LinearMapped); | 178 | KMemoryRegionAttr_LinearMapped); |
| 173 | static_assert(KMemoryRegionType_DramKernelCode.GetValue() == | 179 | static_assert(KMemoryRegionType_DramKernelCode.GetValue() == |
| @@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == | |||
| 181 | (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | 187 | (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | |
| 182 | KMemoryRegionAttr_LinearMapped)); | 188 | KMemoryRegionAttr_LinearMapped)); |
| 183 | 189 | ||
| 184 | constexpr auto KMemoryRegionType_DramReservedEarly = | 190 | constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory = |
| 191 | KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute( | ||
| 192 | KMemoryRegionAttr_LinearMapped); | ||
| 193 | static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == | ||
| 194 | (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | ||
| 195 | KMemoryRegionAttr_LinearMapped)); | ||
| 196 | |||
| 197 | constexpr inline auto KMemoryRegionType_DramReservedEarly = | ||
| 185 | KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | 198 | KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); |
| 186 | static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == | 199 | static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == |
| 187 | (0x16 | KMemoryRegionAttr_NoUserMap)); | 200 | (0x16 | KMemoryRegionAttr_NoUserMap)); |
| 188 | 201 | ||
| 189 | constexpr auto KMemoryRegionType_KernelTraceBuffer = | 202 | constexpr inline auto KMemoryRegionType_KernelTraceBuffer = |
| 190 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) | 203 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) |
| 191 | .SetAttribute(KMemoryRegionAttr_LinearMapped) | 204 | .SetAttribute(KMemoryRegionAttr_LinearMapped) |
| 192 | .SetAttribute(KMemoryRegionAttr_UserReadOnly); | 205 | .SetAttribute(KMemoryRegionAttr_UserReadOnly); |
| 193 | constexpr auto KMemoryRegionType_OnMemoryBootImage = | 206 | constexpr inline auto KMemoryRegionType_OnMemoryBootImage = |
| 194 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); | 207 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); |
| 195 | constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); | 208 | constexpr inline auto KMemoryRegionType_DTB = |
| 209 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); | ||
| 196 | static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == | 210 | static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == |
| 197 | (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); | 211 | (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); |
| 198 | static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); | 212 | static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); |
| 199 | static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); | 213 | static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); |
| 200 | 214 | ||
| 201 | constexpr auto KMemoryRegionType_DramPoolPartition = | 215 | constexpr inline auto KMemoryRegionType_DramPoolPartition = |
| 202 | KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | 216 | KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); |
| 203 | static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == | 217 | static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == |
| 204 | (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 218 | (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| 205 | 219 | ||
| 206 | constexpr auto KMemoryRegionType_DramPoolManagement = | 220 | constexpr inline auto KMemoryRegionType_DramPoolManagement = |
| 207 | KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( | 221 | KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( |
| 208 | KMemoryRegionAttr_CarveoutProtected); | 222 | KMemoryRegionAttr_CarveoutProtected); |
| 209 | constexpr auto KMemoryRegionType_DramUserPool = | 223 | constexpr inline auto KMemoryRegionType_DramUserPool = |
| 210 | KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); | 224 | KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); |
| 211 | static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == | 225 | static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == |
| 212 | (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | 226 | (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | |
| @@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == | |||
| 214 | static_assert(KMemoryRegionType_DramUserPool.GetValue() == | 228 | static_assert(KMemoryRegionType_DramUserPool.GetValue() == |
| 215 | (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 229 | (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| 216 | 230 | ||
| 217 | constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0); | 231 | constexpr inline auto KMemoryRegionType_DramApplicationPool = |
| 218 | constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1); | 232 | KMemoryRegionType_DramUserPool.Derive(4, 0); |
| 219 | constexpr auto KMemoryRegionType_DramSystemNonSecurePool = | 233 | constexpr inline auto KMemoryRegionType_DramAppletPool = |
| 234 | KMemoryRegionType_DramUserPool.Derive(4, 1); | ||
| 235 | constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool = | ||
| 220 | KMemoryRegionType_DramUserPool.Derive(4, 2); | 236 | KMemoryRegionType_DramUserPool.Derive(4, 2); |
| 221 | constexpr auto KMemoryRegionType_DramSystemPool = | 237 | constexpr inline auto KMemoryRegionType_DramSystemPool = |
| 222 | KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); | 238 | KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); |
| 223 | static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == | 239 | static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == |
| 224 | (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 240 | (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| @@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() == | |||
| 230 | (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | 246 | (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | |
| 231 | KMemoryRegionAttr_CarveoutProtected)); | 247 | KMemoryRegionAttr_CarveoutProtected)); |
| 232 | 248 | ||
| 233 | constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); | 249 | constexpr inline auto KMemoryRegionType_VirtualDramHeapBase = |
| 234 | constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap = | 250 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); |
| 251 | constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap = | ||
| 235 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); | 252 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); |
| 236 | constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer = | 253 | constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer = |
| 237 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); | 254 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); |
| 238 | static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); | 255 | static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); |
| 239 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); | 256 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); |
| 240 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); | 257 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); |
| 241 | 258 | ||
| 242 | // UNUSED: .DeriveSparse(2, 2, 0); | 259 | // UNUSED: .DeriveSparse(2, 2, 0); |
| 243 | constexpr auto KMemoryRegionType_VirtualDramUnknownDebug = | 260 | constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug = |
| 244 | KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); | 261 | KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); |
| 245 | static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); | 262 | static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); |
| 246 | 263 | ||
| 247 | constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = | 264 | constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = |
| 265 | KMemoryRegionType_Dram.DeriveSparse(3, 1, 0); | ||
| 266 | static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62)); | ||
| 267 | |||
| 268 | constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt = | ||
| 248 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); | 269 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); |
| 249 | constexpr auto KMemoryRegionType_VirtualDramPoolManagement = | 270 | constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement = |
| 250 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); | 271 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); |
| 251 | constexpr auto KMemoryRegionType_VirtualDramUserPool = | 272 | constexpr inline auto KMemoryRegionType_VirtualDramUserPool = |
| 252 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); | 273 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); |
| 253 | static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); | 274 | static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); |
| 254 | static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); | 275 | static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); |
| 255 | static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); | 276 | static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); |
| 256 | 277 | ||
| 257 | // NOTE: For an unknown reason, the pools are derived out-of-order here. It's worth eventually trying | 278 | // NOTE: For an unknown reason, the pools are derived out-of-order here. |
| 258 | // to understand why Nintendo made this choice. | 279 | // It's worth eventually trying to understand why Nintendo made this choice. |
| 259 | // UNUSED: .Derive(6, 0); | 280 | // UNUSED: .Derive(6, 0); |
| 260 | // UNUSED: .Derive(6, 1); | 281 | // UNUSED: .Derive(6, 1); |
| 261 | constexpr auto KMemoryRegionType_VirtualDramAppletPool = | 282 | constexpr inline auto KMemoryRegionType_VirtualDramAppletPool = |
| 262 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); | 283 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); |
| 263 | constexpr auto KMemoryRegionType_VirtualDramApplicationPool = | 284 | constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool = |
| 264 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); | 285 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); |
| 265 | constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool = | 286 | constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool = |
| 266 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); | 287 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); |
| 267 | constexpr auto KMemoryRegionType_VirtualDramSystemPool = | 288 | constexpr inline auto KMemoryRegionType_VirtualDramSystemPool = |
| 268 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); | 289 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); |
| 269 | static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); | 290 | static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); |
| 270 | static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); | 291 | static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); |
| 271 | static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); | 292 | static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); |
| 272 | static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); | 293 | static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); |
| 273 | 294 | ||
| 274 | constexpr auto KMemoryRegionType_ArchDeviceBase = | 295 | constexpr inline auto KMemoryRegionType_ArchDeviceBase = |
| 275 | KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); | 296 | KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); |
| 276 | constexpr auto KMemoryRegionType_BoardDeviceBase = | 297 | constexpr inline auto KMemoryRegionType_BoardDeviceBase = |
| 277 | KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); | 298 | KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); |
| 278 | static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); | 299 | static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); |
| 279 | static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); | 300 | static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); |
| @@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); | |||
| 284 | #error "Unimplemented" | 305 | #error "Unimplemented" |
| 285 | #else | 306 | #else |
| 286 | // Default to no architecture devices. | 307 | // Default to no architecture devices. |
| 287 | constexpr auto NumArchitectureDeviceRegions = 0; | 308 | constexpr inline auto NumArchitectureDeviceRegions = 0; |
| 288 | #endif | 309 | #endif |
| 289 | static_assert(NumArchitectureDeviceRegions >= 0); | 310 | static_assert(NumArchitectureDeviceRegions >= 0); |
| 290 | 311 | ||
| @@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0); | |||
| 292 | #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" | 313 | #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" |
| 293 | #else | 314 | #else |
| 294 | // Default to no board devices. | 315 | // Default to no board devices. |
| 295 | constexpr auto NumBoardDeviceRegions = 0; | 316 | constexpr inline auto NumBoardDeviceRegions = 0; |
| 296 | #endif | 317 | #endif |
| 297 | static_assert(NumBoardDeviceRegions >= 0); | 318 | static_assert(NumBoardDeviceRegions >= 0); |
| 298 | 319 | ||
| 299 | constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); | 320 | constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); |
| 300 | constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); | 321 | constexpr inline auto KMemoryRegionType_KernelStack = |
| 301 | constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); | 322 | KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); |
| 302 | constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); | 323 | constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); |
| 324 | constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); | ||
| 303 | static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); | 325 | static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); |
| 304 | static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); | 326 | static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); |
| 305 | static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); | 327 | static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); |
| 306 | static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); | 328 | static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); |
| 307 | 329 | ||
| 308 | constexpr auto KMemoryRegionType_KernelMiscDerivedBase = | 330 | constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase = |
| 309 | KMemoryRegionType_KernelMisc.DeriveTransition(); | 331 | KMemoryRegionType_KernelMisc.DeriveTransition(); |
| 310 | static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); | 332 | static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); |
| 311 | 333 | ||
| 312 | // UNUSED: .Derive(7, 0); | 334 | // UNUSED: .Derive(7, 0); |
| 313 | constexpr auto KMemoryRegionType_KernelMiscMainStack = | 335 | constexpr inline auto KMemoryRegionType_KernelMiscMainStack = |
| 314 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); | 336 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); |
| 315 | constexpr auto KMemoryRegionType_KernelMiscMappedDevice = | 337 | constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice = |
| 316 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); | 338 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); |
| 317 | constexpr auto KMemoryRegionType_KernelMiscExceptionStack = | 339 | constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack = |
| 318 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); | 340 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); |
| 319 | constexpr auto KMemoryRegionType_KernelMiscUnknownDebug = | 341 | constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug = |
| 320 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); | 342 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); |
| 321 | // UNUSED: .Derive(7, 5); | 343 | // UNUSED: .Derive(7, 5); |
| 322 | constexpr auto KMemoryRegionType_KernelMiscIdleStack = | 344 | constexpr inline auto KMemoryRegionType_KernelMiscIdleStack = |
| 323 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); | 345 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); |
| 324 | static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); | 346 | static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); |
| 325 | static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); | 347 | static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); |
| @@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349); | |||
| 327 | static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); | 349 | static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); |
| 328 | static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); | 350 | static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); |
| 329 | 351 | ||
| 330 | constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); | 352 | constexpr inline auto KMemoryRegionType_KernelTemp = |
| 353 | KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); | ||
| 331 | static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); | 354 | static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); |
| 332 | 355 | ||
| 333 | constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { | 356 | constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { |
| @@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { | |||
| 335 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; | 358 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; |
| 336 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { | 359 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { |
| 337 | return KMemoryRegionType_VirtualDramKernelPtHeap; | 360 | return KMemoryRegionType_VirtualDramKernelPtHeap; |
| 361 | } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) { | ||
| 362 | return KMemoryRegionType_VirtualDramKernelSecureAppletMemory; | ||
| 338 | } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { | 363 | } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { |
| 339 | return KMemoryRegionType_VirtualDramUnknownDebug; | 364 | return KMemoryRegionType_VirtualDramUnknownDebug; |
| 340 | } else { | 365 | } else { |
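The final branch of GetTypeForVirtualLinearMapping tests (type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id: OR-ing an attribute into a value is a no-op exactly when all of the attribute's bits are already set. A small sketch of the idiom; the attribute value below is a placeholder, not the kernel's real constant:

#include <cstdint>

// Sketch of the attribute-containment test used above. The bit position is
// an assumption for illustration, not yuzu's actual attribute encoding.
constexpr std::uint32_t Attr_ShouldKernelMap = 1u << 24;

constexpr bool HasAttribute(std::uint32_t type_id, std::uint32_t attr) {
    // OR changes nothing iff every bit of attr was already present.
    return (type_id | attr) == type_id;
}

static_assert(HasAttribute(Attr_ShouldKernelMap | 0x26u, Attr_ShouldKernelMap));
static_assert(!HasAttribute(0x26u, Attr_ShouldKernelMap));

int main() {}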
diff --git a/src/core/hle/kernel/k_page_bitmap.h b/src/core/hle/kernel/k_page_bitmap.h index c97b3dc0b..0ff987732 100644 --- a/src/core/hle/kernel/k_page_bitmap.h +++ b/src/core/hle/kernel/k_page_bitmap.h | |||
| @@ -16,107 +16,126 @@ | |||
| 16 | namespace Kernel { | 16 | namespace Kernel { |
| 17 | 17 | ||
| 18 | class KPageBitmap { | 18 | class KPageBitmap { |
| 19 | private: | 19 | public: |
| 20 | class RandomBitGenerator { | 20 | class RandomBitGenerator { |
| 21 | private: | 21 | public: |
| 22 | Common::TinyMT rng{}; | 22 | RandomBitGenerator() { |
| 23 | u32 entropy{}; | 23 | m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); |
| 24 | u32 bits_available{}; | 24 | } |
| 25 | |||
| 26 | u64 SelectRandomBit(u64 bitmap) { | ||
| 27 | u64 selected = 0; | ||
| 28 | |||
| 29 | for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0; | ||
| 30 | cur_num_bits /= 2) { | ||
| 31 | const u64 high = (bitmap >> cur_num_bits); | ||
| 32 | const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits))); | ||
| 33 | |||
| 34 | // Choose high if we have high and (don't have low or select high randomly). | ||
| 35 | if (high && (low == 0 || this->GenerateRandomBit())) { | ||
| 36 | bitmap = high; | ||
| 37 | selected += cur_num_bits; | ||
| 38 | } else { | ||
| 39 | bitmap = low; | ||
| 40 | selected += 0; | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 44 | return selected; | ||
| 45 | } | ||
| 46 | |||
| 47 | u64 GenerateRandom(u64 max) { | ||
| 48 | // Determine the number of bits we need. | ||
| 49 | const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max)); | ||
| 50 | |||
| 51 | // Generate a random value of the desired bitwidth. | ||
| 52 | const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed)); | ||
| 53 | |||
| 54 | // Adjust the value to be in range. | ||
| 55 | return rnd - ((rnd / max) * max); | ||
| 56 | } | ||
| 25 | 57 | ||
| 26 | private: | 58 | private: |
| 27 | void RefreshEntropy() { | 59 | void RefreshEntropy() { |
| 28 | entropy = rng.GenerateRandomU32(); | 60 | m_entropy = m_rng.GenerateRandomU32(); |
| 29 | bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>()); | 61 | m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>()); |
| 30 | } | 62 | } |
| 31 | 63 | ||
| 32 | bool GenerateRandomBit() { | 64 | bool GenerateRandomBit() { |
| 33 | if (bits_available == 0) { | 65 | if (m_bits_available == 0) { |
| 34 | this->RefreshEntropy(); | 66 | this->RefreshEntropy(); |
| 35 | } | 67 | } |
| 36 | 68 | ||
| 37 | const bool rnd_bit = (entropy & 1) != 0; | 69 | const bool rnd_bit = (m_entropy & 1) != 0; |
| 38 | entropy >>= 1; | 70 | m_entropy >>= 1; |
| 39 | --bits_available; | 71 | --m_bits_available; |
| 40 | return rnd_bit; | 72 | return rnd_bit; |
| 41 | } | 73 | } |
| 42 | 74 | ||
| 43 | public: | 75 | u64 GenerateRandomBits(u32 num_bits) { |
| 44 | RandomBitGenerator() { | 76 | u64 result = 0; |
| 45 | rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); | ||
| 46 | } | ||
| 47 | 77 | ||
| 48 | std::size_t SelectRandomBit(u64 bitmap) { | 78 | // Iteratively add random bits to our result. |
| 49 | u64 selected = 0; | 79 | while (num_bits > 0) { |
| 80 | // Ensure we have random bits to take from. | ||
| 81 | if (m_bits_available == 0) { | ||
| 82 | this->RefreshEntropy(); | ||
| 83 | } | ||
| 50 | 84 | ||
| 51 | u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; | 85 | // Determine how many bits to take this round. |
| 52 | u64 cur_mask = (1ULL << cur_num_bits) - 1; | 86 | const auto cur_bits = std::min(num_bits, m_bits_available); |
| 53 | 87 | ||
| 54 | while (cur_num_bits) { | 88 | // Generate mask for our current bits. |
| 55 | const u64 low = (bitmap >> 0) & cur_mask; | 89 | const u64 mask = (static_cast<u64>(1) << cur_bits) - 1; |
| 56 | const u64 high = (bitmap >> cur_num_bits) & cur_mask; | ||
| 57 | 90 | ||
| 58 | bool choose_low; | 91 | // Add bits to output from our entropy. |
| 59 | if (high == 0) { | 92 | result <<= cur_bits; |
| 60 | // If only low val is set, choose low. | 93 | result |= (m_entropy & mask); |
| 61 | choose_low = true; | ||
| 62 | } else if (low == 0) { | ||
| 63 | // If only high val is set, choose high. | ||
| 64 | choose_low = false; | ||
| 65 | } else { | ||
| 66 | // If both are set, choose random. | ||
| 67 | choose_low = this->GenerateRandomBit(); | ||
| 68 | } | ||
| 69 | 94 | ||
| 70 | // If we chose low, proceed with low. | 95 | // Remove bits from our entropy. |
| 71 | if (choose_low) { | 96 | m_entropy >>= cur_bits; |
| 72 | bitmap = low; | 97 | m_bits_available -= cur_bits; |
| 73 | selected += 0; | ||
| 74 | } else { | ||
| 75 | bitmap = high; | ||
| 76 | selected += cur_num_bits; | ||
| 77 | } | ||
| 78 | 98 | ||
| 79 | // Proceed. | 99 | // Advance. |
| 80 | cur_num_bits /= 2; | 100 | num_bits -= cur_bits; |
| 81 | cur_mask >>= cur_num_bits; | ||
| 82 | } | 101 | } |
| 83 | 102 | ||
| 84 | return selected; | 103 | return result; |
| 85 | } | 104 | } |
| 105 | |||
| 106 | private: | ||
| 107 | Common::TinyMT m_rng; | ||
| 108 | u32 m_entropy{}; | ||
| 109 | u32 m_bits_available{}; | ||
| 86 | }; | 110 | }; |
| 87 | 111 | ||
| 88 | public: | 112 | public: |
| 89 | static constexpr std::size_t MaxDepth = 4; | 113 | static constexpr size_t MaxDepth = 4; |
| 90 | |||
| 91 | private: | ||
| 92 | std::array<u64*, MaxDepth> bit_storages{}; | ||
| 93 | RandomBitGenerator rng{}; | ||
| 94 | std::size_t num_bits{}; | ||
| 95 | std::size_t used_depths{}; | ||
| 96 | 114 | ||
| 97 | public: | 115 | public: |
| 98 | KPageBitmap() = default; | 116 | KPageBitmap() = default; |
| 99 | 117 | ||
| 100 | constexpr std::size_t GetNumBits() const { | 118 | constexpr size_t GetNumBits() const { |
| 101 | return num_bits; | 119 | return m_num_bits; |
| 102 | } | 120 | } |
| 103 | constexpr s32 GetHighestDepthIndex() const { | 121 | constexpr s32 GetHighestDepthIndex() const { |
| 104 | return static_cast<s32>(used_depths) - 1; | 122 | return static_cast<s32>(m_used_depths) - 1; |
| 105 | } | 123 | } |
| 106 | 124 | ||
| 107 | u64* Initialize(u64* storage, std::size_t size) { | 125 | u64* Initialize(u64* storage, size_t size) { |
| 108 | // Initially, everything is un-set. | 126 | // Initially, everything is un-set. |
| 109 | num_bits = 0; | 127 | m_num_bits = 0; |
| 110 | 128 | ||
| 111 | // Calculate the needed bitmap depth. | 129 | // Calculate the needed bitmap depth. |
| 112 | used_depths = static_cast<std::size_t>(GetRequiredDepth(size)); | 130 | m_used_depths = static_cast<size_t>(GetRequiredDepth(size)); |
| 113 | ASSERT(used_depths <= MaxDepth); | 131 | ASSERT(m_used_depths <= MaxDepth); |
| 114 | 132 | ||
| 115 | // Set the bitmap pointers. | 133 | // Set the bitmap pointers. |
| 116 | for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { | 134 | for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { |
| 117 | bit_storages[depth] = storage; | 135 | m_bit_storages[depth] = storage; |
| 118 | size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>(); | 136 | size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>(); |
| 119 | storage += size; | 137 | storage += size; |
| 138 | m_end_storages[depth] = storage; | ||
| 120 | } | 139 | } |
| 121 | 140 | ||
| 122 | return storage; | 141 | return storage; |
| @@ -128,19 +147,19 @@ public: | |||
| 128 | 147 | ||
| 129 | if (random) { | 148 | if (random) { |
| 130 | do { | 149 | do { |
| 131 | const u64 v = bit_storages[depth][offset]; | 150 | const u64 v = m_bit_storages[depth][offset]; |
| 132 | if (v == 0) { | 151 | if (v == 0) { |
| 133 | // If depth is bigger than zero, then a previous level indicated a block was | 152 | // If depth is bigger than zero, then a previous level indicated a block was |
| 134 | // free. | 153 | // free. |
| 135 | ASSERT(depth == 0); | 154 | ASSERT(depth == 0); |
| 136 | return -1; | 155 | return -1; |
| 137 | } | 156 | } |
| 138 | offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v); | 157 | offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v); |
| 139 | ++depth; | 158 | ++depth; |
| 140 | } while (depth < static_cast<s32>(used_depths)); | 159 | } while (depth < static_cast<s32>(m_used_depths)); |
| 141 | } else { | 160 | } else { |
| 142 | do { | 161 | do { |
| 143 | const u64 v = bit_storages[depth][offset]; | 162 | const u64 v = m_bit_storages[depth][offset]; |
| 144 | if (v == 0) { | 163 | if (v == 0) { |
| 145 | // If depth is bigger than zero, then a previous level indicated a block was | 164 | // If depth is bigger than zero, then a previous level indicated a block was |
| 146 | // free. | 165 | // free. |
| @@ -149,28 +168,69 @@ public: | |||
| 149 | } | 168 | } |
| 150 | offset = offset * Common::BitSize<u64>() + std::countr_zero(v); | 169 | offset = offset * Common::BitSize<u64>() + std::countr_zero(v); |
| 151 | ++depth; | 170 | ++depth; |
| 152 | } while (depth < static_cast<s32>(used_depths)); | 171 | } while (depth < static_cast<s32>(m_used_depths)); |
| 153 | } | 172 | } |
| 154 | 173 | ||
| 155 | return static_cast<s64>(offset); | 174 | return static_cast<s64>(offset); |
| 156 | } | 175 | } |
| 157 | 176 | ||
| 158 | void SetBit(std::size_t offset) { | 177 | s64 FindFreeRange(size_t count) { |
| 178 | // Check that it is possible to find a range. | ||
| 179 | const u64* const storage_start = m_bit_storages[m_used_depths - 1]; | ||
| 180 | const u64* const storage_end = m_end_storages[m_used_depths - 1]; | ||
| 181 | |||
| 182 | // If we don't have a storage to iterate (or want more blocks than fit in a single storage), | ||
| 183 | // we can't find a free range. | ||
| 184 | if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) { | ||
| 185 | return -1; | ||
| 186 | } | ||
| 187 | |||
| 188 | // Walk the storages to select a random free range. | ||
| 189 | const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1); | ||
| 190 | const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1); | ||
| 191 | |||
| 192 | const u64 free_mask = (static_cast<u64>(1) << count) - 1; | ||
| 193 | |||
| 194 | size_t num_valid_options = 0; | ||
| 195 | s64 chosen_offset = -1; | ||
| 196 | for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) { | ||
| 197 | u64 storage = storage_start[storage_index]; | ||
| 198 | for (size_t option = 0; option < options_per_storage; ++option) { | ||
| 199 | if ((storage & free_mask) == free_mask) { | ||
| 200 | // We've found a new valid option. | ||
| 201 | ++num_valid_options; | ||
| 202 | |||
| 203 | // Select the Kth valid option with probability 1/K. This leads to an overall | ||
| 204 | // uniform distribution. | ||
| 205 | if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) { | ||
| 206 | // Keep this option (always for the first; each later one with probability 1/K). | ||
| 207 | chosen_offset = storage_index * Common::BitSize<u64>() + option * count; | ||
| 208 | } | ||
| 209 | } | ||
| 210 | storage >>= count; | ||
| 211 | } | ||
| 212 | } | ||
| 213 | |||
| 214 | // Return the random offset we chose. | ||
| 215 | return chosen_offset; | ||
| 216 | } | ||
| 217 | |||
| 218 | void SetBit(size_t offset) { | ||
| 159 | this->SetBit(this->GetHighestDepthIndex(), offset); | 219 | this->SetBit(this->GetHighestDepthIndex(), offset); |
| 160 | num_bits++; | 220 | m_num_bits++; |
| 161 | } | 221 | } |
| 162 | 222 | ||
| 163 | void ClearBit(std::size_t offset) { | 223 | void ClearBit(size_t offset) { |
| 164 | this->ClearBit(this->GetHighestDepthIndex(), offset); | 224 | this->ClearBit(this->GetHighestDepthIndex(), offset); |
| 165 | num_bits--; | 225 | m_num_bits--; |
| 166 | } | 226 | } |
| 167 | 227 | ||
| 168 | bool ClearRange(std::size_t offset, std::size_t count) { | 228 | bool ClearRange(size_t offset, size_t count) { |
| 169 | s32 depth = this->GetHighestDepthIndex(); | 229 | s32 depth = this->GetHighestDepthIndex(); |
| 170 | u64* bits = bit_storages[depth]; | 230 | u64* bits = m_bit_storages[depth]; |
| 171 | std::size_t bit_ind = offset / Common::BitSize<u64>(); | 231 | size_t bit_ind = offset / Common::BitSize<u64>(); |
| 172 | if (count < Common::BitSize<u64>()) { | 232 | if (count < Common::BitSize<u64>()) [[likely]] { |
| 173 | const std::size_t shift = offset % Common::BitSize<u64>(); | 233 | const size_t shift = offset % Common::BitSize<u64>(); |
| 174 | ASSERT(shift + count <= Common::BitSize<u64>()); | 234 | ASSERT(shift + count <= Common::BitSize<u64>()); |
| 175 | // Check that all the bits are set. | 235 | // Check that all the bits are set. |
| 176 | const u64 mask = ((u64(1) << count) - 1) << shift; | 236 | const u64 mask = ((u64(1) << count) - 1) << shift; |
| @@ -189,8 +249,8 @@ public: | |||
| 189 | ASSERT(offset % Common::BitSize<u64>() == 0); | 249 | ASSERT(offset % Common::BitSize<u64>() == 0); |
| 190 | ASSERT(count % Common::BitSize<u64>() == 0); | 250 | ASSERT(count % Common::BitSize<u64>() == 0); |
| 191 | // Check that all the bits are set. | 251 | // Check that all the bits are set. |
| 192 | std::size_t remaining = count; | 252 | size_t remaining = count; |
| 193 | std::size_t i = 0; | 253 | size_t i = 0; |
| 194 | do { | 254 | do { |
| 195 | if (bits[bit_ind + i++] != ~u64(0)) { | 255 | if (bits[bit_ind + i++] != ~u64(0)) { |
| 196 | return false; | 256 | return false; |
| @@ -209,18 +269,18 @@ public: | |||
| 209 | } while (remaining > 0); | 269 | } while (remaining > 0); |
| 210 | } | 270 | } |
| 211 | 271 | ||
| 212 | num_bits -= count; | 272 | m_num_bits -= count; |
| 213 | return true; | 273 | return true; |
| 214 | } | 274 | } |
| 215 | 275 | ||
| 216 | private: | 276 | private: |
| 217 | void SetBit(s32 depth, std::size_t offset) { | 277 | void SetBit(s32 depth, size_t offset) { |
| 218 | while (depth >= 0) { | 278 | while (depth >= 0) { |
| 219 | std::size_t ind = offset / Common::BitSize<u64>(); | 279 | size_t ind = offset / Common::BitSize<u64>(); |
| 220 | std::size_t which = offset % Common::BitSize<u64>(); | 280 | size_t which = offset % Common::BitSize<u64>(); |
| 221 | const u64 mask = u64(1) << which; | 281 | const u64 mask = u64(1) << which; |
| 222 | 282 | ||
| 223 | u64* bit = std::addressof(bit_storages[depth][ind]); | 283 | u64* bit = std::addressof(m_bit_storages[depth][ind]); |
| 224 | u64 v = *bit; | 284 | u64 v = *bit; |
| 225 | ASSERT((v & mask) == 0); | 285 | ASSERT((v & mask) == 0); |
| 226 | *bit = v | mask; | 286 | *bit = v | mask; |
| @@ -232,13 +292,13 @@ private: | |||
| 232 | } | 292 | } |
| 233 | } | 293 | } |
| 234 | 294 | ||
| 235 | void ClearBit(s32 depth, std::size_t offset) { | 295 | void ClearBit(s32 depth, size_t offset) { |
| 236 | while (depth >= 0) { | 296 | while (depth >= 0) { |
| 237 | std::size_t ind = offset / Common::BitSize<u64>(); | 297 | size_t ind = offset / Common::BitSize<u64>(); |
| 238 | std::size_t which = offset % Common::BitSize<u64>(); | 298 | size_t which = offset % Common::BitSize<u64>(); |
| 239 | const u64 mask = u64(1) << which; | 299 | const u64 mask = u64(1) << which; |
| 240 | 300 | ||
| 241 | u64* bit = std::addressof(bit_storages[depth][ind]); | 301 | u64* bit = std::addressof(m_bit_storages[depth][ind]); |
| 242 | u64 v = *bit; | 302 | u64 v = *bit; |
| 243 | ASSERT((v & mask) != 0); | 303 | ASSERT((v & mask) != 0); |
| 244 | v &= ~mask; | 304 | v &= ~mask; |
| @@ -252,7 +312,7 @@ private: | |||
| 252 | } | 312 | } |
| 253 | 313 | ||
| 254 | private: | 314 | private: |
| 255 | static constexpr s32 GetRequiredDepth(std::size_t region_size) { | 315 | static constexpr s32 GetRequiredDepth(size_t region_size) { |
| 256 | s32 depth = 0; | 316 | s32 depth = 0; |
| 257 | while (true) { | 317 | while (true) { |
| 258 | region_size /= Common::BitSize<u64>(); | 318 | region_size /= Common::BitSize<u64>(); |
| @@ -264,8 +324,8 @@ private: | |||
| 264 | } | 324 | } |
| 265 | 325 | ||
| 266 | public: | 326 | public: |
| 267 | static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) { | 327 | static constexpr size_t CalculateManagementOverheadSize(size_t region_size) { |
| 268 | std::size_t overhead_bits = 0; | 328 | size_t overhead_bits = 0; |
| 269 | for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { | 329 | for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { |
| 270 | region_size = | 330 | region_size = |
| 271 | Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>(); | 331 | Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>(); |
| @@ -273,6 +333,13 @@ public: | |||
| 273 | } | 333 | } |
| 274 | return overhead_bits * sizeof(u64); | 334 | return overhead_bits * sizeof(u64); |
| 275 | } | 335 | } |
| 336 | |||
| 337 | private: | ||
| 338 | std::array<u64*, MaxDepth> m_bit_storages{}; | ||
| 339 | std::array<u64*, MaxDepth> m_end_storages{}; | ||
| 340 | RandomBitGenerator m_rng; | ||
| 341 | size_t m_num_bits{}; | ||
| 342 | size_t m_used_depths{}; | ||
| 276 | }; | 343 | }; |
| 277 | 344 | ||
| 278 | } // namespace Kernel | 345 | } // namespace Kernel |
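FindFreeRange above keeps only a single candidate while scanning: the Kth valid option replaces the current choice with probability 1/K, which is single-element reservoir sampling and yields a uniformly random pick without materializing the full option list. A standalone sketch of the scheme, using std::mt19937_64 in place of the kernel's TinyMT-backed generator:

#include <cstddef>
#include <cstdio>
#include <random>
#include <vector>

// Sketch of the "select the Kth option with probability 1/K" scheme used in
// KPageBitmap::FindFreeRange (single-element reservoir sampling).
int main() {
    std::mt19937_64 rng{std::random_device{}()};
    const std::vector<int> options{3, 8, 21, 34};

    std::size_t num_seen = 0;
    int chosen = -1;
    for (int option : options) {
        ++num_seen;
        // Keep the new option with probability 1/num_seen. After the loop,
        // every option has been retained with probability 1/options.size().
        if (num_seen == 1 ||
            std::uniform_int_distribution<std::size_t>{0, num_seen - 1}(rng) == 0) {
            chosen = option;
        }
    }
    std::printf("chosen = %d\n", chosen);
}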
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index aef06e213..cfedaae61 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h | |||
| @@ -11,6 +11,16 @@ | |||
| 11 | 11 | ||
| 12 | namespace Kernel { | 12 | namespace Kernel { |
| 13 | 13 | ||
| 14 | class KernelCore; | ||
| 15 | |||
| 16 | class KPageBufferSlabHeap : protected impl::KSlabHeapImpl { | ||
| 17 | public: | ||
| 18 | static constexpr size_t BufferSize = PageSize; | ||
| 19 | |||
| 20 | public: | ||
| 21 | void Initialize(Core::System& system); | ||
| 22 | }; | ||
| 23 | |||
| 14 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { | 24 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { |
| 15 | public: | 25 | public: |
| 16 | explicit KPageBuffer(KernelCore&) {} | 26 | explicit KPageBuffer(KernelCore&) {} |
| @@ -21,8 +31,6 @@ public: | |||
| 21 | private: | 31 | private: |
| 22 | [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; | 32 | [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; |
| 23 | }; | 33 | }; |
| 24 | 34 | static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize); | |
| 25 | static_assert(sizeof(KPageBuffer) == PageSize); | ||
| 26 | static_assert(alignof(KPageBuffer) == PageSize); | ||
| 27 | 35 | ||
| 28 | } // namespace Kernel | 36 | } // namespace Kernel |
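The new static_assert ties sizeof(KPageBuffer) to KPageBufferSlabHeap::BufferSize so slab elements tile their backing pages exactly. A minimal sketch of a page-sized, page-aligned element with the same compile-time checks, assuming a 4 KiB page:

#include <array>
#include <cstddef>
#include <cstdint>

// Sketch in the spirit of the KPageBuffer asserts above; the 4 KiB PageSize
// is an assumption for illustration.
constexpr std::size_t PageSize = 0x1000;

struct alignas(PageSize) PageBuffer {
    std::array<std::uint8_t, PageSize> bytes{};
};

static_assert(sizeof(PageBuffer) == PageSize);
static_assert(alignof(PageBuffer) == PageSize);

int main() {}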
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h index 968753992..316f172f2 100644 --- a/src/core/hle/kernel/k_page_group.h +++ b/src/core/hle/kernel/k_page_group.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <list> | 6 | #include <list> |
| 7 | 7 | ||
| 8 | #include "common/alignment.h" | ||
| 8 | #include "common/assert.h" | 9 | #include "common/assert.h" |
| 9 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 10 | #include "core/hle/kernel/memory_types.h" | 11 | #include "core/hle/kernel/memory_types.h" |
| @@ -12,6 +13,89 @@ | |||
| 12 | 13 | ||
| 13 | namespace Kernel { | 14 | namespace Kernel { |
| 14 | 15 | ||
| 16 | class KPageGroup; | ||
| 17 | |||
| 18 | class KBlockInfo { | ||
| 19 | private: | ||
| 20 | friend class KPageGroup; | ||
| 21 | |||
| 22 | public: | ||
| 23 | constexpr KBlockInfo() = default; | ||
| 24 | |||
| 25 | constexpr void Initialize(PAddr addr, size_t np) { | ||
| 26 | ASSERT(Common::IsAligned(addr, PageSize)); | ||
| 27 | ASSERT(static_cast<u32>(np) == np); | ||
| 28 | |||
| 29 | m_page_index = static_cast<u32>(addr / PageSize); | ||
| 30 | m_num_pages = static_cast<u32>(np); | ||
| 31 | } | ||
| 32 | |||
| 33 | constexpr PAddr GetAddress() const { | ||
| 34 | return m_page_index * PageSize; | ||
| 35 | } | ||
| 36 | constexpr size_t GetNumPages() const { | ||
| 37 | return m_num_pages; | ||
| 38 | } | ||
| 39 | constexpr size_t GetSize() const { | ||
| 40 | return this->GetNumPages() * PageSize; | ||
| 41 | } | ||
| 42 | constexpr PAddr GetEndAddress() const { | ||
| 43 | return (m_page_index + m_num_pages) * PageSize; | ||
| 44 | } | ||
| 45 | constexpr PAddr GetLastAddress() const { | ||
| 46 | return this->GetEndAddress() - 1; | ||
| 47 | } | ||
| 48 | |||
| 49 | constexpr KBlockInfo* GetNext() const { | ||
| 50 | return m_next; | ||
| 51 | } | ||
| 52 | |||
| 53 | constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const { | ||
| 54 | return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages; | ||
| 55 | } | ||
| 56 | |||
| 57 | constexpr bool operator==(const KBlockInfo& rhs) const { | ||
| 58 | return this->IsEquivalentTo(rhs); | ||
| 59 | } | ||
| 60 | |||
| 61 | constexpr bool operator!=(const KBlockInfo& rhs) const { | ||
| 62 | return !(*this == rhs); | ||
| 63 | } | ||
| 64 | |||
| 65 | constexpr bool IsStrictlyBefore(PAddr addr) const { | ||
| 66 | const PAddr end = this->GetEndAddress(); | ||
| 67 | |||
| 68 | if (m_page_index != 0 && end == 0) { | ||
| 69 | return false; | ||
| 70 | } | ||
| 71 | |||
| 72 | return end < addr; | ||
| 73 | } | ||
| 74 | |||
| 75 | constexpr bool operator<(PAddr addr) const { | ||
| 76 | return this->IsStrictlyBefore(addr); | ||
| 77 | } | ||
| 78 | |||
| 79 | constexpr bool TryConcatenate(PAddr addr, size_t np) { | ||
| 80 | if (addr != 0 && addr == this->GetEndAddress()) { | ||
| 81 | m_num_pages += static_cast<u32>(np); | ||
| 82 | return true; | ||
| 83 | } | ||
| 84 | return false; | ||
| 85 | } | ||
| 86 | |||
| 87 | private: | ||
| 88 | constexpr void SetNext(KBlockInfo* next) { | ||
| 89 | m_next = next; | ||
| 90 | } | ||
| 91 | |||
| 92 | private: | ||
| 93 | KBlockInfo* m_next{}; | ||
| 94 | u32 m_page_index{}; | ||
| 95 | u32 m_num_pages{}; | ||
| 96 | }; | ||
| 97 | static_assert(sizeof(KBlockInfo) <= 0x10); | ||
| 98 | |||
| 15 | class KPageGroup final { | 99 | class KPageGroup final { |
| 16 | public: | 100 | public: |
| 17 | class Node final { | 101 | class Node final { |
| @@ -92,6 +176,8 @@ public: | |||
| 92 | return nodes.empty(); | 176 | return nodes.empty(); |
| 93 | } | 177 | } |
| 94 | 178 | ||
| 179 | void Finalize() {} | ||
| 180 | |||
| 95 | private: | 181 | private: |
| 96 | std::list<Node> nodes; | 182 | std::list<Node> nodes; |
| 97 | }; | 183 | }; |
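KBlockInfo::TryConcatenate grows a (page index, page count) run only when the incoming range starts exactly at the run's current end address. A standalone sketch of that coalescing rule; an illustrative re-implementation, not yuzu's class:

#include <cstdint>
#include <cstdio>

// Sketch of KBlockInfo-style run coalescing over physical pages.
constexpr std::uint64_t PageSize = 0x1000;

struct Block {
    std::uint32_t page_index{};
    std::uint32_t num_pages{};

    std::uint64_t End() const {
        return (static_cast<std::uint64_t>(page_index) + num_pages) * PageSize;
    }
    bool TryConcatenate(std::uint64_t addr, std::uint32_t np) {
        // Absorb the new range only if it is exactly contiguous with this one.
        if (addr != 0 && addr == End()) {
            num_pages += np;
            return true;
        }
        return false;
    }
};

int main() {
    Block b{/*page_index=*/0x10, /*num_pages=*/4};
    std::printf("%d\n", b.TryConcatenate(0x14 * PageSize, 2)); // 1: contiguous, absorbed
    std::printf("%d\n", b.TryConcatenate(0x20 * PageSize, 2)); // 0: gap, rejected
}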
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp index 5ede60168..7b02c7d8b 100644 --- a/src/core/hle/kernel/k_page_heap.cpp +++ b/src/core/hle/kernel/k_page_heap.cpp | |||
| @@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const { | |||
| 44 | return num_free; | 44 | return num_free; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | 47 | PAddr KPageHeap::AllocateByLinearSearch(s32 index) { |
| 48 | const size_t needed_size = m_blocks[index].GetSize(); | 48 | const size_t needed_size = m_blocks[index].GetSize(); |
| 49 | 49 | ||
| 50 | for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { | 50 | for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { |
| 51 | if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { | 51 | if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) { |
| 52 | if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { | 52 | if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { |
| 53 | this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); | 53 | this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); |
| 54 | } | 54 | } |
| @@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | |||
| 59 | return 0; | 59 | return 0; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) { | ||
| 63 | // Get the size and required alignment. | ||
| 64 | const size_t needed_size = num_pages * PageSize; | ||
| 65 | const size_t align_size = align_pages * PageSize; | ||
| 66 | |||
| 67 | // Determine meta-alignment of our desired alignment size. | ||
| 68 | const size_t align_shift = std::countr_zero(align_size); | ||
| 69 | |||
| 70 | // Decide on a block to allocate from. | ||
| 71 | constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4; | ||
| 72 | { | ||
| 73 | // By default, we'll want to look at all blocks larger than our current one. | ||
| 74 | s32 max_blocks = static_cast<s32>(m_num_blocks); | ||
| 75 | |||
| 76 | // Determine the maximum block we should try to allocate from. | ||
| 77 | size_t possible_alignments = 0; | ||
| 78 | for (s32 i = index; i < max_blocks; ++i) { | ||
| 79 | // Add the possible alignments from blocks at the current size. | ||
| 80 | possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||
| 81 | m_blocks[i].GetNumFreeBlocks(); | ||
| 82 | |||
| 83 | // If there are enough possible alignments, we don't need to look at larger blocks. | ||
| 84 | if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) { | ||
| 85 | max_blocks = i + 1; | ||
| 86 | break; | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | // If we have any possible alignments which require a larger block, we need to pick one. | ||
| 91 | if (possible_alignments > 0 && index + 1 < max_blocks) { | ||
| 92 | // Select a random alignment from the possibilities. | ||
| 93 | const size_t rnd = m_rng.GenerateRandom(possible_alignments); | ||
| 94 | |||
| 95 | // Determine which block corresponds to the random alignment we chose. | ||
| 96 | possible_alignments = 0; | ||
| 97 | for (s32 i = index; i < max_blocks; ++i) { | ||
| 98 | // Add the possible alignments from blocks at the current size. | ||
| 99 | possible_alignments += | ||
| 100 | (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||
| 101 | m_blocks[i].GetNumFreeBlocks(); | ||
| 102 | |||
| 103 | // If the current block gets us to our random choice, use the current block. | ||
| 104 | if (rnd < possible_alignments) { | ||
| 105 | index = i; | ||
| 106 | break; | ||
| 107 | } | ||
| 108 | } | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 112 | // Pop a block from the index we selected. | ||
| 113 | if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) { | ||
| 114 | // Determine how much size we have left over. | ||
| 115 | if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; | ||
| 116 | leftover_size > 0) { | ||
| 117 | // Determine how many valid alignments we can have. | ||
| 118 | const size_t possible_alignments = 1 + (leftover_size >> align_shift); | ||
| 119 | |||
| 120 | // Select a random valid alignment. | ||
| 121 | const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift; | ||
| 122 | |||
| 123 | // Free memory before the random offset. | ||
| 124 | if (random_offset != 0) { | ||
| 125 | this->Free(addr, random_offset / PageSize); | ||
| 126 | } | ||
| 127 | |||
| 128 | // Advance our block by the random offset. | ||
| 129 | addr += random_offset; | ||
| 130 | |||
| 131 | // Free memory after our allocated block. | ||
| 132 | if (random_offset != leftover_size) { | ||
| 133 | this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize); | ||
| 134 | } | ||
| 135 | } | ||
| 136 | |||
| 137 | // Return the block we allocated. | ||
| 138 | return addr; | ||
| 139 | } | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
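The routine above draws uniformly over candidate placements rather than over free blocks: a free block of size S admits 1 + ((S - needed_size) >> align_shift) aligned offsets, so larger block levels win the draw proportionally more often. Below is a minimal standalone sketch of the same two-pass weighted selection, using std::mt19937 in place of the kernel's KPageBitmap::RandomBitGenerator and omitting the MinimumPossibleAlignmentsForRandomAllocation cutoff; all names are illustrative, not taken from this diff.

#include <cstddef>
#include <random>
#include <vector>

struct FreeLevel {
    std::size_t block_size;      // bytes per block at this level (assumed >= needed_size)
    std::size_t num_free_blocks; // free blocks currently on this level's list
};

// Aligned placements a single block at this level admits.
std::size_t PlacementsPerBlock(const FreeLevel& level, std::size_t needed_size,
                               std::size_t align_shift) {
    return 1 + ((level.block_size - needed_size) >> align_shift);
}

// Pass 1 totals the candidate placements; pass 2 replays the same counts until
// the random draw falls inside a level, mirroring the kernel's index selection.
int PickLevel(const std::vector<FreeLevel>& levels, std::size_t needed_size,
              std::size_t align_shift, std::mt19937& rng) {
    std::size_t total = 0;
    for (const auto& level : levels) {
        total += PlacementsPerBlock(level, needed_size, align_shift) * level.num_free_blocks;
    }
    if (total == 0) {
        return -1; // no level can satisfy the request
    }
    std::uniform_int_distribution<std::size_t> dist(0, total - 1);
    const std::size_t draw = dist(rng);
    std::size_t running = 0;
    for (std::size_t i = 0; i < levels.size(); ++i) {
        running += PlacementsPerBlock(levels[i], needed_size, align_shift) *
                   levels[i].num_free_blocks;
        if (draw < running) {
            return static_cast<int>(i);
        }
    }
    return -1; // unreachable when total > 0
}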
| 62 | void KPageHeap::FreeBlock(PAddr block, s32 index) { | 144 | void KPageHeap::FreeBlock(PAddr block, s32 index) { |
| 63 | do { | 145 | do { |
| 64 | block = m_blocks[index++].PushBlock(block); | 146 | block = m_blocks[index++].PushBlock(block); |
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h index 0917a8bed..9021edcf7 100644 --- a/src/core/hle/kernel/k_page_heap.h +++ b/src/core/hle/kernel/k_page_heap.h | |||
| @@ -14,13 +14,9 @@ | |||
| 14 | 14 | ||
| 15 | namespace Kernel { | 15 | namespace Kernel { |
| 16 | 16 | ||
| 17 | class KPageHeap final { | 17 | class KPageHeap { |
| 18 | public: | 18 | public: |
| 19 | YUZU_NON_COPYABLE(KPageHeap); | ||
| 20 | YUZU_NON_MOVEABLE(KPageHeap); | ||
| 21 | |||
| 22 | KPageHeap() = default; | 19 | KPageHeap() = default; |
| 23 | ~KPageHeap() = default; | ||
| 24 | 20 | ||
| 25 | constexpr PAddr GetAddress() const { | 21 | constexpr PAddr GetAddress() const { |
| 26 | return m_heap_address; | 22 | return m_heap_address; |
| @@ -57,7 +53,20 @@ public: | |||
| 57 | m_initial_used_size = m_heap_size - free_size - reserved_size; | 53 | m_initial_used_size = m_heap_size - free_size - reserved_size; |
| 58 | } | 54 | } |
| 59 | 55 | ||
| 60 | PAddr AllocateBlock(s32 index, bool random); | 56 | PAddr AllocateBlock(s32 index, bool random) { |
| 57 | if (random) { | ||
| 58 | const size_t block_pages = m_blocks[index].GetNumPages(); | ||
| 59 | return this->AllocateByRandom(index, block_pages, block_pages); | ||
| 60 | } else { | ||
| 61 | return this->AllocateByLinearSearch(index); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { | ||
| 66 | // TODO: linear search support? | ||
| 67 | return this->AllocateByRandom(index, num_pages, align_pages); | ||
| 68 | } | ||
| 69 | |||
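Callers are expected to convert a page count and alignment into a block index before allocating. A hypothetical call site under that reading; the heap object, the sizes, and the error handling are made up for illustration:

// Allocate 16 pages aligned to 16 pages from a Kernel::KPageHeap `heap`.
// GetAlignedBlockIndex picks the smallest level whose block size covers
// max(num_pages, align_pages); AllocateAligned then randomizes the placement
// inside the popped block and frees the slack back to the heap.
const s32 index = Kernel::KPageHeap::GetAlignedBlockIndex(16, 16);
if (index >= 0) {
    if (const PAddr addr = heap.AllocateAligned(index, 16, 16); addr != 0) {
        // ... use [addr, addr + 16 * PageSize) ...
        heap.Free(addr, 16);
    }
}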
| 61 | void Free(PAddr addr, size_t num_pages); | 70 | void Free(PAddr addr, size_t num_pages); |
| 62 | 71 | ||
| 63 | static size_t CalculateManagementOverheadSize(size_t region_size) { | 72 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| @@ -68,7 +77,7 @@ public: | |||
| 68 | static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { | 77 | static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { |
| 69 | const size_t target_pages = std::max(num_pages, align_pages); | 78 | const size_t target_pages = std::max(num_pages, align_pages); |
| 70 | for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { | 79 | for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { |
| 71 | if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { | 80 | if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 72 | return static_cast<s32>(i); | 81 | return static_cast<s32>(i); |
| 73 | } | 82 | } |
| 74 | } | 83 | } |
| @@ -77,7 +86,7 @@ public: | |||
| 77 | 86 | ||
| 78 | static constexpr s32 GetBlockIndex(size_t num_pages) { | 87 | static constexpr s32 GetBlockIndex(size_t num_pages) { |
| 79 | for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { | 88 | for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { |
| 80 | if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { | 89 | if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 81 | return i; | 90 | return i; |
| 82 | } | 91 | } |
| 83 | } | 92 | } |
| @@ -85,7 +94,7 @@ public: | |||
| 85 | } | 94 | } |
| 86 | 95 | ||
| 87 | static constexpr size_t GetBlockSize(size_t index) { | 96 | static constexpr size_t GetBlockSize(size_t index) { |
| 88 | return size_t(1) << MemoryBlockPageShifts[index]; | 97 | return static_cast<size_t>(1) << MemoryBlockPageShifts[index]; |
| 89 | } | 98 | } |
| 90 | 99 | ||
| 91 | static constexpr size_t GetBlockNumPages(size_t index) { | 100 | static constexpr size_t GetBlockNumPages(size_t index) { |
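Since MemoryBlockPageShifts stores log2 block sizes, these helpers are pure shift arithmetic. A self-contained check of the mapping, with an illustrative shift table trimmed to three entries starting at the 4 KiB page:

#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000; // 4 KiB
constexpr std::size_t MemoryBlockPageShifts[] = {12, 16, 21};

constexpr std::size_t GetBlockSize(std::size_t index) {
    return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
}

constexpr std::size_t GetBlockNumPages(std::size_t index) {
    return GetBlockSize(index) / PageSize;
}

int main() {
    // index 0: 1 << 12 = 4 KiB  -> 1 page
    // index 1: 1 << 16 = 64 KiB -> 16 pages
    // index 2: 1 << 21 = 2 MiB  -> 512 pages
    for (std::size_t i = 0; i < 3; ++i) {
        std::printf("index %zu: %zu bytes, %zu pages\n", i, GetBlockSize(i),
                    GetBlockNumPages(i));
    }
    return 0;
}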
| @@ -93,13 +102,9 @@ public: | |||
| 93 | } | 102 | } |
| 94 | 103 | ||
| 95 | private: | 104 | private: |
| 96 | class Block final { | 105 | class Block { |
| 97 | public: | 106 | public: |
| 98 | YUZU_NON_COPYABLE(Block); | ||
| 99 | YUZU_NON_MOVEABLE(Block); | ||
| 100 | |||
| 101 | Block() = default; | 107 | Block() = default; |
| 102 | ~Block() = default; | ||
| 103 | 108 | ||
| 104 | constexpr size_t GetShift() const { | 109 | constexpr size_t GetShift() const { |
| 105 | return m_block_shift; | 110 | return m_block_shift; |
| @@ -201,6 +206,9 @@ private: | |||
| 201 | }; | 206 | }; |
| 202 | 207 | ||
| 203 | private: | 208 | private: |
| 209 | PAddr AllocateByLinearSearch(s32 index); | ||
| 210 | PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages); | ||
| 211 | |||
| 204 | static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, | 212 | static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, |
| 205 | size_t num_block_shifts); | 213 | size_t num_block_shifts); |
| 206 | 214 | ||
| @@ -209,7 +217,8 @@ private: | |||
| 209 | size_t m_heap_size{}; | 217 | size_t m_heap_size{}; |
| 210 | size_t m_initial_used_size{}; | 218 | size_t m_initial_used_size{}; |
| 211 | size_t m_num_blocks{}; | 219 | size_t m_num_blocks{}; |
| 212 | std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; | 220 | std::array<Block, NumMemoryBlockPageShifts> m_blocks; |
| 221 | KPageBitmap::RandomBitGenerator m_rng; | ||
| 213 | std::vector<u64> m_management_data; | 222 | std::vector<u64> m_management_data; |
| 214 | }; | 223 | }; |
| 215 | 224 | ||
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 307e491cb..5387bf5fe 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include "core/hle/kernel/k_resource_limit.h" | 15 | #include "core/hle/kernel/k_resource_limit.h" |
| 16 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | 16 | #include "core/hle/kernel/k_scoped_resource_reservation.h" |
| 17 | #include "core/hle/kernel/k_system_control.h" | 17 | #include "core/hle/kernel/k_system_control.h" |
| 18 | #include "core/hle/kernel/k_system_resource.h" | ||
| 18 | #include "core/hle/kernel/kernel.h" | 19 | #include "core/hle/kernel/kernel.h" |
| 19 | #include "core/hle/kernel/svc_results.h" | 20 | #include "core/hle/kernel/svc_results.h" |
| 20 | #include "core/memory.h" | 21 | #include "core/memory.h" |
| @@ -23,6 +24,61 @@ namespace Kernel { | |||
| 23 | 24 | ||
| 24 | namespace { | 25 | namespace { |
| 25 | 26 | ||
| 27 | class KScopedLightLockPair { | ||
| 28 | YUZU_NON_COPYABLE(KScopedLightLockPair); | ||
| 29 | YUZU_NON_MOVEABLE(KScopedLightLockPair); | ||
| 30 | |||
| 31 | private: | ||
| 32 | KLightLock* m_lower; | ||
| 33 | KLightLock* m_upper; | ||
| 34 | |||
| 35 | public: | ||
| 36 | KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { | ||
| 37 | // Ensure our locks are in a consistent order. | ||
| 38 | if (std::addressof(lhs) <= std::addressof(rhs)) { | ||
| 39 | m_lower = std::addressof(lhs); | ||
| 40 | m_upper = std::addressof(rhs); | ||
| 41 | } else { | ||
| 42 | m_lower = std::addressof(rhs); | ||
| 43 | m_upper = std::addressof(lhs); | ||
| 44 | } | ||
| 45 | |||
| 46 | // Acquire both locks. | ||
| 47 | m_lower->Lock(); | ||
| 48 | if (m_lower != m_upper) { | ||
| 49 | m_upper->Lock(); | ||
| 50 | } | ||
| 51 | } | ||
| 52 | |||
| 53 | ~KScopedLightLockPair() { | ||
| 54 | // Unlock the upper lock. | ||
| 55 | if (m_upper != nullptr && m_upper != m_lower) { | ||
| 56 | m_upper->Unlock(); | ||
| 57 | } | ||
| 58 | |||
| 59 | // Unlock the lower lock. | ||
| 60 | if (m_lower != nullptr) { | ||
| 61 | m_lower->Unlock(); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | public: | ||
| 66 | // Utility. | ||
| 67 | void TryUnlockHalf(KLightLock& lock) { | ||
| 68 | // Only allow unlocking if the lock is half the pair. | ||
| 69 | if (m_lower != m_upper) { | ||
| 70 | // We want to be sure the lock is one we own. | ||
| 71 | if (m_lower == std::addressof(lock)) { | ||
| 72 | lock.Unlock(); | ||
| 73 | m_lower = nullptr; | ||
| 74 | } else if (m_upper == std::addressof(lock)) { | ||
| 75 | lock.Unlock(); | ||
| 76 | m_upper = nullptr; | ||
| 77 | } | ||
| 78 | } | ||
| 79 | } | ||
| 80 | }; | ||
| 81 | |||
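KScopedLightLockPair avoids the classic AB/BA deadlock by always acquiring the lower-addressed lock first, and it tolerates being handed the same lock twice. The same discipline expressed with standard-library mutexes, offered as a point of comparison rather than a drop-in replacement:

#include <memory>
#include <mutex>

// Address-ordered acquisition of two mutexes; safe when two callers pass the
// same pair in either order, and when lhs and rhs are the same object.
class ScopedMutexPair {
public:
    ScopedMutexPair(std::mutex& lhs, std::mutex& rhs)
        : m_lower{std::addressof(lhs) <= std::addressof(rhs) ? &lhs : &rhs},
          m_upper{std::addressof(lhs) <= std::addressof(rhs) ? &rhs : &lhs} {
        m_lower->lock();
        if (m_lower != m_upper) {
            m_upper->lock();
        }
    }

    ~ScopedMutexPair() {
        if (m_upper != m_lower) {
            m_upper->unlock();
        }
        m_lower->unlock();
    }

    ScopedMutexPair(const ScopedMutexPair&) = delete;
    ScopedMutexPair& operator=(const ScopedMutexPair&) = delete;

private:
    std::mutex* m_lower;
    std::mutex* m_upper;
};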
| 26 | using namespace Common::Literals; | 82 | using namespace Common::Literals; |
| 27 | 83 | ||
| 28 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { | 84 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { |
| @@ -49,9 +105,10 @@ KPageTable::KPageTable(Core::System& system_) | |||
| 49 | KPageTable::~KPageTable() = default; | 105 | KPageTable::~KPageTable() = default; |
| 50 | 106 | ||
| 51 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 107 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 52 | VAddr code_addr, size_t code_size, | 108 | bool enable_das_merge, bool from_back, |
| 53 | KMemoryBlockSlabManager* mem_block_slab_manager, | 109 | KMemoryManager::Pool pool, VAddr code_addr, |
| 54 | KMemoryManager::Pool pool) { | 110 | size_t code_size, KSystemResource* system_resource, |
| 111 | KResourceLimit* resource_limit) { | ||
| 55 | 112 | ||
| 56 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { | 113 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { |
| 57 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); | 114 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); |
| @@ -112,11 +169,13 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 112 | 169 | ||
| 113 | // Set other basic fields | 170 | // Set other basic fields |
| 114 | m_enable_aslr = enable_aslr; | 171 | m_enable_aslr = enable_aslr; |
| 115 | m_enable_device_address_space_merge = false; | 172 | m_enable_device_address_space_merge = enable_das_merge; |
| 116 | m_address_space_start = start; | 173 | m_address_space_start = start; |
| 117 | m_address_space_end = end; | 174 | m_address_space_end = end; |
| 118 | m_is_kernel = false; | 175 | m_is_kernel = false; |
| 119 | m_memory_block_slab_manager = mem_block_slab_manager; | 176 | m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); |
| 177 | m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); | ||
| 178 | m_resource_limit = resource_limit; | ||
| 120 | 179 | ||
| 121 | // Determine the region we can place our undetermineds in | 180 | // Determine the region we can place our undetermineds in |
| 122 | VAddr alloc_start{}; | 181 | VAddr alloc_start{}; |
| @@ -215,10 +274,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 215 | } | 274 | } |
| 216 | } | 275 | } |
| 217 | 276 | ||
| 218 | // Set heap members | 277 | // Set heap and fill members. |
| 219 | m_current_heap_end = m_heap_region_start; | 278 | m_current_heap_end = m_heap_region_start; |
| 220 | m_max_heap_size = 0; | 279 | m_max_heap_size = 0; |
| 221 | m_max_physical_memory_size = 0; | 280 | m_mapped_physical_memory_size = 0; |
| 281 | m_mapped_unsafe_physical_memory = 0; | ||
| 282 | m_mapped_insecure_memory = 0; | ||
| 283 | m_mapped_ipc_server_memory = 0; | ||
| 284 | |||
| 285 | m_heap_fill_value = 0; | ||
| 286 | m_ipc_fill_value = 0; | ||
| 287 | m_stack_fill_value = 0; | ||
| 288 | |||
| 289 | // Set allocation option. | ||
| 290 | m_allocate_option = | ||
| 291 | KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack | ||
| 292 | : KMemoryManager::Direction::FromFront); | ||
| 222 | 293 | ||
| 223 | // Ensure that our regions are inside our address space | 294 | // Ensure that our regions are inside our address space |
| 224 | auto IsInAddressSpace = [&](VAddr addr) { | 295 | auto IsInAddressSpace = [&](VAddr addr) { |
| @@ -267,6 +338,16 @@ void KPageTable::Finalize() { | |||
| 267 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); | 338 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); |
| 268 | }); | 339 | }); |
| 269 | 340 | ||
| 341 | // Release any insecure mapped memory. | ||
| 342 | if (m_mapped_insecure_memory) { | ||
| 343 | UNIMPLEMENTED(); | ||
| 344 | } | ||
| 345 | |||
| 346 | // Release any ipc server memory. | ||
| 347 | if (m_mapped_ipc_server_memory) { | ||
| 348 | UNIMPLEMENTED(); | ||
| 349 | } | ||
| 350 | |||
| 270 | // Close the backing page table, as the destructor is not called for guest objects. | 351 | // Close the backing page table, as the destructor is not called for guest objects. |
| 271 | m_page_table_impl.reset(); | 352 | m_page_table_impl.reset(); |
| 272 | } | 353 | } |
| @@ -650,7 +731,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu | |||
| 650 | 731 | ||
| 651 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, | 732 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, |
| 652 | VAddr src_addr) { | 733 | VAddr src_addr) { |
| 653 | KScopedLightLock lk(m_general_lock); | 734 | // Acquire the table locks. |
| 735 | KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock); | ||
| 654 | 736 | ||
| 655 | const size_t num_pages{size / PageSize}; | 737 | const size_t num_pages{size / PageSize}; |
| 656 | 738 | ||
| @@ -686,9 +768,753 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s | |||
| 686 | R_SUCCEED(); | 768 | R_SUCCEED(); |
| 687 | } | 769 | } |
| 688 | 770 | ||
| 771 | Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 772 | VAddr address, size_t size, KMemoryPermission test_perm, | ||
| 773 | KMemoryState dst_state) { | ||
| 774 | // Validate pre-conditions. | ||
| 775 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 776 | ASSERT(test_perm == KMemoryPermission::UserReadWrite || | ||
| 777 | test_perm == KMemoryPermission::UserRead); | ||
| 778 | |||
| 779 | // Check that the address is in range. | ||
| 780 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 781 | |||
| 782 | // Get the source permission. | ||
| 783 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 784 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 785 | : KMemoryPermission::UserRead; | ||
| 786 | |||
| 787 | // Get aligned extents. | ||
| 788 | const VAddr aligned_src_start = Common::AlignDown((address), PageSize); | ||
| 789 | const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize); | ||
| 790 | const VAddr mapping_src_start = Common::AlignUp((address), PageSize); | ||
| 791 | const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize); | ||
| 792 | |||
| 793 | const auto aligned_src_last = aligned_src_end - 1; | ||
| 794 | const auto mapping_src_last = mapping_src_end - 1; | ||
| 795 | |||
| 796 | // Get the test state and attribute mask. | ||
| 797 | KMemoryState test_state; | ||
| 798 | KMemoryAttribute test_attr_mask; | ||
| 799 | switch (dst_state) { | ||
| 800 | case KMemoryState::Ipc: | ||
| 801 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 802 | test_attr_mask = | ||
| 803 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 804 | break; | ||
| 805 | case KMemoryState::NonSecureIpc: | ||
| 806 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 807 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 808 | break; | ||
| 809 | case KMemoryState::NonDeviceIpc: | ||
| 810 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 811 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 812 | break; | ||
| 813 | default: | ||
| 814 | R_THROW(ResultInvalidCombination); | ||
| 815 | } | ||
| 816 | |||
| 817 | // Ensure that on failure, we roll back appropriately. | ||
| 818 | size_t mapped_size = 0; | ||
| 819 | ON_RESULT_FAILURE { | ||
| 820 | if (mapped_size > 0) { | ||
| 821 | this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, | ||
| 822 | src_perm); | ||
| 823 | } | ||
| 824 | }; | ||
| 825 | |||
| 826 | size_t blocks_needed = 0; | ||
| 827 | |||
| 828 | // Iterate, mapping as needed. | ||
| 829 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); | ||
| 830 | while (true) { | ||
| 831 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 832 | |||
| 833 | // Validate the current block. | ||
| 834 | R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, | ||
| 835 | test_attr_mask, KMemoryAttribute::None)); | ||
| 836 | |||
| 837 | if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() && | ||
| 838 | info.GetAddress() < (mapping_src_end)) { | ||
| 839 | const auto cur_start = | ||
| 840 | info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start); | ||
| 841 | const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() | ||
| 842 | : (mapping_src_end); | ||
| 843 | const size_t cur_size = cur_end - cur_start; | ||
| 844 | |||
| 845 | if (info.GetAddress() < (mapping_src_start)) { | ||
| 846 | ++blocks_needed; | ||
| 847 | } | ||
| 848 | if (mapping_src_last < info.GetLastAddress()) { | ||
| 849 | ++blocks_needed; | ||
| 850 | } | ||
| 851 | |||
| 852 | // Set the permissions on the block, if we need to. | ||
| 853 | if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { | ||
| 854 | R_TRY(Operate(cur_start, cur_size / PageSize, src_perm, | ||
| 855 | OperationType::ChangePermissions)); | ||
| 856 | } | ||
| 857 | |||
| 858 | // Note that we mapped this part. | ||
| 859 | mapped_size += cur_size; | ||
| 860 | } | ||
| 861 | |||
| 862 | // If the block is at the end, we're done. | ||
| 863 | if (aligned_src_last <= info.GetLastAddress()) { | ||
| 864 | break; | ||
| 865 | } | ||
| 866 | |||
| 867 | // Advance. | ||
| 868 | ++it; | ||
| 869 | ASSERT(it != m_memory_block_manager.end()); | ||
| 870 | } | ||
| 871 | |||
| 872 | if (out_blocks_needed != nullptr) { | ||
| 873 | ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 874 | *out_blocks_needed = blocks_needed; | ||
| 875 | } | ||
| 876 | |||
| 877 | R_SUCCEED(); | ||
| 878 | } | ||
| 879 | |||
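SetupForIpcClient reasons about two nested extents: the aligned extent (AlignDown/AlignUp) covers every page the buffer touches, while the mapping extent (AlignUp/AlignDown) covers only pages the buffer owns completely; the edge pages in between are the ones later staged through freshly allocated partial pages. A worked example with hypothetical numbers:

#include <cstdio>

constexpr unsigned long long PageSize = 0x1000;

constexpr unsigned long long AlignDown(unsigned long long v, unsigned long long a) {
    return v & ~(a - 1);
}
constexpr unsigned long long AlignUp(unsigned long long v, unsigned long long a) {
    return AlignDown(v + a - 1, a);
}

int main() {
    // Hypothetical IPC buffer: 0x2000 bytes starting mid-page.
    const unsigned long long address = 0x10000800, size = 0x2000;

    const auto aligned_start = AlignDown(address, PageSize);      // 0x10000000
    const auto aligned_end = AlignUp(address + size, PageSize);   // 0x10003000
    const auto mapping_start = AlignUp(address, PageSize);        // 0x10001000
    const auto mapping_end = AlignDown(address + size, PageSize); // 0x10002000

    // Pages in [aligned_start, mapping_start) and [mapping_end, aligned_end)
    // are only partially covered by the buffer and get the copy treatment.
    std::printf("aligned: [%#llx, %#llx)  mapping: [%#llx, %#llx)\n",
                aligned_start, aligned_end, mapping_start, mapping_end);
    return 0;
}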
| 880 | Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, | ||
| 881 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 882 | KPageTable& src_page_table, bool send) { | ||
| 883 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 884 | ASSERT(src_page_table.IsLockedByCurrentThread()); | ||
| 885 | |||
| 886 | // Check that we can theoretically map. | ||
| 887 | const VAddr region_start = m_alias_region_start; | ||
| 888 | const size_t region_size = m_alias_region_end - m_alias_region_start; | ||
| 889 | R_UNLESS(size < region_size, ResultOutOfAddressSpace); | ||
| 890 | |||
| 891 | // Get aligned source extents. | ||
| 892 | const VAddr src_start = src_addr; | ||
| 893 | const VAddr src_end = src_addr + size; | ||
| 894 | const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize); | ||
| 895 | const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize); | ||
| 896 | const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize); | ||
| 897 | const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize); | ||
| 898 | const size_t aligned_src_size = aligned_src_end - aligned_src_start; | ||
| 899 | const size_t mapping_src_size = | ||
| 900 | (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; | ||
| 901 | |||
| 902 | // Select a random address to map at. | ||
| 903 | VAddr dst_addr = | ||
| 904 | this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, | ||
| 905 | PageSize, 0, this->GetNumGuardPages()); | ||
| 906 | |||
| 907 | R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); | ||
| 908 | |||
| 909 | // Check that we can perform the operation we're about to perform. | ||
| 910 | ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); | ||
| 911 | |||
| 912 | // Create an update allocator. | ||
| 913 | Result allocator_result; | ||
| 914 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 915 | m_memory_block_slab_manager); | ||
| 916 | R_TRY(allocator_result); | ||
| 917 | |||
| 918 | // We're going to perform an update, so create a helper. | ||
| 919 | KScopedPageTableUpdater updater(this); | ||
| 920 | |||
| 921 | // Reserve space for any partial pages we allocate. | ||
| 922 | const size_t unmapped_size = aligned_src_size - mapping_src_size; | ||
| 923 | KScopedResourceReservation memory_reservation( | ||
| 924 | m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size); | ||
| 925 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 926 | |||
| 927 | // Ensure that we manage page references correctly. | ||
| 928 | PAddr start_partial_page = 0; | ||
| 929 | PAddr end_partial_page = 0; | ||
| 930 | VAddr cur_mapped_addr = dst_addr; | ||
| 931 | |||
| 932 | // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll | ||
| 933 | // be freed on scope exit. | ||
| 934 | SCOPE_EXIT({ | ||
| 935 | if (start_partial_page != 0) { | ||
| 936 | m_system.Kernel().MemoryManager().Close(start_partial_page, 1); | ||
| 937 | } | ||
| 938 | if (end_partial_page != 0) { | ||
| 939 | m_system.Kernel().MemoryManager().Close(end_partial_page, 1); | ||
| 940 | } | ||
| 941 | }); | ||
| 942 | |||
| 943 | ON_RESULT_FAILURE { | ||
| 944 | if (cur_mapped_addr != dst_addr) { | ||
| 945 | // HACK: Manually close the pages. | ||
| 946 | HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize); | ||
| 947 | |||
| 948 | ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize, | ||
| 949 | KMemoryPermission::None, OperationType::Unmap) | ||
| 950 | .IsSuccess()); | ||
| 951 | } | ||
| 952 | }; | ||
| 953 | |||
| 954 | // Allocate the start page as needed. | ||
| 955 | if (aligned_src_start < mapping_src_start) { | ||
| 956 | start_partial_page = | ||
| 957 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 958 | R_UNLESS(start_partial_page != 0, ResultOutOfMemory); | ||
| 959 | } | ||
| 960 | |||
| 961 | // Allocate the end page as needed. | ||
| 962 | if (mapping_src_end < aligned_src_end && | ||
| 963 | (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { | ||
| 964 | end_partial_page = | ||
| 965 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 966 | R_UNLESS(end_partial_page != 0, ResultOutOfMemory); | ||
| 967 | } | ||
| 968 | |||
| 969 | // Get the implementation. | ||
| 970 | auto& src_impl = src_page_table.PageTableImpl(); | ||
| 971 | |||
| 972 | // Get the fill value for partial pages. | ||
| 973 | const auto fill_val = m_ipc_fill_value; | ||
| 974 | |||
| 975 | // Begin traversal. | ||
| 976 | Common::PageTable::TraversalContext context; | ||
| 977 | Common::PageTable::TraversalEntry next_entry; | ||
| 978 | bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start); | ||
| 979 | ASSERT(traverse_valid); | ||
| 980 | |||
| 981 | // Prepare tracking variables. | ||
| 982 | PAddr cur_block_addr = next_entry.phys_addr; | ||
| 983 | size_t cur_block_size = | ||
| 984 | next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1)); | ||
| 985 | size_t tot_block_size = cur_block_size; | ||
| 986 | |||
| 987 | // Map the start page, if we have one. | ||
| 988 | if (start_partial_page != 0) { | ||
| 989 | // Ensure the page holds correct data. | ||
| 990 | const VAddr start_partial_virt = | ||
| 991 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page); | ||
| 992 | if (send) { | ||
| 993 | const size_t partial_offset = src_start - aligned_src_start; | ||
| 994 | size_t copy_size, clear_size; | ||
| 995 | if (src_end < mapping_src_start) { | ||
| 996 | copy_size = size; | ||
| 997 | clear_size = mapping_src_start - src_end; | ||
| 998 | } else { | ||
| 999 | copy_size = mapping_src_start - src_start; | ||
| 1000 | clear_size = 0; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, | ||
| 1004 | partial_offset); | ||
| 1005 | std::memcpy( | ||
| 1006 | m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset), | ||
| 1007 | m_system.Memory().GetPointer<void>( | ||
| 1008 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) + | ||
| 1009 | partial_offset), | ||
| 1010 | copy_size); | ||
| 1011 | if (clear_size > 0) { | ||
| 1012 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset + | ||
| 1013 | copy_size), | ||
| 1014 | fill_val, clear_size); | ||
| 1015 | } | ||
| 1016 | } else { | ||
| 1017 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize); | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | // Map the page. | ||
| 1021 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page)); | ||
| 1022 | |||
| 1023 | // HACK: Manually open the pages. | ||
| 1024 | HACK_OpenPages(start_partial_page, 1); | ||
| 1025 | |||
| 1026 | // Update tracking extents. | ||
| 1027 | cur_mapped_addr += PageSize; | ||
| 1028 | cur_block_addr += PageSize; | ||
| 1029 | cur_block_size -= PageSize; | ||
| 1030 | |||
| 1031 | // If the block's size was one page, we may need to continue traversal. | ||
| 1032 | if (cur_block_size == 0 && aligned_src_size > PageSize) { | ||
| 1033 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1034 | ASSERT(traverse_valid); | ||
| 1035 | |||
| 1036 | cur_block_addr = next_entry.phys_addr; | ||
| 1037 | cur_block_size = next_entry.block_size; | ||
| 1038 | tot_block_size += next_entry.block_size; | ||
| 1039 | } | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | // Map the remaining pages. | ||
| 1043 | while (aligned_src_start + tot_block_size < mapping_src_end) { | ||
| 1044 | // Continue the traversal. | ||
| 1045 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1046 | ASSERT(traverse_valid); | ||
| 1047 | |||
| 1048 | // Process the block. | ||
| 1049 | if (next_entry.phys_addr != cur_block_addr + cur_block_size) { | ||
| 1050 | // Map the block we've been processing so far. | ||
| 1051 | R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1052 | cur_block_addr)); | ||
| 1053 | |||
| 1054 | // HACK: Manually open the pages. | ||
| 1055 | HACK_OpenPages(cur_block_addr, cur_block_size / PageSize); | ||
| 1056 | |||
| 1057 | // Update tracking extents. | ||
| 1058 | cur_mapped_addr += cur_block_size; | ||
| 1059 | cur_block_addr = next_entry.phys_addr; | ||
| 1060 | cur_block_size = next_entry.block_size; | ||
| 1061 | } else { | ||
| 1062 | cur_block_size += next_entry.block_size; | ||
| 1063 | } | ||
| 1064 | tot_block_size += next_entry.block_size; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | // Handle the last direct-mapped page. | ||
| 1068 | if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; | ||
| 1069 | mapped_block_end < mapping_src_end) { | ||
| 1070 | const size_t last_block_size = mapping_src_end - mapped_block_end; | ||
| 1071 | |||
| 1072 | // Map the last block. | ||
| 1073 | R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1074 | cur_block_addr)); | ||
| 1075 | |||
| 1076 | // HACK: Manually open the pages. | ||
| 1077 | HACK_OpenPages(cur_block_addr, last_block_size / PageSize); | ||
| 1078 | |||
| 1079 | // Update tracking extents. | ||
| 1080 | cur_mapped_addr += last_block_size; | ||
| 1081 | cur_block_addr += last_block_size; | ||
| 1082 | if (mapped_block_end + cur_block_size < aligned_src_end && | ||
| 1083 | cur_block_size == last_block_size) { | ||
| 1084 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1085 | ASSERT(traverse_valid); | ||
| 1086 | |||
| 1087 | cur_block_addr = next_entry.phys_addr; | ||
| 1088 | } | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | // Map the end page, if we have one. | ||
| 1092 | if (end_partial_page != 0) { | ||
| 1093 | // Ensure the page holds correct data. | ||
| 1094 | const VAddr end_partial_virt = | ||
| 1095 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page); | ||
| 1096 | if (send) { | ||
| 1097 | const size_t copy_size = src_end - mapping_src_end; | ||
| 1098 | std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt), | ||
| 1099 | m_system.Memory().GetPointer<void>(GetHeapVirtualAddress( | ||
| 1100 | m_system.Kernel().MemoryLayout(), cur_block_addr)), | ||
| 1101 | copy_size); | ||
| 1102 | std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val, | ||
| 1103 | PageSize - copy_size); | ||
| 1104 | } else { | ||
| 1105 | std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize); | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | // Map the page. | ||
| 1109 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page)); | ||
| 1110 | |||
| 1111 | // HACK: Manually open the pages. | ||
| 1112 | HACK_OpenPages(end_partial_page, 1); | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | // Update memory blocks to reflect our changes | ||
| 1116 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, | ||
| 1117 | dst_state, test_perm, KMemoryAttribute::None, | ||
| 1118 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1119 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1120 | |||
| 1121 | // Set the output address. | ||
| 1122 | *out_addr = dst_addr + (src_start - aligned_src_start); | ||
| 1123 | |||
| 1124 | // We succeeded. | ||
| 1125 | memory_reservation.Commit(); | ||
| 1126 | R_SUCCEED(); | ||
| 1127 | } | ||
| 1128 | |||
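For a send mapping, the start partial page is assembled from three ranges: fill bytes before the payload begins, the copied slice of the source buffer, and fill bytes after the payload when the buffer ends before the first fully-mapped page. A standalone sketch of that assembly for a single page; the function and parameter names are hypothetical:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstring>

constexpr std::size_t PageSize = 0x1000;

// Build the contents of a start partial page: bytes before the buffer begins
// are set to fill_val, the in-page slice of the source buffer is copied, and
// any in-page tail past the buffer's end is filled again.
void BuildStartPartialPage(std::array<unsigned char, PageSize>& page,
                           const unsigned char* src_page, std::size_t buf_offset,
                           std::size_t buf_size, unsigned char fill_val) {
    const std::size_t copy_size = std::min(buf_size, PageSize - buf_offset);
    std::memset(page.data(), fill_val, buf_offset);
    std::memcpy(page.data() + buf_offset, src_page + buf_offset, copy_size);
    std::memset(page.data() + buf_offset + copy_size, fill_val,
                PageSize - buf_offset - copy_size);
}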
| 1129 | Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, | ||
| 1130 | KPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 1131 | KMemoryState dst_state, bool send) { | ||
| 1132 | // For convenience, alias this. | ||
| 1133 | KPageTable& dst_page_table = *this; | ||
| 1134 | |||
| 1135 | // Acquire the table locks. | ||
| 1136 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 1137 | |||
| 1138 | // We're going to perform an update, so create a helper. | ||
| 1139 | KScopedPageTableUpdater updater(std::addressof(src_page_table)); | ||
| 1140 | |||
| 1141 | // Perform client setup. | ||
| 1142 | size_t num_allocator_blocks; | ||
| 1143 | R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), | ||
| 1144 | std::addressof(num_allocator_blocks), src_addr, size, | ||
| 1145 | test_perm, dst_state)); | ||
| 1146 | |||
| 1147 | // Create an update allocator. | ||
| 1148 | Result allocator_result; | ||
| 1149 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1150 | src_page_table.m_memory_block_slab_manager, | ||
| 1151 | num_allocator_blocks); | ||
| 1152 | R_TRY(allocator_result); | ||
| 1153 | |||
| 1154 | // Get the mapped extents. | ||
| 1155 | const VAddr src_map_start = Common::AlignUp((src_addr), PageSize); | ||
| 1156 | const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize); | ||
| 1157 | const size_t src_map_size = src_map_end - src_map_start; | ||
| 1158 | |||
| 1159 | // Ensure that we clean up appropriately if we fail after this. | ||
| 1160 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 1161 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 1162 | : KMemoryPermission::UserRead; | ||
| 1163 | ON_RESULT_FAILURE { | ||
| 1164 | if (src_map_end > src_map_start) { | ||
| 1165 | src_page_table.CleanupForIpcClientOnServerSetupFailure( | ||
| 1166 | updater.GetPageList(), src_map_start, src_map_size, src_perm); | ||
| 1167 | } | ||
| 1168 | }; | ||
| 1169 | |||
| 1170 | // Perform server setup. | ||
| 1171 | R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, | ||
| 1172 | src_page_table, send)); | ||
| 1173 | |||
| 1174 | // If anything was mapped, ipc-lock the pages. | ||
| 1175 | if (src_map_start < src_map_end) { | ||
| 1176 | // Get the source permission. | ||
| 1177 | src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, | ||
| 1178 | (src_map_end - src_map_start) / PageSize, | ||
| 1179 | &KMemoryBlock::LockForIpc, src_perm); | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | R_SUCCEED(); | ||
| 1183 | } | ||
| 1184 | |||
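The ON_RESULT_FAILURE block is what lets a failed server-side setup automatically revert the client-side permission changes before SetupForIpc returns. The idiom generalizes to a small dismissible scope guard; this is an illustration of the pattern, not yuzu's actual macro machinery:

#include <utility>

// Runs a callable on scope exit unless dismissed; dismissal plays the role of
// reaching R_SUCCEED() before ON_RESULT_FAILURE's body would fire.
template <typename F>
class FailureGuard {
public:
    explicit FailureGuard(F f) : m_f{std::move(f)} {}
    ~FailureGuard() {
        if (m_armed) {
            m_f();
        }
    }
    void Dismiss() {
        m_armed = false;
    }
    FailureGuard(const FailureGuard&) = delete;
    FailureGuard& operator=(const FailureGuard&) = delete;

private:
    F m_f;
    bool m_armed{true};
};

// Usage sketch (RollbackClientPermissions is a hypothetical cleanup routine):
//   FailureGuard cleanup{[&] { RollbackClientPermissions(); }};
//   ... perform the server mapping; return early on error ...
//   cleanup.Dismiss(); // success path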
| 1185 | Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) { | ||
| 1186 | // Validate the address. | ||
| 1187 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1188 | |||
| 1189 | // Lock the table. | ||
| 1190 | KScopedLightLock lk(m_general_lock); | ||
| 1191 | |||
| 1192 | // Validate the memory state. | ||
| 1193 | size_t num_allocator_blocks; | ||
| 1194 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1195 | KMemoryState::All, dst_state, KMemoryPermission::UserRead, | ||
| 1196 | KMemoryPermission::UserRead, KMemoryAttribute::All, | ||
| 1197 | KMemoryAttribute::None)); | ||
| 1198 | |||
| 1199 | // Create an update allocator. | ||
| 1200 | Result allocator_result; | ||
| 1201 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1202 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1203 | R_TRY(allocator_result); | ||
| 1204 | |||
| 1205 | // We're going to perform an update, so create a helper. | ||
| 1206 | KScopedPageTableUpdater updater(this); | ||
| 1207 | |||
| 1208 | // Get aligned extents. | ||
| 1209 | const VAddr aligned_start = Common::AlignDown((address), PageSize); | ||
| 1210 | const VAddr aligned_end = Common::AlignUp((address) + size, PageSize); | ||
| 1211 | const size_t aligned_size = aligned_end - aligned_start; | ||
| 1212 | const size_t aligned_num_pages = aligned_size / PageSize; | ||
| 1213 | |||
| 1214 | // HACK: Manually close the pages. | ||
| 1215 | HACK_ClosePages(aligned_start, aligned_num_pages); | ||
| 1216 | |||
| 1217 | // Unmap the pages. | ||
| 1218 | R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 1219 | |||
| 1220 | // Update memory blocks. | ||
| 1221 | m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, | ||
| 1222 | KMemoryState::None, KMemoryPermission::None, | ||
| 1223 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1224 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1225 | |||
| 1226 | // Release from the resource limit as relevant. | ||
| 1227 | const VAddr mapping_start = Common::AlignUp((address), PageSize); | ||
| 1228 | const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); | ||
| 1229 | const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; | ||
| 1230 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size); | ||
| 1231 | |||
| 1232 | R_SUCCEED(); | ||
| 1233 | } | ||
| 1234 | |||
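The closing Release returns exactly the partial-page overhead reserved in SetupForIpcServer: aligned_size counts every touched page and mapping_size only the fully-covered ones, so the difference is the one or two staged edge pages. Continuing the hypothetical extents from the earlier example:

#include <cstddef>

// address 0x10000800, size 0x2000: the aligned extent spans three pages and
// the mapping extent one, so two staged partial pages are released back to
// the resource limit on cleanup.
constexpr std::size_t aligned_size = 0x10003000 - 0x10000000; // 0x3000
constexpr std::size_t mapping_size = 0x10002000 - 0x10001000; // 0x1000
static_assert(aligned_size - mapping_size == 2 * 0x1000);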
| 1235 | Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) { | ||
| 1236 | // Validate the address. | ||
| 1237 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1238 | |||
| 1239 | // Get aligned source extents. | ||
| 1240 | const VAddr mapping_start = Common::AlignUp((address), PageSize); | ||
| 1241 | const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); | ||
| 1242 | const VAddr mapping_last = mapping_end - 1; | ||
| 1243 | const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; | ||
| 1244 | |||
| 1245 | // If nothing was mapped, we're actually done immediately. | ||
| 1246 | R_SUCCEED_IF(mapping_size == 0); | ||
| 1247 | |||
| 1248 | // Get the test state and attribute mask. | ||
| 1249 | KMemoryState test_state; | ||
| 1250 | KMemoryAttribute test_attr_mask; | ||
| 1251 | switch (dst_state) { | ||
| 1252 | case KMemoryState::Ipc: | ||
| 1253 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 1254 | test_attr_mask = | ||
| 1255 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 1256 | break; | ||
| 1257 | case KMemoryState::NonSecureIpc: | ||
| 1258 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 1259 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1260 | break; | ||
| 1261 | case KMemoryState::NonDeviceIpc: | ||
| 1262 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 1263 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1264 | break; | ||
| 1265 | default: | ||
| 1266 | R_THROW(ResultInvalidCombination); | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | // Lock the table. | ||
| 1270 | // NOTE: Nintendo does this *after* creating the updater below, but this does not follow | ||
| 1271 | // convention elsewhere in KPageTable. | ||
| 1272 | KScopedLightLock lk(m_general_lock); | ||
| 1273 | |||
| 1274 | // We're going to perform an update, so create a helper. | ||
| 1275 | KScopedPageTableUpdater updater(this); | ||
| 1276 | |||
| 1277 | // Ensure that on failure, we roll back appropriately. | ||
| 1278 | size_t mapped_size = 0; | ||
| 1279 | ON_RESULT_FAILURE { | ||
| 1280 | if (mapped_size > 0) { | ||
| 1281 | // Determine where the mapping ends. | ||
| 1282 | const auto mapped_end = (mapping_start) + mapped_size; | ||
| 1283 | const auto mapped_last = mapped_end - 1; | ||
| 1284 | |||
| 1285 | // Get current and next iterators. | ||
| 1286 | KMemoryBlockManager::const_iterator start_it = | ||
| 1287 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1288 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1289 | ++next_it; | ||
| 1290 | |||
| 1291 | // Get the current block info. | ||
| 1292 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1293 | |||
| 1294 | // Create tracking variables. | ||
| 1295 | VAddr cur_address = cur_info.GetAddress(); | ||
| 1296 | size_t cur_size = cur_info.GetSize(); | ||
| 1297 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1298 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1299 | bool first = | ||
| 1300 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1301 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1302 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1303 | |||
| 1304 | while (((cur_address) + cur_size - 1) < mapped_last) { | ||
| 1305 | // Check that we have a next block. | ||
| 1306 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1307 | |||
| 1308 | // Get the next info. | ||
| 1309 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1310 | |||
| 1311 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1312 | |||
| 1313 | const bool next_perm_eq = | ||
| 1314 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1315 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1316 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1317 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1318 | // We can consolidate the reprotection for the current and next block into a | ||
| 1319 | // single call. | ||
| 1320 | cur_size += next_info.GetSize(); | ||
| 1321 | } else { | ||
| 1322 | // We have to operate on the current block. | ||
| 1323 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1324 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1325 | OperationType::ChangePermissions) | ||
| 1326 | .IsSuccess()); | ||
| 1327 | } | ||
| 1328 | |||
| 1329 | // Advance. | ||
| 1330 | cur_address = next_info.GetAddress(); | ||
| 1331 | cur_size = next_info.GetSize(); | ||
| 1332 | first = false; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | // Advance. | ||
| 1336 | cur_info = next_info; | ||
| 1337 | cur_perm_eq = next_perm_eq; | ||
| 1338 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1339 | ++next_it; | ||
| 1340 | } | ||
| 1341 | |||
| 1342 | // Process the last block. | ||
| 1343 | if ((first || cur_needs_set_perm) && !cur_perm_eq) { | ||
| 1344 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1345 | OperationType::ChangePermissions) | ||
| 1346 | .IsSuccess()); | ||
| 1347 | } | ||
| 1348 | } | ||
| 1349 | }; | ||
| 1350 | |||
| 1351 | // Iterate, reprotecting as needed. | ||
| 1352 | { | ||
| 1353 | // Get current and next iterators. | ||
| 1354 | KMemoryBlockManager::const_iterator start_it = | ||
| 1355 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1356 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1357 | ++next_it; | ||
| 1358 | |||
| 1359 | // Validate the current block. | ||
| 1360 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1361 | ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None, | ||
| 1362 | KMemoryPermission::None, | ||
| 1363 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1364 | KMemoryAttribute::IpcLocked) | ||
| 1365 | .IsSuccess()); | ||
| 1366 | |||
| 1367 | // Create tracking variables. | ||
| 1368 | VAddr cur_address = cur_info.GetAddress(); | ||
| 1369 | size_t cur_size = cur_info.GetSize(); | ||
| 1370 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1371 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1372 | bool first = | ||
| 1373 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1374 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1375 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1376 | |||
| 1377 | while ((cur_address + cur_size - 1) < mapping_last) { | ||
| 1378 | // Check that we have a next block. | ||
| 1379 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1380 | |||
| 1381 | // Get the next info. | ||
| 1382 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1383 | |||
| 1384 | // Validate the next block. | ||
| 1385 | ASSERT(this->CheckMemoryState(next_info, test_state, test_state, | ||
| 1386 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1387 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1388 | KMemoryAttribute::IpcLocked) | ||
| 1389 | .IsSuccess()); | ||
| 1390 | |||
| 1391 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1392 | const bool next_perm_eq = | ||
| 1393 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1394 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1395 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1396 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1397 | // We can consolidate the reprotection for the current and next block into a single | ||
| 1398 | // call. | ||
| 1399 | cur_size += next_info.GetSize(); | ||
| 1400 | } else { | ||
| 1401 | // We have to operate on the current block. | ||
| 1402 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1403 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1404 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1405 | : cur_info.GetPermission(), | ||
| 1406 | OperationType::ChangePermissions)); | ||
| 1407 | } | ||
| 1408 | |||
| 1409 | // Mark that we mapped the block. | ||
| 1410 | mapped_size += cur_size; | ||
| 1411 | |||
| 1412 | // Advance. | ||
| 1413 | cur_address = next_info.GetAddress(); | ||
| 1414 | cur_size = next_info.GetSize(); | ||
| 1415 | first = false; | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | // Advance. | ||
| 1419 | cur_info = next_info; | ||
| 1420 | cur_perm_eq = next_perm_eq; | ||
| 1421 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1422 | ++next_it; | ||
| 1423 | } | ||
| 1424 | |||
| 1425 | // Process the last block. | ||
| 1426 | const auto lock_count = | ||
| 1427 | cur_info.GetIpcLockCount() + | ||
| 1428 | (next_it != m_memory_block_manager.end() | ||
| 1429 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 1430 | : 0); | ||
| 1431 | if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { | ||
| 1432 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1433 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1434 | : cur_info.GetPermission(), | ||
| 1435 | OperationType::ChangePermissions)); | ||
| 1436 | } | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | // Create an update allocator. | ||
| 1440 | // NOTE: Guaranteed zero blocks needed here. | ||
| 1441 | Result allocator_result; | ||
| 1442 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1443 | m_memory_block_slab_manager, 0); | ||
| 1444 | R_TRY(allocator_result); | ||
| 1445 | |||
| 1446 | // Unlock the pages. | ||
| 1447 | m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, | ||
| 1448 | mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, | ||
| 1449 | KMemoryPermission::None); | ||
| 1450 | |||
| 1451 | R_SUCCEED(); | ||
| 1452 | } | ||
| 1453 | |||
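Both the rollback handler and the main loop in CleanupForIpcClient are run-length consolidations: adjacent blocks whose permission state matches are folded into a single ChangePermissions operation. Stripped of the kernel types, the control flow reduces to the pattern below; Block and the reprotect callback are stand-ins:

#include <cstddef>
#include <vector>

struct Block {
    std::size_t address;
    std::size_t size;
    int key; // stands in for the (perm_eq, needs_set_perm, original perm) triple
};

// Issue one reprotect call per maximal run of blocks sharing a key, instead of
// one call per block.
template <typename Reprotect>
void ConsolidateRuns(const std::vector<Block>& blocks, Reprotect reprotect) {
    if (blocks.empty()) {
        return;
    }
    std::size_t run_address = blocks[0].address;
    std::size_t run_size = blocks[0].size;
    int run_key = blocks[0].key;
    for (std::size_t i = 1; i < blocks.size(); ++i) {
        if (blocks[i].key == run_key) {
            run_size += blocks[i].size; // extend the current run
        } else {
            reprotect(run_address, run_size, run_key);
            run_address = blocks[i].address;
            run_size = blocks[i].size;
            run_key = blocks[i].key;
        }
    }
    reprotect(run_address, run_size, run_key); // flush the final run
}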
| 1454 | void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list, | ||
| 1455 | VAddr address, size_t size, | ||
| 1456 | KMemoryPermission prot_perm) { | ||
| 1457 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1458 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 1459 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 1460 | |||
| 1461 | // Get the mapped extents. | ||
| 1462 | const VAddr src_map_start = address; | ||
| 1463 | const VAddr src_map_end = address + size; | ||
| 1464 | const VAddr src_map_last = src_map_end - 1; | ||
| 1465 | |||
| 1466 | // This function is only invoked when there's something to do. | ||
| 1467 | ASSERT(src_map_end > src_map_start); | ||
| 1468 | |||
| 1469 | // Iterate over blocks, fixing permissions. | ||
| 1470 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); | ||
| 1471 | while (true) { | ||
| 1472 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1473 | |||
| 1474 | const auto cur_start = | ||
| 1475 | info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start; | ||
| 1476 | const auto cur_end = | ||
| 1477 | src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); | ||
| 1478 | |||
| 1479 | // If we can, fix the protections on the block. | ||
| 1480 | if ((info.GetIpcLockCount() == 0 && | ||
| 1481 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || | ||
| 1482 | (info.GetIpcLockCount() != 0 && | ||
| 1483 | (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { | ||
| 1484 | // Check if we actually need to fix the protections on the block. | ||
| 1485 | if (cur_end == src_map_end || info.GetAddress() <= src_map_start || | ||
| 1486 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { | ||
| 1487 | ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(), | ||
| 1488 | OperationType::ChangePermissions) | ||
| 1489 | .IsSuccess()); | ||
| 1490 | } | ||
| 1491 | } | ||
| 1492 | |||
| 1493 | // If we're past the end of the region, we're done. | ||
| 1494 | if (src_map_last <= info.GetLastAddress()) { | ||
| 1495 | break; | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | // Advance. | ||
| 1499 | ++it; | ||
| 1500 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1501 | } | ||
| 1502 | } | ||
| 1503 | |||
| 1504 | void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) { | ||
| 1505 | m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages); | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) { | ||
| 1509 | for (size_t index = 0; index < num_pages; ++index) { | ||
| 1510 | const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize)); | ||
| 1511 | m_system.Kernel().MemoryManager().Close(paddr, 1); | ||
| 1512 | } | ||
| 1513 | } | ||
| 1514 | |||
| 689 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | 1515 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { |
| 690 | // Lock the physical memory lock. | 1516 | // Lock the physical memory lock. |
| 691 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | 1517 | KScopedLightLock phys_lk(m_map_physical_memory_lock); |
| 692 | 1518 | ||
| 693 | // Calculate the last address for convenience. | 1519 | // Calculate the last address for convenience. |
| 694 | const VAddr last_address = address + size - 1; | 1520 | const VAddr last_address = address + size - 1; |
| @@ -742,15 +1568,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 742 | { | 1568 | { |
| 743 | // Reserve the memory from the process resource limit. | 1569 | // Reserve the memory from the process resource limit. |
| 744 | KScopedResourceReservation memory_reservation( | 1570 | KScopedResourceReservation memory_reservation( |
| 745 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), | 1571 | m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size); |
| 746 | LimitableResource::PhysicalMemory, size - mapped_size); | ||
| 747 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1572 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 748 | 1573 | ||
| 749 | // Allocate pages for the new memory. | 1574 | // Allocate pages for the new memory. |
| 750 | KPageGroup pg; | 1575 | KPageGroup pg; |
| 751 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 1576 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 752 | &pg, (size - mapped_size) / PageSize, | 1577 | &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); |
| 753 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 1578 | |
| 1579 | // If we fail in the next bit (or retry), we need to clean up the pages. | ||
| 1580 | // auto pg_guard = SCOPE_GUARD { | ||
| 1581 | // pg.OpenFirst(); | ||
| 1582 | // pg.Close(); | ||
| 1583 | //}; | ||
| 754 | 1584 | ||
| 755 | // Map the memory. | 1585 | // Map the memory. |
| 756 | { | 1586 | { |
| @@ -810,15 +1640,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 810 | 1640 | ||
| 811 | // Create an update allocator. | 1641 | // Create an update allocator. |
| 812 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1642 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 813 | Result allocator_result{ResultSuccess}; | 1643 | Result allocator_result; |
| 814 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1644 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 815 | m_memory_block_slab_manager, | 1645 | m_memory_block_slab_manager, |
| 816 | num_allocator_blocks); | 1646 | num_allocator_blocks); |
| 817 | R_TRY(allocator_result); | 1647 | R_TRY(allocator_result); |
| 818 | 1648 | ||
| 1649 | // We're going to perform an update, so create a helper. | ||
| 1650 | KScopedPageTableUpdater updater(this); | ||
| 1651 | |||
| 1652 | // Prepare to iterate over the memory. | ||
| 1653 | auto pg_it = pg.Nodes().begin(); | ||
| 1654 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 1655 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1656 | |||
| 819 | // Reset the current tracking address, and make sure we clean up on failure. | 1657 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1658 | // pg_guard.Cancel(); | ||
| 820 | cur_address = address; | 1659 | cur_address = address; |
| 821 | auto unmap_guard = detail::ScopeExit([&] { | 1660 | ON_RESULT_FAILURE { |
| 822 | if (cur_address > address) { | 1661 | if (cur_address > address) { |
| 823 | const VAddr last_unmap_address = cur_address - 1; | 1662 | const VAddr last_unmap_address = cur_address - 1; |
| 824 | 1663 | ||
| @@ -841,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 841 | last_unmap_address + 1 - cur_address) / | 1680 | last_unmap_address + 1 - cur_address) / |
| 842 | PageSize; | 1681 | PageSize; |
| 843 | 1682 | ||
| 1683 | // HACK: Manually close the pages. | ||
| 1684 | HACK_ClosePages(cur_address, cur_pages); | ||
| 1685 | |||
| 844 | // Unmap. | 1686 | // Unmap. |
| 845 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | 1687 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, |
| 846 | OperationType::Unmap) | 1688 | OperationType::Unmap) |
| @@ -857,12 +1699,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 857 | ++it; | 1699 | ++it; |
| 858 | } | 1700 | } |
| 859 | } | 1701 | } |
| 860 | }); | ||
| 861 | 1702 | ||
| 862 | // Iterate over the memory. | 1703 | // Release any remaining unmapped memory. |
| 863 | auto pg_it = pg.Nodes().begin(); | 1704 | m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); |
| 864 | PAddr pg_phys_addr = pg_it->GetAddress(); | 1705 | m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); |
| 865 | size_t pg_pages = pg_it->GetNumPages(); | 1706 | for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) { |
| 1707 | m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 1708 | pg_it->GetNumPages()); | ||
| 1709 | m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), | ||
| 1710 | pg_it->GetNumPages()); | ||
| 1711 | } | ||
| 1712 | }; | ||
| 866 | 1713 | ||
| 867 | auto it = m_memory_block_manager.FindIterator(cur_address); | 1714 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 868 | while (true) { | 1715 | while (true) { |
| @@ -897,6 +1744,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 897 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, | 1744 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, |
| 898 | OperationType::Map, pg_phys_addr)); | 1745 | OperationType::Map, pg_phys_addr)); |
| 899 | 1746 | ||
| 1747 | // HACK: Manually open the pages. | ||
| 1748 | HACK_OpenPages(pg_phys_addr, cur_pages); | ||
| 1749 | |||
| 900 | // Advance. | 1750 | // Advance. |
| 901 | cur_address += cur_pages * PageSize; | 1751 | cur_address += cur_pages * PageSize; |
| 902 | map_pages -= cur_pages; | 1752 | map_pages -= cur_pages; |
| @@ -928,9 +1778,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 928 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | 1778 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, |
| 929 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); | 1779 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); |
| 930 | 1780 | ||
| 931 | // Cancel our guard. | ||
| 932 | unmap_guard.Cancel(); | ||
| 933 | |||
| 934 | R_SUCCEED(); | 1781 | R_SUCCEED(); |
| 935 | } | 1782 | } |
| 936 | } | 1783 | } |
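One subtlety of the reworked failure path in MapPhysicalMemory: pages that were allocated but never mapped hold no references yet, so cleanup gives each remaining page-group node exactly one reference via OpenFirst and immediately drops it via Close, returning the pages to the memory manager. A toy refcount model of why the open-then-close pair frees; the types and the free hook are hypothetical:

#include <cassert>

struct PageRun {
    int refcount{0};
    bool freed{false};
};

// First reference taken on an allocated-but-unmapped run.
void OpenFirst(PageRun& run) {
    assert(run.refcount == 0);
    run.refcount = 1;
}

// Dropping the last reference hands the run back to the allocator.
void Close(PageRun& run) {
    if (--run.refcount == 0) {
        run.freed = true; // stand-in for returning pages to the page heap
    }
}

int main() {
    PageRun run;
    OpenFirst(run);
    Close(run);
    assert(run.freed);
    return 0;
}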
| @@ -939,7 +1786,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 939 | 1786 | ||
| 940 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | 1787 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { |
| 941 | // Lock the physical memory lock. | 1788 | // Lock the physical memory lock. |
| 942 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | 1789 | KScopedLightLock phys_lk(m_map_physical_memory_lock); |
| 943 | 1790 | ||
| 944 | // Lock the table. | 1791 | // Lock the table. |
| 945 | KScopedLightLock lk(m_general_lock); | 1792 | KScopedLightLock lk(m_general_lock); |
| @@ -948,8 +1795,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 948 | const VAddr last_address = address + size - 1; | 1795 | const VAddr last_address = address + size - 1; |
| 949 | 1796 | ||
| 950 | // Define iteration variables. | 1797 | // Define iteration variables. |
| 951 | VAddr cur_address = 0; | 1798 | VAddr map_start_address = 0; |
| 952 | size_t mapped_size = 0; | 1799 | VAddr map_last_address = 0; |
| 1800 | |||
| 1801 | VAddr cur_address; | ||
| 1802 | size_t mapped_size; | ||
| 953 | size_t num_allocator_blocks = 0; | 1803 | size_t num_allocator_blocks = 0; |
| 954 | 1804 | ||
| 955 | // Check if the memory is mapped. | 1805 | // Check if the memory is mapped. |
| @@ -975,27 +1825,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 975 | if (is_normal) { | 1825 | if (is_normal) { |
| 976 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | 1826 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); |
| 977 | 1827 | ||
| 1828 | if (map_start_address == 0) { | ||
| 1829 | map_start_address = cur_address; | ||
| 1830 | } | ||
| 1831 | map_last_address = | ||
| 1832 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 1833 | |||
| 978 | if (info.GetAddress() < address) { | 1834 | if (info.GetAddress() < address) { |
| 979 | ++num_allocator_blocks; | 1835 | ++num_allocator_blocks; |
| 980 | } | 1836 | } |
| 981 | if (last_address < info.GetLastAddress()) { | 1837 | if (last_address < info.GetLastAddress()) { |
| 982 | ++num_allocator_blocks; | 1838 | ++num_allocator_blocks; |
| 983 | } | 1839 | } |
| 1840 | |||
| 1841 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 984 | } | 1842 | } |
| 985 | 1843 | ||
| 986 | // Check if we're done. | 1844 | // Check if we're done. |
| 987 | if (last_address <= info.GetLastAddress()) { | 1845 | if (last_address <= info.GetLastAddress()) { |
| 988 | if (is_normal) { | ||
| 989 | mapped_size += (last_address + 1 - cur_address); | ||
| 990 | } | ||
| 991 | break; | 1846 | break; |
| 992 | } | 1847 | } |
| 993 | 1848 | ||
| 994 | // Track the memory if it's mapped. | ||
| 995 | if (is_normal) { | ||
| 996 | mapped_size += VAddr(info.GetEndAddress()) - cur_address; | ||
| 997 | } | ||
| 998 | |||
| 999 | // Advance. | 1849 | // Advance. |
| 1000 | cur_address = info.GetEndAddress(); | 1850 | cur_address = info.GetEndAddress(); |
| 1001 | ++it; | 1851 | ++it; |
| @@ -1005,125 +1855,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1005 | R_SUCCEED_IF(mapped_size == 0); | 1855 | R_SUCCEED_IF(mapped_size == 0); |
| 1006 | } | 1856 | } |
| 1007 | 1857 | ||
| 1008 | // Make a page group for the unmap region. | ||
| 1009 | KPageGroup pg; | ||
| 1010 | { | ||
| 1011 | auto& impl = this->PageTableImpl(); | ||
| 1012 | |||
| 1013 | // Begin traversal. | ||
| 1014 | Common::PageTable::TraversalContext context; | ||
| 1015 | Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 1016 | bool cur_valid = false; | ||
| 1017 | Common::PageTable::TraversalEntry next_entry; | ||
| 1018 | bool next_valid = false; | ||
| 1019 | size_t tot_size = 0; | ||
| 1020 | |||
| 1021 | cur_address = address; | ||
| 1022 | next_valid = impl.BeginTraversal(next_entry, context, cur_address); | ||
| 1023 | next_entry.block_size = | ||
| 1024 | (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1))); | ||
| 1025 | |||
| 1026 | // Iterate, building the group. | ||
| 1027 | while (true) { | ||
| 1028 | if ((!next_valid && !cur_valid) || | ||
| 1029 | (next_valid && cur_valid && | ||
| 1030 | next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { | ||
| 1031 | cur_entry.block_size += next_entry.block_size; | ||
| 1032 | } else { | ||
| 1033 | if (cur_valid) { | ||
| 1034 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1035 | R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize)); | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | // Update tracking variables. | ||
| 1039 | tot_size += cur_entry.block_size; | ||
| 1040 | cur_entry = next_entry; | ||
| 1041 | cur_valid = next_valid; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | if (cur_entry.block_size + tot_size >= size) { | ||
| 1045 | break; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | next_valid = impl.ContinueTraversal(next_entry, context); | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | // Add the last block. | ||
| 1052 | if (cur_valid) { | ||
| 1053 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1054 | R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize)); | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | ASSERT(pg.GetNumPages() == mapped_size / PageSize); | ||
| 1058 | |||
| 1059 | // Create an update allocator. | 1858 | // Create an update allocator. |
| 1060 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1859 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 1061 | Result allocator_result{ResultSuccess}; | 1860 | Result allocator_result; |
| 1062 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1861 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1063 | m_memory_block_slab_manager, num_allocator_blocks); | 1862 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1064 | R_TRY(allocator_result); | 1863 | R_TRY(allocator_result); |
| 1065 | 1864 | ||
| 1066 | // Reset the current tracking address, and make sure we clean up on failure. | 1865 | // We're going to perform an update, so create a helper. |
| 1067 | cur_address = address; | 1866 | KScopedPageTableUpdater updater(this); |
| 1068 | auto remap_guard = detail::ScopeExit([&] { | ||
| 1069 | if (cur_address > address) { | ||
| 1070 | const VAddr last_map_address = cur_address - 1; | ||
| 1071 | cur_address = address; | ||
| 1072 | |||
| 1073 | // Iterate over the memory we unmapped. | ||
| 1074 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1075 | auto pg_it = pg.Nodes().begin(); | ||
| 1076 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 1077 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1078 | |||
| 1079 | while (true) { | ||
| 1080 | // Get the memory info for the pages we unmapped, convert to property. | ||
| 1081 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1082 | |||
| 1083 | // If the memory is normal, we unmapped it and need to re-map it. | ||
| 1084 | if (info.GetState() == KMemoryState::Normal) { | ||
| 1085 | // Determine the range to map. | ||
| 1086 | size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, | ||
| 1087 | last_map_address + 1 - cur_address) / | ||
| 1088 | PageSize; | ||
| 1089 | |||
| 1090 | // While we have pages to map, map them. | ||
| 1091 | while (map_pages > 0) { | ||
| 1092 | // Check if we're at the end of the physical block. | ||
| 1093 | if (pg_pages == 0) { | ||
| 1094 | // Ensure there are more pages to map. | ||
| 1095 | ASSERT(pg_it != pg.Nodes().end()); | ||
| 1096 | |||
| 1097 | // Advance our physical block. | ||
| 1098 | ++pg_it; | ||
| 1099 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1100 | pg_pages = pg_it->GetNumPages(); | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | // Map whatever we can. | ||
| 1104 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 1105 | ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(), | ||
| 1106 | OperationType::Map, pg_phys_addr) == ResultSuccess); | ||
| 1107 | 1867 | ||
| 1108 | // Advance. | 1868 | // Separate the mapping. |
| 1109 | cur_address += cur_pages * PageSize; | 1869 | R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize, |
| 1110 | map_pages -= cur_pages; | 1870 | KMemoryPermission::None, OperationType::Separate)); |
| 1111 | 1871 | ||
| 1112 | pg_phys_addr += cur_pages * PageSize; | 1872 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1113 | pg_pages -= cur_pages; | 1873 | cur_address = address; |
| 1114 | } | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | // Check if we're done. | ||
| 1118 | if (last_map_address <= info.GetLastAddress()) { | ||
| 1119 | break; | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | // Advance. | ||
| 1123 | ++it; | ||
| 1124 | } | ||
| 1125 | } | ||
| 1126 | }); | ||
| 1127 | 1874 | ||
| 1128 | // Iterate over the memory, unmapping as we go. | 1875 | // Iterate over the memory, unmapping as we go. |
| 1129 | auto it = m_memory_block_manager.FindIterator(cur_address); | 1876 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| @@ -1141,8 +1888,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1141 | last_address + 1 - cur_address) / | 1888 | last_address + 1 - cur_address) / |
| 1142 | PageSize; | 1889 | PageSize; |
| 1143 | 1890 | ||
| 1891 | // HACK: Manually close the pages. | ||
| 1892 | HACK_ClosePages(cur_address, cur_pages); | ||
| 1893 | |||
| 1144 | // Unmap. | 1894 | // Unmap. |
| 1145 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)); | 1895 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) |
| 1896 | .IsSuccess()); | ||
| 1146 | } | 1897 | } |
| 1147 | 1898 | ||
| 1148 | // Check if we're done. | 1899 | // Check if we're done. |
| @@ -1157,8 +1908,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1157 | 1908 | ||
| 1158 | // Release the memory resource. | 1909 | // Release the memory resource. |
| 1159 | m_mapped_physical_memory_size -= mapped_size; | 1910 | m_mapped_physical_memory_size -= mapped_size; |
| 1160 | auto process{m_system.Kernel().CurrentProcess()}; | 1911 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size); |
| 1161 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | ||
| 1162 | 1912 | ||
| 1163 | // Update memory blocks. | 1913 | // Update memory blocks. |
| 1164 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | 1914 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, |
| @@ -1166,14 +1916,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1166 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 1916 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1167 | KMemoryBlockDisableMergeAttribute::None); | 1917 | KMemoryBlockDisableMergeAttribute::None); |
| 1168 | 1918 | ||
| 1169 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | ||
| 1170 | // counting for mapped pages. Until then, we must manually close the reference to the page | ||
| 1171 | // group. | ||
| 1172 | m_system.Kernel().MemoryManager().Close(pg); | ||
| 1173 | |||
| 1174 | // We succeeded. | 1919 | // We succeeded. |
| 1175 | remap_guard.Cancel(); | ||
| 1176 | |||
| 1177 | R_SUCCEED(); | 1920 | R_SUCCEED(); |
| 1178 | } | 1921 | } |
| 1179 | 1922 | ||
| @@ -1749,8 +2492,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1749 | OperationType::Unmap)); | 2492 | OperationType::Unmap)); |
| 1750 | 2493 | ||
| 1751 | // Release the memory from the resource limit. | 2494 | // Release the memory from the resource limit. |
| 1752 | m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( | 2495 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize); |
| 1753 | LimitableResource::PhysicalMemory, num_pages * PageSize); | ||
| 1754 | 2496 | ||
| 1755 | // Apply the memory block update. | 2497 | // Apply the memory block update. |
| 1756 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | 2498 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, |
| @@ -1780,8 +2522,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1780 | 2522 | ||
| 1781 | // Reserve memory for the heap extension. | 2523 | // Reserve memory for the heap extension. |
| 1782 | KScopedResourceReservation memory_reservation( | 2524 | KScopedResourceReservation memory_reservation( |
| 1783 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, | 2525 | m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size); |
| 1784 | allocation_size); | ||
| 1785 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 2526 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1786 | 2527 | ||
| 1787 | // Allocate pages for the heap extension. | 2528 | // Allocate pages for the heap extension. |
| @@ -1869,7 +2610,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1869 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 2610 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1870 | } else { | 2611 | } else { |
| 1871 | KPageGroup page_group; | 2612 | KPageGroup page_group; |
| 1872 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 2613 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 1873 | &page_group, needed_num_pages, | 2614 | &page_group, needed_num_pages, |
| 1874 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 2615 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 1875 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 2616 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| @@ -1883,8 +2624,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1883 | return addr; | 2624 | return addr; |
| 1884 | } | 2625 | } |
| 1885 | 2626 | ||
| 1886 | Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, | 2627 | Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, |
| 1887 | bool is_aligned) { | 2628 | KMemoryPermission perm, bool is_aligned, |
| 2629 | bool check_heap) { | ||
| 1888 | // Lightly validate the range before doing anything else. | 2630 | // Lightly validate the range before doing anything else. |
| 1889 | const size_t num_pages = size / PageSize; | 2631 | const size_t num_pages = size / PageSize; |
| 1890 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 2632 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1894,15 +2636,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1894 | 2636 | ||
| 1895 | // Check the memory state. | 2637 | // Check the memory state. |
| 1896 | const auto test_state = | 2638 | const auto test_state = |
| 1897 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | 2639 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | |
| 2640 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1898 | size_t num_allocator_blocks; | 2641 | size_t num_allocator_blocks; |
| 1899 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, | 2642 | KMemoryState old_state; |
| 2643 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 2644 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 1900 | test_state, perm, perm, | 2645 | test_state, perm, perm, |
| 1901 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | 2646 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, |
| 1902 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | 2647 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); |
| 1903 | 2648 | ||
| 1904 | // Create an update allocator. | 2649 | // Create an update allocator. |
| 1905 | Result allocator_result{ResultSuccess}; | 2650 | Result allocator_result; |
| 1906 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2651 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1907 | m_memory_block_slab_manager, num_allocator_blocks); | 2652 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1908 | R_TRY(allocator_result); | 2653 | R_TRY(allocator_result); |
| @@ -1911,10 +2656,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1911 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | 2656 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, |
| 1912 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | 2657 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); |
| 1913 | 2658 | ||
| 2659 | // Set whether the locked memory was io. | ||
| 2660 | *out_is_io = old_state == KMemoryState::Io; | ||
| 2661 | |||
| 1914 | R_SUCCEED(); | 2662 | R_SUCCEED(); |
| 1915 | } | 2663 | } |
| 1916 | 2664 | ||
| 1917 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | 2665 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) { |
| 1918 | // Lightly validate the range before doing anything else. | 2666 | // Lightly validate the range before doing anything else. |
| 1919 | const size_t num_pages = size / PageSize; | 2667 | const size_t num_pages = size / PageSize; |
| 1920 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 2668 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1923,16 +2671,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | |||
| 1923 | KScopedLightLock lk(m_general_lock); | 2671 | KScopedLightLock lk(m_general_lock); |
| 1924 | 2672 | ||
| 1925 | // Check the memory state. | 2673 | // Check the memory state. |
| 2674 | const auto test_state = KMemoryState::FlagCanDeviceMap | | ||
| 2675 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1926 | size_t num_allocator_blocks; | 2676 | size_t num_allocator_blocks; |
| 1927 | R_TRY(this->CheckMemoryStateContiguous( | 2677 | R_TRY(this->CheckMemoryStateContiguous( |
| 1928 | std::addressof(num_allocator_blocks), address, size, | 2678 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, |
| 1929 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1930 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1931 | KMemoryPermission::None, KMemoryPermission::None, | 2679 | KMemoryPermission::None, KMemoryPermission::None, |
| 1932 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | 2680 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); |
| 1933 | 2681 | ||
| 1934 | // Create an update allocator. | 2682 | // Create an update allocator. |
| 1935 | Result allocator_result{ResultSuccess}; | 2683 | Result allocator_result; |
| 1936 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2684 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1937 | m_memory_block_slab_manager, num_allocator_blocks); | 2685 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1938 | R_TRY(allocator_result); | 2686 | R_TRY(allocator_result); |
| @@ -1976,13 +2724,28 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { | |||
| 1976 | R_SUCCEED(); | 2724 | R_SUCCEED(); |
| 1977 | } | 2725 | } |
| 1978 | 2726 | ||
| 2727 | Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) { | ||
| 2728 | R_RETURN(this->LockMemoryAndOpen( | ||
| 2729 | nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2730 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, | ||
| 2731 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 2732 | KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, | ||
| 2733 | KMemoryAttribute::Locked)); | ||
| 2734 | } | ||
| 2735 | |||
| 2736 | Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) { | ||
| 2737 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2738 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, | ||
| 2739 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2740 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 2741 | KMemoryAttribute::Locked, nullptr)); | ||
| 2742 | } | ||
| 2743 | |||
| 1979 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { | 2744 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { |
| 1980 | R_RETURN(this->LockMemoryAndOpen( | 2745 | R_RETURN(this->LockMemoryAndOpen( |
| 1981 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | 2746 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, |
| 1982 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | 2747 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, |
| 1983 | KMemoryAttribute::None, | 2748 | KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, |
| 1984 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | ||
| 1985 | KMemoryPermission::KernelReadWrite), | ||
| 1986 | KMemoryAttribute::Locked)); | 2749 | KMemoryAttribute::Locked)); |
| 1987 | } | 2750 | } |
| 1988 | 2751 | ||
| @@ -2066,6 +2829,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2066 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | 2829 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); |
| 2067 | break; | 2830 | break; |
| 2068 | } | 2831 | } |
| 2832 | case OperationType::Separate: { | ||
| 2833 | // HACK: Unimplemented. | ||
| 2834 | break; | ||
| 2835 | } | ||
| 2069 | case OperationType::ChangePermissions: | 2836 | case OperationType::ChangePermissions: |
| 2070 | case OperationType::ChangePermissionsAndRefresh: | 2837 | case OperationType::ChangePermissionsAndRefresh: |
| 2071 | break; | 2838 | break; |
| @@ -2075,6 +2842,17 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2075 | R_SUCCEED(); | 2842 | R_SUCCEED(); |
| 2076 | } | 2843 | } |
| 2077 | 2844 | ||
| 2845 | void KPageTable::FinalizeUpdate(PageLinkedList* page_list) { | ||
| 2846 | while (page_list->Peek()) { | ||
| 2847 | [[maybe_unused]] auto page = page_list->Pop(); | ||
| 2848 | |||
| 2849 | // TODO(bunnei): Free pages once they are allocated in guest memory | ||
| 2850 | // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); | ||
| 2851 | // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); | ||
| 2852 | // this->GetPageTableManager().Free(page); | ||
| 2853 | } | ||
| 2854 | } | ||
| 2855 | |||
| 2078 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | 2856 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { |
| 2079 | switch (state) { | 2857 | switch (state) { |
| 2080 | case KMemoryState::Free: | 2858 | case KMemoryState::Free: |
| @@ -2101,6 +2879,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 2101 | case KMemoryState::GeneratedCode: | 2879 | case KMemoryState::GeneratedCode: |
| 2102 | case KMemoryState::CodeOut: | 2880 | case KMemoryState::CodeOut: |
| 2103 | case KMemoryState::Coverage: | 2881 | case KMemoryState::Coverage: |
| 2882 | case KMemoryState::Insecure: | ||
| 2104 | return m_alias_code_region_start; | 2883 | return m_alias_code_region_start; |
| 2105 | case KMemoryState::Code: | 2884 | case KMemoryState::Code: |
| 2106 | case KMemoryState::CodeData: | 2885 | case KMemoryState::CodeData: |
| @@ -2136,6 +2915,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const { | |||
| 2136 | case KMemoryState::GeneratedCode: | 2915 | case KMemoryState::GeneratedCode: |
| 2137 | case KMemoryState::CodeOut: | 2916 | case KMemoryState::CodeOut: |
| 2138 | case KMemoryState::Coverage: | 2917 | case KMemoryState::Coverage: |
| 2918 | case KMemoryState::Insecure: | ||
| 2139 | return m_alias_code_region_end - m_alias_code_region_start; | 2919 | return m_alias_code_region_end - m_alias_code_region_start; |
| 2140 | case KMemoryState::Code: | 2920 | case KMemoryState::Code: |
| 2141 | case KMemoryState::CodeData: | 2921 | case KMemoryState::CodeData: |
| @@ -2177,6 +2957,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { | |||
| 2177 | case KMemoryState::GeneratedCode: | 2957 | case KMemoryState::GeneratedCode: |
| 2178 | case KMemoryState::CodeOut: | 2958 | case KMemoryState::CodeOut: |
| 2179 | case KMemoryState::Coverage: | 2959 | case KMemoryState::Coverage: |
| 2960 | case KMemoryState::Insecure: | ||
| 2180 | return is_in_region && !is_in_heap && !is_in_alias; | 2961 | return is_in_region && !is_in_heap && !is_in_alias; |
| 2181 | case KMemoryState::Normal: | 2962 | case KMemoryState::Normal: |
| 2182 | ASSERT(is_in_heap); | 2963 | ASSERT(is_in_heap); |
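The MapPhysicalMemory/UnmapPhysicalMemory hunks above replace the old page-group snapshot and ScopeExit remap guard with explicit HACK_OpenPages/HACK_ClosePages calls: every successful map now takes a reference on the backing frames, and every unmap drops one, with leftover allocations released through paired OpenFirst/Close calls. A minimal sketch of that discipline, using toy types rather than yuzu's real KMemoryManager API:

    // Toy model of the open/close pairing the diff introduces (hypothetical
    // types). Each mapped frame holds one reference; an unmap must drop it or
    // the frame leaks, and dropping it twice would free a frame still in use.
    #include <cassert>
    #include <cstddef>
    #include <unordered_map>

    class FramePool {
    public:
        void Open(std::size_t frame, std::size_t count) { // take references
            for (std::size_t i = 0; i < count; ++i) {
                ++m_refs[frame + i];
            }
        }
        void Close(std::size_t frame, std::size_t count) { // drop references
            for (std::size_t i = 0; i < count; ++i) {
                const auto it = m_refs.find(frame + i);
                assert(it != m_refs.end() && it->second > 0);
                if (--it->second == 0) {
                    m_refs.erase(it); // last reference: frame returns to the pool
                }
            }
        }
        bool IsLive(std::size_t frame) const {
            return m_refs.count(frame) != 0;
        }

    private:
        std::unordered_map<std::size_t, std::size_t> m_refs;
    };

    int main() {
        FramePool pool;
        pool.Open(0x100, 4);  // map: open the pages (HACK_OpenPages analogue)
        pool.Close(0x100, 4); // unmap: close them first (HACK_ClosePages analogue)
        assert(!pool.IsLive(0x100));
    }

The pairing has to be exact, which is why the hunks insert HACK_ClosePages immediately before each Unmap operation and HACK_OpenPages immediately after each successful Map.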
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index c6aeacd96..950850291 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include "core/hle/kernel/k_memory_layout.h" | 16 | #include "core/hle/kernel/k_memory_layout.h" |
| 17 | #include "core/hle/kernel/k_memory_manager.h" | 17 | #include "core/hle/kernel/k_memory_manager.h" |
| 18 | #include "core/hle/result.h" | 18 | #include "core/hle/result.h" |
| 19 | #include "core/memory.h" | ||
| 19 | 20 | ||
| 20 | namespace Core { | 21 | namespace Core { |
| 21 | class System; | 22 | class System; |
| @@ -23,7 +24,10 @@ class System; | |||
| 23 | 24 | ||
| 24 | namespace Kernel { | 25 | namespace Kernel { |
| 25 | 26 | ||
| 27 | class KBlockInfoManager; | ||
| 26 | class KMemoryBlockManager; | 28 | class KMemoryBlockManager; |
| 29 | class KResourceLimit; | ||
| 30 | class KSystemResource; | ||
| 27 | 31 | ||
| 28 | class KPageTable final { | 32 | class KPageTable final { |
| 29 | public: | 33 | public: |
| @@ -36,9 +40,9 @@ public: | |||
| 36 | ~KPageTable(); | 40 | ~KPageTable(); |
| 37 | 41 | ||
| 38 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 42 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 39 | VAddr code_addr, size_t code_size, | 43 | bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, |
| 40 | KMemoryBlockSlabManager* mem_block_slab_manager, | 44 | VAddr code_addr, size_t code_size, KSystemResource* system_resource, |
| 41 | KMemoryManager::Pool pool); | 45 | KResourceLimit* resource_limit); |
| 42 | 46 | ||
| 43 | void Finalize(); | 47 | void Finalize(); |
| 44 | 48 | ||
| @@ -74,12 +78,20 @@ public: | |||
| 74 | KMemoryState state, KMemoryPermission perm, | 78 | KMemoryState state, KMemoryPermission perm, |
| 75 | PAddr map_addr = 0); | 79 | PAddr map_addr = 0); |
| 76 | 80 | ||
| 77 | Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, | 81 | Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, |
| 78 | bool is_aligned); | 82 | KMemoryPermission perm, bool is_aligned, bool check_heap); |
| 79 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); | 83 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap); |
| 80 | 84 | ||
| 81 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); | 85 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); |
| 82 | 86 | ||
| 87 | Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size); | ||
| 88 | Result UnlockForIpcUserBuffer(VAddr address, size_t size); | ||
| 89 | |||
| 90 | Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table, | ||
| 91 | KMemoryPermission test_perm, KMemoryState dst_state, bool send); | ||
| 92 | Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state); | ||
| 93 | Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state); | ||
| 94 | |||
| 83 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); | 95 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); |
| 84 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); | 96 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); |
| 85 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, | 97 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, |
| @@ -97,13 +109,54 @@ public: | |||
| 97 | 109 | ||
| 98 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; | 110 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; |
| 99 | 111 | ||
| 112 | protected: | ||
| 113 | struct PageLinkedList { | ||
| 114 | private: | ||
| 115 | struct Node { | ||
| 116 | Node* m_next; | ||
| 117 | std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||
| 118 | }; | ||
| 119 | |||
| 120 | public: | ||
| 121 | constexpr PageLinkedList() = default; | ||
| 122 | |||
| 123 | void Push(Node* n) { | ||
| 124 | ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||
| 125 | n->m_next = m_root; | ||
| 126 | m_root = n; | ||
| 127 | } | ||
| 128 | |||
| 129 | void Push(Core::Memory::Memory& memory, VAddr addr) { | ||
| 130 | this->Push(memory.GetPointer<Node>(addr)); | ||
| 131 | } | ||
| 132 | |||
| 133 | Node* Peek() const { | ||
| 134 | return m_root; | ||
| 135 | } | ||
| 136 | |||
| 137 | Node* Pop() { | ||
| 138 | Node* const r = m_root; | ||
| 139 | |||
| 140 | m_root = r->m_next; | ||
| 141 | r->m_next = nullptr; | ||
| 142 | |||
| 143 | return r; | ||
| 144 | } | ||
| 145 | |||
| 146 | private: | ||
| 147 | Node* m_root{}; | ||
| 148 | }; | ||
| 149 | static_assert(std::is_trivially_destructible<PageLinkedList>::value); | ||
| 150 | |||
| 100 | private: | 151 | private: |
| 101 | enum class OperationType : u32 { | 152 | enum class OperationType : u32 { |
| 102 | Map, | 153 | Map = 0, |
| 103 | MapGroup, | 154 | MapFirst = 1, |
| 104 | Unmap, | 155 | MapGroup = 2, |
| 105 | ChangePermissions, | 156 | Unmap = 3, |
| 106 | ChangePermissionsAndRefresh, | 157 | ChangePermissions = 4, |
| 158 | ChangePermissionsAndRefresh = 5, | ||
| 159 | Separate = 6, | ||
| 107 | }; | 160 | }; |
| 108 | 161 | ||
| 109 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | 162 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = |
| @@ -123,6 +176,7 @@ private: | |||
| 123 | OperationType operation); | 176 | OperationType operation); |
| 124 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, | 177 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, |
| 125 | PAddr map_addr = 0); | 178 | PAddr map_addr = 0); |
| 179 | void FinalizeUpdate(PageLinkedList* page_list); | ||
| 126 | VAddr GetRegionAddress(KMemoryState state) const; | 180 | VAddr GetRegionAddress(KMemoryState state) const; |
| 127 | size_t GetRegionSize(KMemoryState state) const; | 181 | size_t GetRegionSize(KMemoryState state) const; |
| 128 | 182 | ||
| @@ -199,6 +253,18 @@ private: | |||
| 199 | return *out != 0; | 253 | return *out != 0; |
| 200 | } | 254 | } |
| 201 | 255 | ||
| 256 | Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address, | ||
| 257 | size_t size, KMemoryPermission test_perm, KMemoryState dst_state); | ||
| 258 | Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, | ||
| 259 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 260 | KPageTable& src_page_table, bool send); | ||
| 261 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, | ||
| 262 | size_t size, KMemoryPermission prot_perm); | ||
| 263 | |||
| 264 | // HACK: These will be removed once we automatically manage page reference counts. | ||
| 265 | void HACK_OpenPages(PAddr phys_addr, size_t num_pages); | ||
| 266 | void HACK_ClosePages(VAddr virt_addr, size_t num_pages); | ||
| 267 | |||
| 202 | mutable KLightLock m_general_lock; | 268 | mutable KLightLock m_general_lock; |
| 203 | mutable KLightLock m_map_physical_memory_lock; | 269 | mutable KLightLock m_map_physical_memory_lock; |
| 204 | 270 | ||
| @@ -316,6 +382,31 @@ public: | |||
| 316 | addr + size - 1 <= m_address_space_end - 1; | 382 | addr + size - 1 <= m_address_space_end - 1; |
| 317 | } | 383 | } |
| 318 | 384 | ||
| 385 | public: | ||
| 386 | static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 387 | return layout.GetLinearVirtualAddress(addr); | ||
| 388 | } | ||
| 389 | |||
| 390 | static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 391 | return layout.GetLinearPhysicalAddress(addr); | ||
| 392 | } | ||
| 393 | |||
| 394 | static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 395 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 396 | } | ||
| 397 | |||
| 398 | static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 399 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 400 | } | ||
| 401 | |||
| 402 | static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 403 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 404 | } | ||
| 405 | |||
| 406 | static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 407 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 408 | } | ||
| 409 | |||
| 319 | private: | 410 | private: |
| 320 | constexpr bool IsKernel() const { | 411 | constexpr bool IsKernel() const { |
| 321 | return m_is_kernel; | 412 | return m_is_kernel; |
| @@ -331,6 +422,24 @@ private: | |||
| 331 | } | 422 | } |
| 332 | 423 | ||
| 333 | private: | 424 | private: |
| 425 | class KScopedPageTableUpdater { | ||
| 426 | private: | ||
| 427 | KPageTable* m_pt{}; | ||
| 428 | PageLinkedList m_ll; | ||
| 429 | |||
| 430 | public: | ||
| 431 | explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} | ||
| 432 | explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} | ||
| 433 | ~KScopedPageTableUpdater() { | ||
| 434 | m_pt->FinalizeUpdate(this->GetPageList()); | ||
| 435 | } | ||
| 436 | |||
| 437 | PageLinkedList* GetPageList() { | ||
| 438 | return &m_ll; | ||
| 439 | } | ||
| 440 | }; | ||
| 441 | |||
| 442 | private: | ||
| 334 | VAddr m_address_space_start{}; | 443 | VAddr m_address_space_start{}; |
| 335 | VAddr m_address_space_end{}; | 444 | VAddr m_address_space_end{}; |
| 336 | VAddr m_heap_region_start{}; | 445 | VAddr m_heap_region_start{}; |
| @@ -347,20 +456,27 @@ private: | |||
| 347 | VAddr m_alias_code_region_start{}; | 456 | VAddr m_alias_code_region_start{}; |
| 348 | VAddr m_alias_code_region_end{}; | 457 | VAddr m_alias_code_region_end{}; |
| 349 | 458 | ||
| 350 | size_t m_mapped_physical_memory_size{}; | ||
| 351 | size_t m_max_heap_size{}; | 459 | size_t m_max_heap_size{}; |
| 352 | size_t m_max_physical_memory_size{}; | 460 | size_t m_mapped_physical_memory_size{}; |
| 461 | size_t m_mapped_unsafe_physical_memory{}; | ||
| 462 | size_t m_mapped_insecure_memory{}; | ||
| 463 | size_t m_mapped_ipc_server_memory{}; | ||
| 353 | size_t m_address_space_width{}; | 464 | size_t m_address_space_width{}; |
| 354 | 465 | ||
| 355 | KMemoryBlockManager m_memory_block_manager; | 466 | KMemoryBlockManager m_memory_block_manager; |
| 467 | u32 m_allocate_option{}; | ||
| 356 | 468 | ||
| 357 | bool m_is_kernel{}; | 469 | bool m_is_kernel{}; |
| 358 | bool m_enable_aslr{}; | 470 | bool m_enable_aslr{}; |
| 359 | bool m_enable_device_address_space_merge{}; | 471 | bool m_enable_device_address_space_merge{}; |
| 360 | 472 | ||
| 361 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | 473 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; |
| 474 | KBlockInfoManager* m_block_info_manager{}; | ||
| 475 | KResourceLimit* m_resource_limit{}; | ||
| 362 | 476 | ||
| 363 | u32 m_heap_fill_value{}; | 477 | u32 m_heap_fill_value{}; |
| 478 | u32 m_ipc_fill_value{}; | ||
| 479 | u32 m_stack_fill_value{}; | ||
| 364 | const KMemoryRegion* m_cached_physical_heap_region{}; | 480 | const KMemoryRegion* m_cached_physical_heap_region{}; |
| 365 | 481 | ||
| 366 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; | 482 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; |
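PageLinkedList, added above, is an intrusive free list: the link to the next page lives inside the tracked page itself (Push even asserts page alignment), so collecting freed page-table pages costs no extra allocation. A sketch of the same idea with illustrative sizes:

    // Minimal intrusive free list over page-sized nodes, mirroring the
    // Node* + buffer layout shown in the header above. Sizes illustrative.
    #include <cassert>
    #include <cstddef>

    constexpr std::size_t kPageSize = 0x1000;

    struct PageNode {
        PageNode* next;
        unsigned char buffer[kPageSize - sizeof(PageNode*)];
    };
    static_assert(sizeof(PageNode) == kPageSize);

    class FreeList {
    public:
        void Push(PageNode* n) {
            n->next = m_root; // the freed page stores its own link
            m_root = n;
        }
        PageNode* Pop() {
            PageNode* const r = m_root;
            if (r != nullptr) {
                m_root = r->next;
                r->next = nullptr;
            }
            return r;
        }

    private:
        PageNode* m_root = nullptr;
    };

    int main() {
        alignas(kPageSize) static PageNode page{};
        FreeList list;
        list.Push(&page);
        assert(list.Pop() == &page);
        assert(list.Pop() == nullptr); // unlike the real Pop, this one null-checks
    }

KScopedPageTableUpdater then ties the list to RAII: its destructor hands the collected pages to FinalizeUpdate, which (per the TODO in the .cpp hunk) will eventually return them to the page-table manager.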
diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h new file mode 100644 index 000000000..91a45cde3 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_manager.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <atomic> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_page_table_slab_heap.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> { | ||
| 15 | public: | ||
| 16 | using RefCount = KPageTableSlabHeap::RefCount; | ||
| 17 | static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize; | ||
| 18 | |||
| 19 | public: | ||
| 20 | KPageTableManager() = default; | ||
| 21 | |||
| 22 | void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) { | ||
| 23 | m_pt_heap = pt_heap; | ||
| 24 | |||
| 25 | static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>); | ||
| 26 | BaseHeap::Initialize(page_allocator, pt_heap); | ||
| 27 | } | ||
| 28 | |||
| 29 | VAddr Allocate() { | ||
| 30 | return VAddr(BaseHeap::Allocate()); | ||
| 31 | } | ||
| 32 | |||
| 33 | RefCount GetRefCount(VAddr addr) const { | ||
| 34 | return m_pt_heap->GetRefCount(addr); | ||
| 35 | } | ||
| 36 | |||
| 37 | void Open(VAddr addr, int count) { | ||
| 38 | return m_pt_heap->Open(addr, count); | ||
| 39 | } | ||
| 40 | |||
| 41 | bool Close(VAddr addr, int count) { | ||
| 42 | return m_pt_heap->Close(addr, count); | ||
| 43 | } | ||
| 44 | |||
| 45 | bool IsInPageTableHeap(VAddr addr) const { | ||
| 46 | return m_pt_heap->IsInRange(addr); | ||
| 47 | } | ||
| 48 | |||
| 49 | private: | ||
| 50 | using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>; | ||
| 51 | |||
| 52 | KPageTableSlabHeap* m_pt_heap{}; | ||
| 53 | }; | ||
| 54 | |||
| 55 | } // namespace Kernel | ||
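The new manager layers reference counting over the slab heap: Open raises a page's count and Close returns true once it reaches zero, which is the caller's cue that the page-table page can be freed. A toy stand-in (not the real KPageTableManager, whose storage comes from KDynamicPageManager) showing that lifecycle:

    // Toy allocate/open/close lifecycle with u16 counters, as in the header.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    class RefCountedPages {
    public:
        explicit RefCountedPages(std::size_t count) : m_refs(count, 0) {}

        // Hand out the first unreferenced slot (toy policy; the real manager
        // allocates from a dynamic page heap instead).
        std::size_t Allocate() {
            for (std::size_t i = 0; i < m_refs.size(); ++i) {
                if (m_refs[i] == 0) {
                    m_refs[i] = 1;
                    return i;
                }
            }
            return SIZE_MAX; // out of pages
        }
        void Open(std::size_t slot, std::uint16_t count) {
            m_refs[slot] = static_cast<std::uint16_t>(m_refs[slot] + count);
        }
        bool Close(std::size_t slot, std::uint16_t count) {
            assert(m_refs[slot] >= count);
            m_refs[slot] = static_cast<std::uint16_t>(m_refs[slot] - count);
            return m_refs[slot] == 0; // true: safe to free the page
        }

    private:
        std::vector<std::uint16_t> m_refs;
    };

    int main() {
        RefCountedPages pages(4);
        const std::size_t pt = pages.Allocate();
        pages.Open(pt, 1);           // a second user references the page
        assert(!pages.Close(pt, 1)); // still referenced elsewhere
        assert(pages.Close(pt, 1));  // last reference gone: free it
    }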
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h new file mode 100644 index 000000000..a9543cbd0 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_slab_heap.h | |||
| @@ -0,0 +1,93 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <vector> | ||
| 8 | |||
| 9 | #include "common/common_types.h" | ||
| 10 | #include "core/hle/kernel/k_dynamic_slab_heap.h" | ||
| 11 | #include "core/hle/kernel/slab_helpers.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | namespace impl { | ||
| 16 | |||
| 17 | class PageTablePage { | ||
| 18 | public: | ||
| 19 | // Do not initialize anything. | ||
| 20 | PageTablePage() = default; | ||
| 21 | |||
| 22 | private: | ||
| 23 | std::array<u8, PageSize> m_buffer{}; | ||
| 24 | }; | ||
| 25 | static_assert(sizeof(PageTablePage) == PageSize); | ||
| 26 | |||
| 27 | } // namespace impl | ||
| 28 | |||
| 29 | class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> { | ||
| 30 | public: | ||
| 31 | using RefCount = u16; | ||
| 32 | static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); | ||
| 33 | static_assert(PageTableSize == PageSize); | ||
| 34 | |||
| 35 | public: | ||
| 36 | KPageTableSlabHeap() = default; | ||
| 37 | |||
| 38 | static constexpr size_t CalculateReferenceCountSize(size_t size) { | ||
| 39 | return (size / PageSize) * sizeof(RefCount); | ||
| 40 | } | ||
| 41 | |||
| 42 | void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) { | ||
| 43 | BaseHeap::Initialize(page_allocator, object_count); | ||
| 44 | this->Initialize(rc); | ||
| 45 | } | ||
| 46 | |||
| 47 | RefCount GetRefCount(VAddr addr) { | ||
| 48 | ASSERT(this->IsInRange(addr)); | ||
| 49 | return *this->GetRefCountPointer(addr); | ||
| 50 | } | ||
| 51 | |||
| 52 | void Open(VAddr addr, int count) { | ||
| 53 | ASSERT(this->IsInRange(addr)); | ||
| 54 | |||
| 55 | *this->GetRefCountPointer(addr) += static_cast<RefCount>(count); | ||
| 56 | |||
| 57 | ASSERT(this->GetRefCount(addr) > 0); | ||
| 58 | } | ||
| 59 | |||
| 60 | bool Close(VAddr addr, int count) { | ||
| 61 | ASSERT(this->IsInRange(addr)); | ||
| 62 | ASSERT(this->GetRefCount(addr) >= count); | ||
| 63 | |||
| 64 | *this->GetRefCountPointer(addr) -= static_cast<RefCount>(count); | ||
| 65 | return this->GetRefCount(addr) == 0; | ||
| 66 | } | ||
| 67 | |||
| 68 | bool IsInPageTableHeap(VAddr addr) const { | ||
| 69 | return this->IsInRange(addr); | ||
| 70 | } | ||
| 71 | |||
| 72 | private: | ||
| 73 | void Initialize([[maybe_unused]] RefCount* rc) { | ||
| 74 | // TODO(bunnei): Use rc once we support kernel virtual memory allocations. | ||
| 75 | const auto count = this->GetSize() / PageSize; | ||
| 76 | m_ref_counts.resize(count); | ||
| 77 | |||
| 78 | for (size_t i = 0; i < count; i++) { | ||
| 79 | m_ref_counts[i] = 0; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 83 | RefCount* GetRefCountPointer(VAddr addr) { | ||
| 84 | return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize); | ||
| 85 | } | ||
| 86 | |||
| 87 | private: | ||
| 88 | using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>; | ||
| 89 | |||
| 90 | std::vector<RefCount> m_ref_counts; | ||
| 91 | }; | ||
| 92 | |||
| 93 | } // namespace Kernel | ||
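A worked check of the slab heap's bookkeeping arithmetic, assuming PageSize == 0x1000 and RefCount == u16 as declared above: the heap keeps one counter per page, and a page's counter index is its page offset from the heap base.

    // Compile-time check of CalculateReferenceCountSize and the
    // GetRefCountPointer indexing shown in the header.
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000;
    using RefCount = std::uint16_t;

    constexpr std::size_t CalculateReferenceCountSize(std::size_t size) {
        return (size / PageSize) * sizeof(RefCount);
    }

    constexpr std::size_t RefCountIndex(std::uintptr_t addr, std::uintptr_t base) {
        return (addr - base) / PageSize; // one counter per page
    }

    // A 2 MiB heap has 512 pages, so it needs 1024 bytes of counters, and the
    // page at base + 0x3000 (the fourth page) uses counter index 3.
    static_assert(CalculateReferenceCountSize(2 * 1024 * 1024) == 1024);
    static_assert(RefCountIndex(0x80003000, 0x80000000) == 3);

    int main() {}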
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp index 7a5a9dc2a..77d00ae2c 100644 --- a/src/core/hle/kernel/k_port.cpp +++ b/src/core/hle/kernel/k_port.cpp | |||
| @@ -57,12 +57,6 @@ Result KPort::EnqueueSession(KServerSession* session) { | |||
| 57 | 57 | ||
| 58 | server.EnqueueSession(session); | 58 | server.EnqueueSession(session); |
| 59 | 59 | ||
| 60 | if (auto session_ptr = server.GetSessionRequestHandler().lock()) { | ||
| 61 | session_ptr->ClientConnected(server.AcceptSession()); | ||
| 62 | } else { | ||
| 63 | ASSERT(false); | ||
| 64 | } | ||
| 65 | |||
| 66 | return ResultSuccess; | 60 | return ResultSuccess; |
| 67 | } | 61 | } |
| 68 | 62 | ||
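With the ClientConnected block gone, EnqueueSession only queues the incoming session; acceptance is deferred until a server-side waiter sees the signaled port and accepts it (AcceptSession, visible in the removed code, and IsSignaled, visible below, already exist for this). A toy sketch of the enqueue-now/accept-later shape, with hypothetical types:

    // Deferred-accept port: connecting only queues; a waiter pulls later.
    #include <cassert>
    #include <deque>

    struct Session {
        int id;
    };

    class ToyServerPort {
    public:
        void EnqueueSession(Session s) { // producer: no handler call-out here
            m_pending.push_back(s);
        }
        bool IsSignaled() const {
            return !m_pending.empty();
        }
        bool TryAcceptSession(Session* out) { // consumer: runs when signaled
            if (m_pending.empty()) {
                return false;
            }
            *out = m_pending.front();
            m_pending.pop_front();
            return true;
        }

    private:
        std::deque<Session> m_pending;
    };

    int main() {
        ToyServerPort port;
        port.EnqueueSession(Session{1}); // client connects: just queue
        assert(port.IsSignaled());       // port wakes its waiters
        Session s{};
        assert(port.TryAcceptSession(&s) && s.id == 1);
    }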
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 8c3495e5a..55a9c5fae 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -38,7 +38,7 @@ namespace { | |||
| 38 | */ | 38 | */ |
| 39 | void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) { | 39 | void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) { |
| 40 | const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); | 40 | const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); |
| 41 | ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1)); | 41 | ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1)); |
| 42 | 42 | ||
| 43 | KThread* thread = KThread::Create(system.Kernel()); | 43 | KThread* thread = KThread::Create(system.Kernel()); |
| 44 | SCOPE_EXIT({ thread->Close(); }); | 44 | SCOPE_EXIT({ thread->Close(); }); |
| @@ -124,7 +124,7 @@ void KProcess::DecrementRunningThreadCount() { | |||
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | u64 KProcess::GetTotalPhysicalMemoryAvailable() { | 126 | u64 KProcess::GetTotalPhysicalMemoryAvailable() { |
| 127 | const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + | 127 | const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + |
| 128 | page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + | 128 | page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + |
| 129 | main_thread_stack_size}; | 129 | main_thread_stack_size}; |
| 130 | if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); | 130 | if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); |
| @@ -349,8 +349,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 349 | // We currently do not support process-specific system resource | 349 | // We currently do not support process-specific system resource |
| 350 | UNIMPLEMENTED_IF(system_resource_size != 0); | 350 | UNIMPLEMENTED_IF(system_resource_size != 0); |
| 351 | 351 | ||
| 352 | KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, | 352 | KScopedResourceReservation memory_reservation( |
| 353 | code_size + system_resource_size); | 353 | resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size); |
| 354 | if (!memory_reservation.Succeeded()) { | 354 | if (!memory_reservation.Succeeded()) { |
| 355 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", | 355 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", |
| 356 | code_size + system_resource_size); | 356 | code_size + system_resource_size); |
| @@ -358,8 +358,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 358 | } | 358 | } |
| 359 | // Initialize proces address space | 359 | // Initialize proces address space |
| 360 | if (const Result result{page_table.InitializeForProcess( | 360 | if (const Result result{page_table.InitializeForProcess( |
| 361 | metadata.GetAddressSpaceType(), false, 0x8000000, code_size, | 361 | metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, |
| 362 | &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; | 362 | 0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)}; |
| 363 | result.IsError()) { | 363 | result.IsError()) { |
| 364 | R_RETURN(result); | 364 | R_RETURN(result); |
| 365 | } | 365 | } |
| @@ -406,8 +406,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 406 | 406 | ||
| 407 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { | 407 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { |
| 408 | AllocateMainThreadStack(stack_size); | 408 | AllocateMainThreadStack(stack_size); |
| 409 | resource_limit->Reserve(LimitableResource::Threads, 1); | 409 | resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); |
| 410 | resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); | 410 | resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size); |
| 411 | 411 | ||
| 412 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; | 412 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; |
| 413 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); | 413 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); |
| @@ -442,7 +442,7 @@ void KProcess::PrepareForTermination() { | |||
| 442 | plr_address = 0; | 442 | plr_address = 0; |
| 443 | 443 | ||
| 444 | if (resource_limit) { | 444 | if (resource_limit) { |
| 445 | resource_limit->Release(LimitableResource::PhysicalMemory, | 445 | resource_limit->Release(LimitableResource::PhysicalMemoryMax, |
| 446 | main_thread_stack_size + image_size); | 446 | main_thread_stack_size + image_size); |
| 447 | } | 447 | } |
| 448 | 448 | ||
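KScopedResourceReservation, used above against the renamed PhysicalMemoryMax limit, is an RAII guard: it reserves from the resource limit on construction and releases automatically on scope exit unless the caller commits. A minimal stand-in for the pattern (Succeeded matches the usage visible in this diff; the Commit spelling is assumed from yuzu's header, which is not part of this hunk):

    // Scoped reservation: reserve in the constructor, auto-release on failure.
    #include <cassert>
    #include <cstdint>

    class Limit {
    public:
        explicit Limit(std::uint64_t max) : m_free(max) {}
        bool Reserve(std::uint64_t n) {
            if (n > m_free) {
                return false;
            }
            m_free -= n;
            return true;
        }
        void Release(std::uint64_t n) { m_free += n; }
        std::uint64_t Free() const { return m_free; }

    private:
        std::uint64_t m_free;
    };

    class ScopedReservation {
    public:
        ScopedReservation(Limit& limit, std::uint64_t n)
            : m_limit(limit), m_size(n), m_succeeded(limit.Reserve(n)) {}
        ~ScopedReservation() {
            if (m_succeeded && !m_committed) {
                m_limit.Release(m_size); // error path: give the memory back
            }
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; } // success path: keep it reserved

    private:
        Limit& m_limit;
        std::uint64_t m_size;
        bool m_succeeded;
        bool m_committed = false;
    };

    int main() {
        Limit physical_memory(0x1000);
        {
            ScopedReservation r(physical_memory, 0x800);
            assert(r.Succeeded()); // never committed, so released at scope exit
        }
        assert(physical_memory.Free() == 0x1000);
    }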
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp index 010dcf99e..b9d22b414 100644 --- a/src/core/hle/kernel/k_resource_limit.cpp +++ b/src/core/hle/kernel/k_resource_limit.cpp | |||
| @@ -159,12 +159,13 @@ KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical | |||
| 159 | // TODO(bunnei): These values are the system defaults, the limits for service processes are | 159 | // TODO(bunnei): These values are the system defaults, the limits for service processes are |
| 160 | // lower. These should use the correct limit values. | 160 | // lower. These should use the correct limit values. |
| 161 | 161 | ||
| 162 | ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, physical_memory_size) | 162 | ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemoryMax, physical_memory_size) |
| 163 | .IsSuccess()); | 163 | .IsSuccess()); |
| 164 | ASSERT(resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess()); | 164 | ASSERT(resource_limit->SetLimitValue(LimitableResource::ThreadCountMax, 800).IsSuccess()); |
| 165 | ASSERT(resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess()); | 165 | ASSERT(resource_limit->SetLimitValue(LimitableResource::EventCountMax, 900).IsSuccess()); |
| 166 | ASSERT(resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200).IsSuccess()); | 166 | ASSERT( |
| 167 | ASSERT(resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess()); | 167 | resource_limit->SetLimitValue(LimitableResource::TransferMemoryCountMax, 200).IsSuccess()); |
| 168 | ASSERT(resource_limit->SetLimitValue(LimitableResource::SessionCountMax, 1133).IsSuccess()); | ||
| 168 | 169 | ||
| 169 | return resource_limit; | 170 | return resource_limit; |
| 170 | } | 171 | } |
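CreateResourceLimitForProcess seeds the per-process table with system defaults under the new enumerator names. A compact sketch of the set-then-assert initialization style (toy limit table; the values are copied from the hunk above):

    // Seed default limits; each SetLimitValue is checked like the ASSERTs above.
    #include <cassert>
    #include <cstdint>
    #include <map>

    enum class LimitableResource : unsigned {
        PhysicalMemoryMax,
        ThreadCountMax,
        EventCountMax,
        TransferMemoryCountMax,
        SessionCountMax,
    };

    class ToyResourceLimit {
    public:
        bool SetLimitValue(LimitableResource which, std::int64_t value) {
            if (value < 0) {
                return false; // the real call returns a Result instead
            }
            m_limits[which] = value;
            return true;
        }

    private:
        std::map<LimitableResource, std::int64_t> m_limits;
    };

    int main() {
        ToyResourceLimit limit;
        assert(limit.SetLimitValue(LimitableResource::ThreadCountMax, 800));
        assert(limit.SetLimitValue(LimitableResource::EventCountMax, 900));
        assert(limit.SetLimitValue(LimitableResource::TransferMemoryCountMax, 200));
        assert(limit.SetLimitValue(LimitableResource::SessionCountMax, 1133));
    }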
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h index 65c98c979..2573d1b7c 100644 --- a/src/core/hle/kernel/k_resource_limit.h +++ b/src/core/hle/kernel/k_resource_limit.h | |||
| @@ -16,15 +16,8 @@ class CoreTiming; | |||
| 16 | 16 | ||
| 17 | namespace Kernel { | 17 | namespace Kernel { |
| 18 | class KernelCore; | 18 | class KernelCore; |
| 19 | enum class LimitableResource : u32 { | 19 | |
| 20 | PhysicalMemory = 0, | 20 | using LimitableResource = Svc::LimitableResource; |
| 21 | Threads = 1, | ||
| 22 | Events = 2, | ||
| 23 | TransferMemory = 3, | ||
| 24 | Sessions = 4, | ||
| 25 | |||
| 26 | Count, | ||
| 27 | }; | ||
| 28 | 21 | ||
| 29 | constexpr bool IsValidResourceType(LimitableResource type) { | 22 | constexpr bool IsValidResourceType(LimitableResource type) { |
| 30 | return type < LimitableResource::Count; | 23 | return type < LimitableResource::Count; |
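The kernel-local enum is gone: LimitableResource is now an alias of the SVC-facing enum, so the *Max spellings used throughout this diff come from a single definition shared by both layers. A self-contained sketch of the alias migration (enumerator list abbreviated to two entries):

    // One enum, two namespaces: the alias keeps kernel call sites compiling.
    #include <type_traits>

    namespace Svc {
    enum class LimitableResource : unsigned {
        PhysicalMemoryMax = 0,
        ThreadCountMax = 1,
        Count,
    };
    } // namespace Svc

    namespace Kernel {
    using LimitableResource = Svc::LimitableResource;

    constexpr bool IsValidResourceType(LimitableResource type) {
        return type < LimitableResource::Count;
    }
    } // namespace Kernel

    // Both names denote the same type, so values cross layers unchanged.
    static_assert(std::is_same_v<Kernel::LimitableResource, Svc::LimitableResource>);
    static_assert(Kernel::IsValidResourceType(Svc::LimitableResource::ThreadCountMax));

    int main() {}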
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index c34ce7a17..d6676904b 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -81,8 +81,8 @@ void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) { | |||
| 81 | // HACK: we cannot schedule from this thread, it is not a core thread | 81 | // HACK: we cannot schedule from this thread, it is not a core thread |
| 82 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | 82 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); |
| 83 | 83 | ||
| 84 | // Special case to ensure dummy threads that are waiting block | 84 | // Ensure dummy threads that are waiting block. |
| 85 | GetCurrentThread(kernel).IfDummyThreadTryWait(); | 85 | GetCurrentThread(kernel).DummyThreadBeginWait(); |
| 86 | 86 | ||
| 87 | ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting); | 87 | ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting); |
| 88 | GetCurrentThread(kernel).EnableDispatch(); | 88 | GetCurrentThread(kernel).EnableDispatch(); |
| @@ -314,6 +314,16 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 314 | idle_cores &= ~(1ULL << core_id); | 314 | idle_cores &= ~(1ULL << core_id); |
| 315 | } | 315 | } |
| 316 | 316 | ||
| 317 | // HACK: any waiting dummy threads can wake up now. | ||
| 318 | kernel.GlobalSchedulerContext().WakeupWaitingDummyThreads(); | ||
| 319 | |||
| 320 | // HACK: if we are a dummy thread, and we need to go sleep, indicate | ||
| 321 | // that for when the lock is released. | ||
| 322 | KThread* const cur_thread = GetCurrentThreadPointer(kernel); | ||
| 323 | if (cur_thread->IsDummyThread() && cur_thread->GetState() != ThreadState::Runnable) { | ||
| 324 | cur_thread->RequestDummyThreadWait(); | ||
| 325 | } | ||
| 326 | |||
| 317 | return cores_needing_scheduling; | 327 | return cores_needing_scheduling; |
| 318 | } | 328 | } |
| 319 | 329 | ||
| @@ -374,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) { | |||
| 374 | 384 | ||
| 375 | void KScheduler::ScheduleImpl() { | 385 | void KScheduler::ScheduleImpl() { |
| 376 | // First, clear the needs scheduling bool. | 386 | // First, clear the needs scheduling bool. |
| 377 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | 387 | m_state.needs_scheduling.store(false, std::memory_order_relaxed); |
| 388 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 378 | 389 | ||
| 379 | // Load the appropriate thread pointers for scheduling. | 390 | // Load the appropriate thread pointers for scheduling. |
| 380 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; | 391 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; |
| @@ -390,7 +401,8 @@ void KScheduler::ScheduleImpl() { | |||
| 390 | // If there aren't, we want to check if the highest priority thread is the same as the current | 401 | // If there aren't, we want to check if the highest priority thread is the same as the current |
| 391 | // thread. | 402 | // thread. |
| 392 | if (highest_priority_thread == cur_thread) { | 403 | if (highest_priority_thread == cur_thread) { |
| 393 | // If they're the same, then we can just return. | 404 | // If they're the same, then we can just issue a memory barrier and return. |
| 405 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 394 | return; | 406 | return; |
| 395 | } | 407 | } |
| 396 | 408 | ||
| @@ -466,7 +478,8 @@ void KScheduler::ScheduleImplFiber() { | |||
| 466 | 478 | ||
| 467 | // We failed to successfully do the context switch, and need to retry. | 479 | // We failed to successfully do the context switch, and need to retry. |
| 468 | // Clear needs_scheduling. | 480 | // Clear needs_scheduling. |
| 469 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | 481 | m_state.needs_scheduling.store(false, std::memory_order_relaxed); |
| 482 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 470 | 483 | ||
| 471 | // Refresh the highest priority thread. | 484 | // Refresh the highest priority thread. |
| 472 | highest_priority_thread = m_state.highest_priority_thread; | 485 | highest_priority_thread = m_state.highest_priority_thread; |
| @@ -531,11 +544,23 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa | |||
| 531 | GetPriorityQueue(kernel).Remove(thread); | 544 | GetPriorityQueue(kernel).Remove(thread); |
| 532 | IncrementScheduledCount(thread); | 545 | IncrementScheduledCount(thread); |
| 533 | SetSchedulerUpdateNeeded(kernel); | 546 | SetSchedulerUpdateNeeded(kernel); |
| 547 | |||
| 548 | if (thread->IsDummyThread()) { | ||
| 549 | // HACK: if this is a dummy thread, it should no longer wake up when the | ||
| 550 | // scheduler lock is released. | ||
| 551 | kernel.GlobalSchedulerContext().UnregisterDummyThreadForWakeup(thread); | ||
| 552 | } | ||
| 534 | } else if (cur_state == ThreadState::Runnable) { | 553 | } else if (cur_state == ThreadState::Runnable) { |
| 535 | // If we're now runnable, then we weren't previously, and we should add. | 554 | // If we're now runnable, then we weren't previously, and we should add. |
| 536 | GetPriorityQueue(kernel).PushBack(thread); | 555 | GetPriorityQueue(kernel).PushBack(thread); |
| 537 | IncrementScheduledCount(thread); | 556 | IncrementScheduledCount(thread); |
| 538 | SetSchedulerUpdateNeeded(kernel); | 557 | SetSchedulerUpdateNeeded(kernel); |
| 558 | |||
| 559 | if (thread->IsDummyThread()) { | ||
| 560 | // HACK: if this is a dummy thread, it should wake up when the scheduler | ||
| 561 | // lock is released. | ||
| 562 | kernel.GlobalSchedulerContext().RegisterDummyThreadForWakeup(thread); | ||
| 563 | } | ||
| 539 | } | 564 | } |
| 540 | } | 565 | } |
| 541 | 566 | ||
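The ScheduleImpl hunks above replace a single seq_cst store with a relaxed store plus an explicit std::atomic_thread_fence. The fence provides store-load ordering: the cleared flag is published before the scheduler state is sampled, which a seq_cst store followed by relaxed loads would not guarantee by itself. A minimal standalone sketch of the idiom (names are illustrative, not the kernel's):

    #include <atomic>

    std::atomic<bool> needs_scheduling{true};
    std::atomic<int> highest_priority{0};

    int ClearThenSample() {
        // The store itself can be relaxed...
        needs_scheduling.store(false, std::memory_order_relaxed);
        // ...the fence then keeps it ordered before the loads that follow.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        return highest_priority.load(std::memory_order_relaxed);
    }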
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 73314b45e..129d60472 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h | |||
| @@ -60,6 +60,9 @@ public: | |||
| 60 | 60 | ||
| 61 | // Release an instance of the lock. | 61 | // Release an instance of the lock. |
| 62 | if ((--lock_count) == 0) { | 62 | if ((--lock_count) == 0) { |
| 63 | // Perform a memory barrier here. | ||
| 64 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 65 | |||
| 63 | // We're no longer going to hold the lock. Take note of what cores need scheduling. | 66 | // We're no longer going to hold the lock. Take note of what cores need scheduling. |
| 64 | const u64 cores_needing_scheduling = | 67 | const u64 cores_needing_scheduling = |
| 65 | SchedulerType::UpdateHighestPriorityThreads(kernel); | 68 | SchedulerType::UpdateHighestPriorityThreads(kernel); |
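For context, the release path this hunk modifies reduces to the shape below: a reentrant lock whose final release issues a full barrier before scanning for cores that need rescheduling, so every write made under the lock is published first. UpdateCores() is a hypothetical stand-in for SchedulerType::UpdateHighestPriorityThreads:

    #include <atomic>
    #include <cstdint>

    struct SchedulerLockSketch {
        int lock_count{0};

        uint64_t UpdateCores() { return 0; }  // hypothetical stand-in

        void Lock() { ++lock_count; }

        void Unlock() {
            if ((--lock_count) == 0) {
                // Publish everything written under the lock before deciding
                // which cores need scheduling.
                std::atomic_thread_fence(std::memory_order_seq_cst);
                const uint64_t cores = UpdateCores();
                (void)cores;  // ...then interrupt/wake each core set in the mask
            }
        }
    };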
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp index e968f26ad..16968ba97 100644 --- a/src/core/hle/kernel/k_server_port.cpp +++ b/src/core/hle/kernel/k_server_port.cpp | |||
| @@ -61,12 +61,6 @@ void KServerPort::Destroy() { | |||
| 61 | 61 | ||
| 62 | // Close our reference to our parent. | 62 | // Close our reference to our parent. |
| 63 | parent->Close(); | 63 | parent->Close(); |
| 64 | |||
| 65 | // Release host emulation members. | ||
| 66 | session_handler.reset(); | ||
| 67 | |||
| 68 | // Ensure that the global list tracking server objects does not hold on to a reference. | ||
| 69 | kernel.UnregisterServerObject(this); | ||
| 70 | } | 64 | } |
| 71 | 65 | ||
| 72 | bool KServerPort::IsSignaled() const { | 66 | bool KServerPort::IsSignaled() const { |
diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h index fd4f4bd20..5fc7ee683 100644 --- a/src/core/hle/kernel/k_server_port.h +++ b/src/core/hle/kernel/k_server_port.h | |||
| @@ -27,24 +27,6 @@ public: | |||
| 27 | 27 | ||
| 28 | void Initialize(KPort* parent_port_, std::string&& name_); | 28 | void Initialize(KPort* parent_port_, std::string&& name_); |
| 29 | 29 | ||
| 30 | /// Whether or not this server port has an HLE handler available. | ||
| 31 | bool HasSessionRequestHandler() const { | ||
| 32 | return !session_handler.expired(); | ||
| 33 | } | ||
| 34 | |||
| 35 | /// Gets the HLE handler for this port. | ||
| 36 | SessionRequestHandlerWeakPtr GetSessionRequestHandler() const { | ||
| 37 | return session_handler; | ||
| 38 | } | ||
| 39 | |||
| 40 | /** | ||
| 41 | * Sets the HLE handler template for the port. ServerSessions created by connecting to this port | ||
| 42 | * will inherit a reference to this handler. | ||
| 43 | */ | ||
| 44 | void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) { | ||
| 45 | session_handler = std::move(handler); | ||
| 46 | } | ||
| 47 | |||
| 48 | void EnqueueSession(KServerSession* pending_session); | 30 | void EnqueueSession(KServerSession* pending_session); |
| 49 | 31 | ||
| 50 | KServerSession* AcceptSession(); | 32 | KServerSession* AcceptSession(); |
| @@ -65,7 +47,6 @@ private: | |||
| 65 | void CleanupSessions(); | 47 | void CleanupSessions(); |
| 66 | 48 | ||
| 67 | SessionList session_list; | 49 | SessionList session_list; |
| 68 | SessionRequestHandlerWeakPtr session_handler; | ||
| 69 | KPort* parent{}; | 50 | KPort* parent{}; |
| 70 | }; | 51 | }; |
| 71 | 52 | ||
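The accessors deleted here were a thin non-owning registry: the port held a std::weak_ptr to its HLE handler and reported liveness via expired(). A generic sketch of that pattern, with Handler standing in for SessionRequestHandler; with this commit the association instead travels with each received request (see the new ReceiveRequest signature below):

    #include <memory>
    #include <utility>

    struct Handler {};  // placeholder for SessionRequestHandler

    class PortSketch {
    public:
        bool HasHandler() const { return !handler.expired(); }
        void SetHandler(std::weak_ptr<Handler>&& h) { handler = std::move(h); }
        std::weak_ptr<Handler> GetHandler() const { return handler; }

    private:
        std::weak_ptr<Handler> handler;  // observes lifetime, never extends it
    };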
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index faf03fcc8..aa1941f01 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include <tuple> | 4 | #include <tuple> |
| @@ -33,12 +33,10 @@ KServerSession::KServerSession(KernelCore& kernel_) | |||
| 33 | 33 | ||
| 34 | KServerSession::~KServerSession() = default; | 34 | KServerSession::~KServerSession() = default; |
| 35 | 35 | ||
| 36 | void KServerSession::Initialize(KSession* parent_session_, std::string&& name_, | 36 | void KServerSession::Initialize(KSession* parent_session_, std::string&& name_) { |
| 37 | std::shared_ptr<SessionRequestManager> manager_) { | ||
| 38 | // Set member variables. | 37 | // Set member variables. |
| 39 | parent = parent_session_; | 38 | parent = parent_session_; |
| 40 | name = std::move(name_); | 39 | name = std::move(name_); |
| 41 | manager = manager_; | ||
| 42 | } | 40 | } |
| 43 | 41 | ||
| 44 | void KServerSession::Destroy() { | 42 | void KServerSession::Destroy() { |
| @@ -47,18 +45,99 @@ void KServerSession::Destroy() { | |||
| 47 | this->CleanupRequests(); | 45 | this->CleanupRequests(); |
| 48 | 46 | ||
| 49 | parent->Close(); | 47 | parent->Close(); |
| 50 | |||
| 51 | // Release host emulation members. | ||
| 52 | manager.reset(); | ||
| 53 | |||
| 54 | // Ensure that the global list tracking server objects does not hold on to a reference. | ||
| 55 | kernel.UnregisterServerObject(this); | ||
| 56 | } | 48 | } |
| 57 | 49 | ||
| 58 | void KServerSession::OnClientClosed() { | 50 | void KServerSession::OnClientClosed() { |
| 59 | if (manager && manager->HasSessionHandler()) { | 51 | KScopedLightLock lk{m_lock}; |
| 60 | manager->SessionHandler().ClientDisconnected(this); | 52 | |
| 53 | // Handle any pending requests. | ||
| 54 | KSessionRequest* prev_request = nullptr; | ||
| 55 | while (true) { | ||
| 56 | // Declare variables for processing the request. | ||
| 57 | KSessionRequest* request = nullptr; | ||
| 58 | KEvent* event = nullptr; | ||
| 59 | KThread* thread = nullptr; | ||
| 60 | bool cur_request = false; | ||
| 61 | bool terminate = false; | ||
| 62 | |||
| 63 | // Get the next request. | ||
| 64 | { | ||
| 65 | KScopedSchedulerLock sl{kernel}; | ||
| 66 | |||
| 67 | if (m_current_request != nullptr && m_current_request != prev_request) { | ||
| 68 | // Set the request, open a reference as we process it. | ||
| 69 | request = m_current_request; | ||
| 70 | request->Open(); | ||
| 71 | cur_request = true; | ||
| 72 | |||
| 73 | // Get thread and event for the request. | ||
| 74 | thread = request->GetThread(); | ||
| 75 | event = request->GetEvent(); | ||
| 76 | |||
| 77 | // If the thread is terminating, handle that. | ||
| 78 | if (thread->IsTerminationRequested()) { | ||
| 79 | request->ClearThread(); | ||
| 80 | request->ClearEvent(); | ||
| 81 | terminate = true; | ||
| 82 | } | ||
| 83 | |||
| 84 | prev_request = request; | ||
| 85 | } else if (!m_request_list.empty()) { | ||
| 86 | // Pop the request from the front of the list. | ||
| 87 | request = std::addressof(m_request_list.front()); | ||
| 88 | m_request_list.pop_front(); | ||
| 89 | |||
| 90 | // Get thread and event for the request. | ||
| 91 | thread = request->GetThread(); | ||
| 92 | event = request->GetEvent(); | ||
| 93 | } | ||
| 94 | } | ||
| 95 | |||
| 96 | // If there are no requests, we're done. | ||
| 97 | if (request == nullptr) { | ||
| 98 | break; | ||
| 99 | } | ||
| 100 | |||
| 101 | // All requests must have threads. | ||
| 102 | ASSERT(thread != nullptr); | ||
| 103 | |||
| 104 | // Ensure that we close the request when done. | ||
| 105 | SCOPE_EXIT({ request->Close(); }); | ||
| 106 | |||
| 107 | // If we're terminating, close a reference to the thread and event. | ||
| 108 | if (terminate) { | ||
| 109 | thread->Close(); | ||
| 110 | if (event != nullptr) { | ||
| 111 | event->Close(); | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | // If we need to, reply. | ||
| 116 | if (event != nullptr && !cur_request) { | ||
| 117 | // There must be no mappings. | ||
| 118 | ASSERT(request->GetSendCount() == 0); | ||
| 119 | ASSERT(request->GetReceiveCount() == 0); | ||
| 120 | ASSERT(request->GetExchangeCount() == 0); | ||
| 121 | |||
| 122 | // // Get the process and page table. | ||
| 123 | // KProcess *client_process = thread->GetOwnerProcess(); | ||
| 124 | // auto &client_pt = client_process->GetPageTable(); | ||
| 125 | |||
| 126 | // // Reply to the request. | ||
| 127 | // ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(), | ||
| 128 | // ResultSessionClosed); | ||
| 129 | |||
| 130 | // // Unlock the buffer. | ||
| 131 | // // NOTE: Nintendo does not check the result of this. | ||
| 132 | // client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize()); | ||
| 133 | |||
| 134 | // Signal the event. | ||
| 135 | event->Signal(); | ||
| 136 | } | ||
| 61 | } | 137 | } |
| 138 | |||
| 139 | // Notify. | ||
| 140 | this->NotifyAvailable(ResultSessionClosed); | ||
| 62 | } | 141 | } |
| 63 | 142 | ||
| 64 | bool KServerSession::IsSignaled() const { | 143 | bool KServerSession::IsSignaled() const { |
| @@ -73,24 +152,6 @@ bool KServerSession::IsSignaled() const { | |||
| 73 | return !m_request_list.empty() && m_current_request == nullptr; | 152 | return !m_request_list.empty() && m_current_request == nullptr; |
| 74 | } | 153 | } |
| 75 | 154 | ||
| 76 | Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { | ||
| 77 | u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; | ||
| 78 | auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread); | ||
| 79 | |||
| 80 | context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); | ||
| 81 | |||
| 82 | return manager->QueueSyncRequest(parent, std::move(context)); | ||
| 83 | } | ||
| 84 | |||
| 85 | Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { | ||
| 86 | Result result = manager->CompleteSyncRequest(this, context); | ||
| 87 | |||
| 88 | // The calling thread is waiting for this request to complete, so wake it up. | ||
| 89 | context.GetThread().EndWait(result); | ||
| 90 | |||
| 91 | return result; | ||
| 92 | } | ||
| 93 | |||
| 94 | Result KServerSession::OnRequest(KSessionRequest* request) { | 155 | Result KServerSession::OnRequest(KSessionRequest* request) { |
| 95 | // Create the wait queue. | 156 | // Create the wait queue. |
| 96 | ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; | 157 | ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; |
| @@ -105,24 +166,16 @@ Result KServerSession::OnRequest(KSessionRequest* request) { | |||
| 105 | // Check that we're not terminating. | 166 | // Check that we're not terminating. |
| 106 | R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); | 167 | R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); |
| 107 | 168 | ||
| 108 | if (manager) { | 169 | // Get whether we're empty. |
| 109 | // HLE request. | 170 | const bool was_empty = m_request_list.empty(); |
| 110 | auto& memory{kernel.System().Memory()}; | ||
| 111 | this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory); | ||
| 112 | } else { | ||
| 113 | // Non-HLE request. | ||
| 114 | |||
| 115 | // Get whether we're empty. | ||
| 116 | const bool was_empty = m_request_list.empty(); | ||
| 117 | 171 | ||
| 118 | // Add the request to the list. | 172 | // Add the request to the list. |
| 119 | request->Open(); | 173 | request->Open(); |
| 120 | m_request_list.push_back(*request); | 174 | m_request_list.push_back(*request); |
| 121 | 175 | ||
| 122 | // If we were empty, signal. | 176 | // If we were empty, signal. |
| 123 | if (was_empty) { | 177 | if (was_empty) { |
| 124 | this->NotifyAvailable(); | 178 | this->NotifyAvailable(); |
| 125 | } | ||
| 126 | } | 179 | } |
| 127 | 180 | ||
| 128 | // If we have a request event, this is asynchronous, and we don't need to wait. | 181 | // If we have a request event, this is asynchronous, and we don't need to wait. |
| @@ -136,7 +189,7 @@ Result KServerSession::OnRequest(KSessionRequest* request) { | |||
| 136 | return GetCurrentThread(kernel).GetWaitResult(); | 189 | return GetCurrentThread(kernel).GetWaitResult(); |
| 137 | } | 190 | } |
| 138 | 191 | ||
| 139 | Result KServerSession::SendReply() { | 192 | Result KServerSession::SendReply(bool is_hle) { |
| 140 | // Lock the session. | 193 | // Lock the session. |
| 141 | KScopedLightLock lk{m_lock}; | 194 | KScopedLightLock lk{m_lock}; |
| 142 | 195 | ||
| @@ -171,13 +224,18 @@ Result KServerSession::SendReply() { | |||
| 171 | Result result = ResultSuccess; | 224 | Result result = ResultSuccess; |
| 172 | if (!closed) { | 225 | if (!closed) { |
| 173 | // If we're not closed, send the reply. | 226 | // If we're not closed, send the reply. |
| 174 | Core::Memory::Memory& memory{kernel.System().Memory()}; | 227 | if (is_hle) { |
| 175 | KThread* server_thread{GetCurrentThreadPointer(kernel)}; | 228 | // HLE servers write directly to a pointer to the thread command buffer. Therefore |
| 176 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | 229 | // the reply has already been written in this case. |
| 230 | } else { | ||
| 231 | Core::Memory::Memory& memory{kernel.System().Memory()}; | ||
| 232 | KThread* server_thread{GetCurrentThreadPointer(kernel)}; | ||
| 233 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | ||
| 177 | 234 | ||
| 178 | auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); | 235 | auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); |
| 179 | auto* dst_msg_buffer = memory.GetPointer(client_message); | 236 | auto* dst_msg_buffer = memory.GetPointer(client_message); |
| 180 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); | 237 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); |
| 238 | } | ||
| 181 | } else { | 239 | } else { |
| 182 | result = ResultSessionClosed; | 240 | result = ResultSessionClosed; |
| 183 | } | 241 | } |
| @@ -223,7 +281,8 @@ Result KServerSession::SendReply() { | |||
| 223 | return result; | 281 | return result; |
| 224 | } | 282 | } |
| 225 | 283 | ||
| 226 | Result KServerSession::ReceiveRequest() { | 284 | Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context, |
| 285 | std::weak_ptr<SessionRequestManager> manager) { | ||
| 227 | // Lock the session. | 286 | // Lock the session. |
| 228 | KScopedLightLock lk{m_lock}; | 287 | KScopedLightLock lk{m_lock}; |
| 229 | 288 | ||
| @@ -267,12 +326,22 @@ Result KServerSession::ReceiveRequest() { | |||
| 267 | 326 | ||
| 268 | // Receive the message. | 327 | // Receive the message. |
| 269 | Core::Memory::Memory& memory{kernel.System().Memory()}; | 328 | Core::Memory::Memory& memory{kernel.System().Memory()}; |
| 270 | KThread* server_thread{GetCurrentThreadPointer(kernel)}; | 329 | if (out_context != nullptr) { |
| 271 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | 330 | // HLE request. |
| 331 | u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))}; | ||
| 332 | *out_context = std::make_shared<HLERequestContext>(kernel, memory, this, client_thread); | ||
| 333 | (*out_context)->SetSessionRequestManager(manager); | ||
| 334 | (*out_context) | ||
| 335 | ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(), | ||
| 336 | cmd_buf); | ||
| 337 | } else { | ||
| 338 | KThread* server_thread{GetCurrentThreadPointer(kernel)}; | ||
| 339 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | ||
| 272 | 340 | ||
| 273 | auto* src_msg_buffer = memory.GetPointer(client_message); | 341 | auto* src_msg_buffer = memory.GetPointer(client_message); |
| 274 | auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); | 342 | auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); |
| 275 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); | 343 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); |
| 344 | } | ||
| 276 | 345 | ||
| 277 | // We succeeded. | 346 | // We succeeded. |
| 278 | return ResultSuccess; | 347 | return ResultSuccess; |
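The new OnClientClosed drains outstanding requests one at a time: each iteration holds the scheduler lock only long enough to detach a request, then closes references and signals the request's event outside the lock. A reduced sketch of that loop shape with standard types standing in for the K* classes (illustrative only; the real loop also tracks prev_request so the in-flight m_current_request is visited at most once):

    #include <deque>
    #include <mutex>

    struct Event { void Signal() {} };
    struct Request { Event* event{}; };

    std::mutex session_lock;
    std::deque<Request*> request_list;

    void DrainOnClientClosed() {
        while (true) {
            Request* request = nullptr;
            {
                // Mirrors the KScopedSchedulerLock section: detach exactly one
                // request so the lock is never held across the signal.
                std::scoped_lock lk{session_lock};
                if (!request_list.empty()) {
                    request = request_list.front();
                    request_list.pop_front();
                }
            }
            if (request == nullptr) {
                break;  // nothing left pending
            }
            if (request->event != nullptr) {
                request->event->Signal();  // waiter observes the closed session
            }
        }
    }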
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h index 32135473b..6e189af8b 100644 --- a/src/core/hle/kernel/k_server_session.h +++ b/src/core/hle/kernel/k_server_session.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| @@ -16,21 +16,11 @@ | |||
| 16 | #include "core/hle/kernel/k_synchronization_object.h" | 16 | #include "core/hle/kernel/k_synchronization_object.h" |
| 17 | #include "core/hle/result.h" | 17 | #include "core/hle/result.h" |
| 18 | 18 | ||
| 19 | namespace Core::Memory { | ||
| 20 | class Memory; | ||
| 21 | } | ||
| 22 | |||
| 23 | namespace Core::Timing { | ||
| 24 | class CoreTiming; | ||
| 25 | struct EventType; | ||
| 26 | } // namespace Core::Timing | ||
| 27 | |||
| 28 | namespace Kernel { | 19 | namespace Kernel { |
| 29 | 20 | ||
| 30 | class HLERequestContext; | 21 | class HLERequestContext; |
| 31 | class KernelCore; | 22 | class KernelCore; |
| 32 | class KSession; | 23 | class KSession; |
| 33 | class SessionRequestHandler; | ||
| 34 | class SessionRequestManager; | 24 | class SessionRequestManager; |
| 35 | class KThread; | 25 | class KThread; |
| 36 | 26 | ||
| @@ -46,8 +36,7 @@ public: | |||
| 46 | 36 | ||
| 47 | void Destroy() override; | 37 | void Destroy() override; |
| 48 | 38 | ||
| 49 | void Initialize(KSession* parent_session_, std::string&& name_, | 39 | void Initialize(KSession* parent_session_, std::string&& name_); |
| 50 | std::shared_ptr<SessionRequestManager> manager_); | ||
| 51 | 40 | ||
| 52 | KSession* GetParent() { | 41 | KSession* GetParent() { |
| 53 | return parent; | 42 | return parent; |
| @@ -60,38 +49,26 @@ public: | |||
| 60 | bool IsSignaled() const override; | 49 | bool IsSignaled() const override; |
| 61 | void OnClientClosed(); | 50 | void OnClientClosed(); |
| 62 | 51 | ||
| 63 | /// Gets the session request manager, which forwards requests to the underlying service | ||
| 64 | std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() { | ||
| 65 | return manager; | ||
| 66 | } | ||
| 67 | |||
| 68 | /// TODO: flesh these out to match the real kernel | 52 | /// TODO: flesh these out to match the real kernel |
| 69 | Result OnRequest(KSessionRequest* request); | 53 | Result OnRequest(KSessionRequest* request); |
| 70 | Result SendReply(); | 54 | Result SendReply(bool is_hle = false); |
| 71 | Result ReceiveRequest(); | 55 | Result ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context = nullptr, |
| 56 | std::weak_ptr<SessionRequestManager> manager = {}); | ||
| 57 | |||
| 58 | Result SendReplyHLE() { | ||
| 59 | return SendReply(true); | ||
| 60 | } | ||
| 72 | 61 | ||
| 73 | private: | 62 | private: |
| 74 | /// Frees up waiting client sessions when this server session is about to die | 63 | /// Frees up waiting client sessions when this server session is about to die |
| 75 | void CleanupRequests(); | 64 | void CleanupRequests(); |
| 76 | 65 | ||
| 77 | /// Queues a sync request from the emulated application. | ||
| 78 | Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory); | ||
| 79 | |||
| 80 | /// Completes a sync request from the emulated application. | ||
| 81 | Result CompleteSyncRequest(HLERequestContext& context); | ||
| 82 | |||
| 83 | /// This session's HLE request handlers; if nullptr, this is not an HLE server | ||
| 84 | std::shared_ptr<SessionRequestManager> manager; | ||
| 85 | |||
| 86 | /// When set to True, converts the session to a domain at the end of the command | ||
| 87 | bool convert_to_domain{}; | ||
| 88 | |||
| 89 | /// KSession that owns this KServerSession | 66 | /// KSession that owns this KServerSession |
| 90 | KSession* parent{}; | 67 | KSession* parent{}; |
| 91 | 68 | ||
| 92 | /// List of threads which are pending a reply. | 69 | /// List of threads which are pending a reply. |
| 93 | boost::intrusive::list<KSessionRequest> m_request_list; | 70 | boost::intrusive::list<KSessionRequest> m_request_list; |
| 94 | KSessionRequest* m_current_request; | 71 | KSessionRequest* m_current_request{}; |
| 95 | 72 | ||
| 96 | KLightLock m_lock; | 73 | KLightLock m_lock; |
| 97 | }; | 74 | }; |
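With the manager removed from the session object, an HLE server now passes its SessionRequestManager at receive time and replies through the new wrapper. A hypothetical call sequence against the interface above (error handling elided; server_session and manager are assumed to already exist):

    // Hypothetical HLE service pump built on the new signatures.
    std::shared_ptr<HLERequestContext> context;
    R_TRY(server_session->ReceiveRequest(std::addressof(context), manager));

    // ... dispatch `context` to the service's command handler, which writes
    // its response directly into the thread command buffer ...

    R_TRY(server_session->SendReplyHLE());  // equivalent to SendReply(true)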
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp index ee05aa282..b6f6fe9d9 100644 --- a/src/core/hle/kernel/k_session.cpp +++ b/src/core/hle/kernel/k_session.cpp | |||
| @@ -13,8 +13,7 @@ KSession::KSession(KernelCore& kernel_) | |||
| 13 | : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} | 13 | : KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {} |
| 14 | KSession::~KSession() = default; | 14 | KSession::~KSession() = default; |
| 15 | 15 | ||
| 16 | void KSession::Initialize(KClientPort* port_, const std::string& name_, | 16 | void KSession::Initialize(KClientPort* port_, const std::string& name_) { |
| 17 | std::shared_ptr<SessionRequestManager> manager_) { | ||
| 18 | // Increment reference count. | 17 | // Increment reference count. |
| 19 | // Because reference count is one on creation, this will result | 18 | // Because reference count is one on creation, this will result |
| 20 | // in a reference count of two. Thus, when both server and client are closed | 19 | // in a reference count of two. Thus, when both server and client are closed |
| @@ -26,7 +25,7 @@ void KSession::Initialize(KClientPort* port_, const std::string& name_, | |||
| 26 | KAutoObject::Create(std::addressof(client)); | 25 | KAutoObject::Create(std::addressof(client)); |
| 27 | 26 | ||
| 28 | // Initialize our sub sessions. | 27 | // Initialize our sub sessions. |
| 29 | server.Initialize(this, name_ + ":Server", manager_); | 28 | server.Initialize(this, name_ + ":Server"); |
| 30 | client.Initialize(this, name_ + ":Client"); | 29 | client.Initialize(this, name_ + ":Client"); |
| 31 | 30 | ||
| 32 | // Set state and name. | 31 | // Set state and name. |
| @@ -77,7 +76,7 @@ void KSession::OnClientClosed() { | |||
| 77 | void KSession::PostDestroy(uintptr_t arg) { | 76 | void KSession::PostDestroy(uintptr_t arg) { |
| 78 | // Release the session count resource the owner process holds. | 77 | // Release the session count resource the owner process holds. |
| 79 | KProcess* owner = reinterpret_cast<KProcess*>(arg); | 78 | KProcess* owner = reinterpret_cast<KProcess*>(arg); |
| 80 | owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1); | 79 | owner->GetResourceLimit()->Release(LimitableResource::SessionCountMax, 1); |
| 81 | owner->Close(); | 80 | owner->Close(); |
| 82 | } | 81 | } |
| 83 | 82 | ||
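The initialization comment above encodes the session's lifetime rule: the object starts with one reference and Initialize opens a second, so destruction happens only after both the client and server ends have closed. A toy model of that counting with plain integers in place of the KAutoObject machinery:

    #include <cassert>

    struct SessionLifetime {
        int ref_count{1};  // one reference exists on creation

        void Open() { ++ref_count; }
        bool Close() { return --ref_count == 0; }  // true => destroy now
    };

    int main() {
        SessionLifetime session;
        session.Open();            // Initialize bumps the count to two
        assert(!session.Close());  // first end closes: object survives
        assert(session.Close());   // second end closes: object is destroyed
    }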
diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h index c6ead403b..93e5e6f71 100644 --- a/src/core/hle/kernel/k_session.h +++ b/src/core/hle/kernel/k_session.h | |||
| @@ -21,8 +21,7 @@ public: | |||
| 21 | explicit KSession(KernelCore& kernel_); | 21 | explicit KSession(KernelCore& kernel_); |
| 22 | ~KSession() override; | 22 | ~KSession() override; |
| 23 | 23 | ||
| 24 | void Initialize(KClientPort* port_, const std::string& name_, | 24 | void Initialize(KClientPort* port_, const std::string& name_); |
| 25 | std::shared_ptr<SessionRequestManager> manager_ = nullptr); | ||
| 26 | 25 | ||
| 27 | void Finalize() override; | 26 | void Finalize() override; |
| 28 | 27 | ||
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index a039cc591..10cd4c43d 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp | |||
| @@ -14,7 +14,7 @@ namespace Kernel { | |||
| 14 | KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | 14 | KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} |
| 15 | 15 | ||
| 16 | KSharedMemory::~KSharedMemory() { | 16 | KSharedMemory::~KSharedMemory() { |
| 17 | kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size); | 17 | kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size); |
| 18 | } | 18 | } |
| 19 | 19 | ||
| 20 | Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, | 20 | Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, |
| @@ -35,7 +35,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 35 | KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); | 35 | KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); |
| 36 | 36 | ||
| 37 | // Reserve memory for ourselves. | 37 | // Reserve memory for ourselves. |
| 38 | KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemory, | 38 | KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax, |
| 39 | size_); | 39 | size_); |
| 40 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 40 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 41 | 41 | ||
| @@ -57,7 +57,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 57 | 57 | ||
| 58 | void KSharedMemory::Finalize() { | 58 | void KSharedMemory::Finalize() { |
| 59 | // Release the memory reservation. | 59 | // Release the memory reservation. |
| 60 | resource_limit->Release(LimitableResource::PhysicalMemory, size); | 60 | resource_limit->Release(LimitableResource::PhysicalMemoryMax, size); |
| 61 | resource_limit->Close(); | 61 | resource_limit->Close(); |
| 62 | 62 | ||
| 63 | // Perform inherited finalization. | 63 | // Perform inherited finalization. |
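KScopedResourceReservation, used above with the renamed PhysicalMemoryMax limit, follows the usual reserve-on-construct, release-unless-committed RAII shape. A minimal sketch under assumed semantics (the real class lives elsewhere in the kernel and may differ in detail):

    #include <cstddef>

    // Stand-in for a resource limit: just a reserved-amount counter.
    struct LimitSketch {
        size_t used{};
        size_t max{};
        bool Reserve(size_t n) {
            if (used + n > max) {
                return false;
            }
            used += n;
            return true;
        }
        void Release(size_t n) { used -= n; }
    };

    class ScopedReservationSketch {
    public:
        ScopedReservationSketch(LimitSketch& l, size_t n)
            : limit{l}, size{n}, succeeded{l.Reserve(n)} {}
        ~ScopedReservationSketch() {
            if (succeeded && !committed) {
                limit.Release(size);  // roll back on any early-exit path
            }
        }
        bool Succeeded() const { return succeeded; }
        void Commit() { committed = true; }  // the object now owns the reservation

    private:
        LimitSketch& limit;
        size_t size;
        bool succeeded;
        bool committed{};
    };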
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp new file mode 100644 index 000000000..4cc377a6c --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.cpp | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "core/hle/kernel/k_system_resource.h" | ||
| 5 | |||
| 6 | namespace Kernel { | ||
| 7 | |||
| 8 | Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size, | ||
| 9 | [[maybe_unused]] KResourceLimit* resource_limit, | ||
| 10 | [[maybe_unused]] KMemoryManager::Pool pool) { | ||
| 11 | // Unimplemented | ||
| 12 | UNREACHABLE(); | ||
| 13 | } | ||
| 14 | |||
| 15 | void KSecureSystemResource::Finalize() { | ||
| 16 | // Unimplemented | ||
| 17 | UNREACHABLE(); | ||
| 18 | } | ||
| 19 | |||
| 20 | size_t KSecureSystemResource::CalculateRequiredSecureMemorySize( | ||
| 21 | [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) { | ||
| 22 | // Unimplemented | ||
| 23 | UNREACHABLE(); | ||
| 24 | } | ||
| 25 | |||
| 26 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h new file mode 100644 index 000000000..9a991f725 --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.h | |||
| @@ -0,0 +1,137 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "common/assert.h" | ||
| 7 | #include "common/common_types.h" | ||
| 8 | #include "core/hle/kernel/k_auto_object.h" | ||
| 9 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 11 | #include "core/hle/kernel/k_page_table_manager.h" | ||
| 12 | #include "core/hle/kernel/k_resource_limit.h" | ||
| 13 | #include "core/hle/kernel/slab_helpers.h" | ||
| 14 | |||
| 15 | namespace Kernel { | ||
| 16 | |||
| 17 | // NOTE: Nintendo's implementation does not have the "is_secure_resource" field, and instead uses | ||
| 18 | // virtual IsSecureResource(). | ||
| 19 | |||
| 20 | class KSystemResource : public KAutoObject { | ||
| 21 | KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); | ||
| 22 | |||
| 23 | public: | ||
| 24 | explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} | ||
| 25 | |||
| 26 | protected: | ||
| 27 | void SetSecureResource() { | ||
| 28 | m_is_secure_resource = true; | ||
| 29 | } | ||
| 30 | |||
| 31 | public: | ||
| 32 | virtual void Destroy() override { | ||
| 33 | UNREACHABLE_MSG("KSystemResource::Destroy() was called"); | ||
| 34 | } | ||
| 35 | |||
| 36 | bool IsSecureResource() const { | ||
| 37 | return m_is_secure_resource; | ||
| 38 | } | ||
| 39 | |||
| 40 | void SetManagers(KMemoryBlockSlabManager& mb, KBlockInfoManager& bi, KPageTableManager& pt) { | ||
| 41 | ASSERT(m_p_memory_block_slab_manager == nullptr); | ||
| 42 | ASSERT(m_p_block_info_manager == nullptr); | ||
| 43 | ASSERT(m_p_page_table_manager == nullptr); | ||
| 44 | |||
| 45 | m_p_memory_block_slab_manager = std::addressof(mb); | ||
| 46 | m_p_block_info_manager = std::addressof(bi); | ||
| 47 | m_p_page_table_manager = std::addressof(pt); | ||
| 48 | } | ||
| 49 | |||
| 50 | const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const { | ||
| 51 | return *m_p_memory_block_slab_manager; | ||
| 52 | } | ||
| 53 | const KBlockInfoManager& GetBlockInfoManager() const { | ||
| 54 | return *m_p_block_info_manager; | ||
| 55 | } | ||
| 56 | const KPageTableManager& GetPageTableManager() const { | ||
| 57 | return *m_p_page_table_manager; | ||
| 58 | } | ||
| 59 | |||
| 60 | KMemoryBlockSlabManager& GetMemoryBlockSlabManager() { | ||
| 61 | return *m_p_memory_block_slab_manager; | ||
| 62 | } | ||
| 63 | KBlockInfoManager& GetBlockInfoManager() { | ||
| 64 | return *m_p_block_info_manager; | ||
| 65 | } | ||
| 66 | KPageTableManager& GetPageTableManager() { | ||
| 67 | return *m_p_page_table_manager; | ||
| 68 | } | ||
| 69 | |||
| 70 | KMemoryBlockSlabManager* GetMemoryBlockSlabManagerPointer() { | ||
| 71 | return m_p_memory_block_slab_manager; | ||
| 72 | } | ||
| 73 | KBlockInfoManager* GetBlockInfoManagerPointer() { | ||
| 74 | return m_p_block_info_manager; | ||
| 75 | } | ||
| 76 | KPageTableManager* GetPageTableManagerPointer() { | ||
| 77 | return m_p_page_table_manager; | ||
| 78 | } | ||
| 79 | |||
| 80 | private: | ||
| 81 | KMemoryBlockSlabManager* m_p_memory_block_slab_manager{}; | ||
| 82 | KBlockInfoManager* m_p_block_info_manager{}; | ||
| 83 | KPageTableManager* m_p_page_table_manager{}; | ||
| 84 | bool m_is_secure_resource{false}; | ||
| 85 | }; | ||
| 86 | |||
| 87 | class KSecureSystemResource final | ||
| 88 | : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { | ||
| 89 | public: | ||
| 90 | explicit KSecureSystemResource(KernelCore& kernel_) | ||
| 91 | : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { | ||
| 92 | // Mark ourselves as being a secure resource. | ||
| 93 | this->SetSecureResource(); | ||
| 94 | } | ||
| 95 | |||
| 96 | Result Initialize(size_t size, KResourceLimit* resource_limit, KMemoryManager::Pool pool); | ||
| 97 | void Finalize(); | ||
| 98 | |||
| 99 | bool IsInitialized() const { | ||
| 100 | return m_is_initialized; | ||
| 101 | } | ||
| 102 | static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | ||
| 103 | |||
| 104 | size_t CalculateRequiredSecureMemorySize() const { | ||
| 105 | return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool); | ||
| 106 | } | ||
| 107 | |||
| 108 | size_t GetSize() const { | ||
| 109 | return m_resource_size; | ||
| 110 | } | ||
| 111 | size_t GetUsedSize() const { | ||
| 112 | return m_dynamic_page_manager.GetUsed() * PageSize; | ||
| 113 | } | ||
| 114 | |||
| 115 | const KDynamicPageManager& GetDynamicPageManager() const { | ||
| 116 | return m_dynamic_page_manager; | ||
| 117 | } | ||
| 118 | |||
| 119 | public: | ||
| 120 | static size_t CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool); | ||
| 121 | |||
| 122 | private: | ||
| 123 | bool m_is_initialized{}; | ||
| 124 | KMemoryManager::Pool m_resource_pool{}; | ||
| 125 | KDynamicPageManager m_dynamic_page_manager; | ||
| 126 | KMemoryBlockSlabManager m_memory_block_slab_manager; | ||
| 127 | KBlockInfoManager m_block_info_manager; | ||
| 128 | KPageTableManager m_page_table_manager; | ||
| 129 | KMemoryBlockSlabHeap m_memory_block_heap; | ||
| 130 | KBlockInfoSlabHeap m_block_info_heap; | ||
| 131 | KPageTableSlabHeap m_page_table_heap; | ||
| 132 | KResourceLimit* m_resource_limit{}; | ||
| 133 | VAddr m_resource_address{}; | ||
| 134 | size_t m_resource_size{}; | ||
| 135 | }; | ||
| 136 | |||
| 137 | } // namespace Kernel | ||
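KSystemResource is, in effect, one handle bundling the three managers a page table needs. A hypothetical wiring sketch against the header above, mirroring what the kernel.cpp hunks later in this diff do for the application pool (the kernel reference and manager objects are assumed to already exist):

    auto resource = std::make_unique<KSystemResource>(kernel);
    resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
                          *app_page_table_manager);

    // Consumers take the bundle instead of three separate pointers:
    KPageTableManager& pt_manager = resource->GetPageTableManager();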
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index b7bfcdce3..21207fe99 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -148,7 +148,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack | |||
| 148 | physical_affinity_mask.SetAffinity(phys_core, true); | 148 | physical_affinity_mask.SetAffinity(phys_core, true); |
| 149 | 149 | ||
| 150 | // Set the thread state. | 150 | // Set the thread state. |
| 151 | thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized; | 151 | thread_state = (type == ThreadType::Main || type == ThreadType::Dummy) |
| 152 | ? ThreadState::Runnable | ||
| 153 | : ThreadState::Initialized; | ||
| 152 | 154 | ||
| 153 | // Set TLS address. | 155 | // Set TLS address. |
| 154 | tls_address = 0; | 156 | tls_address = 0; |
| @@ -261,9 +263,9 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ | |||
| 261 | R_SUCCEED(); | 263 | R_SUCCEED(); |
| 262 | } | 264 | } |
| 263 | 265 | ||
| 264 | Result KThread::InitializeDummyThread(KThread* thread) { | 266 | Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) { |
| 265 | // Initialize the thread. | 267 | // Initialize the thread. |
| 266 | R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy)); | 268 | R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy)); |
| 267 | 269 | ||
| 268 | // Initialize emulation parameters. | 270 | // Initialize emulation parameters. |
| 269 | thread->stack_parameters.disable_count = 0; | 271 | thread->stack_parameters.disable_count = 0; |
| @@ -301,7 +303,7 @@ void KThread::PostDestroy(uintptr_t arg) { | |||
| 301 | const bool resource_limit_release_hint = (arg & 1); | 303 | const bool resource_limit_release_hint = (arg & 1); |
| 302 | const s64 hint_value = (resource_limit_release_hint ? 0 : 1); | 304 | const s64 hint_value = (resource_limit_release_hint ? 0 : 1); |
| 303 | if (owner != nullptr) { | 305 | if (owner != nullptr) { |
| 304 | owner->GetResourceLimit()->Release(LimitableResource::Threads, 1, hint_value); | 306 | owner->GetResourceLimit()->Release(LimitableResource::ThreadCountMax, 1, hint_value); |
| 305 | owner->Close(); | 307 | owner->Close(); |
| 306 | } | 308 | } |
| 307 | } | 309 | } |
| @@ -1052,7 +1054,7 @@ void KThread::Exit() { | |||
| 1052 | 1054 | ||
| 1053 | // Release the thread resource hint, running thread count from parent. | 1055 | // Release the thread resource hint, running thread count from parent. |
| 1054 | if (parent != nullptr) { | 1056 | if (parent != nullptr) { |
| 1055 | parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1); | 1057 | parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1); |
| 1056 | resource_limit_release_hint = true; | 1058 | resource_limit_release_hint = true; |
| 1057 | parent->DecrementRunningThreadCount(); | 1059 | parent->DecrementRunningThreadCount(); |
| 1058 | } | 1060 | } |
| @@ -1174,30 +1176,31 @@ Result KThread::Sleep(s64 timeout) { | |||
| 1174 | R_SUCCEED(); | 1176 | R_SUCCEED(); |
| 1175 | } | 1177 | } |
| 1176 | 1178 | ||
| 1177 | void KThread::IfDummyThreadTryWait() { | 1179 | void KThread::RequestDummyThreadWait() { |
| 1178 | if (!IsDummyThread()) { | 1180 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); |
| 1179 | return; | 1181 | ASSERT(this->IsDummyThread()); |
| 1180 | } | 1182 | |
| 1183 | // We will block when the scheduler lock is released. | ||
| 1184 | dummy_thread_runnable.store(false); | ||
| 1185 | } | ||
| 1181 | 1186 | ||
| 1182 | if (GetState() != ThreadState::Waiting) { | 1187 | void KThread::DummyThreadBeginWait() { |
| 1188 | if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) { | ||
| 1189 | // Occurs in single core mode. | ||
| 1183 | return; | 1190 | return; |
| 1184 | } | 1191 | } |
| 1185 | 1192 | ||
| 1186 | ASSERT(!kernel.IsPhantomModeForSingleCore()); | 1193 | // Block until runnable is no longer false. |
| 1187 | 1194 | dummy_thread_runnable.wait(false); | |
| 1188 | // Block until we are no longer waiting. | ||
| 1189 | std::unique_lock lk(dummy_wait_lock); | ||
| 1190 | dummy_wait_cv.wait( | ||
| 1191 | lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); }); | ||
| 1192 | } | 1195 | } |
| 1193 | 1196 | ||
| 1194 | void KThread::IfDummyThreadEndWait() { | 1197 | void KThread::DummyThreadEndWait() { |
| 1195 | if (!IsDummyThread()) { | 1198 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); |
| 1196 | return; | 1199 | ASSERT(this->IsDummyThread()); |
| 1197 | } | ||
| 1198 | 1200 | ||
| 1199 | // Wake up the waiting thread. | 1201 | // Wake up the waiting thread. |
| 1200 | dummy_wait_cv.notify_one(); | 1202 | dummy_thread_runnable.store(true); |
| 1203 | dummy_thread_runnable.notify_one(); | ||
| 1201 | } | 1204 | } |
| 1202 | 1205 | ||
| 1203 | void KThread::BeginWait(KThreadQueue* queue) { | 1206 | void KThread::BeginWait(KThreadQueue* queue) { |
| @@ -1231,9 +1234,6 @@ void KThread::EndWait(Result wait_result_) { | |||
| 1231 | } | 1234 | } |
| 1232 | 1235 | ||
| 1233 | wait_queue->EndWait(this, wait_result_); | 1236 | wait_queue->EndWait(this, wait_result_); |
| 1234 | |||
| 1235 | // Special case for dummy threads to wakeup if necessary. | ||
| 1236 | IfDummyThreadEndWait(); | ||
| 1237 | } | 1237 | } |
| 1238 | } | 1238 | } |
| 1239 | 1239 | ||
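The dummy-thread rework above replaces a mutex/condition_variable pair with a single C++20 std::atomic<bool> using wait/notify_one: the decision to block is recorded while the scheduler lock is held (RequestDummyThreadWait) and the actual block happens after the lock is released (DummyThreadBeginWait). A standalone sketch of the primitive:

    #include <atomic>
    #include <thread>

    std::atomic<bool> runnable{true};

    void RequestWait() {       // recorded while the "lock" is held
        runnable.store(false);
    }

    void BeginWait() {         // performed after the lock is released
        runnable.wait(false);  // blocks only while the value is false
    }

    void EndWait() {           // performed by the waking thread
        runnable.store(true);
        runnable.notify_one();
    }

    int main() {
        RequestWait();  // mark not-runnable before the waiter starts
        std::thread waiter{[] { BeginWait(); }};
        EndWait();      // no lost wakeup: wait() re-checks the value
        waiter.join();
    }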
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index e2a27d603..f38c92bff 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -415,7 +415,7 @@ public: | |||
| 415 | 415 | ||
| 416 | static void PostDestroy(uintptr_t arg); | 416 | static void PostDestroy(uintptr_t arg); |
| 417 | 417 | ||
| 418 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread); | 418 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner); |
| 419 | 419 | ||
| 420 | [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, | 420 | [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, |
| 421 | s32 virt_core); | 421 | s32 virt_core); |
| @@ -643,8 +643,9 @@ public: | |||
| 643 | // therefore will not block on guest kernel synchronization primitives. These methods handle | 643 | // therefore will not block on guest kernel synchronization primitives. These methods handle |
| 644 | // blocking as needed. | 644 | // blocking as needed. |
| 645 | 645 | ||
| 646 | void IfDummyThreadTryWait(); | 646 | void RequestDummyThreadWait(); |
| 647 | void IfDummyThreadEndWait(); | 647 | void DummyThreadBeginWait(); |
| 648 | void DummyThreadEndWait(); | ||
| 648 | 649 | ||
| 649 | [[nodiscard]] uintptr_t GetArgument() const { | 650 | [[nodiscard]] uintptr_t GetArgument() const { |
| 650 | return argument; | 651 | return argument; |
| @@ -777,8 +778,7 @@ private: | |||
| 777 | bool is_single_core{}; | 778 | bool is_single_core{}; |
| 778 | ThreadType thread_type{}; | 779 | ThreadType thread_type{}; |
| 779 | StepState step_state{}; | 780 | StepState step_state{}; |
| 780 | std::mutex dummy_wait_lock; | 781 | std::atomic<bool> dummy_thread_runnable{true}; |
| 781 | std::condition_variable dummy_wait_cv; | ||
| 782 | 782 | ||
| 783 | // For debugging | 783 | // For debugging |
| 784 | std::vector<KSynchronizationObject*> wait_objects_for_debugging; | 784 | std::vector<KSynchronizationObject*> wait_objects_for_debugging; |
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp index b0320eb73..9f34c2d46 100644 --- a/src/core/hle/kernel/k_transfer_memory.cpp +++ b/src/core/hle/kernel/k_transfer_memory.cpp | |||
| @@ -37,7 +37,7 @@ void KTransferMemory::Finalize() { | |||
| 37 | 37 | ||
| 38 | void KTransferMemory::PostDestroy(uintptr_t arg) { | 38 | void KTransferMemory::PostDestroy(uintptr_t arg) { |
| 39 | KProcess* owner = reinterpret_cast<KProcess*>(arg); | 39 | KProcess* owner = reinterpret_cast<KProcess*>(arg); |
| 40 | owner->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1); | 40 | owner->GetResourceLimit()->Release(LimitableResource::TransferMemoryCountMax, 1); |
| 41 | owner->Close(); | 41 | owner->Close(); |
| 42 | } | 42 | } |
| 43 | 43 | ||
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index fdc774e30..b77723503 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -28,10 +28,12 @@ | |||
| 28 | #include "core/hle/kernel/k_handle_table.h" | 28 | #include "core/hle/kernel/k_handle_table.h" |
| 29 | #include "core/hle/kernel/k_memory_layout.h" | 29 | #include "core/hle/kernel/k_memory_layout.h" |
| 30 | #include "core/hle/kernel/k_memory_manager.h" | 30 | #include "core/hle/kernel/k_memory_manager.h" |
| 31 | #include "core/hle/kernel/k_page_buffer.h" | ||
| 31 | #include "core/hle/kernel/k_process.h" | 32 | #include "core/hle/kernel/k_process.h" |
| 32 | #include "core/hle/kernel/k_resource_limit.h" | 33 | #include "core/hle/kernel/k_resource_limit.h" |
| 33 | #include "core/hle/kernel/k_scheduler.h" | 34 | #include "core/hle/kernel/k_scheduler.h" |
| 34 | #include "core/hle/kernel/k_shared_memory.h" | 35 | #include "core/hle/kernel/k_shared_memory.h" |
| 36 | #include "core/hle/kernel/k_system_resource.h" | ||
| 35 | #include "core/hle/kernel/k_thread.h" | 37 | #include "core/hle/kernel/k_thread.h" |
| 36 | #include "core/hle/kernel/k_worker_task_manager.h" | 38 | #include "core/hle/kernel/k_worker_task_manager.h" |
| 37 | #include "core/hle/kernel/kernel.h" | 39 | #include "core/hle/kernel/kernel.h" |
| @@ -47,6 +49,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); | |||
| 47 | namespace Kernel { | 49 | namespace Kernel { |
| 48 | 50 | ||
| 49 | struct KernelCore::Impl { | 51 | struct KernelCore::Impl { |
| 52 | static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | ||
| 53 | static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000; | ||
| 54 | static constexpr size_t BlockInfoSlabHeapSize = 4000; | ||
| 55 | static constexpr size_t ReservedDynamicPageCount = 64; | ||
| 56 | |||
| 50 | explicit Impl(Core::System& system_, KernelCore& kernel_) | 57 | explicit Impl(Core::System& system_, KernelCore& kernel_) |
| 51 | : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, | 58 | : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, |
| 52 | service_thread_barrier{2}, system{system_} {} | 59 | service_thread_barrier{2}, system{system_} {} |
| @@ -60,7 +67,6 @@ struct KernelCore::Impl { | |||
| 60 | global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); | 67 | global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); |
| 61 | global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel); | 68 | global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel); |
| 62 | global_handle_table->Initialize(KHandleTable::MaxTableSize); | 69 | global_handle_table->Initialize(KHandleTable::MaxTableSize); |
| 63 | default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread"); | ||
| 64 | 70 | ||
| 65 | is_phantom_mode_for_singlecore = false; | 71 | is_phantom_mode_for_singlecore = false; |
| 66 | 72 | ||
| @@ -72,7 +78,6 @@ struct KernelCore::Impl { | |||
| 72 | // Initialize kernel memory and resources. | 78 | // Initialize kernel memory and resources. |
| 73 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); | 79 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 74 | InitializeMemoryLayout(); | 80 | InitializeMemoryLayout(); |
| 75 | Init::InitializeKPageBufferSlabHeap(system); | ||
| 76 | InitializeShutdownThreads(); | 81 | InitializeShutdownThreads(); |
| 77 | InitializePhysicalCores(); | 82 | InitializePhysicalCores(); |
| 78 | InitializePreemption(kernel); | 83 | InitializePreemption(kernel); |
| @@ -82,10 +87,13 @@ struct KernelCore::Impl { | |||
| 82 | const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); | 87 | const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); |
| 83 | ASSERT(pt_heap_region.GetEndAddress() != 0); | 88 | ASSERT(pt_heap_region.GetEndAddress() != 0); |
| 84 | 89 | ||
| 85 | InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); | 90 | InitializeResourceManagers(kernel, pt_heap_region.GetAddress(), |
| 91 | pt_heap_region.GetSize()); | ||
| 86 | } | 92 | } |
| 87 | 93 | ||
| 88 | RegisterHostThread(); | 94 | RegisterHostThread(nullptr); |
| 95 | |||
| 96 | default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread"); | ||
| 89 | } | 97 | } |
| 90 | 98 | ||
| 91 | void InitializeCores() { | 99 | void InitializeCores() { |
| @@ -184,17 +192,6 @@ struct KernelCore::Impl { | |||
| 184 | } | 192 | } |
| 185 | 193 | ||
| 186 | void CloseServices() { | 194 | void CloseServices() { |
| 187 | // Close all open server sessions and ports. | ||
| 188 | std::unordered_set<KAutoObject*> server_objects_; | ||
| 189 | { | ||
| 190 | std::scoped_lock lk(server_objects_lock); | ||
| 191 | server_objects_ = server_objects; | ||
| 192 | server_objects.clear(); | ||
| 193 | } | ||
| 194 | for (auto* server_object : server_objects_) { | ||
| 195 | server_object->Close(); | ||
| 196 | } | ||
| 197 | |||
| 198 | // Ensures all service threads gracefully shutdown. | 195 | // Ensures all service threads gracefully shutdown. |
| 199 | ClearServiceThreads(); | 196 | ClearServiceThreads(); |
| 200 | } | 197 | } |
| @@ -232,18 +229,22 @@ struct KernelCore::Impl { | |||
| 232 | const auto kernel_size{sizes.second}; | 229 | const auto kernel_size{sizes.second}; |
| 233 | 230 | ||
| 234 | // If setting the default system values fails, then something seriously wrong has occurred. | 231 | // If setting the default system values fails, then something seriously wrong has occurred. |
| 235 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size) | 232 | ASSERT( |
| 233 | system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemoryMax, total_size) | ||
| 234 | .IsSuccess()); | ||
| 235 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::ThreadCountMax, 800) | ||
| 236 | .IsSuccess()); | ||
| 237 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::EventCountMax, 900) | ||
| 238 | .IsSuccess()); | ||
| 239 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemoryCountMax, 200) | ||
| 236 | .IsSuccess()); | 240 | .IsSuccess()); |
| 237 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess()); | 241 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::SessionCountMax, 1133) |
| 238 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess()); | ||
| 239 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200) | ||
| 240 | .IsSuccess()); | 242 | .IsSuccess()); |
| 241 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess()); | 243 | system_resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, kernel_size); |
| 242 | system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size); | ||
| 243 | 244 | ||
| 244 | // Reserve secure applet memory, introduced in firmware 5.0.0 | 245 | // Reserve secure applet memory, introduced in firmware 5.0.0 |
| 245 | constexpr u64 secure_applet_memory_size{4_MiB}; | 246 | constexpr u64 secure_applet_memory_size{4_MiB}; |
| 246 | ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory, | 247 | ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, |
| 247 | secure_applet_memory_size)); | 248 | secure_applet_memory_size)); |
| 248 | } | 249 | } |
| 249 | 250 | ||
| @@ -263,16 +264,82 @@ struct KernelCore::Impl { | |||
| 263 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); | 264 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); |
| 264 | } | 265 | } |
| 265 | 266 | ||
| 266 | void InitializeResourceManagers(VAddr address, size_t size) { | 267 | void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) { |
| 267 | dynamic_page_manager = std::make_unique<KDynamicPageManager>(); | 268 | // Ensure that the buffer is suitable for our use. |
| 268 | memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | 269 | ASSERT(Common::IsAligned(address, PageSize)); |
| 270 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 271 | |||
| 272 | // Ensure that we have space for our reference counts. | ||
| 273 | const size_t rc_size = | ||
| 274 | Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize); | ||
| 275 | ASSERT(rc_size < size); | ||
| 276 | size -= rc_size; | ||
| 277 | |||
| 278 | // Initialize the resource managers' shared page manager. | ||
| 279 | resource_manager_page_manager = std::make_unique<KDynamicPageManager>(); | ||
| 280 | resource_manager_page_manager->Initialize( | ||
| 281 | address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize)); | ||
| 282 | |||
| 283 | // Initialize the KPageBuffer slab heap. | ||
| 284 | page_buffer_slab_heap.Initialize(system); | ||
| 285 | |||
| 286 | // Initialize the fixed-size slab heaps. | ||
| 287 | app_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||
| 288 | sys_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||
| 289 | block_info_heap = std::make_unique<KBlockInfoSlabHeap>(); | ||
| 290 | app_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||
| 291 | ApplicationMemoryBlockSlabHeapSize); | ||
| 292 | sys_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||
| 293 | SystemMemoryBlockSlabHeapSize); | ||
| 294 | block_info_heap->Initialize(resource_manager_page_manager.get(), BlockInfoSlabHeapSize); | ||
| 295 | |||
| 296 | // Reserve all but a fixed number of remaining pages for the page table heap. | ||
| 297 | const size_t num_pt_pages = resource_manager_page_manager->GetCount() - | ||
| 298 | resource_manager_page_manager->GetUsed() - | ||
| 299 | ReservedDynamicPageCount; | ||
| 300 | page_table_heap = std::make_unique<KPageTableSlabHeap>(); | ||
| 301 | |||
| 302 | // TODO(bunnei): Pass in address once we support kernel virtual memory allocations. | ||
| 303 | page_table_heap->Initialize( | ||
| 304 | resource_manager_page_manager.get(), num_pt_pages, | ||
| 305 | /*GetPointer<KPageTableManager::RefCount>(address + size)*/ nullptr); | ||
| 306 | |||
| 307 | // Setup the slab managers. | ||
| 308 | KDynamicPageManager* const app_dynamic_page_manager = nullptr; | ||
| 309 | KDynamicPageManager* const sys_dynamic_page_manager = | ||
| 310 | /*KTargetSystem::IsDynamicResourceLimitsEnabled()*/ true | ||
| 311 | ? resource_manager_page_manager.get() | ||
| 312 | : nullptr; | ||
| 269 | app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | 313 | app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); |
| 270 | 314 | sys_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | |
| 271 | dynamic_page_manager->Initialize(address, size); | 315 | app_block_info_manager = std::make_unique<KBlockInfoManager>(); |
| 272 | static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | 316 | sys_block_info_manager = std::make_unique<KBlockInfoManager>(); |
| 273 | memory_block_heap->Initialize(dynamic_page_manager.get(), | 317 | app_page_table_manager = std::make_unique<KPageTableManager>(); |
| 274 | ApplicationMemoryBlockSlabHeapSize); | 318 | sys_page_table_manager = std::make_unique<KPageTableManager>(); |
| 275 | app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); | 319 | |
| 320 | app_memory_block_manager->Initialize(app_dynamic_page_manager, app_memory_block_heap.get()); | ||
| 321 | sys_memory_block_manager->Initialize(sys_dynamic_page_manager, sys_memory_block_heap.get()); | ||
| 322 | |||
| 323 | app_block_info_manager->Initialize(app_dynamic_page_manager, block_info_heap.get()); | ||
| 324 | sys_block_info_manager->Initialize(sys_dynamic_page_manager, block_info_heap.get()); | ||
| 325 | |||
| 326 | app_page_table_manager->Initialize(app_dynamic_page_manager, page_table_heap.get()); | ||
| 327 | sys_page_table_manager->Initialize(sys_dynamic_page_manager, page_table_heap.get()); | ||
| 328 | |||
| 329 | // Check that we have the correct number of dynamic pages available. | ||
| 330 | ASSERT(resource_manager_page_manager->GetCount() - | ||
| 331 | resource_manager_page_manager->GetUsed() == | ||
| 332 | ReservedDynamicPageCount); | ||
| 333 | |||
| 334 | // Create the system page table managers. | ||
| 335 | app_system_resource = std::make_unique<KSystemResource>(kernel); | ||
| 336 | sys_system_resource = std::make_unique<KSystemResource>(kernel); | ||
| 337 | |||
| 338 | // Set the managers for the system resources. | ||
| 339 | app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager, | ||
| 340 | *app_page_table_manager); | ||
| 341 | sys_system_resource->SetManagers(*sys_memory_block_manager, *sys_block_info_manager, | ||
| 342 | *sys_page_table_manager); | ||
| 276 | } | 343 | } |
| 277 | 344 | ||
| 278 | void InitializeShutdownThreads() { | 345 | void InitializeShutdownThreads() { |
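The rewritten InitializeResourceManagers carves one page-aligned buffer into fixed-size slab heaps, hands every remaining page except a fixed reserve to the page-table heap, and then asserts that exactly ReservedDynamicPageCount pages are left. The accounting reduces to:

    #include <cassert>
    #include <cstddef>

    constexpr size_t ReservedDynamicPageCount = 64;  // from the hunk above

    size_t PageTableHeapPages(size_t total_pages, size_t used_pages) {
        // Everything not consumed by the fixed-size heaps, minus the reserve.
        return total_pages - used_pages - ReservedDynamicPageCount;
    }

    int main() {
        const size_t total = 4096;  // illustrative page counts
        const size_t used_by_fixed = 1200;
        const size_t pt_pages = PageTableHeapPages(total, used_by_fixed);
        assert(total - used_by_fixed - pt_pages == ReservedDynamicPageCount);
    }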
| @@ -310,15 +377,18 @@ struct KernelCore::Impl { | |||
| 310 | } | 377 | } |
| 311 | 378 | ||
| 312 | // Gets the dummy KThread for the caller, allocating a new one if this is the first time | 379 | // Gets the dummy KThread for the caller, allocating a new one if this is the first time |
| 313 | KThread* GetHostDummyThread() { | 380 | KThread* GetHostDummyThread(KThread* existing_thread) { |
| 314 | auto initialize = [this](KThread* thread) { | 381 | auto initialize = [this](KThread* thread) { |
| 315 | ASSERT(KThread::InitializeDummyThread(thread).IsSuccess()); | 382 | ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess()); |
| 316 | thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); | 383 | thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); |
| 317 | return thread; | 384 | return thread; |
| 318 | }; | 385 | }; |
| 319 | 386 | ||
| 320 | thread_local auto raw_thread = KThread(system.Kernel()); | 387 | thread_local KThread raw_thread{system.Kernel()}; |
| 321 | thread_local auto thread = initialize(&raw_thread); | 388 | thread_local KThread* thread = nullptr; |
| 389 | if (thread == nullptr) { | ||
| 390 | thread = (existing_thread == nullptr) ? initialize(&raw_thread) : existing_thread; | ||
| 391 | } | ||
| 322 | 392 | ||
| 323 | return thread; | 393 | return thread; |
| 324 | } | 394 | } |
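GetHostDummyThread now separates the thread_local storage from the pointer selecting it, so a caller can adopt an externally supplied KThread instead of lazily constructing the local one. The control flow reduces to this shape (int stands in for KThread):

    int* GetPerHostThreadSlot(int* existing) {
        thread_local int storage{};        // constructed once per host thread
        thread_local int* slot = nullptr;  // bound exactly once, on first call
        if (slot == nullptr) {
            slot = (existing == nullptr) ? &storage : existing;
        }
        return slot;
    }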
| @@ -333,9 +403,9 @@ struct KernelCore::Impl { | |||
| 333 | } | 403 | } |
| 334 | 404 | ||
| 335 | /// Registers a new host thread by allocating a host thread ID for it | 405 | /// Registers a new host thread by allocating a host thread ID for it |
| 336 | void RegisterHostThread() { | 406 | void RegisterHostThread(KThread* existing_thread) { |
| 337 | [[maybe_unused]] const auto this_id = GetHostThreadId(); | 407 | [[maybe_unused]] const auto this_id = GetHostThreadId(); |
| 338 | [[maybe_unused]] const auto dummy_thread = GetHostDummyThread(); | 408 | [[maybe_unused]] const auto dummy_thread = GetHostDummyThread(existing_thread); |
| 339 | } | 409 | } |
| 340 | 410 | ||
| 341 | [[nodiscard]] u32 GetCurrentHostThreadID() { | 411 | [[nodiscard]] u32 GetCurrentHostThreadID() { |
| @@ -346,6 +416,8 @@ struct KernelCore::Impl { | |||
| 346 | return this_id; | 416 | return this_id; |
| 347 | } | 417 | } |
| 348 | 418 | ||
| 419 | static inline thread_local bool is_phantom_mode_for_singlecore{false}; | ||
| 420 | |||
| 349 | bool IsPhantomModeForSingleCore() const { | 421 | bool IsPhantomModeForSingleCore() const { |
| 350 | return is_phantom_mode_for_singlecore; | 422 | return is_phantom_mode_for_singlecore; |
| 351 | } | 423 | } |
| @@ -364,7 +436,7 @@ struct KernelCore::Impl { | |||
| 364 | KThread* GetCurrentEmuThread() { | 436 | KThread* GetCurrentEmuThread() { |
| 365 | const auto thread_id = GetCurrentHostThreadID(); | 437 | const auto thread_id = GetCurrentHostThreadID(); |
| 366 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { | 438 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { |
| 367 | return GetHostDummyThread(); | 439 | return GetHostDummyThread(nullptr); |
| 368 | } | 440 | } |
| 369 | 441 | ||
| 370 | return current_thread; | 442 | return current_thread; |
| @@ -454,6 +526,9 @@ struct KernelCore::Impl { | |||
| 454 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( | 526 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 455 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); | 527 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); |
| 456 | 528 | ||
| 529 | // Determine if we'll use extra thread resources. | ||
| 530 | const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | ||
| 531 | |||
| 457 | // Setup the stack region. | 532 | // Setup the stack region. |
| 458 | constexpr size_t StackRegionSize = 14_MiB; | 533 | constexpr size_t StackRegionSize = 14_MiB; |
| 459 | constexpr size_t StackRegionAlign = KernelAslrAlignment; | 534 | constexpr size_t StackRegionAlign = KernelAslrAlignment; |
| @@ -464,7 +539,8 @@ struct KernelCore::Impl { | |||
| 464 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); | 539 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); |
| 465 | 540 | ||
| 466 | // Determine the size of the resource region. | 541 | // Determine the size of the resource region. |
| 467 | const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit(); | 542 | const size_t resource_region_size = |
| 543 | memory_layout->GetResourceRegionSizeForInit(use_extra_resources); | ||
| 468 | 544 | ||
| 469 | // Determine the size of the slab region. | 545 | // Determine the size of the slab region. |
| 470 | const size_t slab_region_size = | 546 | const size_t slab_region_size = |
| @@ -698,54 +774,48 @@ struct KernelCore::Impl { | |||
| 698 | return {}; | 774 | return {}; |
| 699 | } | 775 | } |
| 700 | 776 | ||
| 701 | KClientPort* port = &search->second(system.ServiceManager(), system); | 777 | return &search->second(system.ServiceManager(), system); |
| 702 | RegisterServerObject(&port->GetParent()->GetServerPort()); | ||
| 703 | return port; | ||
| 704 | } | 778 | } |
| 705 | 779 | ||
| 706 | void RegisterServerObject(KAutoObject* server_object) { | 780 | void RegisterNamedServiceHandler(std::string name, KServerPort* server_port) { |
| 707 | std::scoped_lock lk(server_objects_lock); | 781 | auto search = service_interface_handlers.find(name); |
| 708 | server_objects.insert(server_object); | 782 | if (search == service_interface_handlers.end()) { |
| 709 | } | 783 | return; |
| 784 | } | ||
| 710 | 785 | ||
| 711 | void UnregisterServerObject(KAutoObject* server_object) { | 786 | search->second(system.ServiceManager(), server_port); |
| 712 | std::scoped_lock lk(server_objects_lock); | ||
| 713 | server_objects.erase(server_object); | ||
| 714 | } | 787 | } |
| 715 | 788 | ||
| 716 | std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel, | 789 | Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) { |
| 717 | const std::string& name) { | 790 | auto* ptr = new ServiceThread(kernel, name); |
| 718 | auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, 1, name); | ||
| 719 | 791 | ||
| 720 | service_threads_manager.QueueWork( | 792 | service_threads_manager.QueueWork( |
| 721 | [this, service_thread]() { service_threads.emplace(service_thread); }); | 793 | [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); }); |
| 722 | 794 | ||
| 723 | return service_thread; | 795 | return *ptr; |
| 724 | } | 796 | } |
| 725 | 797 | ||
| 726 | void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | 798 | void ReleaseServiceThread(Kernel::ServiceThread& service_thread) { |
| 727 | if (auto strong_ptr = service_thread.lock()) { | 799 | auto* ptr = &service_thread; |
| 728 | if (strong_ptr == default_service_thread.lock()) { | ||
| 729 | // Nothing to do here, the service is using default_service_thread, which will be | ||
| 730 | // released on shutdown. | ||
| 731 | return; | ||
| 732 | } | ||
| 733 | 800 | ||
| 734 | service_threads_manager.QueueWork( | 801 | if (ptr == default_service_thread) { |
| 735 | [this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); }); | 802 | // Nothing to do here, the service is using default_service_thread, which will be |
| 803 | // released on shutdown. | ||
| 804 | return; | ||
| 736 | } | 805 | } |
| 806 | |||
| 807 | service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); }); | ||
| 737 | } | 808 | } |
| 738 | 809 | ||
| 739 | void ClearServiceThreads() { | 810 | void ClearServiceThreads() { |
| 740 | service_threads_manager.QueueWork([this] { | 811 | service_threads_manager.QueueWork([this] { |
| 741 | service_threads.clear(); | 812 | service_threads.clear(); |
| 742 | default_service_thread.reset(); | 813 | default_service_thread = nullptr; |
| 743 | service_thread_barrier.Sync(); | 814 | service_thread_barrier.Sync(); |
| 744 | }); | 815 | }); |
| 745 | service_thread_barrier.Sync(); | 816 | service_thread_barrier.Sync(); |
| 746 | } | 817 | } |
| 747 | 818 | ||
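The container change above (a map of unique_ptrs keyed by raw pointer, replacing shared_ptr/weak_ptr) hands callers a plain reference while a single worker thread owns every service thread, so creation and destruction are serialized without a lock. A sketch of that ownership scheme under simplified assumptions; Worker, Service, and Registry are hypothetical stand-ins:

#include <functional>
#include <memory>
#include <unordered_map>
#include <vector>

struct Worker { // stand-in for Common::ThreadWorker; runs tasks in order
    std::vector<std::function<void()>> queue;
    void QueueWork(std::function<void()> f) { queue.push_back(std::move(f)); }
    void Drain() {
        for (auto& f : queue) {
            f();
        }
        queue.clear();
    }
};

struct Service { // stand-in for Kernel::ServiceThread
    int value{0};
};

struct Registry {
    Worker worker;
    std::unordered_map<Service*, std::unique_ptr<Service>> services;

    Service& Create() {
        auto* ptr = new Service;
        worker.QueueWork(
            [this, ptr] { services.emplace(ptr, std::unique_ptr<Service>(ptr)); });
        return *ptr; // usable immediately; ownership lands on the worker
    }
    void Release(Service& s) {
        auto* ptr = &s;
        worker.QueueWork([this, ptr] { services.erase(ptr); }); // frees it
    }
};

int main() {
    Registry r;
    Service& s = r.Create();
    s.value = 42;
    r.Release(s);
    r.worker.Drain(); // insertion then erasure both run on the "worker"
}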
| 748 | std::mutex server_objects_lock; | ||
| 749 | std::mutex registered_objects_lock; | 819 | std::mutex registered_objects_lock; |
| 750 | std::mutex registered_in_use_objects_lock; | 820 | std::mutex registered_in_use_objects_lock; |
| 751 | 821 | ||
| @@ -763,6 +833,8 @@ struct KernelCore::Impl { | |||
| 763 | Init::KSlabResourceCounts slab_resource_counts{}; | 833 | Init::KSlabResourceCounts slab_resource_counts{}; |
| 764 | KResourceLimit* system_resource_limit{}; | 834 | KResourceLimit* system_resource_limit{}; |
| 765 | 835 | ||
| 836 | KPageBufferSlabHeap page_buffer_slab_heap; | ||
| 837 | |||
| 766 | std::shared_ptr<Core::Timing::EventType> preemption_event; | 838 | std::shared_ptr<Core::Timing::EventType> preemption_event; |
| 767 | 839 | ||
| 768 | // This is the kernel's handle table or supervisor handle table which | 840 | // This is the kernel's handle table or supervisor handle table which |
| @@ -774,8 +846,8 @@ struct KernelCore::Impl { | |||
| 774 | /// Map of named ports managed by the kernel, which can be retrieved using | 846 | /// Map of named ports managed by the kernel, which can be retrieved using |
| 775 | /// the ConnectToPort SVC. | 847 | /// the ConnectToPort SVC. |
| 776 | std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory; | 848 | std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory; |
| 849 | std::unordered_map<std::string, ServiceInterfaceHandlerFn> service_interface_handlers; | ||
| 777 | NamedPortTable named_ports; | 850 | NamedPortTable named_ports; |
| 778 | std::unordered_set<KAutoObject*> server_objects; | ||
| 779 | std::unordered_set<KAutoObject*> registered_objects; | 851 | std::unordered_set<KAutoObject*> registered_objects; |
| 780 | std::unordered_set<KAutoObject*> registered_in_use_objects; | 852 | std::unordered_set<KAutoObject*> registered_in_use_objects; |
| 781 | 853 | ||
| @@ -788,10 +860,20 @@ struct KernelCore::Impl { | |||
| 788 | // Kernel memory management | 860 | // Kernel memory management |
| 789 | std::unique_ptr<KMemoryManager> memory_manager; | 861 | std::unique_ptr<KMemoryManager> memory_manager; |
| 790 | 862 | ||
| 791 | // Dynamic slab managers | 863 | // Resource managers |
| 792 | std::unique_ptr<KDynamicPageManager> dynamic_page_manager; | 864 | std::unique_ptr<KDynamicPageManager> resource_manager_page_manager; |
| 793 | std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; | 865 | std::unique_ptr<KPageTableSlabHeap> page_table_heap; |
| 866 | std::unique_ptr<KMemoryBlockSlabHeap> app_memory_block_heap; | ||
| 867 | std::unique_ptr<KMemoryBlockSlabHeap> sys_memory_block_heap; | ||
| 868 | std::unique_ptr<KBlockInfoSlabHeap> block_info_heap; | ||
| 869 | std::unique_ptr<KPageTableManager> app_page_table_manager; | ||
| 870 | std::unique_ptr<KPageTableManager> sys_page_table_manager; | ||
| 794 | std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; | 871 | std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; |
| 872 | std::unique_ptr<KMemoryBlockSlabManager> sys_memory_block_manager; | ||
| 873 | std::unique_ptr<KBlockInfoManager> app_block_info_manager; | ||
| 874 | std::unique_ptr<KBlockInfoManager> sys_block_info_manager; | ||
| 875 | std::unique_ptr<KSystemResource> app_system_resource; | ||
| 876 | std::unique_ptr<KSystemResource> sys_system_resource; | ||
| 795 | 877 | ||
| 796 | // Shared memory for services | 878 | // Shared memory for services |
| 797 | Kernel::KSharedMemory* hid_shared_mem{}; | 879 | Kernel::KSharedMemory* hid_shared_mem{}; |
| @@ -804,8 +886,8 @@ struct KernelCore::Impl { | |||
| 804 | std::unique_ptr<KMemoryLayout> memory_layout; | 886 | std::unique_ptr<KMemoryLayout> memory_layout; |
| 805 | 887 | ||
| 806 | // Threads used for services | 888 | // Threads used for services |
| 807 | std::unordered_set<std::shared_ptr<ServiceThread>> service_threads; | 889 | std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads; |
| 808 | std::weak_ptr<ServiceThread> default_service_thread; | 890 | ServiceThread* default_service_thread{}; |
| 809 | Common::ThreadWorker service_threads_manager; | 891 | Common::ThreadWorker service_threads_manager; |
| 810 | Common::Barrier service_thread_barrier; | 892 | Common::Barrier service_thread_barrier; |
| 811 | 893 | ||
| @@ -814,7 +896,6 @@ struct KernelCore::Impl { | |||
| 814 | 896 | ||
| 815 | bool is_multicore{}; | 897 | bool is_multicore{}; |
| 816 | std::atomic_bool is_shutting_down{}; | 898 | std::atomic_bool is_shutting_down{}; |
| 817 | bool is_phantom_mode_for_singlecore{}; | ||
| 818 | u32 single_core_thread_id{}; | 899 | u32 single_core_thread_id{}; |
| 819 | 900 | ||
| 820 | std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{}; | 901 | std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{}; |
| @@ -981,16 +1062,17 @@ void KernelCore::RegisterNamedService(std::string name, ServiceInterfaceFactory& | |||
| 981 | impl->service_interface_factory.emplace(std::move(name), factory); | 1062 | impl->service_interface_factory.emplace(std::move(name), factory); |
| 982 | } | 1063 | } |
| 983 | 1064 | ||
| 984 | KClientPort* KernelCore::CreateNamedServicePort(std::string name) { | 1065 | void KernelCore::RegisterInterfaceForNamedService(std::string name, |
| 985 | return impl->CreateNamedServicePort(std::move(name)); | 1066 | ServiceInterfaceHandlerFn&& handler) { |
| 1067 | impl->service_interface_handlers.emplace(std::move(name), handler); | ||
| 986 | } | 1068 | } |
| 987 | 1069 | ||
| 988 | void KernelCore::RegisterServerObject(KAutoObject* server_object) { | 1070 | KClientPort* KernelCore::CreateNamedServicePort(std::string name) { |
| 989 | impl->RegisterServerObject(server_object); | 1071 | return impl->CreateNamedServicePort(std::move(name)); |
| 990 | } | 1072 | } |
| 991 | 1073 | ||
| 992 | void KernelCore::UnregisterServerObject(KAutoObject* server_object) { | 1074 | void KernelCore::RegisterNamedServiceHandler(std::string name, KServerPort* server_port) { |
| 993 | impl->UnregisterServerObject(server_object); | 1075 | impl->RegisterNamedServiceHandler(std::move(name), server_port); |
| 994 | } | 1076 | } |
| 995 | 1077 | ||
| 996 | void KernelCore::RegisterKernelObject(KAutoObject* object) { | 1078 | void KernelCore::RegisterKernelObject(KAutoObject* object) { |
| @@ -1045,8 +1127,12 @@ void KernelCore::RegisterCoreThread(std::size_t core_id) { | |||
| 1045 | impl->RegisterCoreThread(core_id); | 1127 | impl->RegisterCoreThread(core_id); |
| 1046 | } | 1128 | } |
| 1047 | 1129 | ||
| 1048 | void KernelCore::RegisterHostThread() { | 1130 | void KernelCore::RegisterHostThread(KThread* existing_thread) { |
| 1049 | impl->RegisterHostThread(); | 1131 | impl->RegisterHostThread(existing_thread); |
| 1132 | |||
| 1133 | if (existing_thread != nullptr) { | ||
| 1134 | ASSERT(GetCurrentEmuThread() == existing_thread); | ||
| 1135 | } | ||
| 1050 | } | 1136 | } |
| 1051 | 1137 | ||
| 1052 | u32 KernelCore::GetCurrentHostThreadID() const { | 1138 | u32 KernelCore::GetCurrentHostThreadID() const { |
| @@ -1069,12 +1155,12 @@ const KMemoryManager& KernelCore::MemoryManager() const { | |||
| 1069 | return *impl->memory_manager; | 1155 | return *impl->memory_manager; |
| 1070 | } | 1156 | } |
| 1071 | 1157 | ||
| 1072 | KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { | 1158 | KSystemResource& KernelCore::GetSystemSystemResource() { |
| 1073 | return *impl->app_memory_block_manager; | 1159 | return *impl->sys_system_resource; |
| 1074 | } | 1160 | } |
| 1075 | 1161 | ||
| 1076 | const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { | 1162 | const KSystemResource& KernelCore::GetSystemSystemResource() const { |
| 1077 | return *impl->app_memory_block_manager; | 1163 | return *impl->sys_system_resource; |
| 1078 | } | 1164 | } |
| 1079 | 1165 | ||
| 1080 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { | 1166 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { |
| @@ -1121,16 +1207,28 @@ void KernelCore::Suspend(bool suspended) { | |||
| 1121 | const bool should_suspend{exception_exited || suspended}; | 1207 | const bool should_suspend{exception_exited || suspended}; |
| 1122 | const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable; | 1208 | const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable; |
| 1123 | 1209 | ||
| 1124 | for (auto* process : GetProcessList()) { | 1210 | std::vector<KScopedAutoObject<KThread>> process_threads; |
| 1125 | process->SetActivity(activity); | 1211 | { |
| 1212 | KScopedSchedulerLock sl{*this}; | ||
| 1213 | |||
| 1214 | if (auto* process = CurrentProcess(); process != nullptr) { | ||
| 1215 | process->SetActivity(activity); | ||
| 1216 | |||
| 1217 | if (!should_suspend) { | ||
| 1218 | // Runnable now; no need to wait. | ||
| 1219 | return; | ||
| 1220 | } | ||
| 1126 | 1221 | ||
| 1127 | if (should_suspend) { | ||
| 1128 | // Wait for execution to stop | ||
| 1129 | for (auto* thread : process->GetThreadList()) { | 1222 | for (auto* thread : process->GetThreadList()) { |
| 1130 | thread->WaitUntilSuspended(); | 1223 | process_threads.emplace_back(thread); |
| 1131 | } | 1224 | } |
| 1132 | } | 1225 | } |
| 1133 | } | 1226 | } |
| 1227 | |||
| 1228 | // Wait for execution to stop. | ||
| 1229 | for (auto& thread : process_threads) { | ||
| 1230 | thread->WaitUntilSuspended(); | ||
| 1231 | } | ||
| 1134 | } | 1232 | } |
| 1135 | 1233 | ||
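The rewritten Suspend pins every thread of the current process while the scheduler lock is held, then blocks on them only after the lock is dropped, so waiting for suspension cannot deadlock against the scheduler. A minimal sketch of that lock discipline, using a plain std::mutex and stand-in Thread/Process types:

#include <mutex>
#include <vector>

struct Thread { // stand-in for KThread
    void WaitUntilSuspended() {} // placeholder for the real blocking wait
};

struct Process { // stand-in for KProcess
    std::vector<Thread> threads;
};

std::mutex scheduler_lock; // stand-in for KScopedSchedulerLock

void Suspend(Process& process) {
    std::vector<Thread*> snapshot;
    {
        std::scoped_lock lk{scheduler_lock}; // mutate activity under the lock
        for (auto& t : process.threads) {
            snapshot.push_back(&t);          // pin threads for later waiting
        }
    }
    for (auto* t : snapshot) {
        t->WaitUntilSuspended();             // block outside the lock
    }
}

int main() {
    Process p;
    p.threads.resize(4);
    Suspend(p);
}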
| 1136 | void KernelCore::ShutdownCores() { | 1234 | void KernelCore::ShutdownCores() { |
| @@ -1162,15 +1260,15 @@ void KernelCore::ExitSVCProfile() { | |||
| 1162 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); | 1260 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); |
| 1163 | } | 1261 | } |
| 1164 | 1262 | ||
| 1165 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | 1263 | Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) { |
| 1166 | return impl->CreateServiceThread(*this, name); | 1264 | return impl->CreateServiceThread(*this, name); |
| 1167 | } | 1265 | } |
| 1168 | 1266 | ||
| 1169 | std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const { | 1267 | Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const { |
| 1170 | return impl->default_service_thread; | 1268 | return *impl->default_service_thread; |
| 1171 | } | 1269 | } |
| 1172 | 1270 | ||
| 1173 | void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | 1271 | void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) { |
| 1174 | impl->ReleaseServiceThread(service_thread); | 1272 | impl->ReleaseServiceThread(service_thread); |
| 1175 | } | 1273 | } |
| 1176 | 1274 | ||
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 266be2bc4..2e22fe0f6 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -34,22 +34,27 @@ class KClientPort; | |||
| 34 | class GlobalSchedulerContext; | 34 | class GlobalSchedulerContext; |
| 35 | class KAutoObjectWithListContainer; | 35 | class KAutoObjectWithListContainer; |
| 36 | class KClientSession; | 36 | class KClientSession; |
| 37 | class KDebug; | ||
| 38 | class KDynamicPageManager; | ||
| 37 | class KEvent; | 39 | class KEvent; |
| 40 | class KEventInfo; | ||
| 38 | class KHandleTable; | 41 | class KHandleTable; |
| 39 | class KLinkedListNode; | 42 | class KLinkedListNode; |
| 40 | class KMemoryBlockSlabManager; | ||
| 41 | class KMemoryLayout; | 43 | class KMemoryLayout; |
| 42 | class KMemoryManager; | 44 | class KMemoryManager; |
| 43 | class KPageBuffer; | 45 | class KPageBuffer; |
| 46 | class KPageBufferSlabHeap; | ||
| 44 | class KPort; | 47 | class KPort; |
| 45 | class KProcess; | 48 | class KProcess; |
| 46 | class KResourceLimit; | 49 | class KResourceLimit; |
| 47 | class KScheduler; | 50 | class KScheduler; |
| 51 | class KServerPort; | ||
| 48 | class KServerSession; | 52 | class KServerSession; |
| 49 | class KSession; | 53 | class KSession; |
| 50 | class KSessionRequest; | 54 | class KSessionRequest; |
| 51 | class KSharedMemory; | 55 | class KSharedMemory; |
| 52 | class KSharedMemoryInfo; | 56 | class KSharedMemoryInfo; |
| 57 | class KSecureSystemResource; | ||
| 53 | class KThread; | 58 | class KThread; |
| 54 | class KThreadLocalPage; | 59 | class KThreadLocalPage; |
| 55 | class KTransferMemory; | 60 | class KTransferMemory; |
| @@ -63,6 +68,8 @@ class TimeManager; | |||
| 63 | using ServiceInterfaceFactory = | 68 | using ServiceInterfaceFactory = |
| 64 | std::function<KClientPort&(Service::SM::ServiceManager&, Core::System&)>; | 69 | std::function<KClientPort&(Service::SM::ServiceManager&, Core::System&)>; |
| 65 | 70 | ||
| 71 | using ServiceInterfaceHandlerFn = std::function<void(Service::SM::ServiceManager&, KServerPort*)>; | ||
| 72 | |||
| 66 | namespace Init { | 73 | namespace Init { |
| 67 | struct KSlabResourceCounts; | 74 | struct KSlabResourceCounts; |
| 68 | } | 75 | } |
| @@ -192,16 +199,14 @@ public: | |||
| 192 | /// Registers a named HLE service, passing a factory used to open a port to that service. | 199 | /// Registers a named HLE service, passing a factory used to open a port to that service. |
| 193 | void RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory); | 200 | void RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory); |
| 194 | 201 | ||
| 202 | /// Registers a setup function for the named HLE service. | ||
| 203 | void RegisterInterfaceForNamedService(std::string name, ServiceInterfaceHandlerFn&& handler); | ||
| 204 | |||
| 195 | /// Opens a port to a service previously registered with RegisterNamedService. | 205 | /// Opens a port to a service previously registered with RegisterNamedService. |
| 196 | KClientPort* CreateNamedServicePort(std::string name); | 206 | KClientPort* CreateNamedServicePort(std::string name); |
| 197 | 207 | ||
| 198 | /// Registers a server session or port with the global emulation state, to be freed on shutdown. | 208 | /// Accepts a session on a port created by CreateNamedServicePort. |
| 199 | /// This is necessary because we do not emulate processes for HLE sessions and ports. | 209 | void RegisterNamedServiceHandler(std::string name, KServerPort* server_port); |
| 200 | void RegisterServerObject(KAutoObject* server_object); | ||
| 201 | |||
| 202 | /// Unregisters a server session or port previously registered with RegisterServerObject when | ||
| 203 | /// it was destroyed during the current emulation session. | ||
| 204 | void UnregisterServerObject(KAutoObject* server_object); | ||
| 205 | 210 | ||
| 206 | /// Registers all kernel objects with the global emulation state; this is purely for tracking | 211 | /// Registers all kernel objects with the global emulation state; this is purely for tracking |
| 207 | /// leaks after emulation has been shutdown. | 212 | /// leaks after emulation has been shutdown. |
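For illustration, the two-phase flow declared here might look like the following when exercised; the free functions, handler map, and port name in this sketch are hypothetical stand-ins rather than the kernel's real API. A factory creates the client port up front, and the handler registered per service is invoked later with the server port once a client actually connects:

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct ServerPort {}; // stand-in for KServerPort

std::unordered_map<std::string, std::function<void(ServerPort*)>> handlers;

void RegisterInterfaceForNamedService(std::string name,
                                      std::function<void(ServerPort*)>&& handler) {
    handlers.emplace(std::move(name), std::move(handler));
}

void RegisterNamedServiceHandler(const std::string& name, ServerPort* port) {
    if (auto it = handlers.find(name); it != handlers.end()) {
        it->second(port); // accept the pending session on this port
    }
}

int main() {
    RegisterInterfaceForNamedService("sm:", [](ServerPort*) {
        std::cout << "sm: session accepted\n";
    });
    ServerPort port;
    RegisterNamedServiceHandler("sm:", &port); // as done in ConnectToNamedPort
}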
| @@ -235,7 +240,7 @@ public: | |||
| 235 | void RegisterCoreThread(std::size_t core_id); | 240 | void RegisterCoreThread(std::size_t core_id); |
| 236 | 241 | ||
| 237 | /// Register the current thread as a non CPU core thread. | 242 | /// Register the current thread as a non CPU core thread. |
| 238 | void RegisterHostThread(); | 243 | void RegisterHostThread(KThread* existing_thread = nullptr); |
| 239 | 244 | ||
| 240 | /// Gets the virtual memory manager for the kernel. | 245 | /// Gets the virtual memory manager for the kernel. |
| 241 | KMemoryManager& MemoryManager(); | 246 | KMemoryManager& MemoryManager(); |
| @@ -243,11 +248,11 @@ public: | |||
| 243 | /// Gets the virtual memory manager for the kernel. | 248 | /// Gets the virtual memory manager for the kernel. |
| 244 | const KMemoryManager& MemoryManager() const; | 249 | const KMemoryManager& MemoryManager() const; |
| 245 | 250 | ||
| 246 | /// Gets the application memory block manager for the kernel. | 251 | /// Gets the system resource manager. |
| 247 | KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); | 252 | KSystemResource& GetSystemSystemResource(); |
| 248 | 253 | ||
| 249 | /// Gets the application memory block manager for the kernel. | 254 | /// Gets the system resource manager. |
| 250 | const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; | 255 | const KSystemResource& GetSystemSystemResource() const; |
| 251 | 256 | ||
| 252 | /// Gets the shared memory object for HID services. | 257 | /// Gets the shared memory object for HID services. |
| 253 | Kernel::KSharedMemory& GetHidSharedMem(); | 258 | Kernel::KSharedMemory& GetHidSharedMem(); |
| @@ -304,24 +309,24 @@ public: | |||
| 304 | * See GetDefaultServiceThread. | 309 | * See GetDefaultServiceThread. |
| 305 | * @param name String name for the ServerSession creating this thread, used for debug | 310 | * @param name String name for the ServerSession creating this thread, used for debug |
| 306 | * purposes. | 311 | * purposes. |
| 307 | * @returns The a weak pointer newly created service thread. | 312 | * @returns A reference to the newly created service thread. |
| 308 | */ | 313 | */ |
| 309 | std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); | 314 | Kernel::ServiceThread& CreateServiceThread(const std::string& name); |
| 310 | 315 | ||
| 311 | /** | 316 | /** |
| 312 | * Gets the default host service thread, which executes HLE service requests. Unless service | 317 | * Gets the default host service thread, which executes HLE service requests. Unless service |
| 313 | * requests need to block on the host, the default service thread should be used in favor of | 318 | * requests need to block on the host, the default service thread should be used in favor of |
| 314 | * creating a new service thread. | 319 | * creating a new service thread. |
| 315 | * @returns The a weak pointer for the default service thread. | 320 | * @returns A reference to the default service thread. |
| 316 | */ | 321 | */ |
| 317 | std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const; | 322 | Kernel::ServiceThread& GetDefaultServiceThread() const; |
| 318 | 323 | ||
| 319 | /** | 324 | /** |
| 320 | * Releases an HLE service thread, instructing KernelCore to free it. This should be called when | 325 | * Releases an HLE service thread, instructing KernelCore to free it. This should be called when |
| 321 | * the ServerSession associated with the thread is destroyed. | 326 | * the ServerSession associated with the thread is destroyed. |
| 322 | * @param service_thread Service thread to release. | 327 | * @param service_thread Service thread to release. |
| 323 | */ | 328 | */ |
| 324 | void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread); | 329 | void ReleaseServiceThread(Kernel::ServiceThread& service_thread); |
| 325 | 330 | ||
| 326 | /// Workaround for single-core mode when preempting threads while idle. | 331 | /// Workaround for single-core mode when preempting threads while idle. |
| 327 | bool IsPhantomModeForSingleCore() const; | 332 | bool IsPhantomModeForSingleCore() const; |
| @@ -363,6 +368,12 @@ public: | |||
| 363 | return slab_heap_container->thread_local_page; | 368 | return slab_heap_container->thread_local_page; |
| 364 | } else if constexpr (std::is_same_v<T, KSessionRequest>) { | 369 | } else if constexpr (std::is_same_v<T, KSessionRequest>) { |
| 365 | return slab_heap_container->session_request; | 370 | return slab_heap_container->session_request; |
| 371 | } else if constexpr (std::is_same_v<T, KSecureSystemResource>) { | ||
| 372 | return slab_heap_container->secure_system_resource; | ||
| 373 | } else if constexpr (std::is_same_v<T, KEventInfo>) { | ||
| 374 | return slab_heap_container->event_info; | ||
| 375 | } else if constexpr (std::is_same_v<T, KDebug>) { | ||
| 376 | return slab_heap_container->debug; | ||
| 366 | } | 377 | } |
| 367 | } | 378 | } |
| 368 | 379 | ||
| @@ -426,6 +437,9 @@ private: | |||
| 426 | KSlabHeap<KPageBuffer> page_buffer; | 437 | KSlabHeap<KPageBuffer> page_buffer; |
| 427 | KSlabHeap<KThreadLocalPage> thread_local_page; | 438 | KSlabHeap<KThreadLocalPage> thread_local_page; |
| 428 | KSlabHeap<KSessionRequest> session_request; | 439 | KSlabHeap<KSessionRequest> session_request; |
| 440 | KSlabHeap<KSecureSystemResource> secure_system_resource; | ||
| 441 | KSlabHeap<KEventInfo> event_info; | ||
| 442 | KSlabHeap<KDebug> debug; | ||
| 429 | }; | 443 | }; |
| 430 | 444 | ||
| 431 | std::unique_ptr<SlabHeapContainer> slab_heap_container; | 445 | std::unique_ptr<SlabHeapContainer> slab_heap_container; |
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index d4375962f..3044922ac 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp | |||
| @@ -12,7 +12,7 @@ namespace Kernel { | |||
| 12 | 12 | ||
| 13 | PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_) | 13 | PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_) |
| 14 | : core_index{core_index_}, system{system_}, scheduler{scheduler_} { | 14 | : core_index{core_index_}, system{system_}, scheduler{scheduler_} { |
| 15 | #ifdef ARCHITECTURE_x86_64 | 15 | #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64) |
| 16 | // TODO(bunnei): Initialization relies on a core being available. We may later replace this with | 16 | // TODO(bunnei): Initialization relies on a core being available. We may later replace this with |
| 17 | // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager. | 17 | // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager. |
| 18 | auto& kernel = system.Kernel(); | 18 | auto& kernel = system.Kernel(); |
| @@ -26,7 +26,7 @@ PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KSche | |||
| 26 | PhysicalCore::~PhysicalCore() = default; | 26 | PhysicalCore::~PhysicalCore() = default; |
| 27 | 27 | ||
| 28 | void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { | 28 | void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { |
| 29 | #ifdef ARCHITECTURE_x86_64 | 29 | #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64) |
| 30 | auto& kernel = system.Kernel(); | 30 | auto& kernel = system.Kernel(); |
| 31 | if (!is_64_bit) { | 31 | if (!is_64_bit) { |
| 32 | // We already initialized a 64-bit core, replace with a 32-bit one. | 32 | // We already initialized a 64-bit core, replace with a 32-bit one. |
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index d23d76706..f5c2ab23f 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp | |||
| @@ -1,15 +1,18 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include <condition_variable> | ||
| 5 | #include <functional> | 4 | #include <functional> |
| 5 | #include <map> | ||
| 6 | #include <mutex> | 6 | #include <mutex> |
| 7 | #include <thread> | 7 | #include <thread> |
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | #include <queue> | ||
| 10 | 9 | ||
| 11 | #include "common/scope_exit.h" | 10 | #include "common/scope_exit.h" |
| 12 | #include "common/thread.h" | 11 | #include "common/thread.h" |
| 12 | #include "core/hle/ipc_helpers.h" | ||
| 13 | #include "core/hle/kernel/hle_ipc.h" | ||
| 14 | #include "core/hle/kernel/k_event.h" | ||
| 15 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | ||
| 13 | #include "core/hle/kernel/k_session.h" | 16 | #include "core/hle/kernel/k_session.h" |
| 14 | #include "core/hle/kernel/k_thread.h" | 17 | #include "core/hle/kernel/k_thread.h" |
| 15 | #include "core/hle/kernel/kernel.h" | 18 | #include "core/hle/kernel/kernel.h" |
| @@ -19,101 +22,210 @@ namespace Kernel { | |||
| 19 | 22 | ||
| 20 | class ServiceThread::Impl final { | 23 | class ServiceThread::Impl final { |
| 21 | public: | 24 | public: |
| 22 | explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name); | 25 | explicit Impl(KernelCore& kernel, const std::string& service_name); |
| 23 | ~Impl(); | 26 | ~Impl(); |
| 24 | 27 | ||
| 25 | void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context); | 28 | void WaitAndProcessImpl(); |
| 29 | void SessionClosed(KServerSession* server_session, | ||
| 30 | std::shared_ptr<SessionRequestManager> manager); | ||
| 31 | void LoopProcess(); | ||
| 32 | |||
| 33 | void RegisterServerSession(KServerSession* session, | ||
| 34 | std::shared_ptr<SessionRequestManager> manager); | ||
| 26 | 35 | ||
| 27 | private: | 36 | private: |
| 28 | std::vector<std::jthread> threads; | 37 | KernelCore& kernel; |
| 29 | std::queue<std::function<void()>> requests; | 38 | |
| 30 | std::mutex queue_mutex; | 39 | std::jthread m_host_thread; |
| 31 | std::condition_variable_any condition; | 40 | std::mutex m_session_mutex; |
| 32 | const std::string service_name; | 41 | std::map<KServerSession*, std::shared_ptr<SessionRequestManager>> m_sessions; |
| 42 | KEvent* m_wakeup_event; | ||
| 43 | KProcess* m_process; | ||
| 44 | KThread* m_thread; | ||
| 45 | std::atomic<bool> m_shutdown_requested; | ||
| 46 | const std::string m_service_name; | ||
| 33 | }; | 47 | }; |
| 34 | 48 | ||
| 35 | ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name) | 49 | void ServiceThread::Impl::WaitAndProcessImpl() { |
| 36 | : service_name{name} { | 50 | // Create local list of waitable sessions. |
| 37 | for (std::size_t i = 0; i < num_threads; ++i) { | 51 | std::vector<KSynchronizationObject*> objs; |
| 38 | threads.emplace_back([this, &kernel](std::stop_token stop_token) { | 52 | std::vector<std::shared_ptr<SessionRequestManager>> managers; |
| 39 | Common::SetCurrentThreadName(std::string{service_name}.c_str()); | 53 | |
| 54 | { | ||
| 55 | // Lock to get the set. | ||
| 56 | std::scoped_lock lk{m_session_mutex}; | ||
| 57 | |||
| 58 | // Reserve the needed quantity. | ||
| 59 | objs.reserve(m_sessions.size() + 1); | ||
| 60 | managers.reserve(m_sessions.size()); | ||
| 61 | |||
| 62 | // Copy to our local list. | ||
| 63 | for (const auto& [session, manager] : m_sessions) { | ||
| 64 | objs.push_back(session); | ||
| 65 | managers.push_back(manager); | ||
| 66 | } | ||
| 40 | 67 | ||
| 41 | // Wait for first request before trying to acquire a render context | 68 | // Insert the wakeup event at the end. |
| 42 | { | 69 | objs.push_back(&m_wakeup_event->GetReadableEvent()); |
| 43 | std::unique_lock lock{queue_mutex}; | 70 | } |
| 44 | condition.wait(lock, stop_token, [this] { return !requests.empty(); }); | 71 | |
| 45 | } | 72 | // Wait on the list of sessions. |
| 73 | s32 index{-1}; | ||
| 74 | Result rc = KSynchronizationObject::Wait(kernel, &index, objs.data(), | ||
| 75 | static_cast<s32>(objs.size()), -1); | ||
| 76 | ASSERT(!rc.IsFailure()); | ||
| 77 | |||
| 78 | // If this was the wakeup event, clear it and finish. | ||
| 79 | if (index >= static_cast<s64>(objs.size() - 1)) { | ||
| 80 | m_wakeup_event->Clear(); | ||
| 81 | return; | ||
| 82 | } | ||
| 46 | 83 | ||
| 47 | if (stop_token.stop_requested()) { | 84 | // This event is from a server session. |
| 48 | return; | 85 | auto* server_session = static_cast<KServerSession*>(objs[index]); |
| 49 | } | 86 | auto& manager = managers[index]; |
| 50 | 87 | ||
| 51 | // Allocate a dummy guest thread for this host thread. | 88 | // Fetch the HLE request context. |
| 52 | kernel.RegisterHostThread(); | 89 | std::shared_ptr<HLERequestContext> context; |
| 90 | rc = server_session->ReceiveRequest(&context, manager); | ||
| 53 | 91 | ||
| 54 | while (true) { | 92 | // If the session was closed, handle that. |
| 55 | std::function<void()> task; | 93 | if (rc == ResultSessionClosed) { |
| 94 | SessionClosed(server_session, manager); | ||
| 56 | 95 | ||
| 57 | { | 96 | // Finish. |
| 58 | std::unique_lock lock{queue_mutex}; | 97 | return; |
| 59 | condition.wait(lock, stop_token, [this] { return !requests.empty(); }); | 98 | } |
| 60 | 99 | ||
| 61 | if (stop_token.stop_requested()) { | 100 | // TODO: handle other cases |
| 62 | return; | 101 | ASSERT(rc == ResultSuccess); |
| 63 | } | ||
| 64 | 102 | ||
| 65 | if (requests.empty()) { | 103 | // Perform the request. |
| 66 | continue; | 104 | Result service_rc = manager->CompleteSyncRequest(server_session, *context); |
| 67 | } | ||
| 68 | 105 | ||
| 69 | task = std::move(requests.front()); | 106 | // Reply to the client. |
| 70 | requests.pop(); | 107 | rc = server_session->SendReplyHLE(); |
| 71 | } | ||
| 72 | 108 | ||
| 73 | task(); | 109 | if (rc == ResultSessionClosed || service_rc == IPC::ERR_REMOTE_PROCESS_DEAD) { |
| 74 | } | 110 | SessionClosed(server_session, manager); |
| 75 | }); | 111 | return; |
| 76 | } | 112 | } |
| 113 | |||
| 114 | // TODO: handle other cases | ||
| 115 | ASSERT(rc == ResultSuccess); | ||
| 116 | ASSERT(service_rc == ResultSuccess); | ||
| 77 | } | 117 | } |
| 78 | 118 | ||
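One iteration of the loop above: snapshot the session set under the mutex, wait on the sessions plus a trailing wakeup object, and treat the wakeup index as a request to rebuild the snapshot. A sketch under those assumptions; WaitAny is a hypothetical stand-in for KSynchronizationObject::Wait and trivially reports the wakeup here:

#include <cstddef>
#include <map>
#include <mutex>
#include <vector>

struct Waitable {}; // stand-in for KSynchronizationObject

// Hypothetical stand-in: a real implementation would block until one of
// the objects signals; this one just pretends the wakeup object fired.
std::size_t WaitAny(const std::vector<Waitable*>& objs) {
    return objs.size() - 1;
}

struct Loop {
    std::mutex session_mutex;
    std::map<Waitable*, int> sessions; // session -> per-session state
    Waitable wakeup;

    void WaitAndProcessOnce() {
        std::vector<Waitable*> objs;
        {
            std::scoped_lock lk{session_mutex};
            objs.reserve(sessions.size() + 1);
            for (const auto& [session, state] : sessions) {
                objs.push_back(session);
            }
            objs.push_back(&wakeup); // wakeup always sits at the end
        }
        const std::size_t index = WaitAny(objs);
        if (index == objs.size() - 1) {
            return; // woken to re-snapshot after a session was added/removed
        }
        // Otherwise objs[index] is a signalled session: receive the request,
        // dispatch it, and reply, as WaitAndProcessImpl does above.
    }
};

int main() {
    Loop loop;
    loop.WaitAndProcessOnce(); // returns immediately via the stand-in wakeup
}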
| 79 | void ServiceThread::Impl::QueueSyncRequest(KSession& session, | 119 | void ServiceThread::Impl::SessionClosed(KServerSession* server_session, |
| 80 | std::shared_ptr<HLERequestContext>&& context) { | 120 | std::shared_ptr<SessionRequestManager> manager) { |
| 81 | { | 121 | { |
| 82 | std::unique_lock lock{queue_mutex}; | 122 | // Lock to get the set. |
| 123 | std::scoped_lock lk{m_session_mutex}; | ||
| 83 | 124 | ||
| 84 | auto* server_session{&session.GetServerSession()}; | 125 | // Erase the session. |
| 126 | ASSERT(m_sessions.erase(server_session) == 1); | ||
| 127 | } | ||
| 128 | |||
| 129 | // Close our reference to the server session. | ||
| 130 | server_session->Close(); | ||
| 131 | } | ||
| 132 | |||
| 133 | void ServiceThread::Impl::LoopProcess() { | ||
| 134 | Common::SetCurrentThreadName(m_service_name.c_str()); | ||
| 135 | |||
| 136 | kernel.RegisterHostThread(m_thread); | ||
| 137 | |||
| 138 | while (!m_shutdown_requested.load()) { | ||
| 139 | WaitAndProcessImpl(); | ||
| 140 | } | ||
| 141 | } | ||
| 85 | 142 | ||
| 86 | // Open a reference to the session to ensure it is not closed while the service request | 143 | void ServiceThread::Impl::RegisterServerSession(KServerSession* server_session, |
| 87 | // completes asynchronously. | 144 | std::shared_ptr<SessionRequestManager> manager) { |
| 88 | server_session->Open(); | 145 | // Open the server session. |
| 146 | server_session->Open(); | ||
| 89 | 147 | ||
| 90 | requests.emplace([server_session, context{std::move(context)}]() { | 148 | { |
| 91 | // Close the reference. | 149 | // Lock to get the set. |
| 92 | SCOPE_EXIT({ server_session->Close(); }); | 150 | std::scoped_lock lk{m_session_mutex}; |
| 93 | 151 | ||
| 94 | // Complete the service request. | 152 | // Insert the session and manager. |
| 95 | server_session->CompleteSyncRequest(*context); | 153 | m_sessions[server_session] = manager; |
| 96 | }); | ||
| 97 | } | 154 | } |
| 98 | condition.notify_one(); | 155 | |
| 156 | // Signal the wakeup event. | ||
| 157 | m_wakeup_event->Signal(); | ||
| 99 | } | 158 | } |
| 100 | 159 | ||
| 101 | ServiceThread::Impl::~Impl() { | 160 | ServiceThread::Impl::~Impl() { |
| 102 | condition.notify_all(); | 161 | // Shut down the processing thread. |
| 103 | for (auto& thread : threads) { | 162 | m_shutdown_requested.store(true); |
| 104 | thread.request_stop(); | 163 | m_wakeup_event->Signal(); |
| 105 | thread.join(); | 164 | m_host_thread.join(); |
| 165 | |||
| 166 | // Lock mutex. | ||
| 167 | m_session_mutex.lock(); | ||
| 168 | |||
| 169 | // Close all remaining sessions. | ||
| 170 | for (const auto& [server_session, manager] : m_sessions) { | ||
| 171 | server_session->Close(); | ||
| 106 | } | 172 | } |
| 173 | |||
| 174 | // Destroy remaining managers. | ||
| 175 | m_sessions.clear(); | ||
| 176 | |||
| 177 | // Close event. | ||
| 178 | m_wakeup_event->GetReadableEvent().Close(); | ||
| 179 | m_wakeup_event->Close(); | ||
| 180 | |||
| 181 | // Close thread. | ||
| 182 | m_thread->Close(); | ||
| 183 | |||
| 184 | // Close process. | ||
| 185 | m_process->Close(); | ||
| 186 | } | ||
| 187 | |||
| 188 | ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name) | ||
| 189 | : kernel{kernel_}, m_service_name{service_name} { | ||
| 190 | // Initialize process. | ||
| 191 | m_process = KProcess::Create(kernel); | ||
| 192 | KProcess::Initialize(m_process, kernel.System(), service_name, | ||
| 193 | KProcess::ProcessType::KernelInternal, kernel.GetSystemResourceLimit()); | ||
| 194 | |||
| 195 | // Reserve a new event from the process resource limit | ||
| 196 | KScopedResourceReservation event_reservation(m_process, LimitableResource::EventCountMax); | ||
| 197 | ASSERT(event_reservation.Succeeded()); | ||
| 198 | |||
| 199 | // Initialize event. | ||
| 200 | m_wakeup_event = KEvent::Create(kernel); | ||
| 201 | m_wakeup_event->Initialize(m_process); | ||
| 202 | |||
| 203 | // Commit the event reservation. | ||
| 204 | event_reservation.Commit(); | ||
| 205 | |||
| 206 | // Reserve a new thread from the process resource limit | ||
| 207 | KScopedResourceReservation thread_reservation(m_process, LimitableResource::ThreadCountMax); | ||
| 208 | ASSERT(thread_reservation.Succeeded()); | ||
| 209 | |||
| 210 | // Initialize thread. | ||
| 211 | m_thread = KThread::Create(kernel); | ||
| 212 | ASSERT(KThread::InitializeDummyThread(m_thread, m_process).IsSuccess()); | ||
| 213 | |||
| 214 | // Commit the thread reservation. | ||
| 215 | thread_reservation.Commit(); | ||
| 216 | |||
| 217 | // Start thread. | ||
| 218 | m_host_thread = std::jthread([this] { LoopProcess(); }); | ||
| 107 | } | 219 | } |
| 108 | 220 | ||
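The constructor follows a reserve/create/commit idiom: each KScopedResourceReservation rolls the quota back on scope exit unless Commit is called, so a failure between reserving and creating cannot leak the count. A standalone sketch of that RAII shape with stand-in types:

#include <cassert>

struct ResourceLimit { // stand-in for KResourceLimit
    int available{1};
    bool Reserve() {
        if (available <= 0) {
            return false;
        }
        --available;
        return true;
    }
    void Release() { ++available; }
};

class ScopedReservation { // stand-in for KScopedResourceReservation
public:
    explicit ScopedReservation(ResourceLimit& limit)
        : m_limit{limit}, m_succeeded{limit.Reserve()} {}
    ~ScopedReservation() {
        if (m_succeeded && !m_committed) {
            m_limit.Release(); // roll back if the object never materialized
        }
    }
    bool Succeeded() const { return m_succeeded; }
    void Commit() { m_committed = true; }

private:
    ResourceLimit& m_limit;
    bool m_succeeded;
    bool m_committed{false};
};

int main() {
    ResourceLimit threads;
    {
        ScopedReservation reservation(threads);
        assert(reservation.Succeeded());
        // ... create the thread/event here; only on success:
        reservation.Commit();
    }
    assert(threads.available == 0); // committed, so the quota stays consumed
}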
| 109 | ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name) | 221 | ServiceThread::ServiceThread(KernelCore& kernel, const std::string& name) |
| 110 | : impl{std::make_unique<Impl>(kernel, num_threads, name)} {} | 222 | : impl{std::make_unique<Impl>(kernel, name)} {} |
| 111 | 223 | ||
| 112 | ServiceThread::~ServiceThread() = default; | 224 | ServiceThread::~ServiceThread() = default; |
| 113 | 225 | ||
| 114 | void ServiceThread::QueueSyncRequest(KSession& session, | 226 | void ServiceThread::RegisterServerSession(KServerSession* session, |
| 115 | std::shared_ptr<HLERequestContext>&& context) { | 227 | std::shared_ptr<SessionRequestManager> manager) { |
| 116 | impl->QueueSyncRequest(session, std::move(context)); | 228 | impl->RegisterServerSession(session, manager); |
| 117 | } | 229 | } |
| 118 | 230 | ||
| 119 | } // namespace Kernel | 231 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h index c5896f2bd..fb4325531 100644 --- a/src/core/hle/kernel/service_thread.h +++ b/src/core/hle/kernel/service_thread.h | |||
| @@ -11,13 +11,15 @@ namespace Kernel { | |||
| 11 | class HLERequestContext; | 11 | class HLERequestContext; |
| 12 | class KernelCore; | 12 | class KernelCore; |
| 13 | class KSession; | 13 | class KSession; |
| 14 | class SessionRequestManager; | ||
| 14 | 15 | ||
| 15 | class ServiceThread final { | 16 | class ServiceThread final { |
| 16 | public: | 17 | public: |
| 17 | explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name); | 18 | explicit ServiceThread(KernelCore& kernel, const std::string& name); |
| 18 | ~ServiceThread(); | 19 | ~ServiceThread(); |
| 19 | 20 | ||
| 20 | void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context); | 21 | void RegisterServerSession(KServerSession* session, |
| 22 | std::shared_ptr<SessionRequestManager> manager); | ||
| 21 | 23 | ||
| 22 | private: | 24 | private: |
| 23 | class Impl; | 25 | class Impl; |
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 06b51e919..0228ce188 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h | |||
| @@ -53,6 +53,84 @@ public: | |||
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | template <typename Derived, typename Base> | 55 | template <typename Derived, typename Base> |
| 56 | class KAutoObjectWithSlabHeap : public Base { | ||
| 57 | static_assert(std::is_base_of<KAutoObject, Base>::value); | ||
| 58 | |||
| 59 | private: | ||
| 60 | static Derived* Allocate(KernelCore& kernel) { | ||
| 61 | return kernel.SlabHeap<Derived>().Allocate(kernel); | ||
| 62 | } | ||
| 63 | |||
| 64 | static void Free(KernelCore& kernel, Derived* obj) { | ||
| 65 | kernel.SlabHeap<Derived>().Free(obj); | ||
| 66 | } | ||
| 67 | |||
| 68 | public: | ||
| 69 | explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} | ||
| 70 | virtual ~KAutoObjectWithSlabHeap() = default; | ||
| 71 | |||
| 72 | virtual void Destroy() override { | ||
| 73 | const bool is_initialized = this->IsInitialized(); | ||
| 74 | uintptr_t arg = 0; | ||
| 75 | if (is_initialized) { | ||
| 76 | arg = this->GetPostDestroyArgument(); | ||
| 77 | this->Finalize(); | ||
| 78 | } | ||
| 79 | Free(kernel, static_cast<Derived*>(this)); | ||
| 80 | if (is_initialized) { | ||
| 81 | Derived::PostDestroy(arg); | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | virtual bool IsInitialized() const { | ||
| 86 | return true; | ||
| 87 | } | ||
| 88 | virtual uintptr_t GetPostDestroyArgument() const { | ||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 92 | size_t GetSlabIndex() const { | ||
| 93 | return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); | ||
| 94 | } | ||
| 95 | |||
| 96 | public: | ||
| 97 | static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { | ||
| 98 | kernel.SlabHeap<Derived>().Initialize(memory, memory_size); | ||
| 99 | } | ||
| 100 | |||
| 101 | static Derived* Create(KernelCore& kernel) { | ||
| 102 | Derived* obj = Allocate(kernel); | ||
| 103 | if (obj != nullptr) { | ||
| 104 | KAutoObject::Create(obj); | ||
| 105 | } | ||
| 106 | return obj; | ||
| 107 | } | ||
| 108 | |||
| 109 | static size_t GetObjectSize(KernelCore& kernel) { | ||
| 110 | return kernel.SlabHeap<Derived>().GetObjectSize(); | ||
| 111 | } | ||
| 112 | |||
| 113 | static size_t GetSlabHeapSize(KernelCore& kernel) { | ||
| 114 | return kernel.SlabHeap<Derived>().GetSlabHeapSize(); | ||
| 115 | } | ||
| 116 | |||
| 117 | static size_t GetPeakIndex(KernelCore& kernel) { | ||
| 118 | return kernel.SlabHeap<Derived>().GetPeakIndex(); | ||
| 119 | } | ||
| 120 | |||
| 121 | static uintptr_t GetSlabHeapAddress(KernelCore& kernel) { | ||
| 122 | return kernel.SlabHeap<Derived>().GetSlabHeapAddress(); | ||
| 123 | } | ||
| 124 | |||
| 125 | static size_t GetNumRemaining(KernelCore& kernel) { | ||
| 126 | return kernel.SlabHeap<Derived>().GetNumRemaining(); | ||
| 127 | } | ||
| 128 | |||
| 129 | protected: | ||
| 130 | KernelCore& kernel; | ||
| 131 | }; | ||
| 132 | |||
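The helper above funnels every Create and Destroy through the per-type slab heap obtained from KernelCore. A toy free-list slab heap showing the same Allocate/Free shape; this is purely illustrative, while the real KSlabHeap manages a fixed region of kernel-reserved memory:

#include <cassert>
#include <cstddef>
#include <new>
#include <vector>

template <typename T>
class TinySlabHeap { // illustrative only, not the real KSlabHeap
public:
    void Initialize(void* memory, std::size_t size) {
        auto* base = static_cast<T*>(memory);
        for (std::size_t i = 0; i < size / sizeof(T); ++i) {
            free_list.push_back(base + i); // carve the region into slots
        }
    }
    T* Allocate() {
        if (free_list.empty()) {
            return nullptr; // heap exhausted
        }
        T* slot = free_list.back();
        free_list.pop_back();
        return new (slot) T(); // construct in place
    }
    void Free(T* obj) {
        obj->~T();
        free_list.push_back(obj); // return the slot
    }

private:
    std::vector<T*> free_list;
};

struct Event { // stand-in for a slab-allocated kernel object
    int state{0};
};

int main() {
    alignas(Event) unsigned char backing[8 * sizeof(Event)];
    TinySlabHeap<Event> heap;
    heap.Initialize(backing, sizeof(backing));
    Event* e = heap.Allocate();
    assert(e != nullptr);
    heap.Free(e);
}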
| 133 | template <typename Derived, typename Base> | ||
| 56 | class KAutoObjectWithSlabHeapAndContainer : public Base { | 134 | class KAutoObjectWithSlabHeapAndContainer : public Base { |
| 57 | static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); | 135 | static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); |
| 58 | 136 | ||
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 4aca5b27d..9962ad171 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include "core/hle/kernel/k_memory_block.h" | 24 | #include "core/hle/kernel/k_memory_block.h" |
| 25 | #include "core/hle/kernel/k_memory_layout.h" | 25 | #include "core/hle/kernel/k_memory_layout.h" |
| 26 | #include "core/hle/kernel/k_page_table.h" | 26 | #include "core/hle/kernel/k_page_table.h" |
| 27 | #include "core/hle/kernel/k_port.h" | ||
| 27 | #include "core/hle/kernel/k_process.h" | 28 | #include "core/hle/kernel/k_process.h" |
| 28 | #include "core/hle/kernel/k_readable_event.h" | 29 | #include "core/hle/kernel/k_readable_event.h" |
| 29 | #include "core/hle/kernel/k_resource_limit.h" | 30 | #include "core/hle/kernel/k_resource_limit.h" |
| @@ -266,7 +267,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien | |||
| 266 | 267 | ||
| 267 | // Reserve a new session from the process resource limit. | 268 | // Reserve a new session from the process resource limit. |
| 268 | // FIXME: LimitableResource_SessionCountMax | 269 | // FIXME: LimitableResource_SessionCountMax |
| 269 | KScopedResourceReservation session_reservation(&process, LimitableResource::Sessions); | 270 | KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax); |
| 270 | if (session_reservation.Succeeded()) { | 271 | if (session_reservation.Succeeded()) { |
| 271 | session = T::Create(system.Kernel()); | 272 | session = T::Create(system.Kernel()); |
| 272 | } else { | 273 | } else { |
| @@ -297,7 +298,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien | |||
| 297 | 298 | ||
| 298 | // We successfully allocated a session, so add the object we allocated to the resource | 299 | // We successfully allocated a session, so add the object we allocated to the resource |
| 299 | // limit. | 300 | // limit. |
| 300 | // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::Sessions, 1); | 301 | // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::SessionCountMax, 1); |
| 301 | } | 302 | } |
| 302 | 303 | ||
| 303 | // Check that we successfully created a session. | 304 | // Check that we successfully created a session. |
| @@ -382,9 +383,9 @@ static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_n | |||
| 382 | 383 | ||
| 383 | // Create a session. | 384 | // Create a session. |
| 384 | KClientSession* session{}; | 385 | KClientSession* session{}; |
| 385 | R_TRY(port->CreateSession(std::addressof(session), | 386 | R_TRY(port->CreateSession(std::addressof(session))); |
| 386 | std::make_shared<SessionRequestManager>(kernel))); | 387 | |
| 387 | port->Close(); | 388 | kernel.RegisterNamedServiceHandler(port_name, &port->GetParent()->GetServerPort()); |
| 388 | 389 | ||
| 389 | // Register the session in the table, close the extra reference. | 390 | // Register the session in the table, close the extra reference. |
| 390 | handle_table.Register(*out, session); | 391 | handle_table.Register(*out, session); |
| @@ -655,27 +656,12 @@ static Result ArbitrateUnlock32(Core::System& system, u32 address) { | |||
| 655 | return ArbitrateUnlock(system, address); | 656 | return ArbitrateUnlock(system, address); |
| 656 | } | 657 | } |
| 657 | 658 | ||
| 658 | enum class BreakType : u32 { | ||
| 659 | Panic = 0, | ||
| 660 | AssertionFailed = 1, | ||
| 661 | PreNROLoad = 3, | ||
| 662 | PostNROLoad = 4, | ||
| 663 | PreNROUnload = 5, | ||
| 664 | PostNROUnload = 6, | ||
| 665 | CppException = 7, | ||
| 666 | }; | ||
| 667 | |||
| 668 | struct BreakReason { | ||
| 669 | union { | ||
| 670 | u32 raw; | ||
| 671 | BitField<0, 30, BreakType> break_type; | ||
| 672 | BitField<31, 1, u32> signal_debugger; | ||
| 673 | }; | ||
| 674 | }; | ||
| 675 | |||
| 676 | /// Break program execution | 659 | /// Break program execution |
| 677 | static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { | 660 | static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { |
| 678 | BreakReason break_reason{reason}; | 661 | BreakReason break_reason = |
| 662 | static_cast<BreakReason>(reason & ~static_cast<u32>(BreakReason::NotificationOnlyFlag)); | ||
| 663 | bool notification_only = (reason & static_cast<u32>(BreakReason::NotificationOnlyFlag)) != 0; | ||
| 664 | |||
| 679 | bool has_dumped_buffer{}; | 665 | bool has_dumped_buffer{}; |
| 680 | std::vector<u8> debug_buffer; | 666 | std::vector<u8> debug_buffer; |
| 681 | 667 | ||
| @@ -704,57 +690,56 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { | |||
| 704 | } | 690 | } |
| 705 | has_dumped_buffer = true; | 691 | has_dumped_buffer = true; |
| 706 | }; | 692 | }; |
| 707 | switch (break_reason.break_type) { | 693 | switch (break_reason) { |
| 708 | case BreakType::Panic: | 694 | case BreakReason::Panic: |
| 709 | LOG_CRITICAL(Debug_Emulated, "Signalling debugger, PANIC! info1=0x{:016X}, info2=0x{:016X}", | 695 | LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1, |
| 710 | info1, info2); | 696 | info2); |
| 711 | handle_debug_buffer(info1, info2); | 697 | handle_debug_buffer(info1, info2); |
| 712 | break; | 698 | break; |
| 713 | case BreakType::AssertionFailed: | 699 | case BreakReason::Assert: |
| 714 | LOG_CRITICAL(Debug_Emulated, | 700 | LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}", |
| 715 | "Signalling debugger, Assertion failed! info1=0x{:016X}, info2=0x{:016X}", | ||
| 716 | info1, info2); | 701 | info1, info2); |
| 717 | handle_debug_buffer(info1, info2); | 702 | handle_debug_buffer(info1, info2); |
| 718 | break; | 703 | break; |
| 719 | case BreakType::PreNROLoad: | 704 | case BreakReason::User: |
| 720 | LOG_WARNING( | 705 | LOG_WARNING(Debug_Emulated, "Userspace Break! 0x{:016X} with size 0x{:016X}", info1, info2); |
| 721 | Debug_Emulated, | 706 | handle_debug_buffer(info1, info2); |
| 722 | "Signalling debugger, Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", | ||
| 723 | info1, info2); | ||
| 724 | break; | 707 | break; |
| 725 | case BreakType::PostNROLoad: | 708 | case BreakReason::PreLoadDll: |
| 726 | LOG_WARNING(Debug_Emulated, | 709 | LOG_INFO(Debug_Emulated, |
| 727 | "Signalling debugger, Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1, | 710 | "Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1, |
| 728 | info2); | 711 | info2); |
| 729 | break; | 712 | break; |
| 730 | case BreakType::PreNROUnload: | 713 | case BreakReason::PostLoadDll: |
| 731 | LOG_WARNING( | 714 | LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1, |
| 732 | Debug_Emulated, | 715 | info2); |
| 733 | "Signalling debugger, Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", | ||
| 734 | info1, info2); | ||
| 735 | break; | 716 | break; |
| 736 | case BreakType::PostNROUnload: | 717 | case BreakReason::PreUnloadDll: |
| 737 | LOG_WARNING(Debug_Emulated, | 718 | LOG_INFO(Debug_Emulated, |
| 738 | "Signalling debugger, Unloaded an NRO at 0x{:016X} with size 0x{:016X}", info1, | 719 | "Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1, |
| 739 | info2); | 720 | info2); |
| 740 | break; | 721 | break; |
| 741 | case BreakType::CppException: | 722 | case BreakReason::PostUnloadDll: |
| 723 | LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}", | ||
| 724 | info1, info2); | ||
| 725 | break; | ||
| 726 | case BreakReason::CppException: | ||
| 742 | LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered."); | 727 | LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered."); |
| 743 | break; | 728 | break; |
| 744 | default: | 729 | default: |
| 745 | LOG_WARNING( | 730 | LOG_WARNING( |
| 746 | Debug_Emulated, | 731 | Debug_Emulated, |
| 747 | "Signalling debugger, Unknown break reason {}, info1=0x{:016X}, info2=0x{:016X}", | 732 | "Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}", |
| 748 | static_cast<u32>(break_reason.break_type.Value()), info1, info2); | 733 | reason, info1, info2); |
| 749 | handle_debug_buffer(info1, info2); | 734 | handle_debug_buffer(info1, info2); |
| 750 | break; | 735 | break; |
| 751 | } | 736 | } |
| 752 | 737 | ||
| 753 | system.GetReporter().SaveSvcBreakReport( | 738 | system.GetReporter().SaveSvcBreakReport(reason, notification_only, info1, info2, |
| 754 | static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger.As<bool>(), | 739 | has_dumped_buffer ? std::make_optional(debug_buffer) |
| 755 | info1, info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); | 740 | : std::nullopt); |
| 756 | 741 | ||
| 757 | if (!break_reason.signal_debugger) { | 742 | if (!notification_only) { |
| 758 | LOG_CRITICAL( | 743 | LOG_CRITICAL( |
| 759 | Debug_Emulated, | 744 | Debug_Emulated, |
| 760 | "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", | 745 | "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", |
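The decode above treats bit 31 of the reason word as a notification-only flag and the remaining bits as the reason proper. A sketch of that split; the enumerator values mirror the switch cases above and should be read as illustrative of the layout:

#include <cstdint>
#include <cstdio>

enum class BreakReason : std::uint32_t {
    Panic = 0,
    Assert = 1,
    User = 2,
    PreLoadDll = 3,
    PostLoadDll = 4,
    PreUnloadDll = 5,
    PostUnloadDll = 6,
    CppException = 7,
    NotificationOnlyFlag = 0x80000000,
};

int main() {
    const std::uint32_t reason = 0x80000003; // notification-only PreLoadDll
    const auto flag = static_cast<std::uint32_t>(BreakReason::NotificationOnlyFlag);
    const bool notification_only = (reason & flag) != 0;
    const auto break_reason = static_cast<BreakReason>(reason & ~flag);
    std::printf("notification_only=%d reason=%u\n", notification_only,
                static_cast<unsigned>(break_reason)); // prints 1 and 3
}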
| @@ -1715,13 +1700,13 @@ static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address | |||
| 1715 | auto& memory{system.Memory()}; | 1700 | auto& memory{system.Memory()}; |
| 1716 | const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; | 1701 | const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; |
| 1717 | 1702 | ||
| 1718 | memory.Write64(memory_info_address + 0x00, memory_info.addr); | 1703 | memory.Write64(memory_info_address + 0x00, memory_info.base_address); |
| 1719 | memory.Write64(memory_info_address + 0x08, memory_info.size); | 1704 | memory.Write64(memory_info_address + 0x08, memory_info.size); |
| 1720 | memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff); | 1705 | memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff); |
| 1721 | memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attr)); | 1706 | memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attribute)); |
| 1722 | memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.perm)); | 1707 | memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.permission)); |
| 1723 | memory.Write32(memory_info_address + 0x1c, memory_info.ipc_refcount); | 1708 | memory.Write32(memory_info_address + 0x1c, memory_info.ipc_count); |
| 1724 | memory.Write32(memory_info_address + 0x20, memory_info.device_refcount); | 1709 | memory.Write32(memory_info_address + 0x20, memory_info.device_count); |
| 1725 | memory.Write32(memory_info_address + 0x24, 0); | 1710 | memory.Write32(memory_info_address + 0x24, 0); |
| 1726 | 1711 | ||
| 1727 | // Page info appears to be currently unused by the kernel and is always set to zero. | 1712 | // Page info appears to be currently unused by the kernel and is always set to zero. |
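The writes above serialize a `Svc::MemoryInfo` field by field at fixed guest-visible offsets. As an illustration only (not in the patch), those offsets line up with the struct added to svc_types.h below, which hypothetical static_asserts would confirm:

    #include <cstddef> // offsetof
    static_assert(offsetof(Kernel::Svc::MemoryInfo, base_address) == 0x00);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, size)         == 0x08);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, state)        == 0x10);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, attribute)    == 0x14);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, permission)   == 0x18);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, ipc_count)    == 0x1c);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, device_count) == 0x20);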
| @@ -1942,7 +1927,7 @@ static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry | |||
| 1942 | 1927 | ||
| 1943 | // Reserve a new thread from the process resource limit (waiting up to 100ms). | 1928 | // Reserve a new thread from the process resource limit (waiting up to 100ms). |
| 1944 | KScopedResourceReservation thread_reservation( | 1929 | KScopedResourceReservation thread_reservation( |
| 1945 | kernel.CurrentProcess(), LimitableResource::Threads, 1, | 1930 | kernel.CurrentProcess(), LimitableResource::ThreadCountMax, 1, |
| 1946 | system.CoreTiming().GetGlobalTimeNs().count() + 100000000); | 1931 | system.CoreTiming().GetGlobalTimeNs().count() + 100000000); |
| 1947 | if (!thread_reservation.Succeeded()) { | 1932 | if (!thread_reservation.Succeeded()) { |
| 1948 | LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); | 1933 | LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); |
| @@ -2246,7 +2231,7 @@ static u64 GetSystemTick(Core::System& system) { | |||
| 2246 | auto& core_timing = system.CoreTiming(); | 2231 | auto& core_timing = system.CoreTiming(); |
| 2247 | 2232 | ||
| 2248 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) | 2233 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) |
| 2249 | const u64 result{system.CoreTiming().GetClockTicks()}; | 2234 | const u64 result{core_timing.GetClockTicks()}; |
| 2250 | 2235 | ||
| 2251 | if (!system.Kernel().IsMulticore()) { | 2236 | if (!system.Kernel().IsMulticore()) { |
| 2252 | core_timing.AddTicks(400U); | 2237 | core_timing.AddTicks(400U); |
| @@ -2343,7 +2328,7 @@ static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr addr | |||
| 2343 | 2328 | ||
| 2344 | // Reserve a new transfer memory from the process resource limit. | 2329 | // Reserve a new transfer memory from the process resource limit. |
| 2345 | KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(), | 2330 | KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(), |
| 2346 | LimitableResource::TransferMemory); | 2331 | LimitableResource::TransferMemoryCountMax); |
| 2347 | R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached); | 2332 | R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached); |
| 2348 | 2333 | ||
| 2349 | // Create the transfer memory. | 2334 | // Create the transfer memory. |
| @@ -2495,7 +2480,7 @@ static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_r | |||
| 2495 | 2480 | ||
| 2496 | // Reserve a new event from the process resource limit | 2481 | // Reserve a new event from the process resource limit |
| 2497 | KScopedResourceReservation event_reservation(kernel.CurrentProcess(), | 2482 | KScopedResourceReservation event_reservation(kernel.CurrentProcess(), |
| 2498 | LimitableResource::Events); | 2483 | LimitableResource::EventCountMax); |
| 2499 | R_UNLESS(event_reservation.Succeeded(), ResultLimitReached); | 2484 | R_UNLESS(event_reservation.Succeeded(), ResultLimitReached); |
| 2500 | 2485 | ||
| 2501 | // Create a new event. | 2486 | // Create a new event. |
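The `LimitableResource` renames in this file (Threads to ThreadCountMax, TransferMemory to TransferMemoryCountMax, Events to EventCountMax) all feed the same RAII reservation pattern. A condensed sketch under the new names, with creation details elided; this assumes KScopedResourceReservation releases the slot on destruction unless committed, which is how the hunks above use it:

    // Reserve, create, then commit only on success.
    KScopedResourceReservation reservation(kernel.CurrentProcess(),
                                           LimitableResource::EventCountMax);
    R_UNLESS(reservation.Succeeded(), ResultLimitReached);
    // ... create and register the kernel object ...
    reservation.Commit(); // keep the slot; on an early return it is released instead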
| @@ -2538,11 +2523,6 @@ static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out | |||
| 2538 | static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { | 2523 | static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { |
| 2539 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); | 2524 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); |
| 2540 | 2525 | ||
| 2541 | // This function currently only allows retrieving a process' status. | ||
| 2542 | enum class InfoType { | ||
| 2543 | Status, | ||
| 2544 | }; | ||
| 2545 | |||
| 2546 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 2526 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); |
| 2547 | KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); | 2527 | KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); |
| 2548 | if (process.IsNull()) { | 2528 | if (process.IsNull()) { |
| @@ -2551,9 +2531,9 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand | |||
| 2551 | return ResultInvalidHandle; | 2531 | return ResultInvalidHandle; |
| 2552 | } | 2532 | } |
| 2553 | 2533 | ||
| 2554 | const auto info_type = static_cast<InfoType>(type); | 2534 | const auto info_type = static_cast<ProcessInfoType>(type); |
| 2555 | if (info_type != InfoType::Status) { | 2535 | if (info_type != ProcessInfoType::ProcessState) { |
| 2556 | LOG_ERROR(Kernel_SVC, "Expected info_type to be Status but got {} instead", type); | 2536 | LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", type); |
| 2557 | return ResultInvalidEnumValue; | 2537 | return ResultInvalidEnumValue; |
| 2558 | } | 2538 | } |
| 2559 | 2539 | ||
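With the function-local `InfoType` enum removed, the check now goes through the shared `Svc::ProcessInfoType` declared in svc_types.h below. A hypothetical call site following the signature in this hunk:

    // Illustrative only: query a process' state via the shared enum.
    u64 state_raw{};
    const Result res = GetProcessInfo(system, &state_raw, process_handle,
                                      static_cast<u32>(Kernel::Svc::ProcessInfoType::ProcessState));
    if (res.IsSuccess()) {
        const auto state = static_cast<Kernel::Svc::ProcessState>(state_raw);
        // ...
    }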
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h index f27cade33..b7ca53085 100644 --- a/src/core/hle/kernel/svc_results.h +++ b/src/core/hle/kernel/svc_results.h | |||
| @@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125}; | |||
| 37 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; | 37 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; |
| 38 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; | 38 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; |
| 39 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; | 39 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; |
| 40 | constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259}; | ||
| 40 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; | 41 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; |
| 41 | 42 | ||
| 42 | } // namespace Kernel | 43 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index abb9847fe..33eebcef6 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | 8 | ||
| 9 | namespace Kernel::Svc { | 9 | namespace Kernel::Svc { |
| 10 | 10 | ||
| 11 | using Handle = u32; | ||
| 12 | |||
| 11 | enum class MemoryState : u32 { | 13 | enum class MemoryState : u32 { |
| 12 | Free = 0x00, | 14 | Free = 0x00, |
| 13 | Io = 0x01, | 15 | Io = 0x01, |
| @@ -22,8 +24,8 @@ enum class MemoryState : u32 { | |||
| 22 | Ipc = 0x0A, | 24 | Ipc = 0x0A, |
| 23 | Stack = 0x0B, | 25 | Stack = 0x0B, |
| 24 | ThreadLocal = 0x0C, | 26 | ThreadLocal = 0x0C, |
| 25 | Transferred = 0x0D, | 27 | Transfered = 0x0D, |
| 26 | SharedTransferred = 0x0E, | 28 | SharedTransfered = 0x0E, |
| 27 | SharedCode = 0x0F, | 29 | SharedCode = 0x0F, |
| 28 | Inaccessible = 0x10, | 30 | Inaccessible = 0x10, |
| 29 | NonSecureIpc = 0x11, | 31 | NonSecureIpc = 0x11, |
| @@ -32,6 +34,7 @@ enum class MemoryState : u32 { | |||
| 32 | GeneratedCode = 0x14, | 34 | GeneratedCode = 0x14, |
| 33 | CodeOut = 0x15, | 35 | CodeOut = 0x15, |
| 34 | Coverage = 0x16, | 36 | Coverage = 0x16, |
| 37 | Insecure = 0x17, | ||
| 35 | }; | 38 | }; |
| 36 | DECLARE_ENUM_FLAG_OPERATORS(MemoryState); | 39 | DECLARE_ENUM_FLAG_OPERATORS(MemoryState); |
| 37 | 40 | ||
| @@ -54,17 +57,6 @@ enum class MemoryPermission : u32 { | |||
| 54 | }; | 57 | }; |
| 55 | DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); | 58 | DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); |
| 56 | 59 | ||
| 57 | struct MemoryInfo { | ||
| 58 | u64 addr{}; | ||
| 59 | u64 size{}; | ||
| 60 | MemoryState state{}; | ||
| 61 | MemoryAttribute attr{}; | ||
| 62 | MemoryPermission perm{}; | ||
| 63 | u32 ipc_refcount{}; | ||
| 64 | u32 device_refcount{}; | ||
| 65 | u32 padding{}; | ||
| 66 | }; | ||
| 67 | |||
| 68 | enum class SignalType : u32 { | 60 | enum class SignalType : u32 { |
| 69 | Signal = 0, | 61 | Signal = 0, |
| 70 | SignalAndIncrementIfEqual = 1, | 62 | SignalAndIncrementIfEqual = 1, |
| @@ -83,6 +75,13 @@ enum class YieldType : s64 { | |||
| 83 | ToAnyThread = -2, | 75 | ToAnyThread = -2, |
| 84 | }; | 76 | }; |
| 85 | 77 | ||
| 78 | enum class ThreadExitReason : u32 { | ||
| 79 | ExitThread = 0, | ||
| 80 | TerminateThread = 1, | ||
| 81 | ExitProcess = 2, | ||
| 82 | TerminateProcess = 3, | ||
| 83 | }; | ||
| 84 | |||
| 86 | enum class ThreadActivity : u32 { | 85 | enum class ThreadActivity : u32 { |
| 87 | Runnable = 0, | 86 | Runnable = 0, |
| 88 | Paused = 1, | 87 | Paused = 1, |
| @@ -108,6 +107,489 @@ enum class ProcessState : u32 { | |||
| 108 | DebugBreak = 7, | 107 | DebugBreak = 7, |
| 109 | }; | 108 | }; |
| 110 | 109 | ||
| 110 | enum class ProcessExitReason : u32 { | ||
| 111 | ExitProcess = 0, | ||
| 112 | TerminateProcess = 1, | ||
| 113 | Exception = 2, | ||
| 114 | }; | ||
| 115 | |||
| 111 | constexpr inline size_t ThreadLocalRegionSize = 0x200; | 116 | constexpr inline size_t ThreadLocalRegionSize = 0x200; |
| 112 | 117 | ||
| 118 | struct PageInfo { | ||
| 119 | u32 flags; | ||
| 120 | }; | ||
| 121 | |||
| 122 | // Info Types. | ||
| 123 | enum class InfoType : u32 { | ||
| 124 | CoreMask = 0, | ||
| 125 | PriorityMask = 1, | ||
| 126 | AliasRegionAddress = 2, | ||
| 127 | AliasRegionSize = 3, | ||
| 128 | HeapRegionAddress = 4, | ||
| 129 | HeapRegionSize = 5, | ||
| 130 | TotalMemorySize = 6, | ||
| 131 | UsedMemorySize = 7, | ||
| 132 | DebuggerAttached = 8, | ||
| 133 | ResourceLimit = 9, | ||
| 134 | IdleTickCount = 10, | ||
| 135 | RandomEntropy = 11, | ||
| 136 | AslrRegionAddress = 12, | ||
| 137 | AslrRegionSize = 13, | ||
| 138 | StackRegionAddress = 14, | ||
| 139 | StackRegionSize = 15, | ||
| 140 | SystemResourceSizeTotal = 16, | ||
| 141 | SystemResourceSizeUsed = 17, | ||
| 142 | ProgramId = 18, | ||
| 143 | InitialProcessIdRange = 19, | ||
| 144 | UserExceptionContextAddress = 20, | ||
| 145 | TotalNonSystemMemorySize = 21, | ||
| 146 | UsedNonSystemMemorySize = 22, | ||
| 147 | IsApplication = 23, | ||
| 148 | FreeThreadCount = 24, | ||
| 149 | ThreadTickCount = 25, | ||
| 150 | IsSvcPermitted = 26, | ||
| 151 | |||
| 152 | MesosphereMeta = 65000, | ||
| 153 | MesosphereCurrentProcess = 65001, | ||
| 154 | }; | ||
| 155 | |||
| 156 | enum class BreakReason : u32 { | ||
| 157 | Panic = 0, | ||
| 158 | Assert = 1, | ||
| 159 | User = 2, | ||
| 160 | PreLoadDll = 3, | ||
| 161 | PostLoadDll = 4, | ||
| 162 | PreUnloadDll = 5, | ||
| 163 | PostUnloadDll = 6, | ||
| 164 | CppException = 7, | ||
| 165 | |||
| 166 | NotificationOnlyFlag = 0x80000000, | ||
| 167 | }; | ||
| 168 | |||
| 169 | enum class DebugEvent : u32 { | ||
| 170 | CreateProcess = 0, | ||
| 171 | CreateThread = 1, | ||
| 172 | ExitProcess = 2, | ||
| 173 | ExitThread = 3, | ||
| 174 | Exception = 4, | ||
| 175 | }; | ||
| 176 | |||
| 177 | enum class DebugThreadParam : u32 { | ||
| 178 | Priority = 0, | ||
| 179 | State = 1, | ||
| 180 | IdealCore = 2, | ||
| 181 | CurrentCore = 3, | ||
| 182 | AffinityMask = 4, | ||
| 183 | }; | ||
| 184 | |||
| 185 | enum class DebugException : u32 { | ||
| 186 | UndefinedInstruction = 0, | ||
| 187 | InstructionAbort = 1, | ||
| 188 | DataAbort = 2, | ||
| 189 | AlignmentFault = 3, | ||
| 190 | DebuggerAttached = 4, | ||
| 191 | BreakPoint = 5, | ||
| 192 | UserBreak = 6, | ||
| 193 | DebuggerBreak = 7, | ||
| 194 | UndefinedSystemCall = 8, | ||
| 195 | MemorySystemError = 9, | ||
| 196 | }; | ||
| 197 | |||
| 198 | enum class DebugEventFlag : u32 { | ||
| 199 | Stopped = (1u << 0), | ||
| 200 | }; | ||
| 201 | |||
| 202 | enum class BreakPointType : u32 { | ||
| 203 | HardwareInstruction = 0, | ||
| 204 | HardwareData = 1, | ||
| 205 | }; | ||
| 206 | |||
| 207 | enum class HardwareBreakPointRegisterName : u32 { | ||
| 208 | I0 = 0, | ||
| 209 | I1 = 1, | ||
| 210 | I2 = 2, | ||
| 211 | I3 = 3, | ||
| 212 | I4 = 4, | ||
| 213 | I5 = 5, | ||
| 214 | I6 = 6, | ||
| 215 | I7 = 7, | ||
| 216 | I8 = 8, | ||
| 217 | I9 = 9, | ||
| 218 | I10 = 10, | ||
| 219 | I11 = 11, | ||
| 220 | I12 = 12, | ||
| 221 | I13 = 13, | ||
| 222 | I14 = 14, | ||
| 223 | I15 = 15, | ||
| 224 | D0 = 16, | ||
| 225 | D1 = 17, | ||
| 226 | D2 = 18, | ||
| 227 | D3 = 19, | ||
| 228 | D4 = 20, | ||
| 229 | D5 = 21, | ||
| 230 | D6 = 22, | ||
| 231 | D7 = 23, | ||
| 232 | D8 = 24, | ||
| 233 | D9 = 25, | ||
| 234 | D10 = 26, | ||
| 235 | D11 = 27, | ||
| 236 | D12 = 28, | ||
| 237 | D13 = 29, | ||
| 238 | D14 = 30, | ||
| 239 | D15 = 31, | ||
| 240 | }; | ||
| 241 | |||
| 242 | namespace lp64 { | ||
| 243 | struct LastThreadContext { | ||
| 244 | u64 fp; | ||
| 245 | u64 sp; | ||
| 246 | u64 lr; | ||
| 247 | u64 pc; | ||
| 248 | }; | ||
| 249 | |||
| 250 | struct PhysicalMemoryInfo { | ||
| 251 | PAddr physical_address; | ||
| 252 | u64 virtual_address; | ||
| 253 | u64 size; | ||
| 254 | }; | ||
| 255 | |||
| 256 | struct DebugInfoCreateProcess { | ||
| 257 | u64 program_id; | ||
| 258 | u64 process_id; | ||
| 259 | std::array<char, 0xC> name; | ||
| 260 | u32 flags; | ||
| 261 | u64 user_exception_context_address; // 5.0.0+ | ||
| 262 | }; | ||
| 263 | |||
| 264 | struct DebugInfoCreateThread { | ||
| 265 | u64 thread_id; | ||
| 266 | u64 tls_address; | ||
| 267 | // Removed in 11.0.0 u64 entrypoint; | ||
| 268 | }; | ||
| 269 | |||
| 270 | struct DebugInfoExitProcess { | ||
| 271 | ProcessExitReason reason; | ||
| 272 | }; | ||
| 273 | |||
| 274 | struct DebugInfoExitThread { | ||
| 275 | ThreadExitReason reason; | ||
| 276 | }; | ||
| 277 | |||
| 278 | struct DebugInfoUndefinedInstructionException { | ||
| 279 | u32 insn; | ||
| 280 | }; | ||
| 281 | |||
| 282 | struct DebugInfoDataAbortException { | ||
| 283 | u64 address; | ||
| 284 | }; | ||
| 285 | |||
| 286 | struct DebugInfoAlignmentFaultException { | ||
| 287 | u64 address; | ||
| 288 | }; | ||
| 289 | |||
| 290 | struct DebugInfoBreakPointException { | ||
| 291 | BreakPointType type; | ||
| 292 | u64 address; | ||
| 293 | }; | ||
| 294 | |||
| 295 | struct DebugInfoUserBreakException { | ||
| 296 | BreakReason break_reason; | ||
| 297 | u64 address; | ||
| 298 | u64 size; | ||
| 299 | }; | ||
| 300 | |||
| 301 | struct DebugInfoDebuggerBreakException { | ||
| 302 | std::array<u64, 4> active_thread_ids; | ||
| 303 | }; | ||
| 304 | |||
| 305 | struct DebugInfoUndefinedSystemCallException { | ||
| 306 | u32 id; | ||
| 307 | }; | ||
| 308 | |||
| 309 | union DebugInfoSpecificException { | ||
| 310 | DebugInfoUndefinedInstructionException undefined_instruction; | ||
| 311 | DebugInfoDataAbortException data_abort; | ||
| 312 | DebugInfoAlignmentFaultException alignment_fault; | ||
| 313 | DebugInfoBreakPointException break_point; | ||
| 314 | DebugInfoUserBreakException user_break; | ||
| 315 | DebugInfoDebuggerBreakException debugger_break; | ||
| 316 | DebugInfoUndefinedSystemCallException undefined_system_call; | ||
| 317 | u64 raw; | ||
| 318 | }; | ||
| 319 | |||
| 320 | struct DebugInfoException { | ||
| 321 | DebugException type; | ||
| 322 | u64 address; | ||
| 323 | DebugInfoSpecificException specific; | ||
| 324 | }; | ||
| 325 | |||
| 326 | union DebugInfo { | ||
| 327 | DebugInfoCreateProcess create_process; | ||
| 328 | DebugInfoCreateThread create_thread; | ||
| 329 | DebugInfoExitProcess exit_process; | ||
| 330 | DebugInfoExitThread exit_thread; | ||
| 331 | DebugInfoException exception; | ||
| 332 | }; | ||
| 333 | |||
| 334 | struct DebugEventInfo { | ||
| 335 | DebugEvent type; | ||
| 336 | u32 flags; | ||
| 337 | u64 thread_id; | ||
| 338 | DebugInfo info; | ||
| 339 | }; | ||
| 340 | static_assert(sizeof(DebugEventInfo) >= 0x40); | ||
| 341 | |||
| 342 | struct SecureMonitorArguments { | ||
| 343 | std::array<u64, 8> r; | ||
| 344 | }; | ||
| 345 | static_assert(sizeof(SecureMonitorArguments) == 0x40); | ||
| 346 | } // namespace lp64 | ||
| 347 | |||
| 348 | namespace ilp32 { | ||
| 349 | struct LastThreadContext { | ||
| 350 | u32 fp; | ||
| 351 | u32 sp; | ||
| 352 | u32 lr; | ||
| 353 | u32 pc; | ||
| 354 | }; | ||
| 355 | |||
| 356 | struct PhysicalMemoryInfo { | ||
| 357 | PAddr physical_address; | ||
| 358 | u32 virtual_address; | ||
| 359 | u32 size; | ||
| 360 | }; | ||
| 361 | |||
| 362 | struct DebugInfoCreateProcess { | ||
| 363 | u64 program_id; | ||
| 364 | u64 process_id; | ||
| 365 | std::array<char, 0xC> name; | ||
| 366 | u32 flags; | ||
| 367 | u32 user_exception_context_address; // 5.0.0+ | ||
| 368 | }; | ||
| 369 | |||
| 370 | struct DebugInfoCreateThread { | ||
| 371 | u64 thread_id; | ||
| 372 | u32 tls_address; | ||
| 373 | // Removed in 11.0.0 u32 entrypoint; | ||
| 374 | }; | ||
| 375 | |||
| 376 | struct DebugInfoExitProcess { | ||
| 377 | ProcessExitReason reason; | ||
| 378 | }; | ||
| 379 | |||
| 380 | struct DebugInfoExitThread { | ||
| 381 | ThreadExitReason reason; | ||
| 382 | }; | ||
| 383 | |||
| 384 | struct DebugInfoUndefinedInstructionException { | ||
| 385 | u32 insn; | ||
| 386 | }; | ||
| 387 | |||
| 388 | struct DebugInfoDataAbortException { | ||
| 389 | u32 address; | ||
| 390 | }; | ||
| 391 | |||
| 392 | struct DebugInfoAlignmentFaultException { | ||
| 393 | u32 address; | ||
| 394 | }; | ||
| 395 | |||
| 396 | struct DebugInfoBreakPointException { | ||
| 397 | BreakPointType type; | ||
| 398 | u32 address; | ||
| 399 | }; | ||
| 400 | |||
| 401 | struct DebugInfoUserBreakException { | ||
| 402 | BreakReason break_reason; | ||
| 403 | u32 address; | ||
| 404 | u32 size; | ||
| 405 | }; | ||
| 406 | |||
| 407 | struct DebugInfoDebuggerBreakException { | ||
| 408 | std::array<u64, 4> active_thread_ids; | ||
| 409 | }; | ||
| 410 | |||
| 411 | struct DebugInfoUndefinedSystemCallException { | ||
| 412 | u32 id; | ||
| 413 | }; | ||
| 414 | |||
| 415 | union DebugInfoSpecificException { | ||
| 416 | DebugInfoUndefinedInstructionException undefined_instruction; | ||
| 417 | DebugInfoDataAbortException data_abort; | ||
| 418 | DebugInfoAlignmentFaultException alignment_fault; | ||
| 419 | DebugInfoBreakPointException break_point; | ||
| 420 | DebugInfoUserBreakException user_break; | ||
| 421 | DebugInfoDebuggerBreakException debugger_break; | ||
| 422 | DebugInfoUndefinedSystemCallException undefined_system_call; | ||
| 423 | u64 raw; | ||
| 424 | }; | ||
| 425 | |||
| 426 | struct DebugInfoException { | ||
| 427 | DebugException type; | ||
| 428 | u32 address; | ||
| 429 | DebugInfoSpecificException specific; | ||
| 430 | }; | ||
| 431 | |||
| 432 | union DebugInfo { | ||
| 433 | DebugInfoCreateProcess create_process; | ||
| 434 | DebugInfoCreateThread create_thread; | ||
| 435 | DebugInfoExitProcess exit_process; | ||
| 436 | DebugInfoExitThread exit_thread; | ||
| 437 | DebugInfoException exception; | ||
| 438 | }; | ||
| 439 | |||
| 440 | struct DebugEventInfo { | ||
| 441 | DebugEvent type; | ||
| 442 | u32 flags; | ||
| 443 | u64 thread_id; | ||
| 444 | DebugInfo info; | ||
| 445 | }; | ||
| 446 | |||
| 447 | struct SecureMonitorArguments { | ||
| 448 | std::array<u32, 8> r; | ||
| 449 | }; | ||
| 450 | static_assert(sizeof(SecureMonitorArguments) == 0x20); | ||
| 451 | } // namespace ilp32 | ||
| 452 | |||
| 453 | struct ThreadContext { | ||
| 454 | std::array<u64, 29> r; | ||
| 455 | u64 fp; | ||
| 456 | u64 lr; | ||
| 457 | u64 sp; | ||
| 458 | u64 pc; | ||
| 459 | u32 pstate; | ||
| 460 | u32 padding; | ||
| 461 | std::array<u128, 32> v; | ||
| 462 | u32 fpcr; | ||
| 463 | u32 fpsr; | ||
| 464 | u64 tpidr; | ||
| 465 | }; | ||
| 466 | static_assert(sizeof(ThreadContext) == 0x320); | ||
| 467 | |||
| 468 | struct MemoryInfo { | ||
| 469 | u64 base_address; | ||
| 470 | u64 size; | ||
| 471 | MemoryState state; | ||
| 472 | MemoryAttribute attribute; | ||
| 473 | MemoryPermission permission; | ||
| 474 | u32 ipc_count; | ||
| 475 | u32 device_count; | ||
| 476 | u32 padding; | ||
| 477 | }; | ||
| 478 | |||
| 479 | enum class LimitableResource : u32 { | ||
| 480 | PhysicalMemoryMax = 0, | ||
| 481 | ThreadCountMax = 1, | ||
| 482 | EventCountMax = 2, | ||
| 483 | TransferMemoryCountMax = 3, | ||
| 484 | SessionCountMax = 4, | ||
| 485 | Count, | ||
| 486 | }; | ||
| 487 | |||
| 488 | enum class IoPoolType : u32 { | ||
| 489 | // Not supported. | ||
| 490 | Count = 0, | ||
| 491 | }; | ||
| 492 | |||
| 493 | enum class MemoryMapping : u32 { | ||
| 494 | IoRegister = 0, | ||
| 495 | Uncached = 1, | ||
| 496 | Memory = 2, | ||
| 497 | }; | ||
| 498 | |||
| 499 | enum class KernelDebugType : u32 { | ||
| 500 | Thread = 0, | ||
| 501 | ThreadCallStack = 1, | ||
| 502 | KernelObject = 2, | ||
| 503 | Handle_ = 3, | ||
| 504 | Memory = 4, | ||
| 505 | PageTable = 5, | ||
| 506 | CpuUtilization = 6, | ||
| 507 | Process = 7, | ||
| 508 | SuspendProcess = 8, | ||
| 509 | ResumeProcess = 9, | ||
| 510 | Port = 10, | ||
| 511 | }; | ||
| 512 | |||
| 513 | enum class KernelTraceState : u32 { | ||
| 514 | Disabled = 0, | ||
| 515 | Enabled = 1, | ||
| 516 | }; | ||
| 517 | |||
| 518 | enum class CodeMemoryOperation : u32 { | ||
| 519 | Map = 0, | ||
| 520 | MapToOwner = 1, | ||
| 521 | Unmap = 2, | ||
| 522 | UnmapFromOwner = 3, | ||
| 523 | }; | ||
| 524 | |||
| 525 | enum class InterruptType : u32 { | ||
| 526 | Edge = 0, | ||
| 527 | Level = 1, | ||
| 528 | }; | ||
| 529 | |||
| 530 | enum class DeviceName { | ||
| 531 | Afi = 0, | ||
| 532 | Avpc = 1, | ||
| 533 | Dc = 2, | ||
| 534 | Dcb = 3, | ||
| 535 | Hc = 4, | ||
| 536 | Hda = 5, | ||
| 537 | Isp2 = 6, | ||
| 538 | MsencNvenc = 7, | ||
| 539 | Nv = 8, | ||
| 540 | Nv2 = 9, | ||
| 541 | Ppcs = 10, | ||
| 542 | Sata = 11, | ||
| 543 | Vi = 12, | ||
| 544 | Vic = 13, | ||
| 545 | XusbHost = 14, | ||
| 546 | XusbDev = 15, | ||
| 547 | Tsec = 16, | ||
| 548 | Ppcs1 = 17, | ||
| 549 | Dc1 = 18, | ||
| 550 | Sdmmc1a = 19, | ||
| 551 | Sdmmc2a = 20, | ||
| 552 | Sdmmc3a = 21, | ||
| 553 | Sdmmc4a = 22, | ||
| 554 | Isp2b = 23, | ||
| 555 | Gpu = 24, | ||
| 556 | Gpub = 25, | ||
| 557 | Ppcs2 = 26, | ||
| 558 | Nvdec = 27, | ||
| 559 | Ape = 28, | ||
| 560 | Se = 29, | ||
| 561 | Nvjpg = 30, | ||
| 562 | Hc1 = 31, | ||
| 563 | Se1 = 32, | ||
| 564 | Axiap = 33, | ||
| 565 | Etr = 34, | ||
| 566 | Tsecb = 35, | ||
| 567 | Tsec1 = 36, | ||
| 568 | Tsecb1 = 37, | ||
| 569 | Nvdec1 = 38, | ||
| 570 | Count, | ||
| 571 | }; | ||
| 572 | |||
| 573 | enum class SystemInfoType : u32 { | ||
| 574 | TotalPhysicalMemorySize = 0, | ||
| 575 | UsedPhysicalMemorySize = 1, | ||
| 576 | InitialProcessIdRange = 2, | ||
| 577 | }; | ||
| 578 | |||
| 579 | enum class ProcessInfoType : u32 { | ||
| 580 | ProcessState = 0, | ||
| 581 | }; | ||
| 582 | |||
| 583 | struct CreateProcessParameter { | ||
| 584 | std::array<char, 12> name; | ||
| 585 | u32 version; | ||
| 586 | u64 program_id; | ||
| 587 | u64 code_address; | ||
| 588 | s32 code_num_pages; | ||
| 589 | u32 flags; | ||
| 590 | Handle reslimit; | ||
| 591 | s32 system_resource_num_pages; | ||
| 592 | }; | ||
| 593 | static_assert(sizeof(CreateProcessParameter) == 0x30); | ||
| 594 | |||
| 113 | } // namespace Kernel::Svc | 595 | } // namespace Kernel::Svc |
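The new type blocks close with size checks (`sizeof(ThreadContext) == 0x320`, `sizeof(CreateProcessParameter) == 0x30`) that pin down the ABI these structs share with the guest. For illustration only (not in the patch), the 0x30 figure follows from natural alignment with no implicit padding, which hypothetical offset checks would make visible:

    #include <cstddef> // offsetof
    static_assert(offsetof(Kernel::Svc::CreateProcessParameter, version)    == 0x0C);
    static_assert(offsetof(Kernel::Svc::CreateProcessParameter, program_id) == 0x10);
    static_assert(offsetof(Kernel::Svc::CreateProcessParameter, reslimit)   == 0x28);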
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index ef4b2d417..56c990728 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -423,16 +423,17 @@ constexpr void UpdateCurrentResultReference<const Result>(Result result_referenc | |||
| 423 | } // namespace ResultImpl | 423 | } // namespace ResultImpl |
| 424 | 424 | ||
| 425 | #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \ | 425 | #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \ |
| 426 | [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \ | 426 | [[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) = \ |
| 427 | std::same_as<decltype(__TmpCurrentResultReference), Result&>; \ | 427 | std::same_as<decltype(__TmpCurrentResultReference), Result&>; \ |
| 428 | [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \ | 428 | [[maybe_unused]] Result CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference; \ |
| 429 | [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \ | 429 | [[maybe_unused]] Result CONCAT2(__tmp_result_, COUNTER_VALUE) = ResultSuccess; \ |
| 430 | Result& __TmpCurrentResultReference = \ | 430 | Result& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE) \ |
| 431 | HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE | 431 | ? CONCAT2(PrevRef_, COUNTER_VALUE) \ |
| 432 | : CONCAT2(__tmp_result_, COUNTER_VALUE) | ||
| 432 | 433 | ||
| 433 | #define ON_RESULT_RETURN_IMPL(...) \ | 434 | #define ON_RESULT_RETURN_IMPL(...) \ |
| 434 | static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \ | 435 | static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \ |
| 435 | auto RESULT_GUARD_STATE_##__COUNTER__ = \ | 436 | auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) = \ |
| 436 | ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \ | 437 | ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \ |
| 437 | __TmpCurrentResultReference) + \ | 438 | __TmpCurrentResultReference) + \ |
| 438 | [&]() | 439 | [&]() |
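The result.h hunk swaps direct `##` pasting for `CONCAT2`. The distinction matters because the operands of `##` are not macro-expanded: pasting `__COUNTER__` directly yields the literal token `__COUNTER__`, while routing it through a two-level helper expands the counter first, giving each guard a unique name. A standalone sketch of the idiom, assuming the usual definition of `CONCAT2`:

    #define CONCAT2_IMPL(a, b) a##b            // pastes tokens verbatim
    #define CONCAT2(a, b) CONCAT2_IMPL(a, b)   // expands a and b, then pastes

    #define BAD(name)  int name##__COUNTER__          // -> int name__COUNTER__ (never unique)
    #define GOOD(name) int CONCAT2(name, __COUNTER__) // -> int name0, name1, ...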
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp index bb838e285..85a3f0802 100644 --- a/src/core/hle/service/acc/acc.cpp +++ b/src/core/hle/service/acc/acc.cpp | |||
| @@ -512,10 +512,11 @@ protected: | |||
| 512 | 512 | ||
| 513 | class IManagerForApplication final : public ServiceFramework<IManagerForApplication> { | 513 | class IManagerForApplication final : public ServiceFramework<IManagerForApplication> { |
| 514 | public: | 514 | public: |
| 515 | explicit IManagerForApplication(Core::System& system_, Common::UUID user_id_) | 515 | explicit IManagerForApplication(Core::System& system_, |
| 516 | const std::shared_ptr<ProfileManager>& profile_manager_) | ||
| 516 | : ServiceFramework{system_, "IManagerForApplication"}, | 517 | : ServiceFramework{system_, "IManagerForApplication"}, |
| 517 | ensure_token_id{std::make_shared<EnsureTokenIdCacheAsyncInterface>(system)}, | 518 | ensure_token_id{std::make_shared<EnsureTokenIdCacheAsyncInterface>(system)}, |
| 518 | user_id{user_id_} { | 519 | profile_manager{profile_manager_} { |
| 519 | // clang-format off | 520 | // clang-format off |
| 520 | static const FunctionInfo functions[] = { | 521 | static const FunctionInfo functions[] = { |
| 521 | {0, &IManagerForApplication::CheckAvailability, "CheckAvailability"}, | 522 | {0, &IManagerForApplication::CheckAvailability, "CheckAvailability"}, |
| @@ -545,7 +546,7 @@ private: | |||
| 545 | 546 | ||
| 546 | IPC::ResponseBuilder rb{ctx, 4}; | 547 | IPC::ResponseBuilder rb{ctx, 4}; |
| 547 | rb.Push(ResultSuccess); | 548 | rb.Push(ResultSuccess); |
| 548 | rb.PushRaw<u64>(user_id.Hash()); | 549 | rb.PushRaw<u64>(profile_manager->GetLastOpenedUser().Hash()); |
| 549 | } | 550 | } |
| 550 | 551 | ||
| 551 | void EnsureIdTokenCacheAsync(Kernel::HLERequestContext& ctx) { | 552 | void EnsureIdTokenCacheAsync(Kernel::HLERequestContext& ctx) { |
| @@ -575,17 +576,20 @@ private: | |||
| 575 | 576 | ||
| 576 | IPC::ResponseBuilder rb{ctx, 4}; | 577 | IPC::ResponseBuilder rb{ctx, 4}; |
| 577 | rb.Push(ResultSuccess); | 578 | rb.Push(ResultSuccess); |
| 578 | rb.PushRaw<u64>(user_id.Hash()); | 579 | rb.PushRaw<u64>(profile_manager->GetLastOpenedUser().Hash()); |
| 579 | } | 580 | } |
| 580 | 581 | ||
| 581 | void StoreOpenContext(Kernel::HLERequestContext& ctx) { | 582 | void StoreOpenContext(Kernel::HLERequestContext& ctx) { |
| 582 | LOG_WARNING(Service_ACC, "(STUBBED) called"); | 583 | LOG_DEBUG(Service_ACC, "called"); |
| 584 | |||
| 585 | profile_manager->StoreOpenedUsers(); | ||
| 586 | |||
| 583 | IPC::ResponseBuilder rb{ctx, 2}; | 587 | IPC::ResponseBuilder rb{ctx, 2}; |
| 584 | rb.Push(ResultSuccess); | 588 | rb.Push(ResultSuccess); |
| 585 | } | 589 | } |
| 586 | 590 | ||
| 587 | std::shared_ptr<EnsureTokenIdCacheAsyncInterface> ensure_token_id{}; | 591 | std::shared_ptr<EnsureTokenIdCacheAsyncInterface> ensure_token_id{}; |
| 588 | Common::UUID user_id{}; | 592 | std::shared_ptr<ProfileManager> profile_manager; |
| 589 | }; | 593 | }; |
| 590 | 594 | ||
| 591 | // 6.0.0+ | 595 | // 6.0.0+ |
| @@ -790,7 +794,7 @@ void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestCo | |||
| 790 | LOG_DEBUG(Service_ACC, "called"); | 794 | LOG_DEBUG(Service_ACC, "called"); |
| 791 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | 795 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; |
| 792 | rb.Push(ResultSuccess); | 796 | rb.Push(ResultSuccess); |
| 793 | rb.PushIpcInterface<IManagerForApplication>(system, profile_manager->GetLastOpenedUser()); | 797 | rb.PushIpcInterface<IManagerForApplication>(system, profile_manager); |
| 794 | } | 798 | } |
| 795 | 799 | ||
| 796 | void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx) { | 800 | void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx) { |
| @@ -849,22 +853,10 @@ void Module::Interface::ListQualifiedUsers(Kernel::HLERequestContext& ctx) { | |||
| 849 | rb.Push(ResultSuccess); | 853 | rb.Push(ResultSuccess); |
| 850 | } | 854 | } |
| 851 | 855 | ||
| 852 | void Module::Interface::LoadOpenContext(Kernel::HLERequestContext& ctx) { | ||
| 853 | LOG_WARNING(Service_ACC, "(STUBBED) called"); | ||
| 854 | |||
| 855 | // This is similar to GetBaasAccountManagerForApplication | ||
| 856 | // This command is used concurrently with ListOpenContextStoredUsers | ||
| 857 | // TODO: Find the differences between this and GetBaasAccountManagerForApplication | ||
| 858 | IPC::ResponseBuilder rb{ctx, 2, 0, 1}; | ||
| 859 | rb.Push(ResultSuccess); | ||
| 860 | rb.PushIpcInterface<IManagerForApplication>(system, profile_manager->GetLastOpenedUser()); | ||
| 861 | } | ||
| 862 | |||
| 863 | void Module::Interface::ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx) { | 856 | void Module::Interface::ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx) { |
| 864 | LOG_WARNING(Service_ACC, "(STUBBED) called"); | 857 | LOG_DEBUG(Service_ACC, "called"); |
| 865 | 858 | ||
| 866 | // TODO(ogniK): Handle open contexts | 859 | ctx.WriteBuffer(profile_manager->GetStoredOpenedUsers()); |
| 867 | ctx.WriteBuffer(profile_manager->GetOpenUsers()); | ||
| 868 | IPC::ResponseBuilder rb{ctx, 2}; | 860 | IPC::ResponseBuilder rb{ctx, 2}; |
| 869 | rb.Push(ResultSuccess); | 861 | rb.Push(ResultSuccess); |
| 870 | } | 862 | } |
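The acc.cpp changes replace a `Common::UUID` captured at construction with a shared `ProfileManager`, so `GetAccountId` and `LoadIdTokenCache` resolve the last-opened user at call time rather than freezing whichever user was current when the session object was built. A minimal sketch of the difference, with the types reduced to the essentials and the struct names hypothetical:

    #include <memory>
    // Old shape: the id is frozen at construction.
    struct CachedUser {
        explicit CachedUser(Common::UUID uid_) : uid{uid_} {}
        u64 AccountId() const { return uid.Hash(); } // stale after a user switch
        Common::UUID uid;
    };

    // New shape: every call reflects the manager's current state.
    struct LiveUser {
        explicit LiveUser(std::shared_ptr<ProfileManager> pm_) : pm{std::move(pm_)} {}
        u64 AccountId() const { return pm->GetLastOpenedUser().Hash(); }
        std::shared_ptr<ProfileManager> pm;
    };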
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h index 1621e7c0a..9411b0b92 100644 --- a/src/core/hle/service/acc/acc.h +++ b/src/core/hle/service/acc/acc.h | |||
| @@ -35,7 +35,6 @@ public: | |||
| 35 | void InitializeApplicationInfoV2(Kernel::HLERequestContext& ctx); | 35 | void InitializeApplicationInfoV2(Kernel::HLERequestContext& ctx); |
| 36 | void GetProfileEditor(Kernel::HLERequestContext& ctx); | 36 | void GetProfileEditor(Kernel::HLERequestContext& ctx); |
| 37 | void ListQualifiedUsers(Kernel::HLERequestContext& ctx); | 37 | void ListQualifiedUsers(Kernel::HLERequestContext& ctx); |
| 38 | void LoadOpenContext(Kernel::HLERequestContext& ctx); | ||
| 39 | void ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx); | 38 | void ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx); |
| 40 | void StoreSaveDataThumbnailApplication(Kernel::HLERequestContext& ctx); | 39 | void StoreSaveDataThumbnailApplication(Kernel::HLERequestContext& ctx); |
| 41 | void StoreSaveDataThumbnailSystem(Kernel::HLERequestContext& ctx); | 40 | void StoreSaveDataThumbnailSystem(Kernel::HLERequestContext& ctx); |
diff --git a/src/core/hle/service/acc/acc_u0.cpp b/src/core/hle/service/acc/acc_u0.cpp index 65023b8c2..54844bfe7 100644 --- a/src/core/hle/service/acc/acc_u0.cpp +++ b/src/core/hle/service/acc/acc_u0.cpp | |||
| @@ -28,7 +28,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager> | |||
| 28 | {110, &ACC_U0::StoreSaveDataThumbnailApplication, "StoreSaveDataThumbnail"}, | 28 | {110, &ACC_U0::StoreSaveDataThumbnailApplication, "StoreSaveDataThumbnail"}, |
| 29 | {111, nullptr, "ClearSaveDataThumbnail"}, | 29 | {111, nullptr, "ClearSaveDataThumbnail"}, |
| 30 | {120, nullptr, "CreateGuestLoginRequest"}, | 30 | {120, nullptr, "CreateGuestLoginRequest"}, |
| 31 | {130, &ACC_U0::LoadOpenContext, "LoadOpenContext"}, // 5.0.0+ | 31 | {130, nullptr, "LoadOpenContext"}, // 5.0.0+ |
| 32 | {131, &ACC_U0::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 6.0.0+ | 32 | {131, &ACC_U0::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 6.0.0+ |
| 33 | {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"}, // 6.0.0+ | 33 | {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"}, // 6.0.0+ |
| 34 | {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+ | 34 | {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+ |
diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp index a58da4d5f..481e0d141 100644 --- a/src/core/hle/service/acc/profile_manager.cpp +++ b/src/core/hle/service/acc/profile_manager.cpp | |||
| @@ -261,6 +261,31 @@ UUID ProfileManager::GetLastOpenedUser() const { | |||
| 261 | return last_opened_user; | 261 | return last_opened_user; |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | /// Gets the list of stored opened users. | ||
| 265 | UserIDArray ProfileManager::GetStoredOpenedUsers() const { | ||
| 266 | UserIDArray output{}; | ||
| 267 | std::ranges::transform(stored_opened_profiles, output.begin(), [](const ProfileInfo& p) { | ||
| 268 | if (p.is_open) | ||
| 269 | return p.user_uuid; | ||
| 270 | return Common::InvalidUUID; | ||
| 271 | }); | ||
| 272 | std::stable_partition(output.begin(), output.end(), | ||
| 273 | [](const UUID& uuid) { return uuid.IsValid(); }); | ||
| 274 | return output; | ||
| 275 | } | ||
| 276 | |||
| 277 | /// Captures the opened users, which can be queried across process launches with | ||
| 278 | /// ListOpenContextStoredUsers. | ||
| 279 | void ProfileManager::StoreOpenedUsers() { | ||
| 280 | size_t profile_index{}; | ||
| 281 | stored_opened_profiles = {}; | ||
| 282 | std::for_each(profiles.begin(), profiles.end(), [&](const auto& profile) { | ||
| 283 | if (profile.is_open) { | ||
| 284 | stored_opened_profiles[profile_index++] = profile; | ||
| 285 | } | ||
| 286 | }); | ||
| 287 | } | ||
| 288 | |||
| 264 | /// Return the user's profile base and the unknown arbitrary data. | 289 | /// Return the user's profile base and the unknown arbitrary data. |
| 265 | bool ProfileManager::GetProfileBaseAndData(std::optional<std::size_t> index, ProfileBase& profile, | 290 | bool ProfileManager::GetProfileBaseAndData(std::optional<std::size_t> index, ProfileBase& profile, |
| 266 | UserData& data) const { | 291 | UserData& data) const { |
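Together these two helpers back the acc `StoreOpenContext`/`ListOpenContextStoredUsers` pair: `StoreOpenedUsers` snapshots the currently open profiles, and `GetStoredOpenedUsers` replays the snapshot with valid UUIDs partitioned to the front. A hypothetical service-side sequence:

    profile_manager->StoreOpenedUsers(); // StoreOpenContext
    // ... later, possibly from another session ...
    const auto users = profile_manager->GetStoredOpenedUsers(); // ListOpenContextStoredUsers
    for (const auto& uuid : users) {
        if (!uuid.IsValid()) {
            break; // invalid entries are packed at the tail by the stable_partition above
        }
        // handle one stored-open user
    }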
diff --git a/src/core/hle/service/acc/profile_manager.h b/src/core/hle/service/acc/profile_manager.h index 135f7d0d5..993a5a57a 100644 --- a/src/core/hle/service/acc/profile_manager.h +++ b/src/core/hle/service/acc/profile_manager.h | |||
| @@ -86,6 +86,8 @@ public: | |||
| 86 | UserIDArray GetOpenUsers() const; | 86 | UserIDArray GetOpenUsers() const; |
| 87 | UserIDArray GetAllUsers() const; | 87 | UserIDArray GetAllUsers() const; |
| 88 | Common::UUID GetLastOpenedUser() const; | 88 | Common::UUID GetLastOpenedUser() const; |
| 89 | UserIDArray GetStoredOpenedUsers() const; | ||
| 90 | void StoreOpenedUsers(); | ||
| 89 | 91 | ||
| 90 | bool CanSystemRegisterUser() const; | 92 | bool CanSystemRegisterUser() const; |
| 91 | 93 | ||
| @@ -101,6 +103,7 @@ private: | |||
| 101 | bool RemoveProfileAtIndex(std::size_t index); | 103 | bool RemoveProfileAtIndex(std::size_t index); |
| 102 | 104 | ||
| 103 | std::array<ProfileInfo, MAX_USERS> profiles{}; | 105 | std::array<ProfileInfo, MAX_USERS> profiles{}; |
| 106 | std::array<ProfileInfo, MAX_USERS> stored_opened_profiles{}; | ||
| 104 | std::size_t user_count{}; | 107 | std::size_t user_count{}; |
| 105 | Common::UUID last_opened_user{}; | 108 | Common::UUID last_opened_user{}; |
| 106 | }; | 109 | }; |
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index e55233054..8ea7fd760 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp | |||
| @@ -299,7 +299,7 @@ ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nv | |||
| 299 | {100, &ISelfController::SetAlbumImageTakenNotificationEnabled, "SetAlbumImageTakenNotificationEnabled"}, | 299 | {100, &ISelfController::SetAlbumImageTakenNotificationEnabled, "SetAlbumImageTakenNotificationEnabled"}, |
| 300 | {110, nullptr, "SetApplicationAlbumUserData"}, | 300 | {110, nullptr, "SetApplicationAlbumUserData"}, |
| 301 | {120, &ISelfController::SaveCurrentScreenshot, "SaveCurrentScreenshot"}, | 301 | {120, &ISelfController::SaveCurrentScreenshot, "SaveCurrentScreenshot"}, |
| 302 | {130, nullptr, "SetRecordVolumeMuted"}, | 302 | {130, &ISelfController::SetRecordVolumeMuted, "SetRecordVolumeMuted"}, |
| 303 | {1000, nullptr, "GetDebugStorageChannel"}, | 303 | {1000, nullptr, "GetDebugStorageChannel"}, |
| 304 | }; | 304 | }; |
| 305 | // clang-format on | 305 | // clang-format on |
| @@ -597,6 +597,17 @@ void ISelfController::SaveCurrentScreenshot(Kernel::HLERequestContext& ctx) { | |||
| 597 | rb.Push(ResultSuccess); | 597 | rb.Push(ResultSuccess); |
| 598 | } | 598 | } |
| 599 | 599 | ||
| 600 | void ISelfController::SetRecordVolumeMuted(Kernel::HLERequestContext& ctx) { | ||
| 601 | IPC::RequestParser rp{ctx}; | ||
| 602 | |||
| 603 | const auto is_record_volume_muted = rp.Pop<bool>(); | ||
| 604 | |||
| 605 | LOG_WARNING(Service_AM, "(STUBBED) called. is_record_volume_muted={}", is_record_volume_muted); | ||
| 606 | |||
| 607 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 608 | rb.Push(ResultSuccess); | ||
| 609 | } | ||
| 610 | |||
| 600 | AppletMessageQueue::AppletMessageQueue(Core::System& system) | 611 | AppletMessageQueue::AppletMessageQueue(Core::System& system) |
| 601 | : service_context{system, "AppletMessageQueue"} { | 612 | : service_context{system, "AppletMessageQueue"} { |
| 602 | on_new_message = service_context.CreateEvent("AMMessageQueue:OnMessageReceived"); | 613 | on_new_message = service_context.CreateEvent("AMMessageQueue:OnMessageReceived"); |
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h index bb75c6281..a0fbfcfc5 100644 --- a/src/core/hle/service/am/am.h +++ b/src/core/hle/service/am/am.h | |||
| @@ -182,6 +182,7 @@ private: | |||
| 182 | void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx); | 182 | void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx); |
| 183 | void SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestContext& ctx); | 183 | void SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestContext& ctx); |
| 184 | void SaveCurrentScreenshot(Kernel::HLERequestContext& ctx); | 184 | void SaveCurrentScreenshot(Kernel::HLERequestContext& ctx); |
| 185 | void SetRecordVolumeMuted(Kernel::HLERequestContext& ctx); | ||
| 185 | 186 | ||
| 186 | enum class ScreenshotPermission : u32 { | 187 | enum class ScreenshotPermission : u32 { |
| 187 | Inherit = 0, | 188 | Inherit = 0, |
diff --git a/src/core/hle/service/audio/audin_u.cpp b/src/core/hle/service/audio/audin_u.cpp index 48a9a73a0..608925dfc 100644 --- a/src/core/hle/service/audio/audin_u.cpp +++ b/src/core/hle/service/audio/audin_u.cpp | |||
| @@ -17,7 +17,7 @@ using namespace AudioCore::AudioIn; | |||
| 17 | class IAudioIn final : public ServiceFramework<IAudioIn> { | 17 | class IAudioIn final : public ServiceFramework<IAudioIn> { |
| 18 | public: | 18 | public: |
| 19 | explicit IAudioIn(Core::System& system_, Manager& manager, size_t session_id, | 19 | explicit IAudioIn(Core::System& system_, Manager& manager, size_t session_id, |
| 20 | std::string& device_name, const AudioInParameter& in_params, u32 handle, | 20 | const std::string& device_name, const AudioInParameter& in_params, u32 handle, |
| 21 | u64 applet_resource_user_id) | 21 | u64 applet_resource_user_id) |
| 22 | : ServiceFramework{system_, "IAudioIn"}, | 22 | : ServiceFramework{system_, "IAudioIn"}, |
| 23 | service_context{system_, "IAudioIn"}, event{service_context.CreateEvent("AudioInEvent")}, | 23 | service_context{system_, "IAudioIn"}, event{service_context.CreateEvent("AudioInEvent")}, |
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index 49c092301..122290c6a 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp | |||
| @@ -24,7 +24,7 @@ using namespace AudioCore::AudioOut; | |||
| 24 | class IAudioOut final : public ServiceFramework<IAudioOut> { | 24 | class IAudioOut final : public ServiceFramework<IAudioOut> { |
| 25 | public: | 25 | public: |
| 26 | explicit IAudioOut(Core::System& system_, AudioCore::AudioOut::Manager& manager, | 26 | explicit IAudioOut(Core::System& system_, AudioCore::AudioOut::Manager& manager, |
| 27 | size_t session_id, std::string& device_name, | 27 | size_t session_id, const std::string& device_name, |
| 28 | const AudioOutParameter& in_params, u32 handle, u64 applet_resource_user_id) | 28 | const AudioOutParameter& in_params, u32 handle, u64 applet_resource_user_id) |
| 29 | : ServiceFramework{system_, "IAudioOut", ServiceThreadType::CreateNew}, | 29 | : ServiceFramework{system_, "IAudioOut", ServiceThreadType::CreateNew}, |
| 30 | service_context{system_, "IAudioOut"}, event{service_context.CreateEvent( | 30 | service_context{system_, "IAudioOut"}, event{service_context.CreateEvent( |
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index 3b26e96de..2f871de31 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp | |||
| @@ -868,7 +868,7 @@ bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id, | |||
| 868 | return false; | 868 | return false; |
| 869 | } | 869 | } |
| 870 | 870 | ||
| 871 | if (!controller.device->IsVibrationEnabled()) { | 871 | if (!controller.device->IsVibrationEnabled(device_index)) { |
| 872 | if (controller.vibration[device_index].latest_vibration_value.low_amplitude != 0.0f || | 872 | if (controller.vibration[device_index].latest_vibration_value.low_amplitude != 0.0f || |
| 873 | controller.vibration[device_index].latest_vibration_value.high_amplitude != 0.0f) { | 873 | controller.vibration[device_index].latest_vibration_value.high_amplitude != 0.0f) { |
| 874 | // Send an empty vibration to stop any vibrations. | 874 | // Send an empty vibration to stop any vibrations. |
| @@ -1001,7 +1001,7 @@ void Controller_NPad::InitializeVibrationDeviceAtIndex(Core::HID::NpadIdType npa | |||
| 1001 | } | 1001 | } |
| 1002 | 1002 | ||
| 1003 | controller.vibration[device_index].device_mounted = | 1003 | controller.vibration[device_index].device_mounted = |
| 1004 | controller.device->TestVibration(device_index); | 1004 | controller.device->IsVibrationEnabled(device_index); |
| 1005 | } | 1005 | } |
| 1006 | 1006 | ||
| 1007 | void Controller_NPad::SetPermitVibrationSession(bool permit_vibration_session) { | 1007 | void Controller_NPad::SetPermitVibrationSession(bool permit_vibration_session) { |
diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp index af133af93..42991928e 100644 --- a/src/core/hle/service/kernel_helpers.cpp +++ b/src/core/hle/service/kernel_helpers.cpp | |||
| @@ -31,7 +31,7 @@ ServiceContext::~ServiceContext() { | |||
| 31 | Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) { | 31 | Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) { |
| 32 | // Reserve a new event from the process resource limit | 32 | // Reserve a new event from the process resource limit |
| 33 | Kernel::KScopedResourceReservation event_reservation(process, | 33 | Kernel::KScopedResourceReservation event_reservation(process, |
| 34 | Kernel::LimitableResource::Events); | 34 | Kernel::LimitableResource::EventCountMax); |
| 35 | if (!event_reservation.Succeeded()) { | 35 | if (!event_reservation.Succeeded()) { |
| 36 | LOG_CRITICAL(Service, "Resource limit reached!"); | 36 | LOG_CRITICAL(Service, "Resource limit reached!"); |
| 37 | return {}; | 37 | return {}; |
diff --git a/src/core/hle/service/nfp/amiibo_crypto.cpp b/src/core/hle/service/nfp/amiibo_crypto.cpp index c32a6816b..167e29572 100644 --- a/src/core/hle/service/nfp/amiibo_crypto.cpp +++ b/src/core/hle/service/nfp/amiibo_crypto.cpp | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <mbedtls/hmac_drbg.h> | 9 | #include <mbedtls/hmac_drbg.h> |
| 10 | 10 | ||
| 11 | #include "common/fs/file.h" | 11 | #include "common/fs/file.h" |
| 12 | #include "common/fs/fs.h" | ||
| 12 | #include "common/fs/path_util.h" | 13 | #include "common/fs/path_util.h" |
| 13 | #include "common/logging/log.h" | 14 | #include "common/logging/log.h" |
| 14 | #include "core/hle/service/mii/mii_manager.h" | 15 | #include "core/hle/service/mii/mii_manager.h" |
| @@ -279,7 +280,7 @@ bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info) { | |||
| 279 | Common::FS::FileType::BinaryFile}; | 280 | Common::FS::FileType::BinaryFile}; |
| 280 | 281 | ||
| 281 | if (!keys_file.IsOpen()) { | 282 | if (!keys_file.IsOpen()) { |
| 282 | LOG_ERROR(Service_NFP, "No keys detected"); | 283 | LOG_ERROR(Service_NFP, "Failed to open key file"); |
| 283 | return false; | 284 | return false; |
| 284 | } | 285 | } |
| 285 | 286 | ||
| @@ -295,6 +296,11 @@ bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info) { | |||
| 295 | return true; | 296 | return true; |
| 296 | } | 297 | } |
| 297 | 298 | ||
| 299 | bool IsKeyAvailable() { | ||
| 300 | const auto yuzu_keys_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::KeysDir); | ||
| 301 | return Common::FS::Exists(yuzu_keys_dir / "key_retail.bin"); | ||
| 302 | } | ||
| 303 | |||
| 298 | bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data) { | 304 | bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data) { |
| 299 | InternalKey locked_secret{}; | 305 | InternalKey locked_secret{}; |
| 300 | InternalKey unfixed_info{}; | 306 | InternalKey unfixed_info{}; |
diff --git a/src/core/hle/service/nfp/amiibo_crypto.h b/src/core/hle/service/nfp/amiibo_crypto.h index 0175ced91..1fa61174e 100644 --- a/src/core/hle/service/nfp/amiibo_crypto.h +++ b/src/core/hle/service/nfp/amiibo_crypto.h | |||
| @@ -91,6 +91,9 @@ void Cipher(const DerivedKeys& keys, const NTAG215File& in_data, NTAG215File& ou | |||
| 91 | /// Loads both amiibo keys from key_retail.bin | 91 | /// Loads both amiibo keys from key_retail.bin |
| 92 | bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info); | 92 | bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info); |
| 93 | 93 | ||
| 94 | /// Returns true if key_retail.bin exists | ||
| 95 | bool IsKeyAvailable(); | ||
| 96 | |||
| 94 | /// Decodes encrypted amiibo data, returns true if output is valid | 97 | /// Decodes encrypted amiibo data, returns true if output is valid |
| 95 | bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data); | 98 | bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data); |
| 96 | 99 | ||
diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp index 76f8a267a..b19672560 100644 --- a/src/core/hle/service/nfp/nfp_device.cpp +++ b/src/core/hle/service/nfp/nfp_device.cpp | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include "core/hle/ipc_helpers.h" | 17 | #include "core/hle/ipc_helpers.h" |
| 18 | #include "core/hle/kernel/k_event.h" | 18 | #include "core/hle/kernel/k_event.h" |
| 19 | #include "core/hle/service/mii/mii_manager.h" | 19 | #include "core/hle/service/mii/mii_manager.h" |
| 20 | #include "core/hle/service/mii/types.h" | ||
| 20 | #include "core/hle/service/nfp/amiibo_crypto.h" | 21 | #include "core/hle/service/nfp/amiibo_crypto.h" |
| 21 | #include "core/hle/service/nfp/nfp.h" | 22 | #include "core/hle/service/nfp/nfp.h" |
| 22 | #include "core/hle/service/nfp/nfp_device.h" | 23 | #include "core/hle/service/nfp/nfp_device.h" |
| @@ -233,6 +234,14 @@ Result NfpDevice::Mount(MountTarget mount_target_) { | |||
| 233 | return NotAnAmiibo; | 234 | return NotAnAmiibo; |
| 234 | } | 235 | } |
| 235 | 236 | ||
| 237 | // Mark amiibos as read-only when keys are missing | ||
| 238 | if (!AmiiboCrypto::IsKeyAvailable()) { | ||
| 239 | LOG_ERROR(Service_NFP, "No keys detected"); | ||
| 240 | device_state = DeviceState::TagMounted; | ||
| 241 | mount_target = MountTarget::Rom; | ||
| 242 | return ResultSuccess; | ||
| 243 | } | ||
| 244 | |||
| 236 | if (!AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) { | 245 | if (!AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) { |
| 237 | LOG_ERROR(Service_NFP, "Can't decode amiibo {}", device_state); | 246 | LOG_ERROR(Service_NFP, "Can't decode amiibo {}", device_state); |
| 238 | return CorruptedData; | 247 | return CorruptedData; |
diff --git a/src/core/hle/service/nfp/nfp_device.h b/src/core/hle/service/nfp/nfp_device.h index a5b72cf19..76d0e9ae4 100644 --- a/src/core/hle/service/nfp/nfp_device.h +++ b/src/core/hle/service/nfp/nfp_device.h | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | #include "common/common_funcs.h" | 9 | #include "common/common_funcs.h" |
| 10 | #include "core/hle/service/kernel_helpers.h" | 10 | #include "core/hle/service/kernel_helpers.h" |
| 11 | #include "core/hle/service/mii/types.h" | ||
| 12 | #include "core/hle/service/nfp/nfp_types.h" | 11 | #include "core/hle/service/nfp/nfp_types.h" |
| 13 | #include "core/hle/service/service.h" | 12 | #include "core/hle/service/service.h" |
| 14 | 13 | ||
diff --git a/src/core/hle/service/nfp/nfp_types.h b/src/core/hle/service/nfp/nfp_types.h index c09f9ddb6..63d5917cb 100644 --- a/src/core/hle/service/nfp/nfp_types.h +++ b/src/core/hle/service/nfp/nfp_types.h | |||
| @@ -17,11 +17,6 @@ enum class ServiceType : u32 { | |||
| 17 | System, | 17 | System, |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | enum class State : u32 { | ||
| 21 | NonInitialized, | ||
| 22 | Initialized, | ||
| 23 | }; | ||
| 24 | |||
| 25 | enum class DeviceState : u32 { | 20 | enum class DeviceState : u32 { |
| 26 | Initialized, | 21 | Initialized, |
| 27 | SearchingForTag, | 22 | SearchingForTag, |
diff --git a/src/core/hle/service/nfp/nfp_user.cpp b/src/core/hle/service/nfp/nfp_user.cpp index 4ed53b534..33e2ef518 100644 --- a/src/core/hle/service/nfp/nfp_user.cpp +++ b/src/core/hle/service/nfp/nfp_user.cpp | |||
| @@ -6,12 +6,9 @@ | |||
| 6 | 6 | ||
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "core/core.h" | 8 | #include "core/core.h" |
| 9 | #include "core/hid/emulated_controller.h" | ||
| 10 | #include "core/hid/hid_core.h" | ||
| 11 | #include "core/hid/hid_types.h" | 9 | #include "core/hid/hid_types.h" |
| 12 | #include "core/hle/ipc_helpers.h" | 10 | #include "core/hle/ipc_helpers.h" |
| 13 | #include "core/hle/kernel/k_event.h" | 11 | #include "core/hle/kernel/k_event.h" |
| 14 | #include "core/hle/service/mii/mii_manager.h" | ||
| 15 | #include "core/hle/service/nfp/nfp_device.h" | 12 | #include "core/hle/service/nfp/nfp_device.h" |
| 16 | #include "core/hle/service/nfp/nfp_result.h" | 13 | #include "core/hle/service/nfp/nfp_result.h" |
| 17 | #include "core/hle/service/nfp/nfp_user.h" | 14 | #include "core/hle/service/nfp/nfp_user.h" |
diff --git a/src/core/hle/service/nfp/nfp_user.h b/src/core/hle/service/nfp/nfp_user.h index 68c60ae82..47aff3695 100644 --- a/src/core/hle/service/nfp/nfp_user.h +++ b/src/core/hle/service/nfp/nfp_user.h | |||
| @@ -4,8 +4,7 @@ | |||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include "core/hle/service/kernel_helpers.h" | 6 | #include "core/hle/service/kernel_helpers.h" |
| 7 | #include "core/hle/service/nfp/nfp.h" | 7 | #include "core/hle/service/service.h" |
| 8 | #include "core/hle/service/nfp/nfp_types.h" | ||
| 9 | 8 | ||
| 10 | namespace Service::NFP { | 9 | namespace Service::NFP { |
| 11 | class NfpDevice; | 10 | class NfpDevice; |
| @@ -15,6 +14,11 @@ public: | |||
| 15 | explicit IUser(Core::System& system_); | 14 | explicit IUser(Core::System& system_); |
| 16 | 15 | ||
| 17 | private: | 16 | private: |
| 17 | enum class State : u32 { | ||
| 18 | NonInitialized, | ||
| 19 | Initialized, | ||
| 20 | }; | ||
| 21 | |||
| 18 | void Initialize(Kernel::HLERequestContext& ctx); | 22 | void Initialize(Kernel::HLERequestContext& ctx); |
| 19 | void Finalize(Kernel::HLERequestContext& ctx); | 23 | void Finalize(Kernel::HLERequestContext& ctx); |
| 20 | void ListDevices(Kernel::HLERequestContext& ctx); | 24 | void ListDevices(Kernel::HLERequestContext& ctx); |
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index fbd8a74a5..a51ca5444 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp | |||
| @@ -255,15 +255,16 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna | |||
| 255 | .address = handle_description->address, | 255 | .address = handle_description->address, |
| 256 | .size = handle_description->size, | 256 | .size = handle_description->size, |
| 257 | .was_uncached = handle_description->flags.map_uncached.Value() != 0, | 257 | .was_uncached = handle_description->flags.map_uncached.Value() != 0, |
| 258 | .can_unlock = true, | ||
| 258 | }; | 259 | }; |
| 259 | } else { | 260 | } else { |
| 260 | return std::nullopt; | 261 | return std::nullopt; |
| 261 | } | 262 | } |
| 262 | 263 | ||
| 263 | // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed | 264 | // If the handle hasn't been freed from memory, mark that it can't be unlocked yet |
| 264 | if (!hWeak.expired()) { | 265 | if (!hWeak.expired()) { |
| 265 | LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle); | 266 | LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle); |
| 266 | freeInfo.address = 0; | 267 | freeInfo.can_unlock = false; |
| 267 | } | 268 | } |
| 268 | 269 | ||
| 269 | return freeInfo; | 270 | return freeInfo; |
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index b9dd3801f..a8e573890 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h | |||
| @@ -105,6 +105,7 @@ public: | |||
| 105 | u64 address; //!< Address the handle referred to before deletion | 105 | u64 address; //!< Address the handle referred to before deletion |
| 106 | u64 size; //!< Page-aligned handle size | 106 | u64 size; //!< Page-aligned handle size |
| 107 | bool was_uncached; //!< If the handle was allocated as uncached | 107 | bool was_uncached; //!< If the handle was allocated as uncached |
| 108 | bool can_unlock; //!< If the address region is ready to be unlocked | ||
| 108 | }; | 109 | }; |
| 109 | 110 | ||
| 110 | explicit NvMap(Tegra::Host1x::Host1x& host1x); | 111 | explicit NvMap(Tegra::Host1x::Host1x& host1x); |
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index b60679021..fa29db758 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); | 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); |
| 127 | return result; | 127 | return result; |
| 128 | } | 128 | } |
| 129 | bool is_out_io{}; | ||
| 129 | ASSERT(system.CurrentProcess() | 130 | ASSERT(system.CurrentProcess() |
| 130 | ->PageTable() | 131 | ->PageTable() |
| 131 | .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, | 132 | .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, |
| 132 | Kernel::KMemoryPermission::None, true) | 133 | handle_description->size, |
| 134 | Kernel::KMemoryPermission::None, true, false) | ||
| 133 | .IsSuccess()); | 135 | .IsSuccess()); |
| 134 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 136 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 135 | return result; | 137 | return result; |
| @@ -251,10 +253,12 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) { | |||
| 251 | } | 253 | } |
| 252 | 254 | ||
| 253 | if (auto freeInfo{file.FreeHandle(params.handle, false)}) { | 255 | if (auto freeInfo{file.FreeHandle(params.handle, false)}) { |
| 254 | ASSERT(system.CurrentProcess() | 256 | if (freeInfo->can_unlock) { |
| 255 | ->PageTable() | 257 | ASSERT(system.CurrentProcess() |
| 256 | .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size) | 258 | ->PageTable() |
| 257 | .IsSuccess()); | 259 | .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size) |
| 260 | .IsSuccess()); | ||
| 261 | } | ||
| 258 | params.address = freeInfo->address; | 262 | params.address = freeInfo->address; |
| 259 | params.size = static_cast<u32>(freeInfo->size); | 263 | params.size = static_cast<u32>(freeInfo->size); |
| 260 | params.flags.raw = 0; | 264 | params.flags.raw = 0; |
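The nvmap hunks above replace the old "address = 0" sentinel with an explicit can_unlock flag, so IocFree no longer has to infer from a zeroed address whether the locked region may be released. A minimal consumer-side sketch, assuming a FreeHandle that returns std::optional<FreeInfo> as declared in nvmap.h above:

    // Sketch only: mirrors the new IocFree flow, not a drop-in replacement.
    if (const auto free_info = file.FreeHandle(params.handle, false)) {
        if (free_info->can_unlock) {
            // Handle was truly freed; the locked device region may be released.
            UnlockForDeviceAddressSpace(free_info->address, free_info->size);
        }
        params.address = free_info->address; // reported to the guest either way
    }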
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp index 77ddbb6ef..41ba44b21 100644 --- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp | |||
| @@ -742,6 +742,13 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) { | |||
| 742 | return Status::NoError; | 742 | return Status::NoError; |
| 743 | } | 743 | } |
| 744 | 744 | ||
| 745 | // HACK: We are not Android. Remove the handle for each queued item, then clear the queue. | ||
| 746 | // Allows synchronous destruction of nvmap handles. | ||
| 747 | for (auto& item : core->queue) { | ||
| 748 | nvmap.FreeHandle(item.graphic_buffer->BufferId(), true); | ||
| 749 | } | ||
| 750 | core->queue.clear(); | ||
| 751 | |||
| 745 | switch (api) { | 752 | switch (api) { |
| 746 | case NativeWindowApi::Egl: | 753 | case NativeWindowApi::Egl: |
| 747 | case NativeWindowApi::Cpu: | 754 | case NativeWindowApi::Cpu: |
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index dad93b38e..c3af12c90 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp | |||
| @@ -138,6 +138,19 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { | |||
| 138 | return itr->GetID(); | 138 | return itr->GetID(); |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | bool NVFlinger::CloseDisplay(u64 display_id) { | ||
| 142 | const auto lock_guard = Lock(); | ||
| 143 | auto* const display = FindDisplay(display_id); | ||
| 144 | |||
| 145 | if (display == nullptr) { | ||
| 146 | return false; | ||
| 147 | } | ||
| 148 | |||
| 149 | display->Reset(); | ||
| 150 | |||
| 151 | return true; | ||
| 152 | } | ||
| 153 | |||
| 141 | std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { | 154 | std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { |
| 142 | const auto lock_guard = Lock(); | 155 | const auto lock_guard = Lock(); |
| 143 | auto* const display = FindDisplay(display_id); | 156 | auto* const display = FindDisplay(display_id); |
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index b8191c595..460bef976 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h | |||
| @@ -58,6 +58,11 @@ public: | |||
| 58 | /// If an invalid display name is provided, then an empty optional is returned. | 58 | /// If an invalid display name is provided, then an empty optional is returned. |
| 59 | [[nodiscard]] std::optional<u64> OpenDisplay(std::string_view name); | 59 | [[nodiscard]] std::optional<u64> OpenDisplay(std::string_view name); |
| 60 | 60 | ||
| 61 | /// Closes the specified display by its ID. | ||
| 62 | /// | ||
| 63 | /// Returns false if an invalid display ID is provided. | ||
| 64 | [[nodiscard]] bool CloseDisplay(u64 display_id); | ||
| 65 | |||
| 61 | /// Creates a layer on the specified display and returns the layer ID. | 66 | /// Creates a layer on the specified display and returns the layer ID. |
| 62 | /// | 67 | /// |
| 63 | /// If an invalid display ID is specified, then an empty optional is returned. | 68 | /// If an invalid display ID is specified, then an empty optional is returned. |
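CloseDisplay mirrors OpenDisplay but reports failure as a plain bool rather than an empty optional. A short usage sketch, assuming an nv_flinger reference like the one the VI service holds:

    // Sketch: open, use, then close a display through the new API.
    if (const auto display_id = nv_flinger.OpenDisplay("Default")) {
        // ... create layers, present frames ...
        if (!nv_flinger.CloseDisplay(*display_id)) {
            LOG_ERROR(Service_VI, "display {} no longer exists", *display_id);
        }
    }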
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index 5db6588e4..5ab41c0c4 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp | |||
| @@ -99,6 +99,12 @@ ServiceFrameworkBase::ServiceFrameworkBase(Core::System& system_, const char* se | |||
| 99 | ServiceFrameworkBase::~ServiceFrameworkBase() { | 99 | ServiceFrameworkBase::~ServiceFrameworkBase() { |
| 100 | // Wait for other threads to release access before destroying | 100 | // Wait for other threads to release access before destroying |
| 101 | const auto guard = LockService(); | 101 | const auto guard = LockService(); |
| 102 | |||
| 103 | if (named_port != nullptr) { | ||
| 104 | named_port->GetClientPort().Close(); | ||
| 105 | named_port->GetServerPort().Close(); | ||
| 106 | named_port = nullptr; | ||
| 107 | } | ||
| 102 | } | 108 | } |
| 103 | 109 | ||
| 104 | void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager) { | 110 | void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager) { |
| @@ -113,15 +119,16 @@ void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager) | |||
| 113 | Kernel::KClientPort& ServiceFrameworkBase::CreatePort() { | 119 | Kernel::KClientPort& ServiceFrameworkBase::CreatePort() { |
| 114 | const auto guard = LockService(); | 120 | const auto guard = LockService(); |
| 115 | 121 | ||
| 116 | ASSERT(!service_registered); | 122 | if (named_port == nullptr) { |
| 123 | ASSERT(!service_registered); | ||
| 117 | 124 | ||
| 118 | auto* port = Kernel::KPort::Create(kernel); | 125 | named_port = Kernel::KPort::Create(kernel); |
| 119 | port->Initialize(max_sessions, false, service_name); | 126 | named_port->Initialize(max_sessions, false, service_name); |
| 120 | port->GetServerPort().SetSessionHandler(shared_from_this()); | ||
| 121 | 127 | ||
| 122 | service_registered = true; | 128 | service_registered = true; |
| 129 | } | ||
| 123 | 130 | ||
| 124 | return port->GetClientPort(); | 131 | return named_port->GetClientPort(); |
| 125 | } | 132 | } |
| 126 | 133 | ||
| 127 | void ServiceFrameworkBase::RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n) { | 134 | void ServiceFrameworkBase::RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n) { |
| @@ -199,7 +206,6 @@ Result ServiceFrameworkBase::HandleSyncRequest(Kernel::KServerSession& session, | |||
| 199 | switch (ctx.GetCommandType()) { | 206 | switch (ctx.GetCommandType()) { |
| 200 | case IPC::CommandType::Close: | 207 | case IPC::CommandType::Close: |
| 201 | case IPC::CommandType::TIPC_Close: { | 208 | case IPC::CommandType::TIPC_Close: { |
| 202 | session.Close(); | ||
| 203 | IPC::ResponseBuilder rb{ctx, 2}; | 209 | IPC::ResponseBuilder rb{ctx, 2}; |
| 204 | rb.Push(ResultSuccess); | 210 | rb.Push(ResultSuccess); |
| 205 | result = IPC::ERR_REMOTE_PROCESS_DEAD; | 211 | result = IPC::ERR_REMOTE_PROCESS_DEAD; |
| @@ -244,6 +250,7 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system | |||
| 244 | system.GetFileSystemController().CreateFactories(*system.GetFilesystem(), false); | 250 | system.GetFileSystemController().CreateFactories(*system.GetFilesystem(), false); |
| 245 | 251 | ||
| 246 | system.Kernel().RegisterNamedService("sm:", SM::ServiceManager::InterfaceFactory); | 252 | system.Kernel().RegisterNamedService("sm:", SM::ServiceManager::InterfaceFactory); |
| 253 | system.Kernel().RegisterInterfaceForNamedService("sm:", SM::ServiceManager::SessionHandler); | ||
| 247 | 254 | ||
| 248 | Account::InstallInterfaces(system); | 255 | Account::InstallInterfaces(system); |
| 249 | AM::InstallInterfaces(*sm, *nv_flinger, system); | 256 | AM::InstallInterfaces(*sm, *nv_flinger, system); |
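Caching the port in named_port makes CreatePort idempotent: the KPort is built once, reused on later calls, and both ends are closed exactly once in the destructor. A sketch of the resulting guarantee, assuming `service` points at any ServiceFrameworkBase subclass:

    // Sketch: repeated calls now return the same client port.
    auto& first = service->CreatePort();
    auto& second = service->CreatePort();
    ASSERT(std::addressof(first) == std::addressof(second));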
diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h index ec9deeee4..22e2119d7 100644 --- a/src/core/hle/service/service.h +++ b/src/core/hle/service/service.h | |||
| @@ -20,6 +20,7 @@ class System; | |||
| 20 | namespace Kernel { | 20 | namespace Kernel { |
| 21 | class HLERequestContext; | 21 | class HLERequestContext; |
| 22 | class KClientPort; | 22 | class KClientPort; |
| 23 | class KPort; | ||
| 23 | class KServerSession; | 24 | class KServerSession; |
| 24 | class ServiceThread; | 25 | class ServiceThread; |
| 25 | } // namespace Kernel | 26 | } // namespace Kernel |
| @@ -98,6 +99,9 @@ protected: | |||
| 98 | /// Identifier string used to connect to the service. | 99 | /// Identifier string used to connect to the service. |
| 99 | std::string service_name; | 100 | std::string service_name; |
| 100 | 101 | ||
| 102 | /// Port used by ManageNamedPort. | ||
| 103 | Kernel::KPort* named_port{}; | ||
| 104 | |||
| 101 | private: | 105 | private: |
| 102 | template <typename T> | 106 | template <typename T> |
| 103 | friend class ServiceFramework; | 107 | friend class ServiceFramework; |
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp index 48e70f93c..84720094f 100644 --- a/src/core/hle/service/sm/sm.cpp +++ b/src/core/hle/service/sm/sm.cpp | |||
| @@ -23,7 +23,13 @@ constexpr Result ERR_INVALID_NAME(ErrorModule::SM, 6); | |||
| 23 | constexpr Result ERR_SERVICE_NOT_REGISTERED(ErrorModule::SM, 7); | 23 | constexpr Result ERR_SERVICE_NOT_REGISTERED(ErrorModule::SM, 7); |
| 24 | 24 | ||
| 25 | ServiceManager::ServiceManager(Kernel::KernelCore& kernel_) : kernel{kernel_} {} | 25 | ServiceManager::ServiceManager(Kernel::KernelCore& kernel_) : kernel{kernel_} {} |
| 26 | ServiceManager::~ServiceManager() = default; | 26 | |
| 27 | ServiceManager::~ServiceManager() { | ||
| 28 | for (auto& [name, port] : service_ports) { | ||
| 29 | port->GetClientPort().Close(); | ||
| 30 | port->GetServerPort().Close(); | ||
| 31 | } | ||
| 32 | } | ||
| 27 | 33 | ||
| 28 | void ServiceManager::InvokeControlRequest(Kernel::HLERequestContext& context) { | 34 | void ServiceManager::InvokeControlRequest(Kernel::HLERequestContext& context) { |
| 29 | controller_interface->InvokeRequest(context); | 35 | controller_interface->InvokeRequest(context); |
| @@ -43,6 +49,10 @@ Kernel::KClientPort& ServiceManager::InterfaceFactory(ServiceManager& self, Core | |||
| 43 | return self.sm_interface->CreatePort(); | 49 | return self.sm_interface->CreatePort(); |
| 44 | } | 50 | } |
| 45 | 51 | ||
| 52 | void ServiceManager::SessionHandler(ServiceManager& self, Kernel::KServerPort* server_port) { | ||
| 53 | self.sm_interface->AcceptSession(server_port); | ||
| 54 | } | ||
| 55 | |||
| 46 | Result ServiceManager::RegisterService(std::string name, u32 max_sessions, | 56 | Result ServiceManager::RegisterService(std::string name, u32 max_sessions, |
| 47 | Kernel::SessionRequestHandlerPtr handler) { | 57 | Kernel::SessionRequestHandlerPtr handler) { |
| 48 | 58 | ||
| @@ -53,7 +63,11 @@ Result ServiceManager::RegisterService(std::string name, u32 max_sessions, | |||
| 53 | return ERR_ALREADY_REGISTERED; | 63 | return ERR_ALREADY_REGISTERED; |
| 54 | } | 64 | } |
| 55 | 65 | ||
| 56 | registered_services.emplace(std::move(name), handler); | 66 | auto* port = Kernel::KPort::Create(kernel); |
| 67 | port->Initialize(ServerSessionCountMax, false, name); | ||
| 68 | |||
| 69 | service_ports.emplace(name, port); | ||
| 70 | registered_services.emplace(name, handler); | ||
| 57 | 71 | ||
| 58 | return ResultSuccess; | 72 | return ResultSuccess; |
| 59 | } | 73 | } |
| @@ -68,25 +82,20 @@ Result ServiceManager::UnregisterService(const std::string& name) { | |||
| 68 | } | 82 | } |
| 69 | 83 | ||
| 70 | registered_services.erase(iter); | 84 | registered_services.erase(iter); |
| 85 | service_ports.erase(name); | ||
| 86 | |||
| 71 | return ResultSuccess; | 87 | return ResultSuccess; |
| 72 | } | 88 | } |
| 73 | 89 | ||
| 74 | ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name) { | 90 | ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name) { |
| 75 | CASCADE_CODE(ValidateServiceName(name)); | 91 | CASCADE_CODE(ValidateServiceName(name)); |
| 76 | auto it = registered_services.find(name); | 92 | auto it = service_ports.find(name); |
| 77 | if (it == registered_services.end()) { | 93 | if (it == service_ports.end()) { |
| 78 | LOG_ERROR(Service_SM, "Server is not registered! service={}", name); | 94 | LOG_ERROR(Service_SM, "Server is not registered! service={}", name); |
| 79 | return ERR_SERVICE_NOT_REGISTERED; | 95 | return ERR_SERVICE_NOT_REGISTERED; |
| 80 | } | 96 | } |
| 81 | 97 | ||
| 82 | auto* port = Kernel::KPort::Create(kernel); | 98 | return it->second; |
| 83 | SCOPE_EXIT({ port->Close(); }); | ||
| 84 | |||
| 85 | port->Initialize(ServerSessionCountMax, false, name); | ||
| 86 | auto handler = it->second; | ||
| 87 | port->GetServerPort().SetSessionHandler(std::move(handler)); | ||
| 88 | |||
| 89 | return port; | ||
| 90 | } | 99 | } |
| 91 | 100 | ||
| 92 | /** | 101 | /** |
| @@ -145,23 +154,20 @@ ResultVal<Kernel::KClientSession*> SM::GetServiceImpl(Kernel::HLERequestContext& | |||
| 145 | 154 | ||
| 146 | // Find the named port. | 155 | // Find the named port. |
| 147 | auto port_result = service_manager.GetServicePort(name); | 156 | auto port_result = service_manager.GetServicePort(name); |
| 148 | if (port_result.Failed()) { | 157 | auto service = service_manager.GetService<Kernel::SessionRequestHandler>(name); |
| 158 | if (port_result.Failed() || !service) { | ||
| 149 | LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, port_result.Code().raw); | 159 | LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, port_result.Code().raw); |
| 150 | return port_result.Code(); | 160 | return port_result.Code(); |
| 151 | } | 161 | } |
| 152 | auto& port = port_result.Unwrap(); | 162 | auto& port = port_result.Unwrap(); |
| 153 | SCOPE_EXIT({ port->GetClientPort().Close(); }); | ||
| 154 | |||
| 155 | kernel.RegisterServerObject(&port->GetServerPort()); | ||
| 156 | 163 | ||
| 157 | // Create a new session. | 164 | // Create a new session. |
| 158 | Kernel::KClientSession* session{}; | 165 | Kernel::KClientSession* session{}; |
| 159 | if (const auto result = port->GetClientPort().CreateSession( | 166 | if (const auto result = port->GetClientPort().CreateSession(&session); result.IsError()) { |
| 160 | std::addressof(session), std::make_shared<Kernel::SessionRequestManager>(kernel)); | ||
| 161 | result.IsError()) { | ||
| 162 | LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw); | 167 | LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw); |
| 163 | return result; | 168 | return result; |
| 164 | } | 169 | } |
| 170 | service->AcceptSession(&port->GetServerPort()); | ||
| 165 | 171 | ||
| 166 | LOG_DEBUG(Service_SM, "called service={} -> session={}", name, session->GetId()); | 172 | LOG_DEBUG(Service_SM, "called service={} -> session={}", name, session->GetId()); |
| 167 | 173 | ||
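Ports are now created once at registration time and merely looked up on connect, so GetServiceImpl no longer builds and discards a throwaway KPort per request. The resulting connect path, sketched using only the calls visible above:

    // Sketch: producing a session to a registered service.
    auto port_result = service_manager.GetServicePort(name); // cached KPort*
    auto service = service_manager.GetService<Kernel::SessionRequestHandler>(name);
    Kernel::KClientSession* session{};
    if (!port_result.Failed() && service &&
        port_result.Unwrap()->GetClientPort().CreateSession(&session).IsSuccess()) {
        service->AcceptSession(&port_result.Unwrap()->GetServerPort());
    }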
diff --git a/src/core/hle/service/sm/sm.h b/src/core/hle/service/sm/sm.h index 878decc6f..02a5dde9e 100644 --- a/src/core/hle/service/sm/sm.h +++ b/src/core/hle/service/sm/sm.h | |||
| @@ -51,6 +51,7 @@ private: | |||
| 51 | class ServiceManager { | 51 | class ServiceManager { |
| 52 | public: | 52 | public: |
| 53 | static Kernel::KClientPort& InterfaceFactory(ServiceManager& self, Core::System& system); | 53 | static Kernel::KClientPort& InterfaceFactory(ServiceManager& self, Core::System& system); |
| 54 | static void SessionHandler(ServiceManager& self, Kernel::KServerPort* server_port); | ||
| 54 | 55 | ||
| 55 | explicit ServiceManager(Kernel::KernelCore& kernel_); | 56 | explicit ServiceManager(Kernel::KernelCore& kernel_); |
| 56 | ~ServiceManager(); | 57 | ~ServiceManager(); |
| @@ -78,6 +79,7 @@ private: | |||
| 78 | 79 | ||
| 79 | /// Map of registered services, retrieved using GetServicePort. | 80 | /// Map of registered services, retrieved using GetServicePort. |
| 80 | std::unordered_map<std::string, Kernel::SessionRequestHandlerPtr> registered_services; | 81 | std::unordered_map<std::string, Kernel::SessionRequestHandlerPtr> registered_services; |
| 82 | std::unordered_map<std::string, Kernel::KPort*> service_ports; | ||
| 81 | 83 | ||
| 82 | /// Kernel context | 84 | /// Kernel context |
| 83 | Kernel::KernelCore& kernel; | 85 | Kernel::KernelCore& kernel; |
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp index 273f79568..1cf9dd1c4 100644 --- a/src/core/hle/service/sm/sm_controller.cpp +++ b/src/core/hle/service/sm/sm_controller.cpp | |||
| @@ -15,10 +15,9 @@ | |||
| 15 | namespace Service::SM { | 15 | namespace Service::SM { |
| 16 | 16 | ||
| 17 | void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) { | 17 | void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) { |
| 18 | ASSERT_MSG(!ctx.Session()->GetSessionRequestManager()->IsDomain(), | 18 | ASSERT_MSG(!ctx.GetManager()->IsDomain(), "Session is already a domain"); |
| 19 | "Session is already a domain"); | ||
| 20 | LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId()); | 19 | LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId()); |
| 21 | ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd(); | 20 | ctx.GetManager()->ConvertToDomainOnRequestEnd(); |
| 22 | 21 | ||
| 23 | IPC::ResponseBuilder rb{ctx, 3}; | 22 | IPC::ResponseBuilder rb{ctx, 3}; |
| 24 | rb.Push(ResultSuccess); | 23 | rb.Push(ResultSuccess); |
| @@ -28,23 +27,35 @@ void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) { | |||
| 28 | void Controller::CloneCurrentObject(Kernel::HLERequestContext& ctx) { | 27 | void Controller::CloneCurrentObject(Kernel::HLERequestContext& ctx) { |
| 29 | LOG_DEBUG(Service, "called"); | 28 | LOG_DEBUG(Service, "called"); |
| 30 | 29 | ||
| 31 | auto& parent_session = *ctx.Session()->GetParent(); | 30 | auto& process = *ctx.GetThread().GetOwnerProcess(); |
| 32 | auto& parent_port = parent_session.GetParent()->GetParent()->GetClientPort(); | 31 | auto session_manager = ctx.GetManager(); |
| 33 | auto& session_manager = parent_session.GetServerSession().GetSessionRequestManager(); | ||
| 34 | 32 | ||
| 35 | // Create a session. | 33 | // FIXME: this is duplicated from the SVC, it should just call it instead |
| 36 | Kernel::KClientSession* session{}; | 34 | // once this is a proper process |
| 37 | const Result result = parent_port.CreateSession(std::addressof(session), session_manager); | 35 | |
| 38 | if (result.IsError()) { | 36 | // Reserve a new session from the process resource limit. |
| 39 | LOG_CRITICAL(Service, "CreateSession failed with error 0x{:08X}", result.raw); | 37 | Kernel::KScopedResourceReservation session_reservation( |
| 40 | IPC::ResponseBuilder rb{ctx, 2}; | 38 | &process, Kernel::LimitableResource::SessionCountMax); |
| 41 | rb.Push(result); | 39 | ASSERT(session_reservation.Succeeded()); |
| 42 | } | 40 | |
| 41 | // Create the session. | ||
| 42 | Kernel::KSession* session = Kernel::KSession::Create(system.Kernel()); | ||
| 43 | ASSERT(session != nullptr); | ||
| 44 | |||
| 45 | // Initialize the session. | ||
| 46 | session->Initialize(nullptr, ""); | ||
| 47 | |||
| 48 | // Commit the session reservation. | ||
| 49 | session_reservation.Commit(); | ||
| 50 | |||
| 51 | // Register with manager. | ||
| 52 | session_manager->SessionHandler().RegisterSession(&session->GetServerSession(), | ||
| 53 | session_manager); | ||
| 43 | 54 | ||
| 44 | // We succeeded. | 55 | // We succeeded. |
| 45 | IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; | 56 | IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; |
| 46 | rb.Push(ResultSuccess); | 57 | rb.Push(ResultSuccess); |
| 47 | rb.PushMoveObjects(session); | 58 | rb.PushMoveObjects(session->GetClientSession()); |
| 48 | } | 59 | } |
| 49 | 60 | ||
| 50 | void Controller::CloneCurrentObjectEx(Kernel::HLERequestContext& ctx) { | 61 | void Controller::CloneCurrentObjectEx(Kernel::HLERequestContext& ctx) { |
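CloneCurrentObject now performs the kernel's session-creation steps by hand, as the FIXME notes, instead of routing through the parent port. Reduced to its skeleton (names taken from the hunk above), the reserve/create/commit shape is:

    // Sketch: resource-checked session creation, as CloneCurrentObject now does it.
    Kernel::KScopedResourceReservation reservation(
        &process, Kernel::LimitableResource::SessionCountMax);
    ASSERT(reservation.Succeeded());          // otherwise the guest is out of sessions
    auto* session = Kernel::KSession::Create(system.Kernel());
    session->Initialize(nullptr, "");         // no client port, anonymous name
    reservation.Commit();                     // commit only after creation succeeded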
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index 33d5f398c..0b65a65da 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ b/src/core/hle/service/vi/display/vi_display.h | |||
| @@ -106,6 +106,12 @@ public: | |||
| 106 | /// | 106 | /// |
| 107 | void CloseLayer(u64 layer_id); | 107 | void CloseLayer(u64 layer_id); |
| 108 | 108 | ||
| 109 | /// Resets the display for a new connection. | ||
| 110 | void Reset() { | ||
| 111 | layers.clear(); | ||
| 112 | got_vsync_event = false; | ||
| 113 | } | ||
| 114 | |||
| 109 | /// Attempts to find a layer with the given ID. | 115 | /// Attempts to find a layer with the given ID. |
| 110 | /// | 116 | /// |
| 111 | /// @param layer_id The layer ID. | 117 | /// @param layer_id The layer ID. |
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index 9c917cacf..bb283e74e 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp | |||
| @@ -324,10 +324,10 @@ private: | |||
| 324 | IPC::RequestParser rp{ctx}; | 324 | IPC::RequestParser rp{ctx}; |
| 325 | const u64 display = rp.Pop<u64>(); | 325 | const u64 display = rp.Pop<u64>(); |
| 326 | 326 | ||
| 327 | LOG_WARNING(Service_VI, "(STUBBED) called. display=0x{:016X}", display); | 327 | const Result rc = nv_flinger.CloseDisplay(display) ? ResultSuccess : ResultUnknown; |
| 328 | 328 | ||
| 329 | IPC::ResponseBuilder rb{ctx, 2}; | 329 | IPC::ResponseBuilder rb{ctx, 2}; |
| 330 | rb.Push(ResultSuccess); | 330 | rb.Push(rc); |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | void CreateManagedLayer(Kernel::HLERequestContext& ctx) { | 333 | void CreateManagedLayer(Kernel::HLERequestContext& ctx) { |
| @@ -508,10 +508,10 @@ private: | |||
| 508 | IPC::RequestParser rp{ctx}; | 508 | IPC::RequestParser rp{ctx}; |
| 509 | const u64 display_id = rp.Pop<u64>(); | 509 | const u64 display_id = rp.Pop<u64>(); |
| 510 | 510 | ||
| 511 | LOG_WARNING(Service_VI, "(STUBBED) called. display_id=0x{:016X}", display_id); | 511 | const Result rc = nv_flinger.CloseDisplay(display_id) ? ResultSuccess : ResultUnknown; |
| 512 | 512 | ||
| 513 | IPC::ResponseBuilder rb{ctx, 2}; | 513 | IPC::ResponseBuilder rb{ctx, 2}; |
| 514 | rb.Push(ResultSuccess); | 514 | rb.Push(rc); |
| 515 | } | 515 | } |
| 516 | 516 | ||
| 517 | // This literally does nothing internally in the actual service itself, | 517 | // This literally does nothing internally in the actual service itself, |
diff --git a/src/core/internal_network/socket_proxy.cpp b/src/core/internal_network/socket_proxy.cpp index 7d5d37bbc..1e1c42cea 100644 --- a/src/core/internal_network/socket_proxy.cpp +++ b/src/core/internal_network/socket_proxy.cpp | |||
| @@ -11,6 +11,10 @@ | |||
| 11 | #include "core/internal_network/network_interface.h" | 11 | #include "core/internal_network/network_interface.h" |
| 12 | #include "core/internal_network/socket_proxy.h" | 12 | #include "core/internal_network/socket_proxy.h" |
| 13 | 13 | ||
| 14 | #if YUZU_UNIX | ||
| 15 | #include <sys/socket.h> | ||
| 16 | #endif | ||
| 17 | |||
| 14 | namespace Network { | 18 | namespace Network { |
| 15 | 19 | ||
| 16 | ProxySocket::ProxySocket(RoomNetwork& room_network_) noexcept : room_network{room_network_} {} | 20 | ProxySocket::ProxySocket(RoomNetwork& room_network_) noexcept : room_network{room_network_} {} |
diff --git a/src/input_common/drivers/gc_adapter.cpp b/src/input_common/drivers/gc_adapter.cpp index f4dd24e7d..826fa2109 100644 --- a/src/input_common/drivers/gc_adapter.cpp +++ b/src/input_common/drivers/gc_adapter.cpp | |||
| @@ -324,7 +324,7 @@ bool GCAdapter::GetGCEndpoint(libusb_device* device) { | |||
| 324 | return true; | 324 | return true; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | Common::Input::VibrationError GCAdapter::SetRumble( | 327 | Common::Input::VibrationError GCAdapter::SetVibration( |
| 328 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) { | 328 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) { |
| 329 | const auto mean_amplitude = (vibration.low_amplitude + vibration.high_amplitude) * 0.5f; | 329 | const auto mean_amplitude = (vibration.low_amplitude + vibration.high_amplitude) * 0.5f; |
| 330 | const auto processed_amplitude = | 330 | const auto processed_amplitude = |
| @@ -338,6 +338,10 @@ Common::Input::VibrationError GCAdapter::SetRumble( | |||
| 338 | return Common::Input::VibrationError::None; | 338 | return Common::Input::VibrationError::None; |
| 339 | } | 339 | } |
| 340 | 340 | ||
| 341 | bool GCAdapter::IsVibrationEnabled([[maybe_unused]] const PadIdentifier& identifier) { | ||
| 342 | return rumble_enabled; | ||
| 343 | } | ||
| 344 | |||
| 341 | void GCAdapter::UpdateVibrations() { | 345 | void GCAdapter::UpdateVibrations() { |
| 342 | // Use 8 states to keep the switching between on/off fast enough for | 346 | // Use 8 states to keep the switching between on/off fast enough for |
| 343 | // a human to feel different vibration strength | 347 | // a human to feel different vibration strength |
diff --git a/src/input_common/drivers/gc_adapter.h b/src/input_common/drivers/gc_adapter.h index 8682da847..7f81767f7 100644 --- a/src/input_common/drivers/gc_adapter.h +++ b/src/input_common/drivers/gc_adapter.h | |||
| @@ -25,9 +25,11 @@ public: | |||
| 25 | explicit GCAdapter(std::string input_engine_); | 25 | explicit GCAdapter(std::string input_engine_); |
| 26 | ~GCAdapter() override; | 26 | ~GCAdapter() override; |
| 27 | 27 | ||
| 28 | Common::Input::VibrationError SetRumble( | 28 | Common::Input::VibrationError SetVibration( |
| 29 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) override; | 29 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) override; |
| 30 | 30 | ||
| 31 | bool IsVibrationEnabled(const PadIdentifier& identifier) override; | ||
| 32 | |||
| 31 | /// Used for automapping features | 33 | /// Used for automapping features |
| 32 | std::vector<Common::ParamPackage> GetInputDevices() const override; | 34 | std::vector<Common::ParamPackage> GetInputDevices() const override; |
| 33 | ButtonMapping GetButtonMappingForDevice(const Common::ParamPackage& params) override; | 35 | ButtonMapping GetButtonMappingForDevice(const Common::ParamPackage& params) override; |
diff --git a/src/input_common/drivers/sdl_driver.cpp b/src/input_common/drivers/sdl_driver.cpp index c175a8853..45ce588f0 100644 --- a/src/input_common/drivers/sdl_driver.cpp +++ b/src/input_common/drivers/sdl_driver.cpp | |||
| @@ -114,6 +114,20 @@ public: | |||
| 114 | } | 114 | } |
| 115 | return false; | 115 | return false; |
| 116 | } | 116 | } |
| 117 | |||
| 118 | void EnableVibration(bool is_enabled) { | ||
| 119 | has_vibration = is_enabled; | ||
| 120 | is_vibration_tested = true; | ||
| 121 | } | ||
| 122 | |||
| 123 | bool HasVibration() const { | ||
| 124 | return has_vibration; | ||
| 125 | } | ||
| 126 | |||
| 127 | bool IsVibrationTested() const { | ||
| 128 | return is_vibration_tested; | ||
| 129 | } | ||
| 130 | |||
| 117 | /** | 131 | /** |
| 118 | * The Pad identifier of the joystick | 132 | * The Pad identifier of the joystick |
| 119 | */ | 133 | */ |
| @@ -236,6 +250,8 @@ private: | |||
| 236 | u64 last_motion_update{}; | 250 | u64 last_motion_update{}; |
| 237 | bool has_gyro{false}; | 251 | bool has_gyro{false}; |
| 238 | bool has_accel{false}; | 252 | bool has_accel{false}; |
| 253 | bool has_vibration{false}; | ||
| 254 | bool is_vibration_tested{false}; | ||
| 239 | BasicMotion motion; | 255 | BasicMotion motion; |
| 240 | }; | 256 | }; |
| 241 | 257 | ||
| @@ -517,7 +533,7 @@ std::vector<Common::ParamPackage> SDLDriver::GetInputDevices() const { | |||
| 517 | return devices; | 533 | return devices; |
| 518 | } | 534 | } |
| 519 | 535 | ||
| 520 | Common::Input::VibrationError SDLDriver::SetRumble( | 536 | Common::Input::VibrationError SDLDriver::SetVibration( |
| 521 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) { | 537 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) { |
| 522 | const auto joystick = | 538 | const auto joystick = |
| 523 | GetSDLJoystickByGUID(identifier.guid.RawString(), static_cast<int>(identifier.port)); | 539 | GetSDLJoystickByGUID(identifier.guid.RawString(), static_cast<int>(identifier.port)); |
| @@ -546,13 +562,6 @@ Common::Input::VibrationError SDLDriver::SetRumble( | |||
| 546 | .type = Common::Input::VibrationAmplificationType::Exponential, | 562 | .type = Common::Input::VibrationAmplificationType::Exponential, |
| 547 | }; | 563 | }; |
| 548 | 564 | ||
| 549 | if (vibration.type == Common::Input::VibrationAmplificationType::Test) { | ||
| 550 | if (!joystick->RumblePlay(new_vibration)) { | ||
| 551 | return Common::Input::VibrationError::Unknown; | ||
| 552 | } | ||
| 553 | return Common::Input::VibrationError::None; | ||
| 554 | } | ||
| 555 | |||
| 556 | vibration_queue.Push(VibrationRequest{ | 565 | vibration_queue.Push(VibrationRequest{ |
| 557 | .identifier = identifier, | 566 | .identifier = identifier, |
| 558 | .vibration = new_vibration, | 567 | .vibration = new_vibration, |
| @@ -561,6 +570,45 @@ Common::Input::VibrationError SDLDriver::SetRumble( | |||
| 561 | return Common::Input::VibrationError::None; | 570 | return Common::Input::VibrationError::None; |
| 562 | } | 571 | } |
| 563 | 572 | ||
| 573 | bool SDLDriver::IsVibrationEnabled(const PadIdentifier& identifier) { | ||
| 574 | const auto joystick = | ||
| 575 | GetSDLJoystickByGUID(identifier.guid.RawString(), static_cast<int>(identifier.port)); | ||
| 576 | |||
| 577 | constexpr Common::Input::VibrationStatus test_vibration{ | ||
| 578 | .low_amplitude = 1, | ||
| 579 | .low_frequency = 160.0f, | ||
| 580 | .high_amplitude = 1, | ||
| 581 | .high_frequency = 320.0f, | ||
| 582 | .type = Common::Input::VibrationAmplificationType::Exponential, | ||
| 583 | }; | ||
| 584 | |||
| 585 | constexpr Common::Input::VibrationStatus zero_vibration{ | ||
| 586 | .low_amplitude = 0, | ||
| 587 | .low_frequency = 160.0f, | ||
| 588 | .high_amplitude = 0, | ||
| 589 | .high_frequency = 320.0f, | ||
| 590 | .type = Common::Input::VibrationAmplificationType::Exponential, | ||
| 591 | }; | ||
| 592 | |||
| 593 | if (joystick->IsVibrationTested()) { | ||
| 594 | return joystick->HasVibration(); | ||
| 595 | } | ||
| 596 | |||
| 597 | // First vibration might fail | ||
| 598 | joystick->RumblePlay(test_vibration); | ||
| 599 | |||
| 600 | // Wait for about 15ms to ensure the controller is ready for the stop command | ||
| 601 | std::this_thread::sleep_for(std::chrono::milliseconds(15)); | ||
| 602 | |||
| 603 | if (!joystick->RumblePlay(zero_vibration)) { | ||
| 604 | joystick->EnableVibration(false); | ||
| 605 | return false; | ||
| 606 | } | ||
| 607 | |||
| 608 | joystick->EnableVibration(true); | ||
| 609 | return true; | ||
| 610 | } | ||
| 611 | |||
| 564 | void SDLDriver::SendVibrations() { | 612 | void SDLDriver::SendVibrations() { |
| 565 | while (!vibration_queue.Empty()) { | 613 | while (!vibration_queue.Empty()) { |
| 566 | VibrationRequest request; | 614 | VibrationRequest request; |
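IsVibrationEnabled probes real rumble support once per joystick and caches the verdict through EnableVibration/IsVibrationTested. The 15 ms wait is the load-bearing detail: the first pulse may be dropped, so the stop command is what actually decides. The probe's shape:

    // Sketch of the one-time rumble probe (result cached per joystick).
    joystick->RumblePlay(test_vibration);     // first pulse may silently fail
    std::this_thread::sleep_for(std::chrono::milliseconds(15));
    const bool ok = joystick->RumblePlay(zero_vibration); // stop is the real test
    joystick->EnableVibration(ok);
    return ok;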
diff --git a/src/input_common/drivers/sdl_driver.h b/src/input_common/drivers/sdl_driver.h index fc3a44572..d1b4471cf 100644 --- a/src/input_common/drivers/sdl_driver.h +++ b/src/input_common/drivers/sdl_driver.h | |||
| @@ -61,9 +61,11 @@ public: | |||
| 61 | 61 | ||
| 62 | bool IsStickInverted(const Common::ParamPackage& params) override; | 62 | bool IsStickInverted(const Common::ParamPackage& params) override; |
| 63 | 63 | ||
| 64 | Common::Input::VibrationError SetRumble( | 64 | Common::Input::VibrationError SetVibration( |
| 65 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) override; | 65 | const PadIdentifier& identifier, const Common::Input::VibrationStatus& vibration) override; |
| 66 | 66 | ||
| 67 | bool IsVibrationEnabled(const PadIdentifier& identifier) override; | ||
| 68 | |||
| 67 | private: | 69 | private: |
| 68 | struct VibrationRequest { | 70 | struct VibrationRequest { |
| 69 | PadIdentifier identifier; | 71 | PadIdentifier identifier; |
diff --git a/src/input_common/input_engine.h b/src/input_common/input_engine.h index cfbdb26bd..d4c264a8e 100644 --- a/src/input_common/input_engine.h +++ b/src/input_common/input_engine.h | |||
| @@ -108,12 +108,17 @@ public: | |||
| 108 | [[maybe_unused]] const Common::Input::LedStatus& led_status) {} | 108 | [[maybe_unused]] const Common::Input::LedStatus& led_status) {} |
| 109 | 109 | ||
| 110 | // Sets rumble to a controller | 110 | // Sets rumble to a controller |
| 111 | virtual Common::Input::VibrationError SetRumble( | 111 | virtual Common::Input::VibrationError SetVibration( |
| 112 | [[maybe_unused]] const PadIdentifier& identifier, | 112 | [[maybe_unused]] const PadIdentifier& identifier, |
| 113 | [[maybe_unused]] const Common::Input::VibrationStatus& vibration) { | 113 | [[maybe_unused]] const Common::Input::VibrationStatus& vibration) { |
| 114 | return Common::Input::VibrationError::NotSupported; | 114 | return Common::Input::VibrationError::NotSupported; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | // Returns true if the device supports vibration | ||
| 118 | virtual bool IsVibrationEnabled([[maybe_unused]] const PadIdentifier& identifier) { | ||
| 119 | return false; | ||
| 120 | } | ||
| 121 | |||
| 117 | // Sets polling mode to a controller | 122 | // Sets polling mode to a controller |
| 118 | virtual Common::Input::PollingError SetPollingMode( | 123 | virtual Common::Input::PollingError SetPollingMode( |
| 119 | [[maybe_unused]] const PadIdentifier& identifier, | 124 | [[maybe_unused]] const PadIdentifier& identifier, |
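The engine interface gains a capability query next to the renamed SetVibration, both with safe defaults (NotSupported / false) so engines without rumble need no changes. A minimal override pair for a hypothetical engine (class name illustrative):

    // Hypothetical engine: these two overrides are all rumble support needs.
    class ExampleEngine final : public InputEngine {
    public:
        using InputEngine::InputEngine;
        Common::Input::VibrationError SetVibration(
            const PadIdentifier& identifier,
            const Common::Input::VibrationStatus& vibration) override {
            // ... forward the request to the device here ...
            return Common::Input::VibrationError::None;
        }
        bool IsVibrationEnabled(const PadIdentifier& identifier) override {
            return true; // or probe the hardware, as the SDL driver does
        }
    };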
diff --git a/src/input_common/input_poller.cpp b/src/input_common/input_poller.cpp index ccc3076ca..4ac182147 100644 --- a/src/input_common/input_poller.cpp +++ b/src/input_common/input_poller.cpp | |||
| @@ -763,7 +763,11 @@ public: | |||
| 763 | 763 | ||
| 764 | Common::Input::VibrationError SetVibration( | 764 | Common::Input::VibrationError SetVibration( |
| 765 | const Common::Input::VibrationStatus& vibration_status) override { | 765 | const Common::Input::VibrationStatus& vibration_status) override { |
| 766 | return input_engine->SetRumble(identifier, vibration_status); | 766 | return input_engine->SetVibration(identifier, vibration_status); |
| 767 | } | ||
| 768 | |||
| 769 | bool IsVibrationEnabled() override { | ||
| 770 | return input_engine->IsVibrationEnabled(identifier); | ||
| 767 | } | 771 | } |
| 768 | 772 | ||
| 769 | Common::Input::PollingError SetPollingMode(Common::Input::PollingMode polling_mode) override { | 773 | Common::Input::PollingError SetPollingMode(Common::Input::PollingMode polling_mode) override { |
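At the poller level the query surfaces on the output device, so frontends can gate rumble on actual support before queueing anything. Usage sketch, assuming an output_device produced by the poller above:

    // Sketch: only send vibration where the engine reports support.
    if (output_device->IsVibrationEnabled()) {
        output_device->SetVibration(vibration_status);
    }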
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt index bcdd60db9..545d69c7e 100644 --- a/src/shader_recompiler/CMakeLists.txt +++ b/src/shader_recompiler/CMakeLists.txt | |||
| @@ -224,6 +224,7 @@ add_library(shader_recompiler STATIC | |||
| 224 | ir_opt/lower_fp16_to_fp32.cpp | 224 | ir_opt/lower_fp16_to_fp32.cpp |
| 225 | ir_opt/lower_int64_to_int32.cpp | 225 | ir_opt/lower_int64_to_int32.cpp |
| 226 | ir_opt/passes.h | 226 | ir_opt/passes.h |
| 227 | ir_opt/position_pass.cpp | ||
| 227 | ir_opt/rescaling_pass.cpp | 228 | ir_opt/rescaling_pass.cpp |
| 228 | ir_opt/ssa_rewrite_pass.cpp | 229 | ir_opt/ssa_rewrite_pass.cpp |
| 229 | ir_opt/texture_pass.cpp | 230 | ir_opt/texture_pass.cpp |
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp index 01f9abc71..3b0176bf6 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp | |||
| @@ -450,6 +450,9 @@ std::string EmitGLASM(const Profile& profile, const RuntimeInfo& runtime_info, I | |||
| 450 | if (program.info.uses_rescaling_uniform) { | 450 | if (program.info.uses_rescaling_uniform) { |
| 451 | header += "PARAM scaling[1]={program.local[0..0]};"; | 451 | header += "PARAM scaling[1]={program.local[0..0]};"; |
| 452 | } | 452 | } |
| 453 | if (program.info.uses_render_area) { | ||
| 454 | header += "PARAM render_area[1]={program.local[1..1]};"; | ||
| 455 | } | ||
| 453 | header += "TEMP "; | 456 | header += "TEMP "; |
| 454 | for (size_t index = 0; index < ctx.reg_alloc.NumUsedRegisters(); ++index) { | 457 | for (size_t index = 0; index < ctx.reg_alloc.NumUsedRegisters(); ++index) { |
| 455 | header += fmt::format("R{},", index); | 458 | header += fmt::format("R{},", index); |
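render_area rides in program-local slot 1, alongside the existing scaling in slot 0. The matching host-side update is outside this diff; a hypothetical sketch using the classic ARB program-local calls (call choice and component meaning are assumptions):

    // Hypothetical host side; slots match the PARAM declarations above.
    glProgramLocalParameter4fARB(GL_FRAGMENT_PROGRAM_ARB, 0,
                                 scale, scale, down_factor, 0.0f);         // scaling[0]
    glProgramLocalParameter4fARB(GL_FRAGMENT_PROGRAM_ARB, 1,
                                 render_width, render_height, 0.0f, 0.0f); // render_area[0]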
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_bitwise_conversion.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_bitwise_conversion.cpp index 2fc2a0ac6..5bfdecc09 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_bitwise_conversion.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm_bitwise_conversion.cpp | |||
| @@ -43,6 +43,10 @@ void EmitBitCastU64F64(EmitContext&, IR::Inst& inst, const IR::Value& value) { | |||
| 43 | Alias(inst, value); | 43 | Alias(inst, value); |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | void EmitBitCastS32F32(EmitContext&, IR::Inst& inst, const IR::Value& value) { | ||
| 47 | Alias(inst, value); | ||
| 48 | } | ||
| 49 | |||
| 46 | void EmitBitCastF16U16(EmitContext&, IR::Inst& inst, const IR::Value& value) { | 50 | void EmitBitCastF16U16(EmitContext&, IR::Inst& inst, const IR::Value& value) { |
| 47 | Alias(inst, value); | 51 | Alias(inst, value); |
| 48 | } | 52 | } |
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp index 7e8f37563..d6562c842 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp | |||
| @@ -379,6 +379,18 @@ void EmitInvocationId(EmitContext& ctx, IR::Inst& inst) { | |||
| 379 | ctx.Add("MOV.S {}.x,primitive_invocation.x;", inst); | 379 | ctx.Add("MOV.S {}.x,primitive_invocation.x;", inst); |
| 380 | } | 380 | } |
| 381 | 381 | ||
| 382 | void EmitInvocationInfo(EmitContext& ctx, IR::Inst& inst) { | ||
| 383 | switch (ctx.stage) { | ||
| 384 | case Stage::TessellationControl: | ||
| 385 | case Stage::TessellationEval: | ||
| 386 | ctx.Add("SHL.U {}.x,primitive.vertexcount,16;", inst); | ||
| 387 | break; | ||
| 388 | default: | ||
| 389 | LOG_WARNING(Shader, "(STUBBED) called"); | ||
| 390 | ctx.Add("MOV.S {}.x,0x00ff0000;", inst); | ||
| 391 | } | ||
| 392 | } | ||
| 393 | |||
| 382 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst) { | 394 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst) { |
| 383 | ctx.Add("MOV.S {}.x,fragment.sampleid.x;", inst); | 395 | ctx.Add("MOV.S {}.x,fragment.sampleid.x;", inst); |
| 384 | } | 396 | } |
| @@ -396,6 +408,10 @@ void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) { | |||
| 396 | ctx.Add("MOV.F {}.x,scaling[0].z;", inst); | 408 | ctx.Add("MOV.F {}.x,scaling[0].z;", inst); |
| 397 | } | 409 | } |
| 398 | 410 | ||
| 411 | void EmitRenderArea(EmitContext& ctx, IR::Inst& inst) { | ||
| 412 | ctx.Add("MOV.F {},render_area[0];", inst); | ||
| 413 | } | ||
| 414 | |||
| 399 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset) { | 415 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset) { |
| 400 | ctx.Add("MOV.U {},lmem[{}].x;", inst, word_offset); | 416 | ctx.Add("MOV.U {},lmem[{}].x;", inst, word_offset); |
| 401 | } | 417 | } |
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h index 8b0ac3031..eaaf9ba39 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h +++ b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h | |||
| @@ -69,10 +69,12 @@ void EmitSetOFlag(EmitContext& ctx); | |||
| 69 | void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst); | 69 | void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst); |
| 70 | void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst); | 70 | void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst); |
| 71 | void EmitInvocationId(EmitContext& ctx, IR::Inst& inst); | 71 | void EmitInvocationId(EmitContext& ctx, IR::Inst& inst); |
| 72 | void EmitInvocationInfo(EmitContext& ctx, IR::Inst& inst); | ||
| 72 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst); | 73 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst); |
| 73 | void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst); | 74 | void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst); |
| 74 | void EmitYDirection(EmitContext& ctx, IR::Inst& inst); | 75 | void EmitYDirection(EmitContext& ctx, IR::Inst& inst); |
| 75 | void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst); | 76 | void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst); |
| 77 | void EmitRenderArea(EmitContext& ctx, IR::Inst& inst); | ||
| 76 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset); | 78 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset); |
| 77 | void EmitWriteLocal(EmitContext& ctx, ScalarU32 word_offset, ScalarU32 value); | 79 | void EmitWriteLocal(EmitContext& ctx, ScalarU32 word_offset, ScalarU32 value); |
| 78 | void EmitUndefU1(EmitContext& ctx, IR::Inst& inst); | 80 | void EmitUndefU1(EmitContext& ctx, IR::Inst& inst); |
| @@ -195,6 +197,7 @@ void EmitSelectF64(EmitContext& ctx, ScalarS32 cond, Register true_value, Regist | |||
| 195 | void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | 197 | void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); |
| 196 | void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | 198 | void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); |
| 197 | void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | 199 | void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); |
| 200 | void EmitBitCastS32F32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | ||
| 198 | void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | 201 | void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); |
| 199 | void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | 202 | void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); |
| 200 | void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); | 203 | void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); |
diff --git a/src/shader_recompiler/backend/glasm/glasm_emit_context.cpp b/src/shader_recompiler/backend/glasm/glasm_emit_context.cpp index 89603c1c4..333a91cc5 100644 --- a/src/shader_recompiler/backend/glasm/glasm_emit_context.cpp +++ b/src/shader_recompiler/backend/glasm/glasm_emit_context.cpp | |||
| @@ -95,6 +95,10 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile | |||
| 95 | if (info.uses_invocation_id) { | 95 | if (info.uses_invocation_id) { |
| 96 | Add("ATTRIB primitive_invocation=primitive.invocation;"); | 96 | Add("ATTRIB primitive_invocation=primitive.invocation;"); |
| 97 | } | 97 | } |
| 98 | if (info.uses_invocation_info && | ||
| 99 | (stage == Stage::TessellationControl || stage == Stage::TessellationEval)) { | ||
| 100 | Add("ATTRIB primitive_vertexcount = primitive.vertexcount;"); | ||
| 101 | } | ||
| 98 | if (info.stores_tess_level_outer) { | 102 | if (info.stores_tess_level_outer) { |
| 99 | Add("OUTPUT result_patch_tessouter[]={{result.patch.tessouter[0..3]}};"); | 103 | Add("OUTPUT result_patch_tessouter[]={{result.patch.tessouter[0..3]}};"); |
| 100 | } | 104 | } |
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp index 1be4a0f59..8e5e6cf1f 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp | |||
| @@ -48,6 +48,10 @@ void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) | |||
| 48 | ctx.AddU64("{}=doubleBitsToUint64({});", inst, value); | 48 | ctx.AddU64("{}=doubleBitsToUint64({});", inst, value); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | void EmitBitCastS32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) { | ||
| 52 | ctx.AddF32("{}=ftoi({});", inst, value); | ||
| 53 | } | ||
| 54 | |||
| 51 | void EmitBitCastF16U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst) { | 55 | void EmitBitCastF16U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst) { |
| 52 | NotImplemented(); | 56 | NotImplemented(); |
| 53 | } | 57 | } |
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp index fad8d1e30..c1671c37b 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp | |||
| @@ -399,6 +399,18 @@ void EmitInvocationId(EmitContext& ctx, IR::Inst& inst) { | |||
| 399 | ctx.AddU32("{}=uint(gl_InvocationID);", inst); | 399 | ctx.AddU32("{}=uint(gl_InvocationID);", inst); |
| 400 | } | 400 | } |
| 401 | 401 | ||
| 402 | void EmitInvocationInfo(EmitContext& ctx, IR::Inst& inst) { | ||
| 403 | switch (ctx.stage) { | ||
| 404 | case Stage::TessellationControl: | ||
| 405 | case Stage::TessellationEval: | ||
| 406 | ctx.AddU32("{}=uint(gl_PatchVerticesIn)<<16;", inst); | ||
| 407 | break; | ||
| 408 | default: | ||
| 409 | LOG_WARNING(Shader, "(STUBBED) called"); | ||
| 410 | ctx.AddU32("{}=uint(0x00ff0000);", inst); | ||
| 411 | } | ||
| 412 | } | ||
| 413 | |||
| 402 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst) { | 414 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst) { |
| 403 | ctx.AddU32("{}=uint(gl_SampleID);", inst); | 415 | ctx.AddU32("{}=uint(gl_SampleID);", inst); |
| 404 | } | 416 | } |
| @@ -416,6 +428,10 @@ void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) { | |||
| 416 | ctx.AddF32("{}=scaling.z;", inst); | 428 | ctx.AddF32("{}=scaling.z;", inst); |
| 417 | } | 429 | } |
| 418 | 430 | ||
| 431 | void EmitRenderArea(EmitContext& ctx, IR::Inst& inst) { | ||
| 432 | ctx.AddF32x4("{}=render_area;", inst); | ||
| 433 | } | ||
| 434 | |||
| 419 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset) { | 435 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset) { |
| 420 | ctx.AddU32("{}=lmem[{}];", inst, word_offset); | 436 | ctx.AddU32("{}=lmem[{}];", inst, word_offset); |
| 421 | } | 437 | } |
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h index 639691ba6..4151c89de 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h +++ b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h | |||
| @@ -83,10 +83,12 @@ void EmitSetOFlag(EmitContext& ctx); | |||
| 83 | void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst); | 83 | void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst); |
| 84 | void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst); | 84 | void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst); |
| 85 | void EmitInvocationId(EmitContext& ctx, IR::Inst& inst); | 85 | void EmitInvocationId(EmitContext& ctx, IR::Inst& inst); |
| 86 | void EmitInvocationInfo(EmitContext& ctx, IR::Inst& inst); | ||
| 86 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst); | 87 | void EmitSampleId(EmitContext& ctx, IR::Inst& inst); |
| 87 | void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst); | 88 | void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst); |
| 88 | void EmitYDirection(EmitContext& ctx, IR::Inst& inst); | 89 | void EmitYDirection(EmitContext& ctx, IR::Inst& inst); |
| 89 | void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst); | 90 | void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst); |
| 91 | void EmitRenderArea(EmitContext& ctx, IR::Inst& inst); | ||
| 90 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset); | 92 | void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset); |
| 91 | void EmitWriteLocal(EmitContext& ctx, std::string_view word_offset, std::string_view value); | 93 | void EmitWriteLocal(EmitContext& ctx, std::string_view word_offset, std::string_view value); |
| 92 | void EmitUndefU1(EmitContext& ctx, IR::Inst& inst); | 94 | void EmitUndefU1(EmitContext& ctx, IR::Inst& inst); |
| @@ -229,6 +231,7 @@ void EmitSelectF64(EmitContext& ctx, IR::Inst& inst, std::string_view cond, | |||
| 229 | void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst); | 231 | void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst); |
| 230 | void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value); | 232 | void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value); |
| 231 | void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value); | 233 | void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value); |
| 234 | void EmitBitCastS32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value); | ||
| 232 | void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst); | 235 | void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst); |
| 233 | void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value); | 236 | void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value); |
| 234 | void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value); | 237 | void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value); |
diff --git a/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp b/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp index c767a9dc3..5d01ec0cd 100644 --- a/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp +++ b/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp | |||
| @@ -358,6 +358,9 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile | |||
| 358 | if (info.uses_rescaling_uniform) { | 358 | if (info.uses_rescaling_uniform) { |
| 359 | header += "layout(location=0) uniform vec4 scaling;"; | 359 | header += "layout(location=0) uniform vec4 scaling;"; |
| 360 | } | 360 | } |
| 361 | if (info.uses_render_area) { | ||
| 362 | header += "layout(location=1) uniform vec4 render_area;"; | ||
| 363 | } | ||
| 361 | DefineConstantBuffers(bindings); | 364 | DefineConstantBuffers(bindings); |
| 362 | DefineConstantBufferIndirect(); | 365 | DefineConstantBufferIndirect(); |
| 363 | DefineStorageBuffers(bindings); | 366 | DefineStorageBuffers(bindings); |
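In GLSL the render area is a default-block uniform pinned to location 1, mirroring scaling at location 0, so the host can update it with a single call and no UBO plumbing. A hypothetical update (component meaning assumed):

    // Hypothetical host side; the location matches the header string above.
    glUniform4f(1, render_width, render_height, 0.0f, 0.0f); // render_area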
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h index 7567b6fc9..937881484 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv.h +++ b/src/shader_recompiler/backend/spirv/emit_spirv.h | |||
| @@ -23,8 +23,12 @@ struct RescalingLayout { | |||
| 23 | alignas(16) std::array<u32, NUM_IMAGE_SCALING_WORDS> rescaling_images; | 23 | alignas(16) std::array<u32, NUM_IMAGE_SCALING_WORDS> rescaling_images; |
| 24 | u32 down_factor; | 24 | u32 down_factor; |
| 25 | }; | 25 | }; |
| 26 | struct RenderAreaLayout { | ||
| 27 | std::array<f32, 4> render_area; | ||
| 28 | }; | ||
| 26 | constexpr u32 RESCALING_LAYOUT_WORDS_OFFSET = offsetof(RescalingLayout, rescaling_textures); | 29 | constexpr u32 RESCALING_LAYOUT_WORDS_OFFSET = offsetof(RescalingLayout, rescaling_textures); |
| 27 | constexpr u32 RESCALING_LAYOUT_DOWN_FACTOR_OFFSET = offsetof(RescalingLayout, down_factor); | 30 | constexpr u32 RESCALING_LAYOUT_DOWN_FACTOR_OFFSET = offsetof(RescalingLayout, down_factor); |
| 31 | constexpr u32 RENDERAREA_LAYOUT_OFFSET = offsetof(RenderAreaLayout, render_area); | ||
| 28 | 32 | ||
| 29 | [[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info, | 33 | [[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info, |
| 30 | IR::Program& program, Bindings& bindings); | 34 | IR::Program& program, Bindings& bindings); |
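RenderAreaLayout is the host-visible mirror of the new push-constant data, and RENDERAREA_LAYOUT_OFFSET lets the renderer address it by offset, just as the rescaling words are. A hypothetical Vulkan-side update, assuming the block lives in the pipeline layout's push-constant range:

    // Hypothetical: push the render area using the exported offset.
    const RenderAreaLayout data{.render_area = {width, height, 0.0f, 0.0f}};
    vkCmdPushConstants(cmd_buf, pipeline_layout, VK_SHADER_STAGE_ALL_GRAPHICS,
                       RENDERAREA_LAYOUT_OFFSET, sizeof(data.render_area),
                       data.render_area.data());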
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp index c4ca28d11..50daacd95 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp +++ b/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp | |||
| @@ -18,6 +18,10 @@ void EmitBitCastU64F64(EmitContext&) { | |||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | 18 | throw NotImplementedException("SPIR-V Instruction"); |
| 19 | } | 19 | } |
| 20 | 20 | ||
| 21 | void EmitBitCastS32F32(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 21 | void EmitBitCastF16U16(EmitContext&) { | 25 | void EmitBitCastF16U16(EmitContext&) { |
| 22 | throw NotImplementedException("SPIR-V Instruction"); | 26 | throw NotImplementedException("SPIR-V Instruction"); |
| 23 | } | 27 | } |
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp index 2c68aba39..5b3b5d1f3 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp +++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp | |||
| @@ -353,7 +353,6 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) { | |||
| 353 | case IR::Attribute::TessellationEvaluationPointV: | 353 | case IR::Attribute::TessellationEvaluationPointV: |
| 354 | return ctx.OpLoad(ctx.F32[1], | 354 | return ctx.OpLoad(ctx.F32[1], |
| 355 | ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.Const(1U))); | 355 | ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.Const(1U))); |
| 356 | |||
| 357 | default: | 356 | default: |
| 358 | throw NotImplementedException("Read attribute {}", attr); | 357 | throw NotImplementedException("Read attribute {}", attr); |
| 359 | } | 358 | } |
| @@ -513,6 +512,18 @@ Id EmitInvocationId(EmitContext& ctx) { | |||
| 513 | return ctx.OpLoad(ctx.U32[1], ctx.invocation_id); | 512 | return ctx.OpLoad(ctx.U32[1], ctx.invocation_id); |
| 514 | } | 513 | } |
| 515 | 514 | ||
| 515 | Id EmitInvocationInfo(EmitContext& ctx) { | ||
| 516 | switch (ctx.stage) { | ||
| 517 | case Stage::TessellationControl: | ||
| 518 | case Stage::TessellationEval: | ||
| 519 | return ctx.OpShiftLeftLogical(ctx.U32[1], ctx.OpLoad(ctx.U32[1], ctx.patch_vertices_in), | ||
| 520 | ctx.Const(16u)); | ||
| 521 | default: | ||
| 522 | LOG_WARNING(Shader, "(STUBBED) called"); | ||
| 523 | return ctx.Const(0x00ff0000u); | ||
| 524 | } | ||
| 525 | } | ||
| 526 | |||
| 516 | Id EmitSampleId(EmitContext& ctx) { | 527 | Id EmitSampleId(EmitContext& ctx) { |
| 517 | return ctx.OpLoad(ctx.U32[1], ctx.sample_id); | 528 | return ctx.OpLoad(ctx.U32[1], ctx.sample_id); |
| 518 | } | 529 | } |
| @@ -537,6 +548,17 @@ Id EmitResolutionDownFactor(EmitContext& ctx) { | |||
| 537 | } | 548 | } |
| 538 | } | 549 | } |
| 539 | 550 | ||
| 551 | Id EmitRenderArea(EmitContext& ctx) { | ||
| 552 | if (ctx.profile.unified_descriptor_binding) { | ||
| 553 | const Id pointer_type{ctx.TypePointer(spv::StorageClass::PushConstant, ctx.F32[4])}; | ||
| 554 | const Id index{ctx.Const(ctx.render_are_member_index)}; | ||
| 555 | const Id pointer{ctx.OpAccessChain(pointer_type, ctx.render_area_push_constant, index)}; | ||
| 556 | return ctx.OpLoad(ctx.F32[4], pointer); | ||
| 557 | } else { | ||
| 558 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 559 | } | ||
| 560 | } | ||
| 561 | |||
| 540 | Id EmitLoadLocal(EmitContext& ctx, Id word_offset) { | 562 | Id EmitLoadLocal(EmitContext& ctx, Id word_offset) { |
| 541 | const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)}; | 563 | const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)}; |
| 542 | return ctx.OpLoad(ctx.U32[1], pointer); | 564 | return ctx.OpLoad(ctx.U32[1], pointer); |
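Note: EmitInvocationInfo packs the patch vertex count into bits [23:16] for the tessellation stages and keeps the old stub constant everywhere else, while EmitRenderArea reads a vec4 out of the push-constant block defined below. A hedged sketch of the packing (function name is illustrative):

    #include <cstdint>

    // Assumed SR_INVOCATION_INFO layout: vertices per patch in bits [23:16].
    uint32_t invocation_info(bool is_tessellation, uint32_t patch_vertices_in) {
        return is_tessellation ? (patch_vertices_in << 16) : 0x00ff0000u;
    }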
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h index 984d072b4..e31cdc5e8 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h +++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h | |||
| @@ -72,10 +72,12 @@ void EmitSetOFlag(EmitContext& ctx); | |||
| 72 | Id EmitWorkgroupId(EmitContext& ctx); | 72 | Id EmitWorkgroupId(EmitContext& ctx); |
| 73 | Id EmitLocalInvocationId(EmitContext& ctx); | 73 | Id EmitLocalInvocationId(EmitContext& ctx); |
| 74 | Id EmitInvocationId(EmitContext& ctx); | 74 | Id EmitInvocationId(EmitContext& ctx); |
| 75 | Id EmitInvocationInfo(EmitContext& ctx); | ||
| 75 | Id EmitSampleId(EmitContext& ctx); | 76 | Id EmitSampleId(EmitContext& ctx); |
| 76 | Id EmitIsHelperInvocation(EmitContext& ctx); | 77 | Id EmitIsHelperInvocation(EmitContext& ctx); |
| 77 | Id EmitYDirection(EmitContext& ctx); | 78 | Id EmitYDirection(EmitContext& ctx); |
| 78 | Id EmitResolutionDownFactor(EmitContext& ctx); | 79 | Id EmitResolutionDownFactor(EmitContext& ctx); |
| 80 | Id EmitRenderArea(EmitContext& ctx); | ||
| 79 | Id EmitLoadLocal(EmitContext& ctx, Id word_offset); | 81 | Id EmitLoadLocal(EmitContext& ctx, Id word_offset); |
| 80 | void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value); | 82 | void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value); |
| 81 | Id EmitUndefU1(EmitContext& ctx); | 83 | Id EmitUndefU1(EmitContext& ctx); |
| @@ -177,7 +179,8 @@ Id EmitSelectF64(EmitContext& ctx, Id cond, Id true_value, Id false_value); | |||
| 177 | void EmitBitCastU16F16(EmitContext& ctx); | 179 | void EmitBitCastU16F16(EmitContext& ctx); |
| 178 | Id EmitBitCastU32F32(EmitContext& ctx, Id value); | 180 | Id EmitBitCastU32F32(EmitContext& ctx, Id value); |
| 179 | void EmitBitCastU64F64(EmitContext& ctx); | 181 | void EmitBitCastU64F64(EmitContext& ctx); |
| 180 | void EmitBitCastF16U16(EmitContext& ctx); | 182 | void EmitBitCastS32F32(EmitContext& ctx); |
| 183 | void EmitBitCastF16U16(EmitContext&); | ||
| 181 | Id EmitBitCastF32U32(EmitContext& ctx, Id value); | 184 | Id EmitBitCastF32U32(EmitContext& ctx, Id value); |
| 182 | void EmitBitCastF64U64(EmitContext& ctx); | 185 | void EmitBitCastF64U64(EmitContext& ctx); |
| 183 | Id EmitPackUint2x32(EmitContext& ctx, Id value); | 186 | Id EmitPackUint2x32(EmitContext& ctx, Id value); |
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp index aecc4c612..0bfc2dd89 100644 --- a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp +++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp | |||
| @@ -473,6 +473,7 @@ EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_inf | |||
| 473 | DefineAttributeMemAccess(program.info); | 473 | DefineAttributeMemAccess(program.info); |
| 474 | DefineGlobalMemoryFunctions(program.info); | 474 | DefineGlobalMemoryFunctions(program.info); |
| 475 | DefineRescalingInput(program.info); | 475 | DefineRescalingInput(program.info); |
| 476 | DefineRenderArea(program.info); | ||
| 476 | } | 477 | } |
| 477 | 478 | ||
| 478 | EmitContext::~EmitContext() = default; | 479 | EmitContext::~EmitContext() = default; |
| @@ -982,6 +983,36 @@ void EmitContext::DefineRescalingInputUniformConstant() { | |||
| 982 | } | 983 | } |
| 983 | } | 984 | } |
| 984 | 985 | ||
| 986 | void EmitContext::DefineRenderArea(const Info& info) { | ||
| 987 | if (!info.uses_render_area) { | ||
| 988 | return; | ||
| 989 | } | ||
| 990 | |||
| 991 | if (profile.unified_descriptor_binding) { | ||
| 992 | boost::container::static_vector<Id, 1> members{}; | ||
| 993 | u32 member_index{0}; | ||
| 994 | |||
| 995 | members.push_back(F32[4]); | ||
| 996 | render_are_member_index = member_index++; | ||
| 997 | |||
| 998 | const Id push_constant_struct{TypeStruct(std::span(members.data(), members.size()))}; | ||
| 999 | Decorate(push_constant_struct, spv::Decoration::Block); | ||
| 1000 | Name(push_constant_struct, "RenderAreaInfo"); | ||
| 1001 | |||
| 1002 | MemberDecorate(push_constant_struct, render_are_member_index, spv::Decoration::Offset, 0); | ||
| 1003 | MemberName(push_constant_struct, render_are_member_index, "render_area"); | ||
| 1004 | |||
| 1005 | const Id pointer_type{TypePointer(spv::StorageClass::PushConstant, push_constant_struct)}; | ||
| 1006 | render_area_push_constant = | ||
| 1007 | AddGlobalVariable(pointer_type, spv::StorageClass::PushConstant); | ||
| 1008 | Name(render_area_push_constant, "render_area_push_constants"); | ||
| 1009 | |||
| 1010 | if (profile.supported_spirv >= 0x00010400) { | ||
| 1011 | interfaces.push_back(render_area_push_constant); | ||
| 1012 | } | ||
| 1013 | } | ||
| 1014 | } | ||
| 1015 | |||
| 985 | void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) { | 1016 | void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) { |
| 986 | if (info.constant_buffer_descriptors.empty()) { | 1017 | if (info.constant_buffer_descriptors.empty()) { |
| 987 | return; | 1018 | return; |
| @@ -1294,6 +1325,10 @@ void EmitContext::DefineInputs(const IR::Program& program) { | |||
| 1294 | if (info.uses_invocation_id) { | 1325 | if (info.uses_invocation_id) { |
| 1295 | invocation_id = DefineInput(*this, U32[1], false, spv::BuiltIn::InvocationId); | 1326 | invocation_id = DefineInput(*this, U32[1], false, spv::BuiltIn::InvocationId); |
| 1296 | } | 1327 | } |
| 1328 | if (info.uses_invocation_info && | ||
| 1329 | (stage == Shader::Stage::TessellationControl || stage == Shader::Stage::TessellationEval)) { | ||
| 1330 | patch_vertices_in = DefineInput(*this, U32[1], false, spv::BuiltIn::PatchVertices); | ||
| 1331 | } | ||
| 1297 | if (info.uses_sample_id) { | 1332 | if (info.uses_sample_id) { |
| 1298 | sample_id = DefineInput(*this, U32[1], false, spv::BuiltIn::SampleId); | 1333 | sample_id = DefineInput(*this, U32[1], false, spv::BuiltIn::SampleId); |
| 1299 | } | 1334 | } |
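Note: DefineRenderArea declares a one-member, Block-decorated struct in PushConstant storage, so the render area reaches the shader without a descriptor binding. A host-side mirror of that layout, as a sketch (struct and field names are illustrative):

    // One vec4 at offset 0; this change only reads components 0 and 1.
    struct RenderAreaInfo {
        float render_area[4]; // [0] = width, [1] = height
    };
    static_assert(sizeof(RenderAreaInfo) == 16);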
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.h b/src/shader_recompiler/backend/spirv/spirv_emit_context.h index bc25b8b84..dde45b4bc 100644 --- a/src/shader_recompiler/backend/spirv/spirv_emit_context.h +++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.h | |||
| @@ -204,6 +204,7 @@ public: | |||
| 204 | Id workgroup_id{}; | 204 | Id workgroup_id{}; |
| 205 | Id local_invocation_id{}; | 205 | Id local_invocation_id{}; |
| 206 | Id invocation_id{}; | 206 | Id invocation_id{}; |
| 207 | Id patch_vertices_in{}; | ||
| 207 | Id sample_id{}; | 208 | Id sample_id{}; |
| 208 | Id is_helper_invocation{}; | 209 | Id is_helper_invocation{}; |
| 209 | Id subgroup_local_invocation_id{}; | 210 | Id subgroup_local_invocation_id{}; |
| @@ -243,6 +244,9 @@ public: | |||
| 243 | u32 texture_rescaling_index{}; | 244 | u32 texture_rescaling_index{}; |
| 244 | u32 image_rescaling_index{}; | 245 | u32 image_rescaling_index{}; |
| 245 | 246 | ||
| 247 | Id render_area_push_constant{}; | ||
| 248 | u32 render_are_member_index{}; | ||
| 249 | |||
| 246 | Id local_memory{}; | 250 | Id local_memory{}; |
| 247 | 251 | ||
| 248 | Id shared_memory_u8{}; | 252 | Id shared_memory_u8{}; |
| @@ -318,6 +322,7 @@ private: | |||
| 318 | void DefineRescalingInput(const Info& info); | 322 | void DefineRescalingInput(const Info& info); |
| 319 | void DefineRescalingInputPushConstant(); | 323 | void DefineRescalingInputPushConstant(); |
| 320 | void DefineRescalingInputUniformConstant(); | 324 | void DefineRescalingInputUniformConstant(); |
| 325 | void DefineRenderArea(const Info& info); | ||
| 321 | 326 | ||
| 322 | void DefineInputs(const IR::Program& program); | 327 | void DefineInputs(const IR::Program& program); |
| 323 | void DefineOutputs(const IR::Program& program); | 328 | void DefineOutputs(const IR::Program& program); |
diff --git a/src/shader_recompiler/environment.h b/src/shader_recompiler/environment.h index 9729d48c6..402f2664f 100644 --- a/src/shader_recompiler/environment.h +++ b/src/shader_recompiler/environment.h | |||
| @@ -22,6 +22,10 @@ public: | |||
| 22 | 22 | ||
| 23 | [[nodiscard]] virtual TextureType ReadTextureType(u32 raw_handle) = 0; | 23 | [[nodiscard]] virtual TextureType ReadTextureType(u32 raw_handle) = 0; |
| 24 | 24 | ||
| 25 | [[nodiscard]] virtual TexturePixelFormat ReadTexturePixelFormat(u32 raw_handle) = 0; | ||
| 26 | |||
| 27 | [[nodiscard]] virtual u32 ReadViewportTransformState() = 0; | ||
| 28 | |||
| 25 | [[nodiscard]] virtual u32 TextureBoundBuffer() const = 0; | 29 | [[nodiscard]] virtual u32 TextureBoundBuffer() const = 0; |
| 26 | 30 | ||
| 27 | [[nodiscard]] virtual u32 LocalMemorySize() const = 0; | 31 | [[nodiscard]] virtual u32 LocalMemorySize() const = 0; |
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp index 11086ed8c..0cdac0eff 100644 --- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp +++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp | |||
| @@ -362,6 +362,10 @@ U32 IREmitter::InvocationId() { | |||
| 362 | return Inst<U32>(Opcode::InvocationId); | 362 | return Inst<U32>(Opcode::InvocationId); |
| 363 | } | 363 | } |
| 364 | 364 | ||
| 365 | U32 IREmitter::InvocationInfo() { | ||
| 366 | return Inst<U32>(Opcode::InvocationInfo); | ||
| 367 | } | ||
| 368 | |||
| 365 | U32 IREmitter::SampleId() { | 369 | U32 IREmitter::SampleId() { |
| 366 | return Inst<U32>(Opcode::SampleId); | 370 | return Inst<U32>(Opcode::SampleId); |
| 367 | } | 371 | } |
| @@ -378,6 +382,14 @@ F32 IREmitter::ResolutionDownFactor() { | |||
| 378 | return Inst<F32>(Opcode::ResolutionDownFactor); | 382 | return Inst<F32>(Opcode::ResolutionDownFactor); |
| 379 | } | 383 | } |
| 380 | 384 | ||
| 385 | F32 IREmitter::RenderAreaWidth() { | ||
| 386 | return F32(CompositeExtract(Inst<Value>(Opcode::RenderArea), 0)); | ||
| 387 | } | ||
| 388 | |||
| 389 | F32 IREmitter::RenderAreaHeight() { | ||
| 390 | return F32(CompositeExtract(Inst<Value>(Opcode::RenderArea), 1)); | ||
| 391 | } | ||
| 392 | |||
| 381 | U32 IREmitter::LaneId() { | 393 | U32 IREmitter::LaneId() { |
| 382 | return Inst<U32>(Opcode::LaneId); | 394 | return Inst<U32>(Opcode::LaneId); |
| 383 | } | 395 | } |
| @@ -684,6 +696,11 @@ IR::U32 IREmitter::BitCast<IR::U32, IR::F32>(const IR::F32& value) { | |||
| 684 | } | 696 | } |
| 685 | 697 | ||
| 686 | template <> | 698 | template <> |
| 699 | IR::S32 IREmitter::BitCast<IR::S32, IR::F32>(const IR::F32& value) { | ||
| 700 | return Inst<IR::S32>(Opcode::BitCastS32F32, value); | ||
| 701 | } | ||
| 702 | |||
| 703 | template <> | ||
| 687 | IR::F32 IREmitter::BitCast<IR::F32, IR::U32>(const IR::U32& value) { | 704 | IR::F32 IREmitter::BitCast<IR::F32, IR::U32>(const IR::U32& value) { |
| 688 | return Inst<IR::F32>(Opcode::BitCastF32U32, value); | 705 | return Inst<IR::F32>(Opcode::BitCastF32U32, value); |
| 689 | } | 706 | } |
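Note: RenderAreaWidth and RenderAreaHeight both emit the F32x4 RenderArea opcode and extract components 0 and 1. In plain terms (types here are illustrative, not the real IR classes):

    struct Vec4 { float x, y, z, w; };
    float render_area_width(const Vec4& render_area)  { return render_area.x; }
    float render_area_height(const Vec4& render_area) { return render_area.y; }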
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.h b/src/shader_recompiler/frontend/ir/ir_emitter.h index 25839a371..2df992feb 100644 --- a/src/shader_recompiler/frontend/ir/ir_emitter.h +++ b/src/shader_recompiler/frontend/ir/ir_emitter.h | |||
| @@ -97,12 +97,16 @@ public: | |||
| 97 | [[nodiscard]] U32 LocalInvocationIdZ(); | 97 | [[nodiscard]] U32 LocalInvocationIdZ(); |
| 98 | 98 | ||
| 99 | [[nodiscard]] U32 InvocationId(); | 99 | [[nodiscard]] U32 InvocationId(); |
| 100 | [[nodiscard]] U32 InvocationInfo(); | ||
| 100 | [[nodiscard]] U32 SampleId(); | 101 | [[nodiscard]] U32 SampleId(); |
| 101 | [[nodiscard]] U1 IsHelperInvocation(); | 102 | [[nodiscard]] U1 IsHelperInvocation(); |
| 102 | [[nodiscard]] F32 YDirection(); | 103 | [[nodiscard]] F32 YDirection(); |
| 103 | 104 | ||
| 104 | [[nodiscard]] F32 ResolutionDownFactor(); | 105 | [[nodiscard]] F32 ResolutionDownFactor(); |
| 105 | 106 | ||
| 107 | [[nodiscard]] F32 RenderAreaWidth(); | ||
| 108 | [[nodiscard]] F32 RenderAreaHeight(); | ||
| 109 | |||
| 106 | [[nodiscard]] U32 LaneId(); | 110 | [[nodiscard]] U32 LaneId(); |
| 107 | 111 | ||
| 108 | [[nodiscard]] U32 LoadGlobalU8(const U64& address); | 112 | [[nodiscard]] U32 LoadGlobalU8(const U64& address); |
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp index 468782eb1..84417980b 100644 --- a/src/shader_recompiler/frontend/ir/microinstruction.cpp +++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp | |||
| @@ -325,11 +325,6 @@ void Inst::AddPhiOperand(Block* predecessor, const Value& value) { | |||
| 325 | phi_args.emplace_back(predecessor, value); | 325 | phi_args.emplace_back(predecessor, value); |
| 326 | } | 326 | } |
| 327 | 327 | ||
| 328 | void Inst::ErasePhiOperand(size_t index) { | ||
| 329 | const auto operand_it{phi_args.begin() + static_cast<ptrdiff_t>(index)}; | ||
| 330 | phi_args.erase(operand_it); | ||
| 331 | } | ||
| 332 | |||
| 333 | void Inst::OrderPhiArgs() { | 328 | void Inst::OrderPhiArgs() { |
| 334 | if (op != Opcode::Phi) { | 329 | if (op != Opcode::Phi) { |
| 335 | throw LogicError("{} is not a Phi instruction", op); | 330 | throw LogicError("{} is not a Phi instruction", op); |
diff --git a/src/shader_recompiler/frontend/ir/opcodes.h b/src/shader_recompiler/frontend/ir/opcodes.h index 752879a18..e70d7745c 100644 --- a/src/shader_recompiler/frontend/ir/opcodes.h +++ b/src/shader_recompiler/frontend/ir/opcodes.h | |||
| @@ -37,6 +37,7 @@ constexpr Type U8{Type::U8}; | |||
| 37 | constexpr Type U16{Type::U16}; | 37 | constexpr Type U16{Type::U16}; |
| 38 | constexpr Type U32{Type::U32}; | 38 | constexpr Type U32{Type::U32}; |
| 39 | constexpr Type U64{Type::U64}; | 39 | constexpr Type U64{Type::U64}; |
| 40 | constexpr Type S32{Type::S32}; | ||
| 40 | constexpr Type F16{Type::F16}; | 41 | constexpr Type F16{Type::F16}; |
| 41 | constexpr Type F32{Type::F32}; | 42 | constexpr Type F32{Type::F32}; |
| 42 | constexpr Type F64{Type::F64}; | 43 | constexpr Type F64{Type::F64}; |
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc index 86410ddfc..1fe3749cc 100644 --- a/src/shader_recompiler/frontend/ir/opcodes.inc +++ b/src/shader_recompiler/frontend/ir/opcodes.inc | |||
| @@ -59,10 +59,12 @@ OPCODE(SetOFlag, Void, U1, | |||
| 59 | OPCODE(WorkgroupId, U32x3, ) | 59 | OPCODE(WorkgroupId, U32x3, ) |
| 60 | OPCODE(LocalInvocationId, U32x3, ) | 60 | OPCODE(LocalInvocationId, U32x3, ) |
| 61 | OPCODE(InvocationId, U32, ) | 61 | OPCODE(InvocationId, U32, ) |
| 62 | OPCODE(InvocationInfo, U32, ) | ||
| 62 | OPCODE(SampleId, U32, ) | 63 | OPCODE(SampleId, U32, ) |
| 63 | OPCODE(IsHelperInvocation, U1, ) | 64 | OPCODE(IsHelperInvocation, U1, ) |
| 64 | OPCODE(YDirection, F32, ) | 65 | OPCODE(YDirection, F32, ) |
| 65 | OPCODE(ResolutionDownFactor, F32, ) | 66 | OPCODE(ResolutionDownFactor, F32, ) |
| 67 | OPCODE(RenderArea, F32x4, ) | ||
| 66 | 68 | ||
| 67 | // Undefined | 69 | // Undefined |
| 68 | OPCODE(UndefU1, U1, ) | 70 | OPCODE(UndefU1, U1, ) |
| @@ -173,6 +175,7 @@ OPCODE(SelectF64, F64, U1, | |||
| 173 | OPCODE(BitCastU16F16, U16, F16, ) | 175 | OPCODE(BitCastU16F16, U16, F16, ) |
| 174 | OPCODE(BitCastU32F32, U32, F32, ) | 176 | OPCODE(BitCastU32F32, U32, F32, ) |
| 175 | OPCODE(BitCastU64F64, U64, F64, ) | 177 | OPCODE(BitCastU64F64, U64, F64, ) |
| 178 | OPCODE(BitCastS32F32, S32, F32, ) | ||
| 176 | OPCODE(BitCastF16U16, F16, U16, ) | 179 | OPCODE(BitCastF16U16, F16, U16, ) |
| 177 | OPCODE(BitCastF32U32, F32, U32, ) | 180 | OPCODE(BitCastF32U32, F32, U32, ) |
| 178 | OPCODE(BitCastF64U64, F64, U64, ) | 181 | OPCODE(BitCastF64U64, F64, U64, ) |
diff --git a/src/shader_recompiler/frontend/ir/patch.h b/src/shader_recompiler/frontend/ir/patch.h index 1e37c8eb6..5077e56c2 100644 --- a/src/shader_recompiler/frontend/ir/patch.h +++ b/src/shader_recompiler/frontend/ir/patch.h | |||
| @@ -14,8 +14,6 @@ enum class Patch : u64 { | |||
| 14 | TessellationLodBottom, | 14 | TessellationLodBottom, |
| 15 | TessellationLodInteriorU, | 15 | TessellationLodInteriorU, |
| 16 | TessellationLodInteriorV, | 16 | TessellationLodInteriorV, |
| 17 | ComponentPadding0, | ||
| 18 | ComponentPadding1, | ||
| 19 | Component0, | 17 | Component0, |
| 20 | Component1, | 18 | Component1, |
| 21 | Component2, | 19 | Component2, |
| @@ -137,7 +135,7 @@ enum class Patch : u64 { | |||
| 137 | Component118, | 135 | Component118, |
| 138 | Component119, | 136 | Component119, |
| 139 | }; | 137 | }; |
| 140 | static_assert(static_cast<u64>(Patch::Component119) == 127); | 138 | static_assert(static_cast<u64>(Patch::Component119) == 125); |
| 141 | 139 | ||
| 142 | [[nodiscard]] bool IsGeneric(Patch patch) noexcept; | 140 | [[nodiscard]] bool IsGeneric(Patch patch) noexcept; |
| 143 | 141 | ||
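Note: removing the two padding entries shifts every generic component down by two, which is exactly the static_assert change. The arithmetic:

    // Before: 6 tessellation LOD entries + 2 paddings -> Component0 == 8,
    //         Component119 == 8 + 119 == 127
    // After:  6 tessellation LOD entries              -> Component0 == 6,
    //         Component119 == 6 + 119 == 125
    static_assert(6 + 119 == 125);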
diff --git a/src/shader_recompiler/frontend/ir/type.h b/src/shader_recompiler/frontend/ir/type.h index 04c8c4ddb..5a7c706ad 100644 --- a/src/shader_recompiler/frontend/ir/type.h +++ b/src/shader_recompiler/frontend/ir/type.h | |||
| @@ -24,21 +24,22 @@ enum class Type { | |||
| 24 | U16 = 1 << 7, | 24 | U16 = 1 << 7, |
| 25 | U32 = 1 << 8, | 25 | U32 = 1 << 8, |
| 26 | U64 = 1 << 9, | 26 | U64 = 1 << 9, |
| 27 | F16 = 1 << 10, | 27 | S32 = 1 << 10, |
| 28 | F32 = 1 << 11, | 28 | F16 = 1 << 11, |
| 29 | F64 = 1 << 12, | 29 | F32 = 1 << 12, |
| 30 | U32x2 = 1 << 13, | 30 | F64 = 1 << 13, |
| 31 | U32x3 = 1 << 14, | 31 | U32x2 = 1 << 14, |
| 32 | U32x4 = 1 << 15, | 32 | U32x3 = 1 << 15, |
| 33 | F16x2 = 1 << 16, | 33 | U32x4 = 1 << 16, |
| 34 | F16x3 = 1 << 17, | 34 | F16x2 = 1 << 17, |
| 35 | F16x4 = 1 << 18, | 35 | F16x3 = 1 << 18, |
| 36 | F32x2 = 1 << 19, | 36 | F16x4 = 1 << 19, |
| 37 | F32x3 = 1 << 20, | 37 | F32x2 = 1 << 20, |
| 38 | F32x4 = 1 << 21, | 38 | F32x3 = 1 << 21, |
| 39 | F64x2 = 1 << 22, | 39 | F32x4 = 1 << 22, |
| 40 | F64x3 = 1 << 23, | 40 | F64x2 = 1 << 23, |
| 41 | F64x4 = 1 << 24, | 41 | F64x3 = 1 << 24, |
| 42 | F64x4 = 1 << 25, | ||
| 42 | }; | 43 | }; |
| 43 | DECLARE_ENUM_FLAG_OPERATORS(Type) | 44 | DECLARE_ENUM_FLAG_OPERATORS(Type) |
| 44 | 45 | ||
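Note: inserting S32 at bit 10 bumps every later flag up one position; each value stays a distinct power of two, so the flag operators keep working. Quick checks:

    static_assert((1 << 10) == 0x0000'0400); // S32 takes the slot F16 held
    static_assert((1 << 25) == 0x0200'0000); // F64x4, now the highest flag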
diff --git a/src/shader_recompiler/frontend/ir/value.cpp b/src/shader_recompiler/frontend/ir/value.cpp index 346169328..30ba12316 100644 --- a/src/shader_recompiler/frontend/ir/value.cpp +++ b/src/shader_recompiler/frontend/ir/value.cpp | |||
| @@ -23,6 +23,8 @@ Value::Value(u16 value) noexcept : type{Type::U16}, imm_u16{value} {} | |||
| 23 | 23 | ||
| 24 | Value::Value(u32 value) noexcept : type{Type::U32}, imm_u32{value} {} | 24 | Value::Value(u32 value) noexcept : type{Type::U32}, imm_u32{value} {} |
| 25 | 25 | ||
| 26 | Value::Value(s32 value) noexcept : type{Type::S32}, imm_s32{value} {} | ||
| 27 | |||
| 26 | Value::Value(f32 value) noexcept : type{Type::F32}, imm_f32{value} {} | 28 | Value::Value(f32 value) noexcept : type{Type::F32}, imm_f32{value} {} |
| 27 | 29 | ||
| 28 | Value::Value(u64 value) noexcept : type{Type::U64}, imm_u64{value} {} | 30 | Value::Value(u64 value) noexcept : type{Type::U64}, imm_u64{value} {} |
| @@ -69,6 +71,7 @@ bool Value::operator==(const Value& other) const { | |||
| 69 | return imm_u16 == other.imm_u16; | 71 | return imm_u16 == other.imm_u16; |
| 70 | case Type::U32: | 72 | case Type::U32: |
| 71 | case Type::F32: | 73 | case Type::F32: |
| 74 | case Type::S32: | ||
| 72 | return imm_u32 == other.imm_u32; | 75 | return imm_u32 == other.imm_u32; |
| 73 | case Type::U64: | 76 | case Type::U64: |
| 74 | case Type::F64: | 77 | case Type::F64: |
diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h index 1a2e4ccb6..e8bbb93a5 100644 --- a/src/shader_recompiler/frontend/ir/value.h +++ b/src/shader_recompiler/frontend/ir/value.h | |||
| @@ -44,6 +44,7 @@ public: | |||
| 44 | explicit Value(u8 value) noexcept; | 44 | explicit Value(u8 value) noexcept; |
| 45 | explicit Value(u16 value) noexcept; | 45 | explicit Value(u16 value) noexcept; |
| 46 | explicit Value(u32 value) noexcept; | 46 | explicit Value(u32 value) noexcept; |
| 47 | explicit Value(s32 value) noexcept; | ||
| 47 | explicit Value(f32 value) noexcept; | 48 | explicit Value(f32 value) noexcept; |
| 48 | explicit Value(u64 value) noexcept; | 49 | explicit Value(u64 value) noexcept; |
| 49 | explicit Value(f64 value) noexcept; | 50 | explicit Value(f64 value) noexcept; |
| @@ -66,6 +67,7 @@ public: | |||
| 66 | [[nodiscard]] u8 U8() const; | 67 | [[nodiscard]] u8 U8() const; |
| 67 | [[nodiscard]] u16 U16() const; | 68 | [[nodiscard]] u16 U16() const; |
| 68 | [[nodiscard]] u32 U32() const; | 69 | [[nodiscard]] u32 U32() const; |
| 70 | [[nodiscard]] s32 S32() const; | ||
| 69 | [[nodiscard]] f32 F32() const; | 71 | [[nodiscard]] f32 F32() const; |
| 70 | [[nodiscard]] u64 U64() const; | 72 | [[nodiscard]] u64 U64() const; |
| 71 | [[nodiscard]] f64 F64() const; | 73 | [[nodiscard]] f64 F64() const; |
| @@ -85,6 +87,7 @@ private: | |||
| 85 | u8 imm_u8; | 87 | u8 imm_u8; |
| 86 | u16 imm_u16; | 88 | u16 imm_u16; |
| 87 | u32 imm_u32; | 89 | u32 imm_u32; |
| 90 | s32 imm_s32; | ||
| 88 | f32 imm_f32; | 91 | f32 imm_f32; |
| 89 | u64 imm_u64; | 92 | u64 imm_u64; |
| 90 | f64 imm_f64; | 93 | f64 imm_f64; |
| @@ -178,13 +181,9 @@ public: | |||
| 178 | 181 | ||
| 179 | /// Get a pointer to the block of a phi argument. | 182 | /// Get a pointer to the block of a phi argument. |
| 180 | [[nodiscard]] Block* PhiBlock(size_t index) const; | 183 | [[nodiscard]] Block* PhiBlock(size_t index) const; |
| 181 | |||
| 182 | /// Add phi operand to a phi instruction. | 184 | /// Add phi operand to a phi instruction. |
| 183 | void AddPhiOperand(Block* predecessor, const Value& value); | 185 | void AddPhiOperand(Block* predecessor, const Value& value); |
| 184 | 186 | ||
| 185 | // Erase the phi operand at the given index. | ||
| 186 | void ErasePhiOperand(size_t index); | ||
| 187 | |||
| 188 | /// Orders the Phi arguments from farthest away to nearest. | 187 | /// Orders the Phi arguments from farthest away to nearest. |
| 189 | void OrderPhiArgs(); | 188 | void OrderPhiArgs(); |
| 190 | 189 | ||
| @@ -270,6 +269,7 @@ using U8 = TypedValue<Type::U8>; | |||
| 270 | using U16 = TypedValue<Type::U16>; | 269 | using U16 = TypedValue<Type::U16>; |
| 271 | using U32 = TypedValue<Type::U32>; | 270 | using U32 = TypedValue<Type::U32>; |
| 272 | using U64 = TypedValue<Type::U64>; | 271 | using U64 = TypedValue<Type::U64>; |
| 272 | using S32 = TypedValue<Type::S32>; | ||
| 273 | using F16 = TypedValue<Type::F16>; | 273 | using F16 = TypedValue<Type::F16>; |
| 274 | using F32 = TypedValue<Type::F32>; | 274 | using F32 = TypedValue<Type::F32>; |
| 275 | using F64 = TypedValue<Type::F64>; | 275 | using F64 = TypedValue<Type::F64>; |
| @@ -381,6 +381,14 @@ inline u32 Value::U32() const { | |||
| 381 | return imm_u32; | 381 | return imm_u32; |
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | inline s32 Value::S32() const { | ||
| 385 | if (IsIdentity()) { | ||
| 386 | return inst->Arg(0).S32(); | ||
| 387 | } | ||
| 388 | DEBUG_ASSERT(type == Type::S32); | ||
| 389 | return imm_s32; | ||
| 390 | } | ||
| 391 | |||
| 384 | inline f32 Value::F32() const { | 392 | inline f32 Value::F32() const { |
| 385 | if (IsIdentity()) { | 393 | if (IsIdentity()) { |
| 386 | return inst->Arg(0).F32(); | 394 | return inst->Arg(0).F32(); |
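Note: the S32 immediate reuses the 32-bit union slot, so equality can compare raw bits for U32, S32 and F32 alike (the value.cpp hunk above). A minimal model of the storage pattern (names illustrative):

    #include <cstdint>

    struct Imm32 {
        enum class Type { U32, S32, F32 } type;
        union { uint32_t imm_u32; int32_t imm_s32; float imm_f32; };
    };
    // sizeof(Imm32) == 8 on typical ABIs: 4-byte tag + one overlapping
    // 4-byte payload.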
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp index 52be12f9c..753c62098 100644 --- a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp +++ b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp | |||
| @@ -117,8 +117,7 @@ enum class SpecialRegister : u64 { | |||
| 117 | case SpecialRegister::SR_THREAD_KILL: | 117 | case SpecialRegister::SR_THREAD_KILL: |
| 118 | return IR::U32{ir.Select(ir.IsHelperInvocation(), ir.Imm32(-1), ir.Imm32(0))}; | 118 | return IR::U32{ir.Select(ir.IsHelperInvocation(), ir.Imm32(-1), ir.Imm32(0))}; |
| 119 | case SpecialRegister::SR_INVOCATION_INFO: | 119 | case SpecialRegister::SR_INVOCATION_INFO: |
| 120 | LOG_WARNING(Shader, "(STUBBED) SR_INVOCATION_INFO"); | 120 | return ir.InvocationInfo(); |
| 121 | return ir.Imm32(0x00ff'0000); | ||
| 122 | case SpecialRegister::SR_TID: { | 121 | case SpecialRegister::SR_TID: { |
| 123 | const IR::Value tid{ir.LocalInvocationId()}; | 122 | const IR::Value tid{ir.LocalInvocationId()}; |
| 124 | return ir.BitFieldInsert(ir.BitFieldInsert(IR::U32{ir.CompositeExtract(tid, 0)}, | 123 | return ir.BitFieldInsert(ir.BitFieldInsert(IR::U32{ir.CompositeExtract(tid, 0)}, |
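Note: SR_INVOCATION_INFO is no longer hard-coded; the 0x00ff0000 constant survives only as the backend default for non-tessellation stages. A sketch of how a consumer would unpack the field (assumed layout, helper is illustrative):

    #include <cstdint>

    uint32_t patch_vertices(uint32_t sr_invocation_info) {
        return (sr_invocation_info >> 16) & 0xff; // bits [23:16]
    }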
diff --git a/src/shader_recompiler/frontend/maxwell/translate_program.cpp b/src/shader_recompiler/frontend/maxwell/translate_program.cpp index b58741d4d..376aae0ea 100644 --- a/src/shader_recompiler/frontend/maxwell/translate_program.cpp +++ b/src/shader_recompiler/frontend/maxwell/translate_program.cpp | |||
| @@ -220,8 +220,10 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo | |||
| 220 | 220 | ||
| 221 | Optimization::ConstantPropagationPass(program); | 221 | Optimization::ConstantPropagationPass(program); |
| 222 | 222 | ||
| 223 | Optimization::PositionPass(env, program); | ||
| 224 | |||
| 223 | Optimization::GlobalMemoryToStorageBufferPass(program); | 225 | Optimization::GlobalMemoryToStorageBufferPass(program); |
| 224 | Optimization::TexturePass(env, program); | 226 | Optimization::TexturePass(env, program, host_info); |
| 225 | 227 | ||
| 226 | if (Settings::values.resolution_info.active) { | 228 | if (Settings::values.resolution_info.active) { |
| 227 | Optimization::RescalingPass(program); | 229 | Optimization::RescalingPass(program); |
diff --git a/src/shader_recompiler/host_translate_info.h b/src/shader_recompiler/host_translate_info.h index 881874310..cc1500690 100644 --- a/src/shader_recompiler/host_translate_info.h +++ b/src/shader_recompiler/host_translate_info.h | |||
| @@ -13,6 +13,7 @@ struct HostTranslateInfo { | |||
| 13 | bool support_float16{}; ///< True when the device supports 16-bit floats | 13 | bool support_float16{}; ///< True when the device supports 16-bit floats |
| 14 | bool support_int64{}; ///< True when the device supports 64-bit integers | 14 | bool support_int64{}; ///< True when the device supports 64-bit integers |
| 15 | bool needs_demote_reorder{}; ///< True when the device needs DemoteToHelperInvocation reordered | 15 | bool needs_demote_reorder{}; ///< True when the device needs DemoteToHelperInvocation reordered |
| 16 | bool support_snorm_render_buffer{}; ///< True when the device supports SNORM render buffers | ||
| 16 | }; | 17 | }; |
| 17 | 18 | ||
| 18 | } // namespace Shader | 19 | } // namespace Shader |
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp index 7cff8ecdc..5a4195217 100644 --- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp +++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp | |||
| @@ -468,6 +468,9 @@ void VisitUsages(Info& info, IR::Inst& inst) { | |||
| 468 | case IR::Opcode::InvocationId: | 468 | case IR::Opcode::InvocationId: |
| 469 | info.uses_invocation_id = true; | 469 | info.uses_invocation_id = true; |
| 470 | break; | 470 | break; |
| 471 | case IR::Opcode::InvocationInfo: | ||
| 472 | info.uses_invocation_info = true; | ||
| 473 | break; | ||
| 471 | case IR::Opcode::SampleId: | 474 | case IR::Opcode::SampleId: |
| 472 | info.uses_sample_id = true; | 475 | info.uses_sample_id = true; |
| 473 | break; | 476 | break; |
diff --git a/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp b/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp index 9a7d47344..1bd8afd6f 100644 --- a/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp +++ b/src/shader_recompiler/ir_opt/dead_code_elimination_pass.cpp | |||
| @@ -1,104 +1,24 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include <algorithm> | ||
| 5 | |||
| 6 | #include <boost/container/small_vector.hpp> | ||
| 7 | |||
| 8 | #include "shader_recompiler/frontend/ir/basic_block.h" | 4 | #include "shader_recompiler/frontend/ir/basic_block.h" |
| 9 | #include "shader_recompiler/frontend/ir/value.h" | 5 | #include "shader_recompiler/frontend/ir/value.h" |
| 10 | #include "shader_recompiler/ir_opt/passes.h" | 6 | #include "shader_recompiler/ir_opt/passes.h" |
| 11 | 7 | ||
| 12 | namespace Shader::Optimization { | 8 | namespace Shader::Optimization { |
| 13 | namespace { | 9 | |
| 14 | template <bool TEST_USES> | 10 | void DeadCodeEliminationPass(IR::Program& program) { |
| 15 | void DeadInstElimination(IR::Block* const block) { | ||
| 16 | // We iterate over the instructions in reverse order. | 11 | // We iterate over the instructions in reverse order. |
| 17 | // This is because removing an instruction reduces the number of uses for earlier instructions. | 12 | // This is because removing an instruction reduces the number of uses for earlier instructions. |
| 18 | auto it{block->end()}; | 13 | for (IR::Block* const block : program.post_order_blocks) { |
| 19 | while (it != block->begin()) { | 14 | auto it{block->end()}; |
| 20 | --it; | 15 | while (it != block->begin()) { |
| 21 | if constexpr (TEST_USES) { | 16 | --it; |
| 22 | if (it->HasUses() || it->MayHaveSideEffects()) { | 17 | if (!it->HasUses() && !it->MayHaveSideEffects()) { |
| 23 | continue; | 18 | it->Invalidate(); |
| 24 | } | 19 | it = block->Instructions().erase(it); |
| 25 | } | ||
| 26 | it->Invalidate(); | ||
| 27 | it = block->Instructions().erase(it); | ||
| 28 | } | ||
| 29 | } | ||
| 30 | |||
| 31 | void DeletedPhiArgElimination(IR::Program& program, std::span<const IR::Block*> dead_blocks) { | ||
| 32 | for (IR::Block* const block : program.blocks) { | ||
| 33 | for (IR::Inst& phi : *block) { | ||
| 34 | if (!IR::IsPhi(phi)) { | ||
| 35 | continue; | ||
| 36 | } | ||
| 37 | for (size_t i = 0; i < phi.NumArgs(); ++i) { | ||
| 38 | if (std::ranges::find(dead_blocks, phi.PhiBlock(i)) == dead_blocks.end()) { | ||
| 39 | continue; | ||
| 40 | } | ||
| 41 | // Phi operand at this index is an unreachable block | ||
| 42 | phi.ErasePhiOperand(i); | ||
| 43 | --i; | ||
| 44 | } | ||
| 45 | } | ||
| 46 | } | ||
| 47 | } | ||
| 48 | |||
| 49 | void DeadBranchElimination(IR::Program& program) { | ||
| 50 | boost::container::small_vector<const IR::Block*, 3> dead_blocks; | ||
| 51 | const auto begin_it{program.syntax_list.begin()}; | ||
| 52 | for (auto node_it = begin_it; node_it != program.syntax_list.end(); ++node_it) { | ||
| 53 | if (node_it->type != IR::AbstractSyntaxNode::Type::If) { | ||
| 54 | continue; | ||
| 55 | } | ||
| 56 | IR::Inst* const cond_ref{node_it->data.if_node.cond.Inst()}; | ||
| 57 | const IR::U1 cond{cond_ref->Arg(0)}; | ||
| 58 | if (!cond.IsImmediate()) { | ||
| 59 | continue; | ||
| 60 | } | ||
| 61 | if (cond.U1()) { | ||
| 62 | continue; | ||
| 63 | } | ||
| 64 | // False immediate condition. Remove condition ref, erase the entire branch. | ||
| 65 | cond_ref->Invalidate(); | ||
| 66 | // Account for nested if-statements within the if(false) branch | ||
| 67 | u32 nested_ifs{1u}; | ||
| 68 | while (node_it->type != IR::AbstractSyntaxNode::Type::EndIf || nested_ifs > 0) { | ||
| 69 | node_it = program.syntax_list.erase(node_it); | ||
| 70 | switch (node_it->type) { | ||
| 71 | case IR::AbstractSyntaxNode::Type::If: | ||
| 72 | ++nested_ifs; | ||
| 73 | break; | ||
| 74 | case IR::AbstractSyntaxNode::Type::EndIf: | ||
| 75 | --nested_ifs; | ||
| 76 | break; | ||
| 77 | case IR::AbstractSyntaxNode::Type::Block: { | ||
| 78 | IR::Block* const block{node_it->data.block}; | ||
| 79 | DeadInstElimination<false>(block); | ||
| 80 | dead_blocks.push_back(block); | ||
| 81 | break; | ||
| 82 | } | ||
| 83 | default: | ||
| 84 | break; | ||
| 85 | } | 20 | } |
| 86 | } | 21 | } |
| 87 | // Erase EndIf node of the if(false) branch | ||
| 88 | node_it = program.syntax_list.erase(node_it); | ||
| 89 | // Account for loop increment | ||
| 90 | --node_it; | ||
| 91 | } | ||
| 92 | if (!dead_blocks.empty()) { | ||
| 93 | DeletedPhiArgElimination(program, std::span(dead_blocks.data(), dead_blocks.size())); | ||
| 94 | } | ||
| 95 | } | ||
| 96 | } // namespace | ||
| 97 | |||
| 98 | void DeadCodeEliminationPass(IR::Program& program) { | ||
| 99 | DeadBranchElimination(program); | ||
| 100 | for (IR::Block* const block : program.post_order_blocks) { | ||
| 101 | DeadInstElimination<true>(block); | ||
| 102 | } | 22 | } |
| 103 | } | 23 | } |
| 104 | 24 | ||
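Note: with the branch-elimination machinery gone, the pass is again a single reverse sweep per block. The direction matters because erasing a dead consumer frees its producers within the same sweep:

    // %a = add %x, %y   // one use (%b)
    // %b = mul %a, %a   // no uses
    // Visiting %b first drops %a's use count to zero, so %a is erased in the
    // same pass; a forward sweep would need another iteration to catch %a.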
diff --git a/src/shader_recompiler/ir_opt/passes.h b/src/shader_recompiler/ir_opt/passes.h index 6ff8e4266..586a0668f 100644 --- a/src/shader_recompiler/ir_opt/passes.h +++ b/src/shader_recompiler/ir_opt/passes.h | |||
| @@ -6,6 +6,10 @@ | |||
| 6 | #include "shader_recompiler/environment.h" | 6 | #include "shader_recompiler/environment.h" |
| 7 | #include "shader_recompiler/frontend/ir/program.h" | 7 | #include "shader_recompiler/frontend/ir/program.h" |
| 8 | 8 | ||
| 9 | namespace Shader { | ||
| 10 | struct HostTranslateInfo; | ||
| 11 | } | ||
| 12 | |||
| 9 | namespace Shader::Optimization { | 13 | namespace Shader::Optimization { |
| 10 | 14 | ||
| 11 | void CollectShaderInfoPass(Environment& env, IR::Program& program); | 15 | void CollectShaderInfoPass(Environment& env, IR::Program& program); |
| @@ -17,7 +21,8 @@ void LowerFp16ToFp32(IR::Program& program); | |||
| 17 | void LowerInt64ToInt32(IR::Program& program); | 21 | void LowerInt64ToInt32(IR::Program& program); |
| 18 | void RescalingPass(IR::Program& program); | 22 | void RescalingPass(IR::Program& program); |
| 19 | void SsaRewritePass(IR::Program& program); | 23 | void SsaRewritePass(IR::Program& program); |
| 20 | void TexturePass(Environment& env, IR::Program& program); | 24 | void PositionPass(Environment& env, IR::Program& program); |
| 25 | void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo& host_info); | ||
| 21 | void VerificationPass(const IR::Program& program); | 26 | void VerificationPass(const IR::Program& program); |
| 22 | 27 | ||
| 23 | // Dual Vertex | 28 | // Dual Vertex |
diff --git a/src/shader_recompiler/ir_opt/position_pass.cpp b/src/shader_recompiler/ir_opt/position_pass.cpp new file mode 100644 index 000000000..3c20b7189 --- /dev/null +++ b/src/shader_recompiler/ir_opt/position_pass.cpp | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include <boost/container/small_vector.hpp> | ||
| 5 | |||
| 6 | #include "shader_recompiler/frontend/ir/basic_block.h" | ||
| 7 | #include "shader_recompiler/frontend/ir/ir_emitter.h" | ||
| 8 | #include "shader_recompiler/frontend/ir/value.h" | ||
| 9 | #include "shader_recompiler/ir_opt/passes.h" | ||
| 10 | |||
| 11 | namespace Shader::Optimization { | ||
| 12 | |||
| 13 | namespace { | ||
| 14 | struct PositionInst { | ||
| 15 | IR::Inst* inst; | ||
| 16 | IR::Block* block; | ||
| 17 | IR::Attribute attr; | ||
| 18 | }; | ||
| 19 | using PositionInstVector = boost::container::small_vector<PositionInst, 24>; | ||
| 20 | } // Anonymous namespace | ||
| 21 | |||
| 22 | void PositionPass(Environment& env, IR::Program& program) { | ||
| 23 | if (env.ShaderStage() != Stage::VertexB || env.ReadViewportTransformState()) { | ||
| 24 | return; | ||
| 25 | } | ||
| 26 | |||
| 27 | Info& info{program.info}; | ||
| 28 | info.uses_render_area = true; | ||
| 29 | |||
| 30 | PositionInstVector to_replace; | ||
| 31 | for (IR::Block* const block : program.post_order_blocks) { | ||
| 32 | for (IR::Inst& inst : block->Instructions()) { | ||
| 33 | switch (inst.GetOpcode()) { | ||
| 34 | case IR::Opcode::SetAttribute: { | ||
| 35 | const IR::Attribute attr{inst.Arg(0).Attribute()}; | ||
| 36 | switch (attr) { | ||
| 37 | case IR::Attribute::PositionX: | ||
| 38 | case IR::Attribute::PositionY: { | ||
| 39 | to_replace.push_back(PositionInst{.inst = &inst, .block = block, .attr = attr}); | ||
| 40 | break; | ||
| 41 | } | ||
| 42 | default: | ||
| 43 | break; | ||
| 44 | } | ||
| 45 | break; | ||
| 46 | } | ||
| 47 | default: | ||
| 48 | break; | ||
| 49 | } | ||
| 50 | } | ||
| 51 | } | ||
| 52 | |||
| 53 | for (PositionInst& position_inst : to_replace) { | ||
| 54 | IR::IREmitter ir{*position_inst.block, | ||
| 55 | IR::Block::InstructionList::s_iterator_to(*position_inst.inst)}; | ||
| 56 | const IR::F32 value(position_inst.inst->Arg(1)); | ||
| 57 | const IR::F32F64 scale(ir.Imm32(2.f)); | ||
| 58 | const IR::F32 negative_one{ir.Imm32(-1.f)}; | ||
| 59 | switch (position_inst.attr) { | ||
| 60 | case IR::Attribute::PositionX: { | ||
| 61 | position_inst.inst->SetArg( | ||
| 62 | 1, | ||
| 63 | ir.FPFma(value, ir.FPMul(ir.FPRecip(ir.RenderAreaWidth()), scale), negative_one)); | ||
| 64 | break; | ||
| 65 | } | ||
| 66 | case IR::Attribute::PositionY: { | ||
| 67 | position_inst.inst->SetArg( | ||
| 68 | 1, | ||
| 69 | ir.FPFma(value, ir.FPMul(ir.FPRecip(ir.RenderAreaHeight()), scale), negative_one)); | ||
| 70 | break; | ||
| 71 | } | ||
| 72 | default: | ||
| 73 | break; | ||
| 74 | } | ||
| 75 | } | ||
| 76 | } | ||
| 77 | } // namespace Shader::Optimization | ||
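Note: the pass rewrites stores to PositionX/Y from render-area pixels into clip space when the viewport transform is reported disabled (the early return keys off a nonzero ReadViewportTransformState). The FPFma above computes pos * (2 / size) - 1; the same mapping as a standalone sketch, assuming size is the render-area width or height:

    #include <cmath>

    // Pixel-space coordinate in [0, size] -> NDC in [-1, 1].
    float to_ndc(float pos, float size) {
        return std::fma(pos, 2.0f / size, -1.0f);
    }
    // to_ndc(0.0f, w) == -1.0f; to_ndc(w, w) ~= 1.0f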
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp index e8be58357..f5c86fcb1 100644 --- a/src/shader_recompiler/ir_opt/texture_pass.cpp +++ b/src/shader_recompiler/ir_opt/texture_pass.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "shader_recompiler/frontend/ir/basic_block.h" | 11 | #include "shader_recompiler/frontend/ir/basic_block.h" |
| 12 | #include "shader_recompiler/frontend/ir/breadth_first_search.h" | 12 | #include "shader_recompiler/frontend/ir/breadth_first_search.h" |
| 13 | #include "shader_recompiler/frontend/ir/ir_emitter.h" | 13 | #include "shader_recompiler/frontend/ir/ir_emitter.h" |
| 14 | #include "shader_recompiler/host_translate_info.h" | ||
| 14 | #include "shader_recompiler/ir_opt/passes.h" | 15 | #include "shader_recompiler/ir_opt/passes.h" |
| 15 | #include "shader_recompiler/shader_info.h" | 16 | #include "shader_recompiler/shader_info.h" |
| 16 | 17 | ||
| @@ -363,6 +364,14 @@ TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) { | |||
| 363 | return env.ReadTextureType(lhs_raw | rhs_raw); | 364 | return env.ReadTextureType(lhs_raw | rhs_raw); |
| 364 | } | 365 | } |
| 365 | 366 | ||
| 367 | TexturePixelFormat ReadTexturePixelFormat(Environment& env, const ConstBufferAddr& cbuf) { | ||
| 368 | const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index}; | ||
| 369 | const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset}; | ||
| 370 | const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)}; | ||
| 371 | const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)}; | ||
| 372 | return env.ReadTexturePixelFormat(lhs_raw | rhs_raw); | ||
| 373 | } | ||
| 374 | |||
| 366 | class Descriptors { | 375 | class Descriptors { |
| 367 | public: | 376 | public: |
| 368 | explicit Descriptors(TextureBufferDescriptors& texture_buffer_descriptors_, | 377 | explicit Descriptors(TextureBufferDescriptors& texture_buffer_descriptors_, |
| @@ -451,9 +460,41 @@ void PatchImageSampleImplicitLod(IR::Block& block, IR::Inst& inst) { | |||
| 451 | ir.FPMul(IR::F32(ir.CompositeExtract(coord, 1)), | 460 | ir.FPMul(IR::F32(ir.CompositeExtract(coord, 1)), |
| 452 | ir.FPRecip(ir.ConvertUToF(32, 32, ir.CompositeExtract(texture_size, 1)))))); | 461 | ir.FPRecip(ir.ConvertUToF(32, 32, ir.CompositeExtract(texture_size, 1)))))); |
| 453 | } | 462 | } |
| 463 | |||
| 464 | void PatchTexelFetch(IR::Block& block, IR::Inst& inst, TexturePixelFormat pixel_format) { | ||
| 465 | const auto it{IR::Block::InstructionList::s_iterator_to(inst)}; | ||
| 466 | IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)}; | ||
| 467 | auto get_max_value = [pixel_format]() -> float { | ||
| 468 | switch (pixel_format) { | ||
| 469 | case TexturePixelFormat::A8B8G8R8_SNORM: | ||
| 470 | case TexturePixelFormat::R8G8_SNORM: | ||
| 471 | case TexturePixelFormat::R8_SNORM: | ||
| 472 | return 1.f / std::numeric_limits<signed char>::max(); | ||
| 473 | case TexturePixelFormat::R16G16B16A16_SNORM: | ||
| 474 | case TexturePixelFormat::R16G16_SNORM: | ||
| 475 | case TexturePixelFormat::R16_SNORM: | ||
| 476 | return 1.f / std::numeric_limits<short>::max(); | ||
| 477 | default: | ||
| 478 | throw InvalidArgument("Invalid texture pixel format"); | ||
| 479 | } | ||
| 480 | }; | ||
| 481 | |||
| 482 | const IR::Value new_inst{&*block.PrependNewInst(it, inst)}; | ||
| 483 | const IR::F32 x(ir.CompositeExtract(new_inst, 0)); | ||
| 484 | const IR::F32 y(ir.CompositeExtract(new_inst, 1)); | ||
| 485 | const IR::F32 z(ir.CompositeExtract(new_inst, 2)); | ||
| 486 | const IR::F32 w(ir.CompositeExtract(new_inst, 3)); | ||
| 487 | const IR::F16F32F64 max_value(ir.Imm32(get_max_value())); | ||
| 488 | const IR::Value converted = | ||
| 489 | ir.CompositeConstruct(ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(x)), max_value), | ||
| 490 | ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(y)), max_value), | ||
| 491 | ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(z)), max_value), | ||
| 492 | ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(w)), max_value)); | ||
| 493 | inst.ReplaceUsesWith(converted); | ||
| 494 | } | ||
| 454 | } // Anonymous namespace | 495 | } // Anonymous namespace |
| 455 | 496 | ||
| 456 | void TexturePass(Environment& env, IR::Program& program) { | 497 | void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo& host_info) { |
| 457 | TextureInstVector to_replace; | 498 | TextureInstVector to_replace; |
| 458 | for (IR::Block* const block : program.post_order_blocks) { | 499 | for (IR::Block* const block : program.post_order_blocks) { |
| 459 | for (IR::Inst& inst : block->Instructions()) { | 500 | for (IR::Inst& inst : block->Instructions()) { |
| @@ -597,6 +638,14 @@ void TexturePass(Environment& env, IR::Program& program) { | |||
| 597 | } else { | 638 | } else { |
| 598 | inst->SetArg(0, IR::Value{}); | 639 | inst->SetArg(0, IR::Value{}); |
| 599 | } | 640 | } |
| 641 | |||
| 642 | if (!host_info.support_snorm_render_buffer && inst->GetOpcode() == IR::Opcode::ImageFetch && | ||
| 643 | flags.type == TextureType::Buffer) { | ||
| 644 | const auto pixel_format = ReadTexturePixelFormat(env, cbuf); | ||
| 645 | if (pixel_format != TexturePixelFormat::OTHER) { | ||
| 646 | PatchTexelFetch(*texture_inst.block, *texture_inst.inst, pixel_format); | ||
| 647 | } | ||
| 648 | } | ||
| 600 | } | 649 | } |
| 601 | } | 650 | } |
| 602 | 651 | ||
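Note: get_max_value actually returns the reciprocal scale (1/127 for the 8-bit formats, 1/32767 for the 16-bit ones); the patched fetch reinterprets each raw channel as signed and multiplies. One 8-bit channel decoded the same way (per the SNORM spec, -128 would additionally clamp to -1, which the pass leaves to scaling alone):

    #include <cstdint>

    float decode_snorm8(int8_t raw) {
        return static_cast<float>(raw) * (1.0f / 127.0f); // matches the pass
    }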
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h index 81097bf1a..ee6252bb5 100644 --- a/src/shader_recompiler/shader_info.h +++ b/src/shader_recompiler/shader_info.h | |||
| @@ -29,6 +29,16 @@ enum class TextureType : u32 { | |||
| 29 | }; | 29 | }; |
| 30 | constexpr u32 NUM_TEXTURE_TYPES = 9; | 30 | constexpr u32 NUM_TEXTURE_TYPES = 9; |
| 31 | 31 | ||
| 32 | enum class TexturePixelFormat : u32 { | ||
| 33 | A8B8G8R8_SNORM, | ||
| 34 | R8_SNORM, | ||
| 35 | R8G8_SNORM, | ||
| 36 | R16G16B16A16_SNORM, | ||
| 37 | R16G16_SNORM, | ||
| 38 | R16_SNORM, | ||
| 39 | OTHER | ||
| 40 | }; | ||
| 41 | |||
| 32 | enum class ImageFormat : u32 { | 42 | enum class ImageFormat : u32 { |
| 33 | Typeless, | 43 | Typeless, |
| 34 | R8_UINT, | 44 | R8_UINT, |
| @@ -117,6 +127,7 @@ struct Info { | |||
| 117 | bool uses_workgroup_id{}; | 127 | bool uses_workgroup_id{}; |
| 118 | bool uses_local_invocation_id{}; | 128 | bool uses_local_invocation_id{}; |
| 119 | bool uses_invocation_id{}; | 129 | bool uses_invocation_id{}; |
| 130 | bool uses_invocation_info{}; | ||
| 120 | bool uses_sample_id{}; | 131 | bool uses_sample_id{}; |
| 121 | bool uses_is_helper_invocation{}; | 132 | bool uses_is_helper_invocation{}; |
| 122 | bool uses_subgroup_invocation_id{}; | 133 | bool uses_subgroup_invocation_id{}; |
| @@ -182,6 +193,7 @@ struct Info { | |||
| 182 | bool uses_shadow_lod{}; | 193 | bool uses_shadow_lod{}; |
| 183 | bool uses_rescaling_uniform{}; | 194 | bool uses_rescaling_uniform{}; |
| 184 | bool uses_cbuf_indirect{}; | 195 | bool uses_cbuf_indirect{}; |
| 196 | bool uses_render_area{}; | ||
| 185 | 197 | ||
| 186 | IR::Type used_constant_buffer_types{}; | 198 | IR::Type used_constant_buffer_types{}; |
| 187 | IR::Type used_storage_buffer_types{}; | 199 | IR::Type used_storage_buffer_types{}; |
diff --git a/src/tests/video_core/buffer_base.cpp b/src/tests/video_core/buffer_base.cpp index 71121e42a..f7236afab 100644 --- a/src/tests/video_core/buffer_base.cpp +++ b/src/tests/video_core/buffer_base.cpp | |||
| @@ -44,7 +44,7 @@ public: | |||
| 44 | 44 | ||
| 45 | [[nodiscard]] unsigned Count() const noexcept { | 45 | [[nodiscard]] unsigned Count() const noexcept { |
| 46 | unsigned count = 0; | 46 | unsigned count = 0; |
| 47 | for (const auto [index, value] : page_table) { | 47 | for (const auto& [index, value] : page_table) { |
| 48 | count += value; | 48 | count += value; |
| 49 | } | 49 | } |
| 50 | return count; | 50 | return count; |
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 106991969..d7f7d336c 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt | |||
| @@ -73,8 +73,6 @@ add_library(video_core STATIC | |||
| 73 | macro/macro_hle.h | 73 | macro/macro_hle.h |
| 74 | macro/macro_interpreter.cpp | 74 | macro/macro_interpreter.cpp |
| 75 | macro/macro_interpreter.h | 75 | macro/macro_interpreter.h |
| 76 | macro/macro_jit_x64.cpp | ||
| 77 | macro/macro_jit_x64.h | ||
| 78 | fence_manager.h | 76 | fence_manager.h |
| 79 | gpu.cpp | 77 | gpu.cpp |
| 80 | gpu.h | 78 | gpu.h |
| @@ -245,7 +243,7 @@ add_library(video_core STATIC | |||
| 245 | create_target_directory_groups(video_core) | 243 | create_target_directory_groups(video_core) |
| 246 | 244 | ||
| 247 | target_link_libraries(video_core PUBLIC common core) | 245 | target_link_libraries(video_core PUBLIC common core) |
| 248 | target_link_libraries(video_core PUBLIC glad shader_recompiler xbyak) | 246 | target_link_libraries(video_core PUBLIC glad shader_recompiler) |
| 249 | 247 | ||
| 250 | if (YUZU_USE_BUNDLED_FFMPEG AND NOT WIN32) | 248 | if (YUZU_USE_BUNDLED_FFMPEG AND NOT WIN32) |
| 251 | add_dependencies(video_core ffmpeg-build) | 249 | add_dependencies(video_core ffmpeg-build) |
| @@ -282,8 +280,19 @@ else() | |||
| 282 | 280 | ||
| 283 | -Wno-sign-conversion | 281 | -Wno-sign-conversion |
| 284 | ) | 282 | ) |
| 283 | |||
| 284 | # xbyak | ||
| 285 | set_source_files_properties(macro/macro_jit_x64.cpp PROPERTIES COMPILE_OPTIONS "-Wno-conversion;-Wno-shadow") | ||
| 285 | endif() | 286 | endif() |
| 286 | 287 | ||
| 287 | if (ARCHITECTURE_x86_64) | 288 | if (ARCHITECTURE_x86_64) |
| 289 | target_sources(video_core PRIVATE | ||
| 290 | macro/macro_jit_x64.cpp | ||
| 291 | macro/macro_jit_x64.h | ||
| 292 | ) | ||
| 293 | target_link_libraries(video_core PUBLIC xbyak) | ||
| 294 | endif() | ||
| 295 | |||
| 296 | if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64) | ||
| 288 | target_link_libraries(video_core PRIVATE dynarmic) | 297 | target_link_libraries(video_core PRIVATE dynarmic) |
| 289 | endif() | 298 | endif() |
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index b1a22b76c..4a2f2c1fd 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp | |||
| @@ -117,10 +117,15 @@ void Maxwell3D::InitializeRegisterDefaults() { | |||
| 117 | 117 | ||
| 118 | shadow_state = regs; | 118 | shadow_state = regs; |
| 119 | 119 | ||
| 120 | mme_inline[MAXWELL3D_REG_INDEX(draw.end)] = true; | 120 | draw_command[MAXWELL3D_REG_INDEX(draw.end)] = true; |
| 121 | mme_inline[MAXWELL3D_REG_INDEX(draw.begin)] = true; | 121 | draw_command[MAXWELL3D_REG_INDEX(draw.begin)] = true; |
| 122 | mme_inline[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true; | 122 | draw_command[MAXWELL3D_REG_INDEX(vertex_buffer.first)] = true; |
| 123 | mme_inline[MAXWELL3D_REG_INDEX(index_buffer.count)] = true; | 123 | draw_command[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true; |
| 124 | draw_command[MAXWELL3D_REG_INDEX(index_buffer.first)] = true; | ||
| 125 | draw_command[MAXWELL3D_REG_INDEX(index_buffer.count)] = true; | ||
| 126 | draw_command[MAXWELL3D_REG_INDEX(draw_inline_index)] = true; | ||
| 127 | draw_command[MAXWELL3D_REG_INDEX(inline_index_2x16.even)] = true; | ||
| 128 | draw_command[MAXWELL3D_REG_INDEX(inline_index_4x8.index0)] = true; | ||
| 124 | } | 129 | } |
| 125 | 130 | ||
| 126 | void Maxwell3D::ProcessMacro(u32 method, const u32* base_start, u32 amount, bool is_last_call) { | 131 | void Maxwell3D::ProcessMacro(u32 method, const u32* base_start, u32 amount, bool is_last_call) { |
| @@ -208,25 +213,21 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume | |||
| 208 | return ProcessCBBind(3); | 213 | return ProcessCBBind(3); |
| 209 | case MAXWELL3D_REG_INDEX(bind_groups[4].raw_config): | 214 | case MAXWELL3D_REG_INDEX(bind_groups[4].raw_config): |
| 210 | return ProcessCBBind(4); | 215 | return ProcessCBBind(4); |
| 211 | case MAXWELL3D_REG_INDEX(draw.end): | ||
| 212 | return DrawArrays(); | ||
| 213 | case MAXWELL3D_REG_INDEX(index_buffer32_first): | 216 | case MAXWELL3D_REG_INDEX(index_buffer32_first): |
| 214 | regs.index_buffer.count = regs.index_buffer32_first.count; | 217 | regs.index_buffer.count = regs.index_buffer32_first.count; |
| 215 | regs.index_buffer.first = regs.index_buffer32_first.first; | 218 | regs.index_buffer.first = regs.index_buffer32_first.first; |
| 216 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; | 219 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; |
| 217 | return DrawArrays(); | 220 | return ProcessDraw(); |
| 218 | case MAXWELL3D_REG_INDEX(index_buffer16_first): | 221 | case MAXWELL3D_REG_INDEX(index_buffer16_first): |
| 219 | regs.index_buffer.count = regs.index_buffer16_first.count; | 222 | regs.index_buffer.count = regs.index_buffer16_first.count; |
| 220 | regs.index_buffer.first = regs.index_buffer16_first.first; | 223 | regs.index_buffer.first = regs.index_buffer16_first.first; |
| 221 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; | 224 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; |
| 222 | return DrawArrays(); | 225 | return ProcessDraw(); |
| 223 | case MAXWELL3D_REG_INDEX(index_buffer8_first): | 226 | case MAXWELL3D_REG_INDEX(index_buffer8_first): |
| 224 | regs.index_buffer.count = regs.index_buffer8_first.count; | 227 | regs.index_buffer.count = regs.index_buffer8_first.count; |
| 225 | regs.index_buffer.first = regs.index_buffer8_first.first; | 228 | regs.index_buffer.first = regs.index_buffer8_first.first; |
| 226 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; | 229 | dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; |
| 227 | // a macro calls this one over and over, should it increase instancing? | 230 | return ProcessDraw(); |
| 228 | // Used by Hades and likely other Vulkan games. | ||
| 229 | return DrawArrays(); | ||
| 230 | case MAXWELL3D_REG_INDEX(topology_override): | 231 | case MAXWELL3D_REG_INDEX(topology_override): |
| 231 | use_topology_override = true; | 232 | use_topology_override = true; |
| 232 | return; | 233 | return; |
| @@ -261,14 +262,13 @@ void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters) | |||
| 261 | 262 | ||
| 262 | // Execute the current macro. | 263 | // Execute the current macro. |
| 263 | macro_engine->Execute(macro_positions[entry], parameters); | 264 | macro_engine->Execute(macro_positions[entry], parameters); |
| 264 | if (mme_draw.current_mode != MMEDrawMode::Undefined) { | 265 | |
| 265 | FlushMMEInlineDraw(); | 266 | ProcessDeferredDraw(); |
| 266 | } | ||
| 267 | } | 267 | } |
| 268 | 268 | ||
| 269 | void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) { | 269 | void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) { |
| 270 | // It is an error to write to a register other than the current macro's ARG register before it | 270 | // It is an error to write to a register other than the current macro's ARG register before |
| 271 | // has finished execution. | 271 | // it has finished execution. |
| 272 | if (executing_macro != 0) { | 272 | if (executing_macro != 0) { |
| 273 | ASSERT(method == executing_macro + 1); | 273 | ASSERT(method == executing_macro + 1); |
| 274 | } | 274 | } |
| @@ -283,9 +283,33 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) { | |||
| 283 | ASSERT_MSG(method < Regs::NUM_REGS, | 283 | ASSERT_MSG(method < Regs::NUM_REGS, |
| 284 | "Invalid Maxwell3D register, increase the size of the Regs structure"); | 284 | "Invalid Maxwell3D register, increase the size of the Regs structure"); |
| 285 | 285 | ||
| 286 | const u32 argument = ProcessShadowRam(method, method_argument); | 286 | if (draw_command[method]) { |
| 287 | ProcessDirtyRegisters(method, argument); | 287 | regs.reg_array[method] = method_argument; |
| 288 | ProcessMethodCall(method, argument, method_argument, is_last_call); | 288 | deferred_draw_method.push_back(method); |
| 289 | auto u32_to_u8 = [&](const u32 argument) { | ||
| 290 | inline_index_draw_indexes.push_back(static_cast<u8>(argument & 0x000000ff)); | ||
| 291 | inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0x0000ff00) >> 8)); | ||
| 292 | inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0x00ff0000) >> 16)); | ||
| 293 | inline_index_draw_indexes.push_back(static_cast<u8>((argument & 0xff000000) >> 24)); | ||
| 294 | }; | ||
| 295 | if (MAXWELL3D_REG_INDEX(draw_inline_index) == method) { | ||
| 296 | u32_to_u8(method_argument); | ||
| 297 | } else if (MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method) { | ||
| 298 | u32_to_u8(regs.inline_index_2x16.even); | ||
| 299 | u32_to_u8(regs.inline_index_2x16.odd); | ||
| 300 | } else if (MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { | ||
| 301 | u32_to_u8(regs.inline_index_4x8.index0); | ||
| 302 | u32_to_u8(regs.inline_index_4x8.index1); | ||
| 303 | u32_to_u8(regs.inline_index_4x8.index2); | ||
| 304 | u32_to_u8(regs.inline_index_4x8.index3); | ||
| 305 | } | ||
| 306 | } else { | ||
| 307 | ProcessDeferredDraw(); | ||
| 308 | |||
| 309 | const u32 argument = ProcessShadowRam(method, method_argument); | ||
| 310 | ProcessDirtyRegisters(method, argument); | ||
| 311 | ProcessMethodCall(method, argument, method_argument, is_last_call); | ||
| 312 | } | ||
| 289 | } | 313 | } |
| 290 | 314 | ||
| 291 | void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, | 315 | void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, |
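Note: draw-related registers are now recorded into deferred_draw_method instead of triggering an immediate draw, and inline index arguments are unpacked into bytes, least significant first. The lambda's equivalent as a free function (names illustrative):

    #include <cstdint>
    #include <vector>

    void push_index_bytes(std::vector<uint8_t>& out, uint32_t argument) {
        out.push_back(static_cast<uint8_t>(argument));
        out.push_back(static_cast<uint8_t>(argument >> 8));
        out.push_back(static_cast<uint8_t>(argument >> 16));
        out.push_back(static_cast<uint8_t>(argument >> 24));
    }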
| @@ -326,55 +350,6 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, | |||
| 326 | } | 350 | } |
| 327 | } | 351 | } |
| 328 | 352 | ||
| 329 | void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) { | ||
| 330 | if (mme_draw.current_mode == MMEDrawMode::Undefined) { | ||
| 331 | if (mme_draw.gl_begin_consume) { | ||
| 332 | mme_draw.current_mode = expected_mode; | ||
| 333 | mme_draw.current_count = count; | ||
| 334 | mme_draw.instance_count = 1; | ||
| 335 | mme_draw.gl_begin_consume = false; | ||
| 336 | mme_draw.gl_end_count = 0; | ||
| 337 | } | ||
| 338 | return; | ||
| 339 | } else { | ||
| 340 | if (mme_draw.current_mode == expected_mode && count == mme_draw.current_count && | ||
| 341 | mme_draw.instance_mode && mme_draw.gl_begin_consume) { | ||
| 342 | mme_draw.instance_count++; | ||
| 343 | mme_draw.gl_begin_consume = false; | ||
| 344 | return; | ||
| 345 | } else { | ||
| 346 | FlushMMEInlineDraw(); | ||
| 347 | } | ||
| 348 | } | ||
| 349 | // Tail call in case it needs to retry. | ||
| 350 | StepInstance(expected_mode, count); | ||
| 351 | } | ||
| 352 | |||
| 353 | void Maxwell3D::CallMethodFromMME(u32 method, u32 method_argument) { | ||
| 354 | if (mme_inline[method]) { | ||
| 355 | regs.reg_array[method] = method_argument; | ||
| 356 | if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count) || | ||
| 357 | method == MAXWELL3D_REG_INDEX(index_buffer.count)) { | ||
| 358 | const MMEDrawMode expected_mode = method == MAXWELL3D_REG_INDEX(vertex_buffer.count) | ||
| 359 | ? MMEDrawMode::Array | ||
| 360 | : MMEDrawMode::Indexed; | ||
| 361 | StepInstance(expected_mode, method_argument); | ||
| 362 | } else if (method == MAXWELL3D_REG_INDEX(draw.begin)) { | ||
| 363 | mme_draw.instance_mode = | ||
| 364 | (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) || | ||
| 365 | (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged); | ||
| 366 | mme_draw.gl_begin_consume = true; | ||
| 367 | } else { | ||
| 368 | mme_draw.gl_end_count++; | ||
| 369 | } | ||
| 370 | } else { | ||
| 371 | if (mme_draw.current_mode != MMEDrawMode::Undefined) { | ||
| 372 | FlushMMEInlineDraw(); | ||
| 373 | } | ||
| 374 | CallMethod(method, method_argument, true); | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 378 | void Maxwell3D::ProcessTopologyOverride() { | 353 | void Maxwell3D::ProcessTopologyOverride() { |
| 379 | using PrimitiveTopology = Maxwell3D::Regs::PrimitiveTopology; | 354 | using PrimitiveTopology = Maxwell3D::Regs::PrimitiveTopology; |
| 380 | using PrimitiveTopologyOverride = Maxwell3D::Regs::PrimitiveTopologyOverride; | 355 | using PrimitiveTopologyOverride = Maxwell3D::Regs::PrimitiveTopologyOverride; |
| @@ -404,41 +379,6 @@ void Maxwell3D::ProcessTopologyOverride() { | |||
| 404 | } | 379 | } |
| 405 | } | 380 | } |
| 406 | 381 | ||
| 407 | void Maxwell3D::FlushMMEInlineDraw() { | ||
| 408 | LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(), | ||
| 409 | regs.vertex_buffer.count); | ||
| 410 | ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?"); | ||
| 411 | ASSERT(mme_draw.instance_count == mme_draw.gl_end_count); | ||
| 412 | |||
| 413 | // Both instance configuration registers cannot be set at the same time. | ||
| 414 | ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First || | ||
| 415 | regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged, | ||
| 416 | "Illegal combination of instancing parameters"); | ||
| 417 | |||
| 418 | ProcessTopologyOverride(); | ||
| 419 | |||
| 420 | const bool is_indexed = mme_draw.current_mode == MMEDrawMode::Indexed; | ||
| 421 | if (ShouldExecute()) { | ||
| 422 | rasterizer->Draw(is_indexed, true); | ||
| 423 | } | ||
| 424 | |||
| 425 | // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if | ||
| 426 | // the game is trying to draw indexed or direct mode. This needs to be verified on HW still - | ||
| 427 | // it's possible that it is incorrect and that there is some other register used to specify the | ||
| 428 | // drawing mode. | ||
| 429 | if (is_indexed) { | ||
| 430 | regs.index_buffer.count = 0; | ||
| 431 | } else { | ||
| 432 | regs.vertex_buffer.count = 0; | ||
| 433 | } | ||
| 434 | mme_draw.current_mode = MMEDrawMode::Undefined; | ||
| 435 | mme_draw.current_count = 0; | ||
| 436 | mme_draw.instance_count = 0; | ||
| 437 | mme_draw.instance_mode = false; | ||
| 438 | mme_draw.gl_begin_consume = false; | ||
| 439 | mme_draw.gl_end_count = 0; | ||
| 440 | } | ||
| 441 | |||
| 442 | void Maxwell3D::ProcessMacroUpload(u32 data) { | 382 | void Maxwell3D::ProcessMacroUpload(u32 data) { |
| 443 | macro_engine->AddCode(regs.load_mme.instruction_ptr++, data); | 383 | macro_engine->AddCode(regs.load_mme.instruction_ptr++, data); |
| 444 | } | 384 | } |
| @@ -573,42 +513,6 @@ void Maxwell3D::ProcessSyncPoint() { | |||
| 573 | rasterizer->SignalSyncPoint(sync_point); | 513 | rasterizer->SignalSyncPoint(sync_point); |
| 574 | } | 514 | } |
| 575 | 515 | ||
| 576 | void Maxwell3D::DrawArrays() { | ||
| 577 | LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(), | ||
| 578 | regs.vertex_buffer.count); | ||
| 579 | ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?"); | ||
| 580 | |||
| 581 | // Both instance configuration registers cannot be set at the same time. | ||
| 582 | ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First || | ||
| 583 | regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged, | ||
| 584 | "Illegal combination of instancing parameters"); | ||
| 585 | |||
| 586 | ProcessTopologyOverride(); | ||
| 587 | |||
| 588 | if (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) { | ||
| 589 | // Increment the current instance *before* drawing. | ||
| 590 | state.current_instance++; | ||
| 591 | } else if (regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged) { | ||
| 592 | // Reset the current instance to 0. | ||
| 593 | state.current_instance = 0; | ||
| 594 | } | ||
| 595 | |||
| 596 | const bool is_indexed{regs.index_buffer.count && !regs.vertex_buffer.count}; | ||
| 597 | if (ShouldExecute()) { | ||
| 598 | rasterizer->Draw(is_indexed, false); | ||
| 599 | } | ||
| 600 | |||
| 601 | // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if | ||
| 602 | // the game is trying to draw indexed or direct mode. This needs to be verified on HW still - | ||
| 603 | // it's possible that it is incorrect and that there is some other register used to specify the | ||
| 604 | // drawing mode. | ||
| 605 | if (is_indexed) { | ||
| 606 | regs.index_buffer.count = 0; | ||
| 607 | } else { | ||
| 608 | regs.vertex_buffer.count = 0; | ||
| 609 | } | ||
| 610 | } | ||
| 611 | |||
| 612 | std::optional<u64> Maxwell3D::GetQueryResult() { | 516 | std::optional<u64> Maxwell3D::GetQueryResult() { |
| 613 | switch (regs.report_semaphore.query.report) { | 517 | switch (regs.report_semaphore.query.report) { |
| 614 | case Regs::ReportSemaphore::Report::Payload: | 518 | case Regs::ReportSemaphore::Report::Payload: |
| @@ -691,4 +595,83 @@ void Maxwell3D::ProcessClearBuffers() { | |||
| 691 | rasterizer->Clear(); | 595 | rasterizer->Clear(); |
| 692 | } | 596 | } |
| 693 | 597 | ||
| 598 | void Maxwell3D::ProcessDraw(u32 instance_count) { | ||
| 599 | LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(), | ||
| 600 | regs.vertex_buffer.count); | ||
| 601 | |||
| 602 | ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?"); | ||
| 603 | |||
| 604 | // Both instance configuration registers cannot be set at the same time. | ||
| 605 | ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First || | ||
| 606 | regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged, | ||
| 607 | "Illegal combination of instancing parameters"); | ||
| 608 | |||
| 609 | ProcessTopologyOverride(); | ||
| 610 | |||
| 611 | const bool is_indexed = regs.index_buffer.count && !regs.vertex_buffer.count; | ||
| 612 | if (ShouldExecute()) { | ||
| 613 | rasterizer->Draw(is_indexed, instance_count); | ||
| 614 | } | ||
| 615 | |||
| 616 | if (is_indexed) { | ||
| 617 | regs.index_buffer.count = 0; | ||
| 618 | } else { | ||
| 619 | regs.vertex_buffer.count = 0; | ||
| 620 | } | ||
| 621 | } | ||
| 622 | |||
| 623 | void Maxwell3D::ProcessDeferredDraw() { | ||
| 624 | if (deferred_draw_method.empty()) { | ||
| 625 | return; | ||
| 626 | } | ||
| 627 | |||
| 628 | enum class DrawMode { | ||
| 629 | Undefined, | ||
| 630 | General, | ||
| 631 | Instance, | ||
| 632 | }; | ||
| 633 | DrawMode draw_mode{DrawMode::Undefined}; | ||
| 634 | u32 method_count = static_cast<u32>(deferred_draw_method.size()); | ||
| 635 | u32 method = deferred_draw_method[method_count - 1]; | ||
| 636 | if (MAXWELL3D_REG_INDEX(draw.end) != method) { | ||
| 637 | return; | ||
| 638 | } | ||
| 639 | draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) || | ||
| 640 | (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged) | ||
| 641 | ? DrawMode::Instance | ||
| 642 | : DrawMode::General; | ||
| 643 | u32 instance_count = 0; | ||
| 644 | if (draw_mode == DrawMode::Instance) { | ||
| 645 | u32 vertex_buffer_count = 0; | ||
| 646 | u32 index_buffer_count = 0; | ||
| 647 | for (u32 index = 0; index < method_count; ++index) { | ||
| 648 | method = deferred_draw_method[index]; | ||
| 649 | if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count)) { | ||
| 650 | instance_count = ++vertex_buffer_count; | ||
| 651 | } else if (method == MAXWELL3D_REG_INDEX(index_buffer.count)) { | ||
| 652 | instance_count = ++index_buffer_count; | ||
| 653 | } | ||
| 654 | } | ||
| 655 | ASSERT_MSG(!(vertex_buffer_count && index_buffer_count), | ||
| 656 | "Instance both indexed and direct?"); | ||
| 657 | } else { | ||
| 658 | instance_count = 1; | ||
| 659 | for (u32 index = 0; index < method_count; ++index) { | ||
| 660 | method = deferred_draw_method[index]; | ||
| 661 | if (MAXWELL3D_REG_INDEX(draw_inline_index) == method || | ||
| 662 | MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method || | ||
| 663 | MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { | ||
| 664 | regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4); | ||
| 665 | regs.index_buffer.format = Regs::IndexFormat::UnsignedInt; | ||
| 666 | break; | ||
| 667 | } | ||
| 668 | } | ||
| 669 | } | ||
| 670 | |||
| 671 | ProcessDraw(instance_count); | ||
| 672 | |||
| 673 | deferred_draw_method.clear(); | ||
| 674 | inline_index_draw_indexes.clear(); | ||
| 675 | } | ||
| 676 | |||
| 694 | } // namespace Tegra::Engines | 677 | } // namespace Tegra::Engines |
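The hunks above replace the MME-driven draw tracking (StepInstance, CallMethodFromMME, FlushMMEInlineDraw, DrawArrays) with a deferred model: draw-related register writes are queued in deferred_draw_method, inline index words are unpacked into bytes, and ProcessDeferredDraw flushes the queue once draw.end arrives. A minimal sketch of the two helpers involved, using standalone names rather than the engine's actual members:

    #include <cstdint>
    #include <vector>

    using u8 = std::uint8_t;
    using u32 = std::uint32_t;

    // Split one 32-bit inline-index word into four little-endian bytes, as
    // CallMethod does for draw_inline_index and the packed 2x16/4x8 registers.
    void PushInlineIndexWord(std::vector<u8>& bytes, u32 argument) {
        for (u32 shift = 0; shift < 32; shift += 8) {
            bytes.push_back(static_cast<u8>((argument >> shift) & 0xff));
        }
    }

    // Recover the instance count the way ProcessDeferredDraw does: an
    // instanced macro rewrites vertex_buffer.count (or index_buffer.count)
    // once per instance, so counting those writes yields the count.
    u32 CountInstances(const std::vector<u32>& methods, u32 count_reg) {
        u32 instances = 0;
        for (const u32 method : methods) {
            if (method == count_reg) {
                ++instances;
            }
        }
        return instances;
    }

Because the queued bytes are later consumed as 32-bit indices, ProcessDeferredDraw sets index_buffer.count to inline_index_draw_indexes.size() / 4 and forces the index format to UnsignedInt before calling ProcessDraw.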
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 75e3b868d..910ab213a 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h | |||
| @@ -1739,14 +1739,11 @@ public: | |||
| 1739 | Footprint_1x1_Virtual = 2, | 1739 | Footprint_1x1_Virtual = 2, |
| 1740 | }; | 1740 | }; |
| 1741 | 1741 | ||
| 1742 | struct InlineIndex4x8Align { | 1742 | struct InlineIndex4x8 { |
| 1743 | union { | 1743 | union { |
| 1744 | BitField<0, 30, u32> count; | 1744 | BitField<0, 30, u32> count; |
| 1745 | BitField<30, 2, u32> start; | 1745 | BitField<30, 2, u32> start; |
| 1746 | }; | 1746 | }; |
| 1747 | }; | ||
| 1748 | |||
| 1749 | struct InlineIndex4x8Index { | ||
| 1750 | union { | 1747 | union { |
| 1751 | BitField<0, 8, u32> index0; | 1748 | BitField<0, 8, u32> index0; |
| 1752 | BitField<8, 8, u32> index1; | 1749 | BitField<8, 8, u32> index1; |
| @@ -2836,8 +2833,7 @@ public: | |||
| 2836 | u32 depth_write_enabled; ///< 0x12E8 | 2833 | u32 depth_write_enabled; ///< 0x12E8 |
| 2837 | u32 alpha_test_enabled; ///< 0x12EC | 2834 | u32 alpha_test_enabled; ///< 0x12EC |
| 2838 | INSERT_PADDING_BYTES_NOINIT(0x10); | 2835 | INSERT_PADDING_BYTES_NOINIT(0x10); |
| 2839 | InlineIndex4x8Align inline_index_4x8_align; ///< 0x1300 | 2836 | InlineIndex4x8 inline_index_4x8; ///< 0x1300 |
| 2840 | InlineIndex4x8Index inline_index_4x8_index; ///< 0x1304 | ||
| 2841 | D3DCullMode d3d_cull_mode; ///< 0x1308 | 2837 | D3DCullMode d3d_cull_mode; ///< 0x1308 |
| 2842 | ComparisonOp depth_test_func; ///< 0x130C | 2838 | ComparisonOp depth_test_func; ///< 0x130C |
| 2843 | f32 alpha_test_ref; ///< 0x1310 | 2839 | f32 alpha_test_ref; ///< 0x1310 |
| @@ -2974,7 +2970,7 @@ public: | |||
| 2974 | CullFace gl_cull_face; ///< 0x1920 | 2970 | CullFace gl_cull_face; ///< 0x1920 |
| 2975 | Viewport::PixelCenter viewport_pixel_center; ///< 0x1924 | 2971 | Viewport::PixelCenter viewport_pixel_center; ///< 0x1924 |
| 2976 | INSERT_PADDING_BYTES_NOINIT(0x4); | 2972 | INSERT_PADDING_BYTES_NOINIT(0x4); |
| 2977 | u32 viewport_scale_offset_enbled; ///< 0x192C | 2973 | u32 viewport_scale_offset_enabled; ///< 0x192C |
| 2978 | INSERT_PADDING_BYTES_NOINIT(0xC); | 2974 | INSERT_PADDING_BYTES_NOINIT(0xC); |
| 2979 | ViewportClipControl viewport_clip_control; ///< 0x193C | 2975 | ViewportClipControl viewport_clip_control; ///< 0x193C |
| 2980 | UserClip::Op user_clip_op; ///< 0x1940 | 2976 | UserClip::Op user_clip_op; ///< 0x1940 |
| @@ -3048,8 +3044,6 @@ public: | |||
| 3048 | }; | 3044 | }; |
| 3049 | 3045 | ||
| 3050 | std::array<ShaderStageInfo, Regs::MaxShaderStage> shader_stages; | 3046 | std::array<ShaderStageInfo, Regs::MaxShaderStage> shader_stages; |
| 3051 | |||
| 3052 | u32 current_instance = 0; ///< Current instance to be used to simulate instanced rendering. | ||
| 3053 | }; | 3047 | }; |
| 3054 | 3048 | ||
| 3055 | State state{}; | 3049 | State state{}; |
| @@ -3064,11 +3058,6 @@ public: | |||
| 3064 | void CallMultiMethod(u32 method, const u32* base_start, u32 amount, | 3058 | void CallMultiMethod(u32 method, const u32* base_start, u32 amount, |
| 3065 | u32 methods_pending) override; | 3059 | u32 methods_pending) override; |
| 3066 | 3060 | ||
| 3067 | /// Write the value to the register identified by method. | ||
| 3068 | void CallMethodFromMME(u32 method, u32 method_argument); | ||
| 3069 | |||
| 3070 | void FlushMMEInlineDraw(); | ||
| 3071 | |||
| 3072 | bool ShouldExecute() const { | 3061 | bool ShouldExecute() const { |
| 3073 | return execute_on; | 3062 | return execute_on; |
| 3074 | } | 3063 | } |
| @@ -3081,21 +3070,6 @@ public: | |||
| 3081 | return *rasterizer; | 3070 | return *rasterizer; |
| 3082 | } | 3071 | } |
| 3083 | 3072 | ||
| 3084 | enum class MMEDrawMode : u32 { | ||
| 3085 | Undefined, | ||
| 3086 | Array, | ||
| 3087 | Indexed, | ||
| 3088 | }; | ||
| 3089 | |||
| 3090 | struct MMEDrawState { | ||
| 3091 | MMEDrawMode current_mode{MMEDrawMode::Undefined}; | ||
| 3092 | u32 current_count{}; | ||
| 3093 | u32 instance_count{}; | ||
| 3094 | bool instance_mode{}; | ||
| 3095 | bool gl_begin_consume{}; | ||
| 3096 | u32 gl_end_count{}; | ||
| 3097 | } mme_draw; | ||
| 3098 | |||
| 3099 | struct DirtyState { | 3073 | struct DirtyState { |
| 3100 | using Flags = std::bitset<std::numeric_limits<u8>::max()>; | 3074 | using Flags = std::bitset<std::numeric_limits<u8>::max()>; |
| 3101 | using Table = std::array<u8, Regs::NUM_REGS>; | 3075 | using Table = std::array<u8, Regs::NUM_REGS>; |
| @@ -3105,6 +3079,8 @@ public: | |||
| 3105 | Tables tables{}; | 3079 | Tables tables{}; |
| 3106 | } dirty; | 3080 | } dirty; |
| 3107 | 3081 | ||
| 3082 | std::vector<u8> inline_index_draw_indexes; | ||
| 3083 | |||
| 3108 | private: | 3084 | private: |
| 3109 | void InitializeRegisterDefaults(); | 3085 | void InitializeRegisterDefaults(); |
| 3110 | 3086 | ||
| @@ -3164,14 +3140,12 @@ private: | |||
| 3164 | /// Handles a write to the CB_BIND register. | 3140 | /// Handles a write to the CB_BIND register. |
| 3165 | void ProcessCBBind(size_t stage_index); | 3141 | void ProcessCBBind(size_t stage_index); |
| 3166 | 3142 | ||
| 3167 | /// Handles a write to the VERTEX_END_GL register, triggering a draw. | ||
| 3168 | void DrawArrays(); | ||
| 3169 | |||
| 3170 | /// Handles use of topology overrides (e.g., to avoid using a topology assigned from a macro) | 3143 | /// Handles use of topology overrides (e.g., to avoid using a topology assigned from a macro) |
| 3171 | void ProcessTopologyOverride(); | 3144 | void ProcessTopologyOverride(); |
| 3172 | 3145 | ||
| 3173 | // Handles an instance drawcall from MME | 3146 | void ProcessDraw(u32 instance_count = 1);
| 3174 | void StepInstance(MMEDrawMode expected_mode, u32 count); | 3147 | |
| 3148 | void ProcessDeferredDraw(); | ||
| 3175 | 3149 | ||
| 3176 | /// Returns a query's value or an empty object if the value will be deferred through a cache. | 3150 | /// Returns a query's value or an empty object if the value will be deferred through a cache. |
| 3177 | std::optional<u64> GetQueryResult(); | 3151 | std::optional<u64> GetQueryResult(); |
| @@ -3184,8 +3158,6 @@ private: | |||
| 3184 | /// Start offsets of each macro in macro_memory | 3158 | /// Start offsets of each macro in macro_memory |
| 3185 | std::array<u32, 0x80> macro_positions{}; | 3159 | std::array<u32, 0x80> macro_positions{}; |
| 3186 | 3160 | ||
| 3187 | std::array<bool, Regs::NUM_REGS> mme_inline{}; | ||
| 3188 | |||
| 3189 | /// Macro method that is currently being executed / being fed parameters. | 3161 | /// Macro method that is currently being executed / being fed parameters. |
| 3190 | u32 executing_macro = 0; | 3162 | u32 executing_macro = 0; |
| 3191 | /// Parameters that have been submitted to the macro call so far. | 3163 | /// Parameters that have been submitted to the macro call so far. |
| @@ -3198,6 +3170,9 @@ private: | |||
| 3198 | 3170 | ||
| 3199 | bool execute_on{true}; | 3171 | bool execute_on{true}; |
| 3200 | bool use_topology_override{false}; | 3172 | bool use_topology_override{false}; |
| 3173 | |||
| 3174 | std::array<bool, Regs::NUM_REGS> draw_command{}; | ||
| 3175 | std::vector<u32> deferred_draw_method; | ||
| 3201 | }; | 3176 | }; |
| 3202 | 3177 | ||
| 3203 | #define ASSERT_REG_POSITION(field_name, position) \ | 3178 | #define ASSERT_REG_POSITION(field_name, position) \ |
| @@ -3402,8 +3377,7 @@ ASSERT_REG_POSITION(alpha_to_coverage_dither, 0x12E0); | |||
| 3402 | ASSERT_REG_POSITION(blend_per_target_enabled, 0x12E4); | 3377 | ASSERT_REG_POSITION(blend_per_target_enabled, 0x12E4); |
| 3403 | ASSERT_REG_POSITION(depth_write_enabled, 0x12E8); | 3378 | ASSERT_REG_POSITION(depth_write_enabled, 0x12E8); |
| 3404 | ASSERT_REG_POSITION(alpha_test_enabled, 0x12EC); | 3379 | ASSERT_REG_POSITION(alpha_test_enabled, 0x12EC); |
| 3405 | ASSERT_REG_POSITION(inline_index_4x8_align, 0x1300); | 3380 | ASSERT_REG_POSITION(inline_index_4x8, 0x1300); |
| 3406 | ASSERT_REG_POSITION(inline_index_4x8_index, 0x1304); | ||
| 3407 | ASSERT_REG_POSITION(d3d_cull_mode, 0x1308); | 3381 | ASSERT_REG_POSITION(d3d_cull_mode, 0x1308); |
| 3408 | ASSERT_REG_POSITION(depth_test_func, 0x130C); | 3382 | ASSERT_REG_POSITION(depth_test_func, 0x130C); |
| 3409 | ASSERT_REG_POSITION(alpha_test_ref, 0x1310); | 3383 | ASSERT_REG_POSITION(alpha_test_ref, 0x1310); |
| @@ -3508,7 +3482,7 @@ ASSERT_REG_POSITION(gl_cull_test_enabled, 0x1918); | |||
| 3508 | ASSERT_REG_POSITION(gl_front_face, 0x191C); | 3482 | ASSERT_REG_POSITION(gl_front_face, 0x191C); |
| 3509 | ASSERT_REG_POSITION(gl_cull_face, 0x1920); | 3483 | ASSERT_REG_POSITION(gl_cull_face, 0x1920); |
| 3510 | ASSERT_REG_POSITION(viewport_pixel_center, 0x1924); | 3484 | ASSERT_REG_POSITION(viewport_pixel_center, 0x1924); |
| 3511 | ASSERT_REG_POSITION(viewport_scale_offset_enbled, 0x192C); | 3485 | ASSERT_REG_POSITION(viewport_scale_offset_enabled, 0x192C); |
| 3512 | ASSERT_REG_POSITION(viewport_clip_control, 0x193C); | 3486 | ASSERT_REG_POSITION(viewport_clip_control, 0x193C); |
| 3513 | ASSERT_REG_POSITION(user_clip_op, 0x1940); | 3487 | ASSERT_REG_POSITION(user_clip_op, 0x1940); |
| 3514 | ASSERT_REG_POSITION(render_enable_override, 0x1944); | 3488 | ASSERT_REG_POSITION(render_enable_override, 0x1944); |
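The ASSERT_REG_POSITION checks above pin every renamed or merged field (inline_index_4x8 at 0x1300, viewport_scale_offset_enabled at 0x192C) to its hardware byte offset; since methods address the register file in 32-bit words, a field's method index is its byte offset divided by four. A self-contained illustration of that relationship:

    #include <cstdint>

    // Methods index a file of 32-bit registers: word index = byte offset / 4.
    constexpr std::uint32_t RegIndex(std::uint32_t byte_offset) {
        return byte_offset / static_cast<std::uint32_t>(sizeof(std::uint32_t));
    }
    static_assert(RegIndex(0x1300) == 0x4C0, "inline_index_4x8 method index");
    static_assert(RegIndex(0x192C) == 0x64B, "viewport_scale_offset_enabled index");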
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp index f61d5998e..505d81c1e 100644 --- a/src/video_core/macro/macro.cpp +++ b/src/video_core/macro/macro.cpp | |||
| @@ -16,7 +16,10 @@ | |||
| 16 | #include "video_core/macro/macro.h" | 16 | #include "video_core/macro/macro.h" |
| 17 | #include "video_core/macro/macro_hle.h" | 17 | #include "video_core/macro/macro_hle.h" |
| 18 | #include "video_core/macro/macro_interpreter.h" | 18 | #include "video_core/macro/macro_interpreter.h" |
| 19 | |||
| 20 | #ifdef ARCHITECTURE_x86_64 | ||
| 19 | #include "video_core/macro/macro_jit_x64.h" | 21 | #include "video_core/macro/macro_jit_x64.h" |
| 22 | #endif | ||
| 20 | 23 | ||
| 21 | namespace Tegra { | 24 | namespace Tegra { |
| 22 | 25 | ||
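Guarding the JIT header keeps non-x86_64 builds (notably arm64, now linked against dynarmic elsewhere in this change) compiling, since macro_jit_x64.h only exists for that architecture. A sketch of the backend selection this enables; MacroEngine, MacroInterpreter, MacroJITx64 and the disable flag are the surrounding project's names, assumed here rather than quoted:

    #include <memory>

    std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
    #ifdef ARCHITECTURE_x86_64
        // The JIT exists only on x86_64; use it unless explicitly disabled.
        if (!Settings::values.disable_macro_jit) {
            return std::make_unique<MacroJITx64>(maxwell3d);
        }
    #endif
        // Every other architecture interprets macro programs.
        return std::make_unique<MacroInterpreter>(maxwell3d);
    }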
diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp index 8a8adbb42..f896591bf 100644 --- a/src/video_core/macro/macro_hle.cpp +++ b/src/video_core/macro/macro_hle.cpp | |||
| @@ -22,35 +22,29 @@ void HLE_771BB18C62444DA0(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& | |||
| 22 | maxwell3d.regs.draw.topology.Assign( | 22 | maxwell3d.regs.draw.topology.Assign( |
| 23 | static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0x3ffffff)); | 23 | static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0x3ffffff)); |
| 24 | maxwell3d.regs.global_base_instance_index = parameters[5]; | 24 | maxwell3d.regs.global_base_instance_index = parameters[5]; |
| 25 | maxwell3d.mme_draw.instance_count = instance_count; | ||
| 26 | maxwell3d.regs.global_base_vertex_index = parameters[3]; | 25 | maxwell3d.regs.global_base_vertex_index = parameters[3]; |
| 27 | maxwell3d.regs.index_buffer.count = parameters[1]; | 26 | maxwell3d.regs.index_buffer.count = parameters[1]; |
| 28 | maxwell3d.regs.index_buffer.first = parameters[4]; | 27 | maxwell3d.regs.index_buffer.first = parameters[4]; |
| 29 | 28 | ||
| 30 | if (maxwell3d.ShouldExecute()) { | 29 | if (maxwell3d.ShouldExecute()) { |
| 31 | maxwell3d.Rasterizer().Draw(true, true); | 30 | maxwell3d.Rasterizer().Draw(true, instance_count); |
| 32 | } | 31 | } |
| 33 | maxwell3d.regs.index_buffer.count = 0; | 32 | maxwell3d.regs.index_buffer.count = 0; |
| 34 | maxwell3d.mme_draw.instance_count = 0; | ||
| 35 | maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; | ||
| 36 | } | 33 | } |
| 37 | 34 | ||
| 38 | void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) { | 35 | void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) { |
| 39 | const u32 count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]); | 36 | const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]); |
| 40 | 37 | ||
| 41 | maxwell3d.regs.vertex_buffer.first = parameters[3]; | 38 | maxwell3d.regs.vertex_buffer.first = parameters[3]; |
| 42 | maxwell3d.regs.vertex_buffer.count = parameters[1]; | 39 | maxwell3d.regs.vertex_buffer.count = parameters[1]; |
| 43 | maxwell3d.regs.global_base_instance_index = parameters[4]; | 40 | maxwell3d.regs.global_base_instance_index = parameters[4]; |
| 44 | maxwell3d.regs.draw.topology.Assign( | 41 | maxwell3d.regs.draw.topology.Assign( |
| 45 | static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0])); | 42 | static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0])); |
| 46 | maxwell3d.mme_draw.instance_count = count; | ||
| 47 | 43 | ||
| 48 | if (maxwell3d.ShouldExecute()) { | 44 | if (maxwell3d.ShouldExecute()) { |
| 49 | maxwell3d.Rasterizer().Draw(false, true); | 45 | maxwell3d.Rasterizer().Draw(false, instance_count); |
| 50 | } | 46 | } |
| 51 | maxwell3d.regs.vertex_buffer.count = 0; | 47 | maxwell3d.regs.vertex_buffer.count = 0; |
| 52 | maxwell3d.mme_draw.instance_count = 0; | ||
| 53 | maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; | ||
| 54 | } | 48 | } |
| 55 | 49 | ||
| 56 | void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) { | 50 | void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) { |
| @@ -63,24 +57,21 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& | |||
| 63 | maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; | 57 | maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; |
| 64 | maxwell3d.regs.global_base_vertex_index = element_base; | 58 | maxwell3d.regs.global_base_vertex_index = element_base; |
| 65 | maxwell3d.regs.global_base_instance_index = base_instance; | 59 | maxwell3d.regs.global_base_instance_index = base_instance; |
| 66 | maxwell3d.mme_draw.instance_count = instance_count; | 60 | maxwell3d.CallMethod(0x8e3, 0x640, true); |
| 67 | maxwell3d.CallMethodFromMME(0x8e3, 0x640); | 61 | maxwell3d.CallMethod(0x8e4, element_base, true); |
| 68 | maxwell3d.CallMethodFromMME(0x8e4, element_base); | 62 | maxwell3d.CallMethod(0x8e5, base_instance, true); |
| 69 | maxwell3d.CallMethodFromMME(0x8e5, base_instance); | ||
| 70 | maxwell3d.regs.draw.topology.Assign( | 63 | maxwell3d.regs.draw.topology.Assign( |
| 71 | static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0])); | 64 | static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[0])); |
| 72 | if (maxwell3d.ShouldExecute()) { | 65 | if (maxwell3d.ShouldExecute()) { |
| 73 | maxwell3d.Rasterizer().Draw(true, true); | 66 | maxwell3d.Rasterizer().Draw(true, instance_count); |
| 74 | } | 67 | } |
| 75 | maxwell3d.regs.vertex_id_base = 0x0; | 68 | maxwell3d.regs.vertex_id_base = 0x0; |
| 76 | maxwell3d.regs.index_buffer.count = 0; | 69 | maxwell3d.regs.index_buffer.count = 0; |
| 77 | maxwell3d.regs.global_base_vertex_index = 0x0; | 70 | maxwell3d.regs.global_base_vertex_index = 0x0; |
| 78 | maxwell3d.regs.global_base_instance_index = 0x0; | 71 | maxwell3d.regs.global_base_instance_index = 0x0; |
| 79 | maxwell3d.mme_draw.instance_count = 0; | 72 | maxwell3d.CallMethod(0x8e3, 0x640, true); |
| 80 | maxwell3d.CallMethodFromMME(0x8e3, 0x640); | 73 | maxwell3d.CallMethod(0x8e4, 0x0, true); |
| 81 | maxwell3d.CallMethodFromMME(0x8e4, 0x0); | 74 | maxwell3d.CallMethod(0x8e5, 0x0, true); |
| 82 | maxwell3d.CallMethodFromMME(0x8e5, 0x0); | ||
| 83 | maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; | ||
| 84 | } | 75 | } |
| 85 | 76 | ||
| 86 | // Multidraw Indirect | 77 | // Multidraw Indirect |
| @@ -91,11 +82,9 @@ void HLE_3F5E74B9C9A50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& | |||
| 91 | maxwell3d.regs.index_buffer.count = 0; | 82 | maxwell3d.regs.index_buffer.count = 0; |
| 92 | maxwell3d.regs.global_base_vertex_index = 0x0; | 83 | maxwell3d.regs.global_base_vertex_index = 0x0; |
| 93 | maxwell3d.regs.global_base_instance_index = 0x0; | 84 | maxwell3d.regs.global_base_instance_index = 0x0; |
| 94 | maxwell3d.mme_draw.instance_count = 0; | 85 | maxwell3d.CallMethod(0x8e3, 0x640, true); |
| 95 | maxwell3d.CallMethodFromMME(0x8e3, 0x640); | 86 | maxwell3d.CallMethod(0x8e4, 0x0, true); |
| 96 | maxwell3d.CallMethodFromMME(0x8e4, 0x0); | 87 | maxwell3d.CallMethod(0x8e5, 0x0, true); |
| 97 | maxwell3d.CallMethodFromMME(0x8e5, 0x0); | ||
| 98 | maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; | ||
| 99 | maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; | 88 | maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; |
| 100 | }); | 89 | }); |
| 101 | const u32 start_indirect = parameters[0]; | 90 | const u32 start_indirect = parameters[0]; |
| @@ -127,15 +116,13 @@ void HLE_3F5E74B9C9A50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& | |||
| 127 | maxwell3d.regs.index_buffer.count = num_vertices; | 116 | maxwell3d.regs.index_buffer.count = num_vertices; |
| 128 | maxwell3d.regs.global_base_vertex_index = base_vertex; | 117 | maxwell3d.regs.global_base_vertex_index = base_vertex; |
| 129 | maxwell3d.regs.global_base_instance_index = base_instance; | 118 | maxwell3d.regs.global_base_instance_index = base_instance; |
| 130 | maxwell3d.mme_draw.instance_count = instance_count; | 119 | maxwell3d.CallMethod(0x8e3, 0x640, true); |
| 131 | maxwell3d.CallMethodFromMME(0x8e3, 0x640); | 120 | maxwell3d.CallMethod(0x8e4, base_vertex, true); |
| 132 | maxwell3d.CallMethodFromMME(0x8e4, base_vertex); | 121 | maxwell3d.CallMethod(0x8e5, base_instance, true); |
| 133 | maxwell3d.CallMethodFromMME(0x8e5, base_instance); | ||
| 134 | maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; | 122 | maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; |
| 135 | if (maxwell3d.ShouldExecute()) { | 123 | if (maxwell3d.ShouldExecute()) { |
| 136 | maxwell3d.Rasterizer().Draw(true, true); | 124 | maxwell3d.Rasterizer().Draw(true, instance_count); |
| 137 | } | 125 | } |
| 138 | maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; | ||
| 139 | } | 126 | } |
| 140 | } | 127 | } |
| 141 | 128 | ||
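With mme_draw and CallMethodFromMME gone, each HLE draw macro computes its instance count locally and hands it straight to the rasterizer. The handlers above all reduce to the same shape (a sketch of the direct-draw case, condensed from HLE_0D61FC9FAAC9FCAD):

    // parameters[] is the macro's input; register 0xD1B masks the raw count.
    const u32 instance_count = maxwell3d.GetRegisterValue(0xD1B) & parameters[2];
    maxwell3d.regs.vertex_buffer.first = parameters[3];
    maxwell3d.regs.vertex_buffer.count = parameters[1];
    if (maxwell3d.ShouldExecute()) {
        maxwell3d.Rasterizer().Draw(false, instance_count);
    }
    // Reset the count so the next draw re-detects indexed vs. direct mode.
    maxwell3d.regs.vertex_buffer.count = 0;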
diff --git a/src/video_core/macro/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp index f670b1bca..c0d32c112 100644 --- a/src/video_core/macro/macro_interpreter.cpp +++ b/src/video_core/macro/macro_interpreter.cpp | |||
| @@ -335,7 +335,7 @@ void MacroInterpreterImpl::SetMethodAddress(u32 address) { | |||
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | void MacroInterpreterImpl::Send(u32 value) { | 337 | void MacroInterpreterImpl::Send(u32 value) { |
| 338 | maxwell3d.CallMethodFromMME(method_address.address, value); | 338 | maxwell3d.CallMethod(method_address.address, value, true); |
| 339 | // Increment the method address by the method increment. | 339 | // Increment the method address by the method increment. |
| 340 | method_address.address.Assign(method_address.address.Value() + | 340 | method_address.address.Assign(method_address.address.Value() + |
| 341 | method_address.increment.Value()); | 341 | method_address.increment.Value()); |
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp index a302a9603..25c1ce798 100644 --- a/src/video_core/macro/macro_jit_x64.cpp +++ b/src/video_core/macro/macro_jit_x64.cpp | |||
| @@ -346,7 +346,7 @@ void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) { | |||
| 346 | } | 346 | } |
| 347 | 347 | ||
| 348 | void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) { | 348 | void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) { |
| 349 | maxwell3d->CallMethodFromMME(method_address.address, value); | 349 | maxwell3d->CallMethod(method_address.address, value, true); |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | void MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) { | 352 | void MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) { |
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 384350dbd..8c8dfcca6 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp | |||
| @@ -45,7 +45,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 | |||
| 45 | kind_valus.fill(PTEKind::INVALID); | 45 | kind_valus.fill(PTEKind::INVALID); |
| 46 | big_kinds.resize(big_page_table_size / 32, kind_valus); | 46 | big_kinds.resize(big_page_table_size / 32, kind_valus); |
| 47 | entries.resize(page_table_size / 32, 0); | 47 | entries.resize(page_table_size / 32, 0); |
| 48 | kinds.resize(big_page_table_size / 32, kind_valus); | 48 | kinds.resize(page_table_size / 32, kind_valus); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | MemoryManager::~MemoryManager() = default; | 51 | MemoryManager::~MemoryManager() = default; |
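The one-word change above fixes a sizing mismatch: kinds shadows the small-page table but was sized from the big-page table. The constructor's intended invariant, restated (identifier spellings as in the file):

    entries.resize(page_table_size / 32, 0);                 // small-page entries
    kinds.resize(page_table_size / 32, kind_valus);          // must mirror 'entries'
    big_kinds.resize(big_page_table_size / 32, kind_valus);  // big-page kinds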
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index d2d40884c..1cbfef090 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h | |||
| @@ -40,7 +40,7 @@ public: | |||
| 40 | virtual ~RasterizerInterface() = default; | 40 | virtual ~RasterizerInterface() = default; |
| 41 | 41 | ||
| 42 | /// Dispatches a draw invocation | 42 | /// Dispatches a draw invocation |
| 43 | virtual void Draw(bool is_indexed, bool is_instanced) = 0; | 43 | virtual void Draw(bool is_indexed, u32 instance_count) = 0; |
| 44 | 44 | ||
| 45 | /// Clear the current framebuffer | 45 | /// Clear the current framebuffer |
| 46 | virtual void Clear() = 0; | 46 | virtual void Clear() = 0; |
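Replacing bool is_instanced with an explicit u32 instance_count moves the count out of shared engine state (the old mme_draw.instance_count) and into the call itself, so every backend sees the same two patterns:

    // A plain draw is an instanced draw with a count of one:
    rasterizer->Draw(is_indexed, 1);
    // Instanced paths pass the recovered count directly:
    rasterizer->Draw(is_indexed, instance_count);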
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index 08f4d69ab..6af4ae793 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp | |||
| @@ -29,17 +29,17 @@ constexpr std::array PROGRAM_LUT{ | |||
| 29 | [[nodiscard]] GLenum GetTextureBufferFormat(GLenum gl_format) { | 29 | [[nodiscard]] GLenum GetTextureBufferFormat(GLenum gl_format) { |
| 30 | switch (gl_format) { | 30 | switch (gl_format) { |
| 31 | case GL_RGBA8_SNORM: | 31 | case GL_RGBA8_SNORM: |
| 32 | return GL_RGBA8; | 32 | return GL_RGBA8I; |
| 33 | case GL_R8_SNORM: | 33 | case GL_R8_SNORM: |
| 34 | return GL_R8; | 34 | return GL_R8I; |
| 35 | case GL_RGBA16_SNORM: | 35 | case GL_RGBA16_SNORM: |
| 36 | return GL_RGBA16; | 36 | return GL_RGBA16I; |
| 37 | case GL_R16_SNORM: | 37 | case GL_R16_SNORM: |
| 38 | return GL_R16; | 38 | return GL_R16I; |
| 39 | case GL_RG16_SNORM: | 39 | case GL_RG16_SNORM: |
| 40 | return GL_RG16; | 40 | return GL_RG16I; |
| 41 | case GL_RG8_SNORM: | 41 | case GL_RG8_SNORM: |
| 42 | return GL_RG8; | 42 | return GL_RG8I; |
| 43 | default: | 43 | default: |
| 44 | return gl_format; | 44 | return gl_format; |
| 45 | } | 45 | } |
| @@ -96,9 +96,6 @@ GLuint Buffer::View(u32 offset, u32 size, PixelFormat format) { | |||
| 96 | texture.Create(GL_TEXTURE_BUFFER); | 96 | texture.Create(GL_TEXTURE_BUFFER); |
| 97 | const GLenum gl_format{MaxwellToGL::GetFormatTuple(format).internal_format}; | 97 | const GLenum gl_format{MaxwellToGL::GetFormatTuple(format).internal_format}; |
| 98 | const GLenum texture_format{GetTextureBufferFormat(gl_format)}; | 98 | const GLenum texture_format{GetTextureBufferFormat(gl_format)}; |
| 99 | if (texture_format != gl_format) { | ||
| 100 | LOG_WARNING(Render_OpenGL, "Emulating SNORM texture buffer with UNORM."); | ||
| 101 | } | ||
| 102 | glTextureBufferRange(texture.handle, texture_format, buffer.handle, offset, size); | 99 | glTextureBufferRange(texture.handle, texture_format, buffer.handle, offset, size); |
| 103 | views.push_back({ | 100 | views.push_back({ |
| 104 | .offset = offset, | 101 | .offset = offset, |
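OpenGL cannot create texture buffer views with SNORM internal formats. Previously the views fell back to same-width UNORM formats and warned about it, losing the sign; they now fall back to same-width signed integer formats, which preserve the sign for the consuming shader to decode. The mapping in miniature:

    // Signed-integer view of an SNORM buffer format; non-SNORM passes through.
    GLenum GetTextureBufferFormat(GLenum gl_format) {
        switch (gl_format) {
        case GL_RGBA8_SNORM:
            return GL_RGBA8I;
        default:
            return gl_format;
        }
    }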
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp index 1d20a79ec..c115dabe1 100644 --- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp +++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp | |||
| @@ -503,6 +503,17 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 503 | float_image_scaling_mask, down_factor, 0.0f); | 503 | float_image_scaling_mask, down_factor, 0.0f); |
| 504 | } | 504 | } |
| 505 | } | 505 | } |
| 506 | if (info.uses_render_area) { | ||
| 507 | const auto render_area_width(static_cast<GLfloat>(regs.surface_clip.width)); | ||
| 508 | const auto render_area_height(static_cast<GLfloat>(regs.surface_clip.height)); | ||
| 509 | if (use_assembly) { | ||
| 510 | glProgramLocalParameter4fARB(AssemblyStage(stage), 1, render_area_width, | ||
| 511 | render_area_height, 0.0f, 0.0f); | ||
| 512 | } else { | ||
| 513 | glProgramUniform4f(source_programs[stage].handle, 1, render_area_width, | ||
| 514 | render_area_height, 0.0f, 0.0f); | ||
| 515 | } | ||
| 516 | } | ||
| 506 | }}; | 517 | }}; |
| 507 | if constexpr (Spec::enabled_stages[0]) { | 518 | if constexpr (Spec::enabled_stages[0]) { |
| 508 | prepare_stage(0); | 519 | prepare_stage(0); |
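The new block feeds the guest's render area (surface_clip width and height) to any stage that declared uses_render_area, via uniform location 1 for GLSL programs or local parameter 1 for assembly shaders; this mirrors the Vulkan backend's render-area push constants later in this diff. Condensed (names as in the hunk; slot 1 is whichever location the shader backend reserved):

    if (info.uses_render_area) {
        const auto width = static_cast<GLfloat>(regs.surface_clip.width);
        const auto height = static_cast<GLfloat>(regs.surface_clip.height);
        if (use_assembly) {
            glProgramLocalParameter4fARB(AssemblyStage(stage), 1, width, height, 0.0f, 0.0f);
        } else {
            glProgramUniform4f(source_programs[stage].handle, 1, width, height, 0.0f, 0.0f);
        }
    }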
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index e5c09a969..8a8b5ce54 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -205,7 +205,7 @@ void RasterizerOpenGL::Clear() { | |||
| 205 | ++num_queued_commands; | 205 | ++num_queued_commands; |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { | 208 | void RasterizerOpenGL::Draw(bool is_indexed, u32 instance_count) { |
| 209 | MICROPROFILE_SCOPE(OpenGL_Drawing); | 209 | MICROPROFILE_SCOPE(OpenGL_Drawing); |
| 210 | 210 | ||
| 211 | SCOPE_EXIT({ gpu.TickWork(); }); | 211 | SCOPE_EXIT({ gpu.TickWork(); }); |
| @@ -222,14 +222,15 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { | |||
| 222 | pipeline->SetEngine(maxwell3d, gpu_memory); | 222 | pipeline->SetEngine(maxwell3d, gpu_memory); |
| 223 | pipeline->Configure(is_indexed); | 223 | pipeline->Configure(is_indexed); |
| 224 | 224 | ||
| 225 | BindInlineIndexBuffer(); | ||
| 226 | |||
| 225 | SyncState(); | 227 | SyncState(); |
| 226 | 228 | ||
| 227 | const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology); | 229 | const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology); |
| 228 | BeginTransformFeedback(pipeline, primitive_mode); | 230 | BeginTransformFeedback(pipeline, primitive_mode); |
| 229 | 231 | ||
| 230 | const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.global_base_instance_index); | 232 | const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.global_base_instance_index); |
| 231 | const GLsizei num_instances = | 233 | const GLsizei num_instances = static_cast<GLsizei>(instance_count); |
| 232 | static_cast<GLsizei>(is_instanced ? maxwell3d->mme_draw.instance_count : 1); | ||
| 233 | if (is_indexed) { | 234 | if (is_indexed) { |
| 234 | const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.global_base_vertex_index); | 235 | const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.global_base_vertex_index); |
| 235 | const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_buffer.count); | 236 | const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_buffer.count); |
| @@ -617,6 +618,16 @@ void RasterizerOpenGL::SyncViewport() { | |||
| 617 | } | 618 | } |
| 618 | flags[Dirty::Viewport0 + index] = false; | 619 | flags[Dirty::Viewport0 + index] = false; |
| 619 | 620 | ||
| 621 | if (!regs.viewport_scale_offset_enabled) { | ||
| 622 | const auto x = static_cast<GLfloat>(regs.surface_clip.x); | ||
| 623 | const auto y = static_cast<GLfloat>(regs.surface_clip.y); | ||
| 624 | const auto width = static_cast<GLfloat>(regs.surface_clip.width); | ||
| 625 | const auto height = static_cast<GLfloat>(regs.surface_clip.height); | ||
| 626 | glViewportIndexedf(static_cast<GLuint>(index), x, y, width != 0.0f ? width : 1.0f, | ||
| 627 | height != 0.0f ? height : 1.0f); | ||
| 628 | continue; | ||
| 629 | } | ||
| 630 | |||
| 620 | const auto& src = regs.viewport_transform[index]; | 631 | const auto& src = regs.viewport_transform[index]; |
| 621 | GLfloat x = conv(src.translate_x - src.scale_x); | 632 | GLfloat x = conv(src.translate_x - src.scale_x); |
| 622 | GLfloat y = conv(src.translate_y - src.scale_y); | 633 | GLfloat y = conv(src.translate_y - src.scale_y); |
| @@ -1129,6 +1140,16 @@ void RasterizerOpenGL::ReleaseChannel(s32 channel_id) { | |||
| 1129 | query_cache.EraseChannel(channel_id); | 1140 | query_cache.EraseChannel(channel_id); |
| 1130 | } | 1141 | } |
| 1131 | 1142 | ||
| 1143 | void RasterizerOpenGL::BindInlineIndexBuffer() { | ||
| 1144 | if (maxwell3d->inline_index_draw_indexes.empty()) { | ||
| 1145 | return; | ||
| 1146 | } | ||
| 1147 | const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size()); | ||
| 1148 | auto buffer = Buffer(buffer_cache_runtime, *this, 0, data_count); | ||
| 1149 | buffer.ImmediateUpload(0, maxwell3d->inline_index_draw_indexes); | ||
| 1150 | buffer_cache_runtime.BindIndexBuffer(buffer, 0, data_count); | ||
| 1151 | } | ||
| 1152 | |||
| 1132 | AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} | 1153 | AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {} |
| 1133 | 1154 | ||
| 1134 | bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { | 1155 | bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { |
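Downstream of the interface change, the GL draw no longer consults mme_draw: the instance count arrives as an argument and goes straight into the BaseVertex/BaseInstance draw variants. A hedged sketch of the indexed case, not shown in this hunk; the index-buffer offset is simplified to nullptr, and MaxwellToGL::IndexFormat is assumed from the surrounding translation helpers:

    const GLsizei num_instances = static_cast<GLsizei>(instance_count);
    const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.global_base_vertex_index);
    const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.global_base_instance_index);
    const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_buffer.count);
    glDrawElementsInstancedBaseVertexBaseInstance(
        primitive_mode, num_vertices,
        MaxwellToGL::IndexFormat(maxwell3d->regs.index_buffer.format), nullptr,
        num_instances, base_vertex, base_instance);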
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 45131b785..793e0d608 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h | |||
| @@ -68,7 +68,7 @@ public: | |||
| 68 | StateTracker& state_tracker_); | 68 | StateTracker& state_tracker_); |
| 69 | ~RasterizerOpenGL() override; | 69 | ~RasterizerOpenGL() override; |
| 70 | 70 | ||
| 71 | void Draw(bool is_indexed, bool is_instanced) override; | 71 | void Draw(bool is_indexed, u32 instance_count) override; |
| 72 | void Clear() override; | 72 | void Clear() override; |
| 73 | void DispatchCompute() override; | 73 | void DispatchCompute() override; |
| 74 | void ResetCounter(VideoCore::QueryType type) override; | 74 | void ResetCounter(VideoCore::QueryType type) override; |
| @@ -199,6 +199,8 @@ private: | |||
| 199 | /// End a transform feedback | 199 | /// End a transform feedback |
| 200 | void EndTransformFeedback(); | 200 | void EndTransformFeedback(); |
| 201 | 201 | ||
| 202 | void BindInlineIndexBuffer(); | ||
| 203 | |||
| 202 | Tegra::GPU& gpu; | 204 | Tegra::GPU& gpu; |
| 203 | 205 | ||
| 204 | const Device& device; | 206 | const Device& device; |
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index e94cfdb1a..3fe04a115 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp | |||
| @@ -49,7 +49,7 @@ using VideoCommon::LoadPipelines; | |||
| 49 | using VideoCommon::SerializePipeline; | 49 | using VideoCommon::SerializePipeline; |
| 50 | using Context = ShaderContext::Context; | 50 | using Context = ShaderContext::Context; |
| 51 | 51 | ||
| 52 | constexpr u32 CACHE_VERSION = 6; | 52 | constexpr u32 CACHE_VERSION = 7; |
| 53 | 53 | ||
| 54 | template <typename Container> | 54 | template <typename Container> |
| 55 | auto MakeSpan(Container& container) { | 55 | auto MakeSpan(Container& container) { |
| @@ -76,7 +76,8 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key, | |||
| 76 | } | 76 | } |
| 77 | break; | 77 | break; |
| 78 | case Shader::Stage::TessellationEval: | 78 | case Shader::Stage::TessellationEval: |
| 79 | info.tess_clockwise = key.tessellation_clockwise != 0; | 79 | // Flip the face, as OpenGL's drawing is flipped. |
| 80 | info.tess_clockwise = key.tessellation_clockwise == 0; | ||
| 80 | info.tess_primitive = [&key] { | 81 | info.tess_primitive = [&key] { |
| 81 | switch (key.tessellation_primitive) { | 82 | switch (key.tessellation_primitive) { |
| 82 | case Maxwell::Tessellation::DomainType::Isolines: | 83 | case Maxwell::Tessellation::DomainType::Isolines: |
| @@ -218,6 +219,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo | |||
| 218 | .support_float16 = false, | 219 | .support_float16 = false, |
| 219 | .support_int64 = device.HasShaderInt64(), | 220 | .support_int64 = device.HasShaderInt64(), |
| 220 | .needs_demote_reorder = device.IsAmd(), | 221 | .needs_demote_reorder = device.IsAmd(), |
| 222 | .support_snorm_render_buffer = false, | ||
| 221 | } { | 223 | } { |
| 222 | if (use_asynchronous_shaders) { | 224 | if (use_asynchronous_shaders) { |
| 223 | workers = CreateWorkers(); | 225 | workers = CreateWorkers(); |
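Two coupled changes land here: the tessellation winding bit is inverted for OpenGL, whose flipped window space mirrors the evaluated faces, and CACHE_VERSION goes from 6 to 7 (in vk_pipeline_cache.cpp as well) so pipelines serialized with the old winding semantics are rebuilt rather than reused. Side by side:

    // OpenGL backend: flip the guest's winding to compensate for the y-flip.
    info.tess_clockwise = key.tessellation_clockwise == 0;
    // Vulkan backend (vk_pipeline_cache.cpp below) forwards it unchanged:
    //   info.tess_clockwise = key.state.tessellation_clockwise != 0;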
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp index a359f96f1..d53b422ca 100644 --- a/src/video_core/renderer_opengl/gl_state_tracker.cpp +++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp | |||
| @@ -70,8 +70,8 @@ void SetupDirtyViewports(Tables& tables) { | |||
| 70 | FillBlock(tables[1], OFF(viewport_transform), NUM(viewport_transform), Viewports); | 70 | FillBlock(tables[1], OFF(viewport_transform), NUM(viewport_transform), Viewports); |
| 71 | FillBlock(tables[1], OFF(viewports), NUM(viewports), Viewports); | 71 | FillBlock(tables[1], OFF(viewports), NUM(viewports), Viewports); |
| 72 | 72 | ||
| 73 | tables[0][OFF(viewport_scale_offset_enbled)] = ViewportTransform; | 73 | tables[0][OFF(viewport_scale_offset_enabled)] = ViewportTransform; |
| 74 | tables[1][OFF(viewport_scale_offset_enbled)] = Viewports; | 74 | tables[1][OFF(viewport_scale_offset_enabled)] = Viewports; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | void SetupDirtyScissors(Tables& tables) { | 77 | void SetupDirtyScissors(Tables& tables) { |
diff --git a/src/video_core/renderer_vulkan/pipeline_helper.h b/src/video_core/renderer_vulkan/pipeline_helper.h index b24f3424a..b7843e995 100644 --- a/src/video_core/renderer_vulkan/pipeline_helper.h +++ b/src/video_core/renderer_vulkan/pipeline_helper.h | |||
| @@ -68,13 +68,15 @@ public: | |||
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | vk::PipelineLayout CreatePipelineLayout(VkDescriptorSetLayout descriptor_set_layout) const { | 70 | vk::PipelineLayout CreatePipelineLayout(VkDescriptorSetLayout descriptor_set_layout) const { |
| 71 | using Shader::Backend::SPIRV::RenderAreaLayout; | ||
| 71 | using Shader::Backend::SPIRV::RescalingLayout; | 72 | using Shader::Backend::SPIRV::RescalingLayout; |
| 72 | const u32 size_offset = is_compute ? sizeof(RescalingLayout::down_factor) : 0u; | 73 | const u32 size_offset = is_compute ? sizeof(RescalingLayout::down_factor) : 0u; |
| 73 | const VkPushConstantRange range{ | 74 | const VkPushConstantRange range{ |
| 74 | .stageFlags = static_cast<VkShaderStageFlags>( | 75 | .stageFlags = static_cast<VkShaderStageFlags>( |
| 75 | is_compute ? VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_ALL_GRAPHICS), | 76 | is_compute ? VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_ALL_GRAPHICS), |
| 76 | .offset = 0, | 77 | .offset = 0, |
| 77 | .size = static_cast<u32>(sizeof(RescalingLayout)) - size_offset, | 78 | .size = static_cast<u32>(sizeof(RescalingLayout)) - size_offset + |
| 79 | static_cast<u32>(sizeof(RenderAreaLayout)), | ||
| 78 | }; | 80 | }; |
| 79 | return device->GetLogical().CreatePipelineLayout({ | 81 | return device->GetLogical().CreatePipelineLayout({ |
| 80 | .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, | 82 | .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, |
| @@ -167,6 +169,12 @@ private: | |||
| 167 | u32 image_bit{1u}; | 169 | u32 image_bit{1u}; |
| 168 | }; | 170 | }; |
| 169 | 171 | ||
| 172 | class RenderAreaPushConstant { | ||
| 173 | public: | ||
| 174 | bool uses_render_area{}; | ||
| 175 | std::array<f32, 4> words{}; | ||
| 176 | }; | ||
| 177 | |||
| 170 | inline void PushImageDescriptors(TextureCache& texture_cache, | 178 | inline void PushImageDescriptors(TextureCache& texture_cache, |
| 171 | UpdateDescriptorQueue& update_descriptor_queue, | 179 | UpdateDescriptorQueue& update_descriptor_queue, |
| 172 | const Shader::Info& info, RescalingPushConstant& rescaling, | 180 | const Shader::Info& info, RescalingPushConstant& rescaling, |
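The push-constant range grows by sizeof(RenderAreaLayout), placing the render-area words after the rescaling block; RENDERAREA_LAYOUT_OFFSET in vk_graphics_pipeline.cpp below points at that tail. An assumed mirror of the SPIR-V backend's layout, inferred from the range arithmetic rather than quoted:

    #include <array>

    struct RenderAreaLayout {
        std::array<float, 4> render_area; // width, height, two unused words
    };
    // Graphics range: [RescalingLayout][RenderAreaLayout]
    // size = sizeof(RescalingLayout) - size_offset + sizeof(RenderAreaLayout)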
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index cb7fa2078..89426121f 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp | |||
| @@ -480,11 +480,15 @@ void BlitScreen::RefreshResources(const Tegra::FramebufferConfig& framebuffer) { | |||
| 480 | fsr.reset(); | 480 | fsr.reset(); |
| 481 | } | 481 | } |
| 482 | 482 | ||
| 483 | if (framebuffer.width == raw_width && framebuffer.height == raw_height && !raw_images.empty()) { | 483 | if (framebuffer.width == raw_width && framebuffer.height == raw_height && |
| 484 | framebuffer.pixel_format == pixel_format && !raw_images.empty()) { | ||
| 484 | return; | 485 | return; |
| 485 | } | 486 | } |
| 487 | |||
| 486 | raw_width = framebuffer.width; | 488 | raw_width = framebuffer.width; |
| 487 | raw_height = framebuffer.height; | 489 | raw_height = framebuffer.height; |
| 490 | pixel_format = framebuffer.pixel_format; | ||
| 491 | |||
| 488 | ReleaseRawImages(); | 492 | ReleaseRawImages(); |
| 489 | 493 | ||
| 490 | CreateStagingBuffer(framebuffer); | 494 | CreateStagingBuffer(framebuffer); |
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index 29e2ea925..a2b73ec54 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h | |||
| @@ -28,6 +28,10 @@ namespace VideoCore { | |||
| 28 | class RasterizerInterface; | 28 | class RasterizerInterface; |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | namespace Service::android { | ||
| 32 | enum class PixelFormat : u32; | ||
| 33 | } | ||
| 34 | |||
| 31 | namespace Vulkan { | 35 | namespace Vulkan { |
| 32 | 36 | ||
| 33 | struct ScreenInfo; | 37 | struct ScreenInfo; |
| @@ -156,6 +160,7 @@ private: | |||
| 156 | 160 | ||
| 157 | u32 raw_width = 0; | 161 | u32 raw_width = 0; |
| 158 | u32 raw_height = 0; | 162 | u32 raw_height = 0; |
| 163 | Service::android::PixelFormat pixel_format{}; | ||
| 159 | 164 | ||
| 160 | std::unique_ptr<FSR> fsr; | 165 | std::unique_ptr<FSR> fsr; |
| 161 | }; | 166 | }; |
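Caching the pixel format next to the raw dimensions closes a reuse bug: a framebuffer that changed format but not size previously kept its stale raw images. The reuse test is now (restating the vk_blit_screen.cpp hunk above):

    const bool can_reuse = framebuffer.width == raw_width &&
                           framebuffer.height == raw_height &&
                           framebuffer.pixel_format == pixel_format &&
                           !raw_images.empty();
    if (!can_reuse) {
        ReleaseRawImages(); // recreate the staging buffer and raw images
    }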
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index c3f66c8a3..1aa116cea 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | |||
| @@ -31,6 +31,7 @@ namespace { | |||
| 31 | using boost::container::small_vector; | 31 | using boost::container::small_vector; |
| 32 | using boost::container::static_vector; | 32 | using boost::container::static_vector; |
| 33 | using Shader::ImageBufferDescriptor; | 33 | using Shader::ImageBufferDescriptor; |
| 34 | using Shader::Backend::SPIRV::RENDERAREA_LAYOUT_OFFSET; | ||
| 34 | using Shader::Backend::SPIRV::RESCALING_LAYOUT_DOWN_FACTOR_OFFSET; | 35 | using Shader::Backend::SPIRV::RESCALING_LAYOUT_DOWN_FACTOR_OFFSET; |
| 35 | using Shader::Backend::SPIRV::RESCALING_LAYOUT_WORDS_OFFSET; | 36 | using Shader::Backend::SPIRV::RESCALING_LAYOUT_WORDS_OFFSET; |
| 36 | using Tegra::Texture::TexturePair; | 37 | using Tegra::Texture::TexturePair; |
| @@ -433,12 +434,19 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 433 | update_descriptor_queue.Acquire(); | 434 | update_descriptor_queue.Acquire(); |
| 434 | 435 | ||
| 435 | RescalingPushConstant rescaling; | 436 | RescalingPushConstant rescaling; |
| 437 | RenderAreaPushConstant render_area; | ||
| 436 | const VkSampler* samplers_it{samplers.data()}; | 438 | const VkSampler* samplers_it{samplers.data()}; |
| 437 | const VideoCommon::ImageViewInOut* views_it{views.data()}; | 439 | const VideoCommon::ImageViewInOut* views_it{views.data()}; |
| 438 | const auto prepare_stage{[&](size_t stage) LAMBDA_FORCEINLINE { | 440 | const auto prepare_stage{[&](size_t stage) LAMBDA_FORCEINLINE { |
| 439 | buffer_cache.BindHostStageBuffers(stage); | 441 | buffer_cache.BindHostStageBuffers(stage); |
| 440 | PushImageDescriptors(texture_cache, update_descriptor_queue, stage_infos[stage], rescaling, | 442 | PushImageDescriptors(texture_cache, update_descriptor_queue, stage_infos[stage], rescaling, |
| 441 | samplers_it, views_it); | 443 | samplers_it, views_it); |
| 444 | const auto& info{stage_infos[0]}; | ||
| 445 | if (info.uses_render_area) { | ||
| 446 | render_area.uses_render_area = true; | ||
| 447 | render_area.words = {static_cast<float>(regs.surface_clip.width), | ||
| 448 | static_cast<float>(regs.surface_clip.height)}; | ||
| 449 | } | ||
| 442 | }}; | 450 | }}; |
| 443 | if constexpr (Spec::enabled_stages[0]) { | 451 | if constexpr (Spec::enabled_stages[0]) { |
| 444 | prepare_stage(0); | 452 | prepare_stage(0); |
| @@ -455,10 +463,11 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 455 | if constexpr (Spec::enabled_stages[4]) { | 463 | if constexpr (Spec::enabled_stages[4]) { |
| 456 | prepare_stage(4); | 464 | prepare_stage(4); |
| 457 | } | 465 | } |
| 458 | ConfigureDraw(rescaling); | 466 | ConfigureDraw(rescaling, render_area); |
| 459 | } | 467 | } |
| 460 | 468 | ||
| 461 | void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling) { | 469 | void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling, |
| 470 | const RenderAreaPushConstant& render_area) { | ||
| 462 | texture_cache.UpdateRenderTargets(false); | 471 | texture_cache.UpdateRenderTargets(false); |
| 463 | scheduler.RequestRenderpass(texture_cache.GetFramebuffer()); | 472 | scheduler.RequestRenderpass(texture_cache.GetFramebuffer()); |
| 464 | 473 | ||
| @@ -474,7 +483,9 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling) { | |||
| 474 | const bool bind_pipeline{scheduler.UpdateGraphicsPipeline(this)}; | 483 | const bool bind_pipeline{scheduler.UpdateGraphicsPipeline(this)}; |
| 475 | const void* const descriptor_data{update_descriptor_queue.UpdateData()}; | 484 | const void* const descriptor_data{update_descriptor_queue.UpdateData()}; |
| 476 | scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(), | 485 | scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(), |
| 477 | is_rescaling, update_rescaling](vk::CommandBuffer cmdbuf) { | 486 | is_rescaling, update_rescaling, |
| 487 | uses_render_area = render_area.uses_render_area, | ||
| 488 | render_area_data = render_area.words](vk::CommandBuffer cmdbuf) { | ||
| 478 | if (bind_pipeline) { | 489 | if (bind_pipeline) { |
| 479 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline); | 490 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline); |
| 480 | } | 491 | } |
| @@ -488,6 +499,11 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling) { | |||
| 488 | RESCALING_LAYOUT_DOWN_FACTOR_OFFSET, sizeof(scale_down_factor), | 499 | RESCALING_LAYOUT_DOWN_FACTOR_OFFSET, sizeof(scale_down_factor), |
| 489 | &scale_down_factor); | 500 | &scale_down_factor); |
| 490 | } | 501 | } |
| 502 | if (uses_render_area) { | ||
| 503 | cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_ALL_GRAPHICS, | ||
| 504 | RENDERAREA_LAYOUT_OFFSET, sizeof(render_area_data), | ||
| 505 | &render_area_data); | ||
| 506 | } | ||
| 491 | if (!descriptor_set_layout) { | 507 | if (!descriptor_set_layout) { |
| 492 | return; | 508 | return; |
| 493 | } | 509 | } |
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h index 85602592b..6bf577d25 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h | |||
| @@ -62,6 +62,7 @@ class Device; | |||
| 62 | class PipelineStatistics; | 62 | class PipelineStatistics; |
| 63 | class RenderPassCache; | 63 | class RenderPassCache; |
| 64 | class RescalingPushConstant; | 64 | class RescalingPushConstant; |
| 65 | class RenderAreaPushConstant; | ||
| 65 | class Scheduler; | 66 | class Scheduler; |
| 66 | class UpdateDescriptorQueue; | 67 | class UpdateDescriptorQueue; |
| 67 | 68 | ||
| @@ -119,7 +120,8 @@ private: | |||
| 119 | template <typename Spec> | 120 | template <typename Spec> |
| 120 | void ConfigureImpl(bool is_indexed); | 121 | void ConfigureImpl(bool is_indexed); |
| 121 | 122 | ||
| 122 | void ConfigureDraw(const RescalingPushConstant& rescaling); | 123 | void ConfigureDraw(const RescalingPushConstant& rescaling, |
| 124 | const RenderAreaPushConstant& render_area);
| 123 | 125 | ||
| 124 | void MakePipeline(VkRenderPass render_pass); | 126 | void MakePipeline(VkRenderPass render_pass); |
| 125 | 127 | ||
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index 13d5a1f67..d4b0a542a 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | |||
| @@ -53,7 +53,7 @@ using VideoCommon::FileEnvironment; | |||
| 53 | using VideoCommon::GenericEnvironment; | 53 | using VideoCommon::GenericEnvironment; |
| 54 | using VideoCommon::GraphicsEnvironment; | 54 | using VideoCommon::GraphicsEnvironment; |
| 55 | 55 | ||
| 56 | constexpr u32 CACHE_VERSION = 6; | 56 | constexpr u32 CACHE_VERSION = 7; |
| 57 | 57 | ||
| 58 | template <typename Container> | 58 | template <typename Container> |
| 59 | auto MakeSpan(Container& container) { | 59 | auto MakeSpan(Container& container) { |
| @@ -166,6 +166,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program | |||
| 166 | } | 166 | } |
| 167 | break; | 167 | break; |
| 168 | case Shader::Stage::TessellationEval: | 168 | case Shader::Stage::TessellationEval: |
| 169 | info.tess_clockwise = key.state.tessellation_clockwise != 0; | ||
| 169 | info.tess_primitive = [&key] { | 170 | info.tess_primitive = [&key] { |
| 170 | const u32 raw{key.state.tessellation_primitive.Value()}; | 171 | const u32 raw{key.state.tessellation_primitive.Value()}; |
| 171 | switch (static_cast<Maxwell::Tessellation::DomainType>(raw)) { | 172 | switch (static_cast<Maxwell::Tessellation::DomainType>(raw)) { |
| @@ -325,6 +326,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device | |||
| 325 | .support_int64 = device.IsShaderInt64Supported(), | 326 | .support_int64 = device.IsShaderInt64Supported(), |
| 326 | .needs_demote_reorder = driver_id == VK_DRIVER_ID_AMD_PROPRIETARY_KHR || | 327 | .needs_demote_reorder = driver_id == VK_DRIVER_ID_AMD_PROPRIETARY_KHR || |
| 327 | driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR, | 328 | driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR, |
| 329 | .support_snorm_render_buffer = true, | ||
| 328 | }; | 330 | }; |
| 329 | } | 331 | } |
| 330 | 332 | ||
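Bumping CACHE_VERSION from 6 to 7 deliberately invalidates every previously serialized pipeline: the new tess_clockwise runtime info and the texture-pixel-format/viewport fields added to the shader environment below change the on-disk layout. A generic, runnable sketch of version-gated cache loading (the header layout is simplified, not yuzu's real format):

```cpp
// Sketch: rejecting a serialized cache whose version does not match.
#include <cstdint>
#include <fstream>
#include <iostream>

constexpr std::uint32_t CACHE_VERSION = 7;

bool LoadCache(const char* path) {
    std::ifstream file(path, std::ios::binary);
    if (!file) {
        return false;
    }
    std::uint32_t version{};
    file.read(reinterpret_cast<char*>(&version), sizeof(version));
    if (!file || version != CACHE_VERSION) {
        // Stale or corrupt cache: the caller discards it and rebuilds.
        return false;
    }
    // ... deserialize pipelines ...
    return true;
}

int main() {
    std::cout << (LoadCache("pipelines.bin") ? "loaded\n" : "rebuilding\n");
}
```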
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 47dfb45a1..f69c0c50f 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -127,11 +127,10 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index, u32 up_scale = 1, u3 | |||
| 127 | return scissor; | 127 | return scissor; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instanced, | 130 | DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_indexed) { |
| 131 | bool is_indexed) { | ||
| 132 | DrawParams params{ | 131 | DrawParams params{ |
| 133 | .base_instance = regs.global_base_instance_index, | 132 | .base_instance = regs.global_base_instance_index, |
| 134 | .num_instances = is_instanced ? num_instances : 1, | 133 | .num_instances = num_instances, |
| 135 | .base_vertex = is_indexed ? regs.global_base_vertex_index : regs.vertex_buffer.first, | 134 | .base_vertex = is_indexed ? regs.global_base_vertex_index : regs.vertex_buffer.first, |
| 136 | .num_vertices = is_indexed ? regs.index_buffer.count : regs.vertex_buffer.count, | 135 | .num_vertices = is_indexed ? regs.index_buffer.count : regs.vertex_buffer.count, |
| 137 | .first_index = is_indexed ? regs.index_buffer.first : 0, | 136 | .first_index = is_indexed ? regs.index_buffer.first : 0, |
| @@ -157,12 +156,10 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra | |||
| 157 | staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), | 156 | staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), |
| 158 | update_descriptor_queue(device, scheduler), | 157 | update_descriptor_queue(device, scheduler), |
| 159 | blit_image(device, scheduler, state_tracker, descriptor_pool), | 158 | blit_image(device, scheduler, state_tracker, descriptor_pool), |
| 160 | astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue, | ||
| 161 | memory_allocator), | ||
| 162 | render_pass_cache(device), texture_cache_runtime{device, scheduler, | 159 | render_pass_cache(device), texture_cache_runtime{device, scheduler, |
| 163 | memory_allocator, staging_pool, | 160 | memory_allocator, staging_pool, |
| 164 | blit_image, astc_decoder_pass, | 161 | blit_image, render_pass_cache, |
| 165 | render_pass_cache}, | 162 | descriptor_pool, update_descriptor_queue}, |
| 166 | texture_cache(texture_cache_runtime, *this), | 163 | texture_cache(texture_cache_runtime, *this), |
| 167 | buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, | 164 | buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, |
| 168 | update_descriptor_queue, descriptor_pool), | 165 | update_descriptor_queue, descriptor_pool), |
| @@ -177,7 +174,7 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra | |||
| 177 | 174 | ||
| 178 | RasterizerVulkan::~RasterizerVulkan() = default; | 175 | RasterizerVulkan::~RasterizerVulkan() = default; |
| 179 | 176 | ||
| 180 | void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { | 177 | void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) { |
| 181 | MICROPROFILE_SCOPE(Vulkan_Drawing); | 178 | MICROPROFILE_SCOPE(Vulkan_Drawing); |
| 182 | 179 | ||
| 183 | SCOPE_EXIT({ gpu.TickWork(); }); | 180 | SCOPE_EXIT({ gpu.TickWork(); }); |
| @@ -194,13 +191,15 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { | |||
| 194 | pipeline->SetEngine(maxwell3d, gpu_memory); | 191 | pipeline->SetEngine(maxwell3d, gpu_memory); |
| 195 | pipeline->Configure(is_indexed); | 192 | pipeline->Configure(is_indexed); |
| 196 | 193 | ||
| 194 | BindInlineIndexBuffer(); | ||
| 195 | |||
| 197 | BeginTransformFeedback(); | 196 | BeginTransformFeedback(); |
| 198 | 197 | ||
| 199 | UpdateDynamicStates(); | 198 | UpdateDynamicStates(); |
| 200 | 199 | ||
| 201 | const auto& regs{maxwell3d->regs}; | 200 | const auto& regs{maxwell3d->regs}; |
| 202 | const u32 num_instances{maxwell3d->mme_draw.instance_count}; | 201 | const u32 num_instances{instance_count}; |
| 203 | const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; | 202 | const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_indexed)}; |
| 204 | scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { | 203 | scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { |
| 205 | if (draw_params.is_indexed) { | 204 | if (draw_params.is_indexed) { |
| 206 | cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances, | 205 | cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances, |
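Draw now takes the instance count directly instead of an is_instanced flag paired with a count fetched from the macro engine state, so MakeDrawParams loses a parameter and a ternary. A compilable sketch of the recorded command using the raw Vulkan entry points rather than yuzu's vk::CommandBuffer wrapper (field types are illustrative):

```cpp
// Sketch: dispatching an indexed or non-indexed draw from packed parameters,
// mirroring the DrawParams recorded above (names follow the diff).
#include <cstdint>
#include <vulkan/vulkan.h>

struct DrawParams {
    uint32_t base_instance;
    uint32_t num_instances;
    int32_t base_vertex;
    uint32_t num_vertices;
    uint32_t first_index;
    bool is_indexed;
};

void RecordDraw(VkCommandBuffer cmdbuf, const DrawParams& p) {
    if (p.is_indexed) {
        vkCmdDrawIndexed(cmdbuf, p.num_vertices, p.num_instances, p.first_index,
                         p.base_vertex, p.base_instance);
    } else {
        vkCmdDraw(cmdbuf, p.num_vertices, p.num_instances,
                  static_cast<uint32_t>(p.base_vertex), p.base_instance);
    }
}
```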
| @@ -304,14 +303,19 @@ void RasterizerVulkan::Clear() { | |||
| 304 | } | 303 | } |
| 305 | } | 304 | } |
| 306 | 305 | ||
| 307 | scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) { | 306 | if (regs.clear_surface.R && regs.clear_surface.G && regs.clear_surface.B && |
| 308 | const VkClearAttachment attachment{ | 307 | regs.clear_surface.A) { |
| 309 | .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, | 308 | scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) { |
| 310 | .colorAttachment = color_attachment, | 309 | const VkClearAttachment attachment{ |
| 311 | .clearValue = clear_value, | 310 | .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, |
| 312 | }; | 311 | .colorAttachment = color_attachment, |
| 313 | cmdbuf.ClearAttachments(attachment, clear_rect); | 312 | .clearValue = clear_value, |
| 314 | }); | 313 | }; |
| 314 | cmdbuf.ClearAttachments(attachment, clear_rect); | ||
| 315 | }); | ||
| 316 | } else { | ||
| 317 | UNIMPLEMENTED_MSG("Clearing only the specified color channels is unimplemented"); | ||
| 318 | } | ||
| 315 | } | 319 | } |
| 316 | 320 | ||
| 317 | if (!use_depth && !use_stencil) { | 321 | if (!use_depth && !use_stencil) { |
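vkCmdClearAttachments always writes all four color channels, so the new guard takes that path only when the guest requested a full RGBA clear, and reports partial-channel clears as unimplemented. The predicate, reduced to a self-contained sketch (ClearSurface stands in for the Maxwell clear_surface register bits):

```cpp
// Sketch: vkCmdClearAttachments cannot honor a per-channel mask, so only a
// full RGBA clear may use it; anything else needs a different path.
struct ClearSurface {
    bool R, G, B, A;
};

constexpr bool CanUseClearAttachments(const ClearSurface& clear) {
    return clear.R && clear.G && clear.B && clear.A;
}

static_assert(CanUseClearAttachments({true, true, true, true}));
static_assert(!CanUseClearAttachments({true, false, true, true}));
```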
| @@ -679,6 +683,22 @@ void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& reg | |||
| 679 | if (!state_tracker.TouchViewports()) { | 683 | if (!state_tracker.TouchViewports()) { |
| 680 | return; | 684 | return; |
| 681 | } | 685 | } |
| 686 | if (!regs.viewport_scale_offset_enabled) { | ||
| 687 | const auto x = static_cast<float>(regs.surface_clip.x); | ||
| 688 | const auto y = static_cast<float>(regs.surface_clip.y); | ||
| 689 | const auto width = static_cast<float>(regs.surface_clip.width); | ||
| 690 | const auto height = static_cast<float>(regs.surface_clip.height); | ||
| 691 | VkViewport viewport{ | ||
| 692 | .x = x, | ||
| 693 | .y = y, | ||
| 694 | .width = width != 0.0f ? width : 1.0f, | ||
| 695 | .height = height != 0.0f ? height : 1.0f, | ||
| 696 | .minDepth = 0.0f, | ||
| 697 | .maxDepth = 1.0f, | ||
| 698 | }; | ||
| 699 | scheduler.Record([viewport](vk::CommandBuffer cmdbuf) { cmdbuf.SetViewport(0, viewport); }); | ||
| 700 | return; | ||
| 701 | } | ||
| 682 | const bool is_rescaling{texture_cache.IsRescaling()}; | 702 | const bool is_rescaling{texture_cache.IsRescaling()}; |
| 683 | const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f; | 703 | const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f; |
| 684 | const std::array viewports{ | 704 | const std::array viewports{ |
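When viewport_scale_offset_enabled is clear, the guest bypasses the viewport transform, so the added early-out installs a single viewport covering the surface clip with the default [0, 1] depth range, clamping zero extents to 1 to keep the viewport valid. A compilable sketch of that construction (SurfaceClip stands in for the Maxwell surface_clip register):

```cpp
// Sketch: building the fallback viewport from the surface clip rectangle.
#include <vulkan/vulkan.h>

struct SurfaceClip {
    int32_t x, y;
    uint32_t width, height;
};

VkViewport MakeFallbackViewport(const SurfaceClip& clip) {
    const float width = static_cast<float>(clip.width);
    const float height = static_cast<float>(clip.height);
    return VkViewport{
        .x = static_cast<float>(clip.x),
        .y = static_cast<float>(clip.y),
        .width = width != 0.0f ? width : 1.0f,   // zero-sized viewports are invalid
        .height = height != 0.0f ? height : 1.0f,
        .minDepth = 0.0f,
        .maxDepth = 1.0f,
    };
}
```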
| @@ -1009,4 +1029,17 @@ void RasterizerVulkan::ReleaseChannel(s32 channel_id) { | |||
| 1009 | query_cache.EraseChannel(channel_id); | 1029 | query_cache.EraseChannel(channel_id); |
| 1010 | } | 1030 | } |
| 1011 | 1031 | ||
| 1032 | void RasterizerVulkan::BindInlineIndexBuffer() { | ||
| 1033 | if (maxwell3d->inline_index_draw_indexes.empty()) { | ||
| 1034 | return; | ||
| 1035 | } | ||
| 1036 | const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size()); | ||
| 1037 | auto buffer = buffer_cache_runtime.UploadStagingBuffer(data_count); | ||
| 1038 | std::memcpy(buffer.mapped_span.data(), maxwell3d->inline_index_draw_indexes.data(), data_count); | ||
| 1039 | buffer_cache_runtime.BindIndexBuffer( | ||
| 1040 | maxwell3d->regs.draw.topology, maxwell3d->regs.index_buffer.format, | ||
| 1041 | maxwell3d->regs.index_buffer.first, maxwell3d->regs.index_buffer.count, buffer.buffer, | ||
| 1042 | static_cast<u32>(buffer.offset), data_count); | ||
| 1043 | } | ||
| 1044 | |||
| 1012 | } // namespace Vulkan | 1045 | } // namespace Vulkan |
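Inline index draws arrive from the macro engine as a raw byte stream, so the new BindInlineIndexBuffer stages those bytes and binds the staging allocation as the index buffer for the draw. A reduced sketch of the copy step, assuming the engine stores the indexes as a byte vector so size() is already the byte count (StagingRef stands in for yuzu's StagingBufferRef):

```cpp
// Sketch: staging a CPU-side index stream for binding.
#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

struct StagingRef {
    std::span<std::uint8_t> mapped_span; // host-visible mapping of the staging buffer
    std::uint64_t offset;                // offset of this allocation within the buffer
};

void UploadInlineIndexes(const std::vector<std::uint8_t>& indexes, StagingRef staging) {
    // The vector stores raw index bytes, so size() is the byte count to copy.
    std::memcpy(staging.mapped_span.data(), indexes.data(), indexes.size());
}
```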
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 4cde3c983..b0bc306f5 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h | |||
| @@ -64,7 +64,7 @@ public: | |||
| 64 | StateTracker& state_tracker_, Scheduler& scheduler_); | 64 | StateTracker& state_tracker_, Scheduler& scheduler_); |
| 65 | ~RasterizerVulkan() override; | 65 | ~RasterizerVulkan() override; |
| 66 | 66 | ||
| 67 | void Draw(bool is_indexed, bool is_instanced) override; | 67 | void Draw(bool is_indexed, u32 instance_count) override; |
| 68 | void Clear() override; | 68 | void Clear() override; |
| 69 | void DispatchCompute() override; | 69 | void DispatchCompute() override; |
| 70 | void ResetCounter(VideoCore::QueryType type) override; | 70 | void ResetCounter(VideoCore::QueryType type) override; |
| @@ -141,6 +141,8 @@ private: | |||
| 141 | 141 | ||
| 142 | void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); | 142 | void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); |
| 143 | 143 | ||
| 144 | void BindInlineIndexBuffer(); | ||
| 145 | |||
| 144 | Tegra::GPU& gpu; | 146 | Tegra::GPU& gpu; |
| 145 | 147 | ||
| 146 | ScreenInfo& screen_info; | 148 | ScreenInfo& screen_info; |
| @@ -153,7 +155,6 @@ private: | |||
| 153 | DescriptorPool descriptor_pool; | 155 | DescriptorPool descriptor_pool; |
| 154 | UpdateDescriptorQueue update_descriptor_queue; | 156 | UpdateDescriptorQueue update_descriptor_queue; |
| 155 | BlitImageHelper blit_image; | 157 | BlitImageHelper blit_image; |
| 156 | ASTCDecoderPass astc_decoder_pass; | ||
| 157 | RenderPassCache render_pass_cache; | 158 | RenderPassCache render_pass_cache; |
| 158 | 159 | ||
| 159 | TextureCacheRuntime texture_cache_runtime; | 160 | TextureCacheRuntime texture_cache_runtime; |
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h index c04aad08f..929216749 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.h +++ b/src/video_core/renderer_vulkan/vk_scheduler.h | |||
| @@ -144,7 +144,6 @@ private: | |||
| 144 | using FuncType = TypedCommand<T>; | 144 | using FuncType = TypedCommand<T>; |
| 145 | static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large"); | 145 | static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large"); |
| 146 | 146 | ||
| 147 | recorded_counts++; | ||
| 148 | command_offset = Common::AlignUp(command_offset, alignof(FuncType)); | 147 | command_offset = Common::AlignUp(command_offset, alignof(FuncType)); |
| 149 | if (command_offset > sizeof(data) - sizeof(FuncType)) { | 148 | if (command_offset > sizeof(data) - sizeof(FuncType)) { |
| 150 | return false; | 149 | return false; |
| @@ -166,7 +165,7 @@ private: | |||
| 166 | } | 165 | } |
| 167 | 166 | ||
| 168 | bool Empty() const { | 167 | bool Empty() const { |
| 169 | return recorded_counts == 0; | 168 | return command_offset == 0; |
| 170 | } | 169 | } |
| 171 | 170 | ||
| 172 | bool HasSubmit() const { | 171 | bool HasSubmit() const { |
| @@ -177,7 +176,6 @@ private: | |||
| 177 | Command* first = nullptr; | 176 | Command* first = nullptr; |
| 178 | Command* last = nullptr; | 177 | Command* last = nullptr; |
| 179 | 178 | ||
| 180 | size_t recorded_counts = 0; | ||
| 181 | size_t command_offset = 0; | 179 | size_t command_offset = 0; |
| 182 | bool submit = false; | 180 | bool submit = false; |
| 183 | alignas(std::max_align_t) std::array<u8, 0x8000> data{}; | 181 | alignas(std::max_align_t) std::array<u8, 0x8000> data{}; |
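The scheduler kept a recorded_counts field solely to answer Empty(); since every recorded command advances command_offset, the offset doubles as the emptiness test and the counter can be dropped. The idea, reduced to a runnable sketch:

```cpp
// Sketch: a bump-allocated command buffer whose emptiness is derived from the
// write offset instead of a separate counter (reduced from the scheduler).
#include <array>
#include <cassert>
#include <cstddef>
#include <cstring>

class CommandList {
public:
    bool Push(const void* bytes, std::size_t size) {
        if (offset_ + size > data_.size()) {
            return false; // out of space; caller flushes and retries
        }
        std::memcpy(data_.data() + offset_, bytes, size);
        offset_ += size;
        return true;
    }

    bool Empty() const {
        return offset_ == 0; // any recorded command makes the offset non-zero
    }

private:
    std::size_t offset_ = 0;
    std::array<unsigned char, 0x8000> data_{};
};

int main() {
    CommandList list;
    assert(list.Empty());
    const int payload = 42;
    list.Push(&payload, sizeof(payload));
    assert(!list.Empty());
}
```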
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp index b87c3be66..edb41b171 100644 --- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp +++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp | |||
| @@ -51,7 +51,7 @@ Flags MakeInvalidationFlags() { | |||
| 51 | void SetupDirtyViewports(Tables& tables) { | 51 | void SetupDirtyViewports(Tables& tables) { |
| 52 | FillBlock(tables[0], OFF(viewport_transform), NUM(viewport_transform), Viewports); | 52 | FillBlock(tables[0], OFF(viewport_transform), NUM(viewport_transform), Viewports); |
| 53 | FillBlock(tables[0], OFF(viewports), NUM(viewports), Viewports); | 53 | FillBlock(tables[0], OFF(viewports), NUM(viewports), Viewports); |
| 54 | tables[0][OFF(viewport_scale_offset_enbled)] = Viewports; | 54 | tables[0][OFF(viewport_scale_offset_enabled)] = Viewports; |
| 55 | tables[1][OFF(window_origin)] = Viewports; | 55 | tables[1][OFF(window_origin)] = Viewports; |
| 56 | } | 56 | } |
| 57 | 57 | ||
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index 305ad8aee..853b80d8a 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp | |||
| @@ -791,12 +791,17 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, Scheduler& sched | |||
| 791 | MemoryAllocator& memory_allocator_, | 791 | MemoryAllocator& memory_allocator_, |
| 792 | StagingBufferPool& staging_buffer_pool_, | 792 | StagingBufferPool& staging_buffer_pool_, |
| 793 | BlitImageHelper& blit_image_helper_, | 793 | BlitImageHelper& blit_image_helper_, |
| 794 | ASTCDecoderPass& astc_decoder_pass_, | 794 | RenderPassCache& render_pass_cache_, |
| 795 | RenderPassCache& render_pass_cache_) | 795 | DescriptorPool& descriptor_pool, |
| 796 | UpdateDescriptorQueue& update_descriptor_queue) | ||
| 796 | : device{device_}, scheduler{scheduler_}, memory_allocator{memory_allocator_}, | 797 | : device{device_}, scheduler{scheduler_}, memory_allocator{memory_allocator_}, |
| 797 | staging_buffer_pool{staging_buffer_pool_}, blit_image_helper{blit_image_helper_}, | 798 | staging_buffer_pool{staging_buffer_pool_}, blit_image_helper{blit_image_helper_}, |
| 798 | astc_decoder_pass{astc_decoder_pass_}, render_pass_cache{render_pass_cache_}, | 799 | render_pass_cache{render_pass_cache_}, resolution{Settings::values.resolution_info} { |
| 799 | resolution{Settings::values.resolution_info} {} | 800 | if (Settings::values.accelerate_astc) { |
| 801 | astc_decoder_pass.emplace(device, scheduler, descriptor_pool, staging_buffer_pool, | ||
| 802 | update_descriptor_queue, memory_allocator); | ||
| 803 | } | ||
| 804 | } | ||
| 800 | 805 | ||
| 801 | void TextureCacheRuntime::Finish() { | 806 | void TextureCacheRuntime::Finish() { |
| 802 | scheduler.Finish(); | 807 | scheduler.Finish(); |
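The ASTC decoder pass moves into the texture-cache runtime and is now built only when accelerate_astc is enabled, via std::optional::emplace, so hosts decoding ASTC on the CPU never create its shaders or descriptor layouts. A minimal sketch of the pattern (DecoderPass is illustrative):

```cpp
// Sketch: constructing an expensive member lazily behind a setting, as the
// runtime now does with its ASTCDecoderPass.
#include <iostream>
#include <optional>

struct DecoderPass {
    explicit DecoderPass(int device_id) {
        std::cout << "built decoder for device " << device_id << '\n';
    }
    void Assemble() { std::cout << "decoding on GPU\n"; }
};

struct Runtime {
    Runtime(bool accelerate_astc, int device_id) {
        if (accelerate_astc) {
            decoder_pass.emplace(device_id); // constructed in place, no copy/move
        }
    }
    std::optional<DecoderPass> decoder_pass;
};

int main() {
    Runtime runtime{true, 0};
    if (runtime.decoder_pass) {
        runtime.decoder_pass->Assemble();
    }
}
```

AccelerateImageUpload below dereferences the optional unconditionally; that stays safe as long as accelerated ASTC uploads are only requested while the setting is on.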
| @@ -1782,17 +1787,17 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, | |||
| 1782 | 1787 | ||
| 1783 | const auto& resolution = runtime.resolution; | 1788 | const auto& resolution = runtime.resolution; |
| 1784 | 1789 | ||
| 1785 | u32 width = 0; | 1790 | u32 width = std::numeric_limits<u32>::max(); |
| 1786 | u32 height = 0; | 1791 | u32 height = std::numeric_limits<u32>::max(); |
| 1787 | for (size_t index = 0; index < NUM_RT; ++index) { | 1792 | for (size_t index = 0; index < NUM_RT; ++index) { |
| 1788 | const ImageView* const color_buffer = color_buffers[index]; | 1793 | const ImageView* const color_buffer = color_buffers[index]; |
| 1789 | if (!color_buffer) { | 1794 | if (!color_buffer) { |
| 1790 | renderpass_key.color_formats[index] = PixelFormat::Invalid; | 1795 | renderpass_key.color_formats[index] = PixelFormat::Invalid; |
| 1791 | continue; | 1796 | continue; |
| 1792 | } | 1797 | } |
| 1793 | width = std::max(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width) | 1798 | width = std::min(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width) |
| 1794 | : color_buffer->size.width); | 1799 | : color_buffer->size.width); |
| 1795 | height = std::max(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height) | 1800 | height = std::min(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height) |
| 1796 | : color_buffer->size.height); | 1801 | : color_buffer->size.height); |
| 1797 | attachments.push_back(color_buffer->RenderTarget()); | 1802 | attachments.push_back(color_buffer->RenderTarget()); |
| 1798 | renderpass_key.color_formats[index] = color_buffer->format; | 1803 | renderpass_key.color_formats[index] = color_buffer->format; |
| @@ -1804,9 +1809,9 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, | |||
| 1804 | } | 1809 | } |
| 1805 | const size_t num_colors = attachments.size(); | 1810 | const size_t num_colors = attachments.size(); |
| 1806 | if (depth_buffer) { | 1811 | if (depth_buffer) { |
| 1807 | width = std::max(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width) | 1812 | width = std::min(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width) |
| 1808 | : depth_buffer->size.width); | 1813 | : depth_buffer->size.width); |
| 1809 | height = std::max(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height) | 1814 | height = std::min(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height) |
| 1810 | : depth_buffer->size.height); | 1815 | : depth_buffer->size.height); |
| 1811 | attachments.push_back(depth_buffer->RenderTarget()); | 1816 | attachments.push_back(depth_buffer->RenderTarget()); |
| 1812 | renderpass_key.depth_format = depth_buffer->format; | 1817 | renderpass_key.depth_format = depth_buffer->format; |
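A VkFramebuffer must not be larger than its smallest attachment, so the extent is now accumulated with std::min seeded from numeric_limits<u32>::max() instead of std::max seeded from zero; mismatched render-target sizes previously yielded a framebuffer exceeding the smaller attachment. The accumulation, as a runnable sketch:

```cpp
// Sketch: clamping the framebuffer extent to the smallest attachment, as the
// rewritten CreateFramebuffer does.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

struct Extent {
    std::uint32_t width;
    std::uint32_t height;
};

Extent SmallestExtent(const std::vector<Extent>& attachments) {
    Extent result{std::numeric_limits<std::uint32_t>::max(),
                  std::numeric_limits<std::uint32_t>::max()};
    for (const Extent& size : attachments) {
        result.width = std::min(result.width, size.width);
        result.height = std::min(result.height, size.height);
    }
    return result;
}

int main() {
    const Extent extent = SmallestExtent({{1920, 1080}, {1280, 720}});
    assert(extent.width == 1280 && extent.height == 720);
}
```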
| @@ -1845,7 +1850,7 @@ void TextureCacheRuntime::AccelerateImageUpload( | |||
| 1845 | Image& image, const StagingBufferRef& map, | 1850 | Image& image, const StagingBufferRef& map, |
| 1846 | std::span<const VideoCommon::SwizzleParameters> swizzles) { | 1851 | std::span<const VideoCommon::SwizzleParameters> swizzles) { |
| 1847 | if (IsPixelFormatASTC(image.info.format)) { | 1852 | if (IsPixelFormatASTC(image.info.format)) { |
| 1848 | return astc_decoder_pass.Assemble(image, map, swizzles); | 1853 | return astc_decoder_pass->Assemble(image, map, swizzles); |
| 1849 | } | 1854 | } |
| 1850 | ASSERT(false); | 1855 | ASSERT(false); |
| 1851 | } | 1856 | } |
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 0b7ac0df1..7ec0df134 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <span> | 6 | #include <span> |
| 7 | 7 | ||
| 8 | #include "shader_recompiler/shader_info.h" | 8 | #include "shader_recompiler/shader_info.h" |
| 9 | #include "video_core/renderer_vulkan/vk_compute_pass.h" | ||
| 9 | #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" | 10 | #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" |
| 10 | #include "video_core/texture_cache/image_view_base.h" | 11 | #include "video_core/texture_cache/image_view_base.h" |
| 11 | #include "video_core/texture_cache/texture_cache_base.h" | 12 | #include "video_core/texture_cache/texture_cache_base.h" |
| @@ -25,14 +26,15 @@ using VideoCommon::RenderTargets; | |||
| 25 | using VideoCommon::SlotVector; | 26 | using VideoCommon::SlotVector; |
| 26 | using VideoCore::Surface::PixelFormat; | 27 | using VideoCore::Surface::PixelFormat; |
| 27 | 28 | ||
| 28 | class ASTCDecoderPass; | ||
| 29 | class BlitImageHelper; | 29 | class BlitImageHelper; |
| 30 | class DescriptorPool; | ||
| 30 | class Device; | 31 | class Device; |
| 31 | class Image; | 32 | class Image; |
| 32 | class ImageView; | 33 | class ImageView; |
| 33 | class Framebuffer; | 34 | class Framebuffer; |
| 34 | class RenderPassCache; | 35 | class RenderPassCache; |
| 35 | class StagingBufferPool; | 36 | class StagingBufferPool; |
| 37 | class UpdateDescriptorQueue; | ||
| 36 | class Scheduler; | 38 | class Scheduler; |
| 37 | 39 | ||
| 38 | class TextureCacheRuntime { | 40 | class TextureCacheRuntime { |
| @@ -41,8 +43,9 @@ public: | |||
| 41 | MemoryAllocator& memory_allocator_, | 43 | MemoryAllocator& memory_allocator_, |
| 42 | StagingBufferPool& staging_buffer_pool_, | 44 | StagingBufferPool& staging_buffer_pool_, |
| 43 | BlitImageHelper& blit_image_helper_, | 45 | BlitImageHelper& blit_image_helper_, |
| 44 | ASTCDecoderPass& astc_decoder_pass_, | 46 | RenderPassCache& render_pass_cache_, |
| 45 | RenderPassCache& render_pass_cache_); | 47 | DescriptorPool& descriptor_pool, |
| 48 | UpdateDescriptorQueue& update_descriptor_queue); | ||
| 46 | 49 | ||
| 47 | void Finish(); | 50 | void Finish(); |
| 48 | 51 | ||
| @@ -97,8 +100,8 @@ public: | |||
| 97 | MemoryAllocator& memory_allocator; | 100 | MemoryAllocator& memory_allocator; |
| 98 | StagingBufferPool& staging_buffer_pool; | 101 | StagingBufferPool& staging_buffer_pool; |
| 99 | BlitImageHelper& blit_image_helper; | 102 | BlitImageHelper& blit_image_helper; |
| 100 | ASTCDecoderPass& astc_decoder_pass; | ||
| 101 | RenderPassCache& render_pass_cache; | 103 | RenderPassCache& render_pass_cache; |
| 104 | std::optional<ASTCDecoderPass> astc_decoder_pass; | ||
| 102 | const Settings::ResolutionScalingInfo& resolution; | 105 | const Settings::ResolutionScalingInfo& resolution; |
| 103 | 106 | ||
| 104 | constexpr static size_t indexing_slots = 8 * sizeof(size_t); | 107 | constexpr static size_t indexing_slots = 8 * sizeof(size_t); |
diff --git a/src/video_core/shader_environment.cpp b/src/video_core/shader_environment.cpp index fbabb3219..f24f320b6 100644 --- a/src/video_core/shader_environment.cpp +++ b/src/video_core/shader_environment.cpp | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include "video_core/engines/kepler_compute.h" | 19 | #include "video_core/engines/kepler_compute.h" |
| 20 | #include "video_core/memory_manager.h" | 20 | #include "video_core/memory_manager.h" |
| 21 | #include "video_core/shader_environment.h" | 21 | #include "video_core/shader_environment.h" |
| 22 | #include "video_core/texture_cache/format_lookup_table.h" | ||
| 22 | #include "video_core/textures/texture.h" | 23 | #include "video_core/textures/texture.h" |
| 23 | 24 | ||
| 24 | namespace VideoCommon { | 25 | namespace VideoCommon { |
| @@ -33,7 +34,7 @@ static u64 MakeCbufKey(u32 index, u32 offset) { | |||
| 33 | return (static_cast<u64>(index) << 32) | offset; | 34 | return (static_cast<u64>(index) << 32) | offset; |
| 34 | } | 35 | } |
| 35 | 36 | ||
| 36 | static Shader::TextureType ConvertType(const Tegra::Texture::TICEntry& entry) { | 37 | static Shader::TextureType ConvertTextureType(const Tegra::Texture::TICEntry& entry) { |
| 37 | switch (entry.texture_type) { | 38 | switch (entry.texture_type) { |
| 38 | case Tegra::Texture::TextureType::Texture1D: | 39 | case Tegra::Texture::TextureType::Texture1D: |
| 39 | return Shader::TextureType::Color1D; | 40 | return Shader::TextureType::Color1D; |
| @@ -59,6 +60,26 @@ static Shader::TextureType ConvertType(const Tegra::Texture::TICEntry& entry) { | |||
| 59 | } | 60 | } |
| 60 | } | 61 | } |
| 61 | 62 | ||
| 63 | static Shader::TexturePixelFormat ConvertTexturePixelFormat(const Tegra::Texture::TICEntry& entry) { | ||
| 64 | switch (PixelFormatFromTextureInfo(entry.format, entry.r_type, entry.g_type, entry.b_type, | ||
| 65 | entry.a_type, entry.srgb_conversion)) { | ||
| 66 | case VideoCore::Surface::PixelFormat::A8B8G8R8_SNORM: | ||
| 67 | return Shader::TexturePixelFormat::A8B8G8R8_SNORM; | ||
| 68 | case VideoCore::Surface::PixelFormat::R8_SNORM: | ||
| 69 | return Shader::TexturePixelFormat::R8_SNORM; | ||
| 70 | case VideoCore::Surface::PixelFormat::R8G8_SNORM: | ||
| 71 | return Shader::TexturePixelFormat::R8G8_SNORM; | ||
| 72 | case VideoCore::Surface::PixelFormat::R16G16B16A16_SNORM: | ||
| 73 | return Shader::TexturePixelFormat::R16G16B16A16_SNORM; | ||
| 74 | case VideoCore::Surface::PixelFormat::R16G16_SNORM: | ||
| 75 | return Shader::TexturePixelFormat::R16G16_SNORM; | ||
| 76 | case VideoCore::Surface::PixelFormat::R16_SNORM: | ||
| 77 | return Shader::TexturePixelFormat::R16_SNORM; | ||
| 78 | default: | ||
| 79 | return Shader::TexturePixelFormat::OTHER; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 62 | static std::string_view StageToPrefix(Shader::Stage stage) { | 83 | static std::string_view StageToPrefix(Shader::Stage stage) { |
| 63 | switch (stage) { | 84 | switch (stage) { |
| 64 | case Shader::Stage::VertexB: | 85 | case Shader::Stage::VertexB: |
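ConvertTexturePixelFormat collapses every format that is not signed-normalized into OTHER: the recompiler only appears to need to know when a sampled format is SNORM, presumably pairing with the support_snorm_render_buffer flag added in the pipeline cache above. A reduced sketch of that classification (both enums are abbreviated stand-ins):

```cpp
// Sketch: collapsing a full pixel-format enum to "SNORM or other", as
// ConvertTexturePixelFormat does. Enumerators are abbreviated stand-ins.
enum class PixelFormat { A8B8G8R8_SNORM, R8_SNORM, R8G8_SNORM, R16_SNORM, B8G8R8A8_UNORM };
enum class TexturePixelFormat { A8B8G8R8_SNORM, R8_SNORM, R8G8_SNORM, R16_SNORM, OTHER };

constexpr TexturePixelFormat Classify(PixelFormat format) {
    switch (format) {
    case PixelFormat::A8B8G8R8_SNORM:
        return TexturePixelFormat::A8B8G8R8_SNORM;
    case PixelFormat::R8_SNORM:
        return TexturePixelFormat::R8_SNORM;
    case PixelFormat::R8G8_SNORM:
        return TexturePixelFormat::R8G8_SNORM;
    case PixelFormat::R16_SNORM:
        return TexturePixelFormat::R16_SNORM;
    default:
        return TexturePixelFormat::OTHER; // non-SNORM formats need no special handling
    }
}

static_assert(Classify(PixelFormat::B8G8R8A8_UNORM) == TexturePixelFormat::OTHER);
```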
| @@ -178,22 +199,31 @@ void GenericEnvironment::Dump(u64 hash) { | |||
| 178 | void GenericEnvironment::Serialize(std::ofstream& file) const { | 199 | void GenericEnvironment::Serialize(std::ofstream& file) const { |
| 179 | const u64 code_size{static_cast<u64>(CachedSize())}; | 200 | const u64 code_size{static_cast<u64>(CachedSize())}; |
| 180 | const u64 num_texture_types{static_cast<u64>(texture_types.size())}; | 201 | const u64 num_texture_types{static_cast<u64>(texture_types.size())}; |
| 202 | const u64 num_texture_pixel_formats{static_cast<u64>(texture_pixel_formats.size())}; | ||
| 181 | const u64 num_cbuf_values{static_cast<u64>(cbuf_values.size())}; | 203 | const u64 num_cbuf_values{static_cast<u64>(cbuf_values.size())}; |
| 182 | 204 | ||
| 183 | file.write(reinterpret_cast<const char*>(&code_size), sizeof(code_size)) | 205 | file.write(reinterpret_cast<const char*>(&code_size), sizeof(code_size)) |
| 184 | .write(reinterpret_cast<const char*>(&num_texture_types), sizeof(num_texture_types)) | 206 | .write(reinterpret_cast<const char*>(&num_texture_types), sizeof(num_texture_types)) |
| 207 | .write(reinterpret_cast<const char*>(&num_texture_pixel_formats), | ||
| 208 | sizeof(num_texture_pixel_formats)) | ||
| 185 | .write(reinterpret_cast<const char*>(&num_cbuf_values), sizeof(num_cbuf_values)) | 209 | .write(reinterpret_cast<const char*>(&num_cbuf_values), sizeof(num_cbuf_values)) |
| 186 | .write(reinterpret_cast<const char*>(&local_memory_size), sizeof(local_memory_size)) | 210 | .write(reinterpret_cast<const char*>(&local_memory_size), sizeof(local_memory_size)) |
| 187 | .write(reinterpret_cast<const char*>(&texture_bound), sizeof(texture_bound)) | 211 | .write(reinterpret_cast<const char*>(&texture_bound), sizeof(texture_bound)) |
| 188 | .write(reinterpret_cast<const char*>(&start_address), sizeof(start_address)) | 212 | .write(reinterpret_cast<const char*>(&start_address), sizeof(start_address)) |
| 189 | .write(reinterpret_cast<const char*>(&cached_lowest), sizeof(cached_lowest)) | 213 | .write(reinterpret_cast<const char*>(&cached_lowest), sizeof(cached_lowest)) |
| 190 | .write(reinterpret_cast<const char*>(&cached_highest), sizeof(cached_highest)) | 214 | .write(reinterpret_cast<const char*>(&cached_highest), sizeof(cached_highest)) |
| 215 | .write(reinterpret_cast<const char*>(&viewport_transform_state), | ||
| 216 | sizeof(viewport_transform_state)) | ||
| 191 | .write(reinterpret_cast<const char*>(&stage), sizeof(stage)) | 217 | .write(reinterpret_cast<const char*>(&stage), sizeof(stage)) |
| 192 | .write(reinterpret_cast<const char*>(code.data()), code_size); | 218 | .write(reinterpret_cast<const char*>(code.data()), code_size); |
| 193 | for (const auto& [key, type] : texture_types) { | 219 | for (const auto& [key, type] : texture_types) { |
| 194 | file.write(reinterpret_cast<const char*>(&key), sizeof(key)) | 220 | file.write(reinterpret_cast<const char*>(&key), sizeof(key)) |
| 195 | .write(reinterpret_cast<const char*>(&type), sizeof(type)); | 221 | .write(reinterpret_cast<const char*>(&type), sizeof(type)); |
| 196 | } | 222 | } |
| 223 | for (const auto& [key, format] : texture_pixel_formats) { | ||
| 224 | file.write(reinterpret_cast<const char*>(&key), sizeof(key)) | ||
| 225 | .write(reinterpret_cast<const char*>(&format), sizeof(format)); | ||
| 226 | } | ||
| 197 | for (const auto& [key, type] : cbuf_values) { | 227 | for (const auto& [key, type] : cbuf_values) { |
| 198 | file.write(reinterpret_cast<const char*>(&key), sizeof(key)) | 228 | file.write(reinterpret_cast<const char*>(&key), sizeof(key)) |
| 199 | .write(reinterpret_cast<const char*>(&type), sizeof(type)); | 229 | .write(reinterpret_cast<const char*>(&type), sizeof(type)); |
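Serialize writes a count-prefixed stream: the fixed-size header fields first (now including num_texture_pixel_formats and viewport_transform_state), then each map flattened into key/value pairs; Deserialize further down reads them back in exactly the same order. A generic, runnable sketch of the scheme with a simplified layout:

```cpp
// Sketch: count-prefixed (de)serialization of a flat key/value map, matching
// the scheme used by Serialize/Deserialize here (layout simplified).
#include <cassert>
#include <cstdint>
#include <istream>
#include <ostream>
#include <sstream>
#include <unordered_map>

using Map = std::unordered_map<std::uint32_t, std::uint32_t>;

void Write(std::ostream& out, const Map& map) {
    const std::uint64_t count = map.size();
    out.write(reinterpret_cast<const char*>(&count), sizeof(count));
    for (const auto& [key, value] : map) {
        out.write(reinterpret_cast<const char*>(&key), sizeof(key));
        out.write(reinterpret_cast<const char*>(&value), sizeof(value));
    }
}

Map Read(std::istream& in) {
    std::uint64_t count{};
    in.read(reinterpret_cast<char*>(&count), sizeof(count));
    Map map;
    for (std::uint64_t i = 0; i < count; ++i) {
        std::uint32_t key{};
        std::uint32_t value{};
        in.read(reinterpret_cast<char*>(&key), sizeof(key));
        in.read(reinterpret_cast<char*>(&value), sizeof(value));
        map.emplace(key, value);
    }
    return map;
}

int main() {
    std::stringstream stream;
    Write(stream, {{1, 10}, {2, 20}});
    assert(Read(stream).at(2) == 20);
}
```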
| @@ -237,15 +267,13 @@ std::optional<u64> GenericEnvironment::TryFindSize() { | |||
| 237 | return std::nullopt; | 267 | return std::nullopt; |
| 238 | } | 268 | } |
| 239 | 269 | ||
| 240 | Shader::TextureType GenericEnvironment::ReadTextureTypeImpl(GPUVAddr tic_addr, u32 tic_limit, | 270 | Tegra::Texture::TICEntry GenericEnvironment::ReadTextureInfo(GPUVAddr tic_addr, u32 tic_limit, |
| 241 | bool via_header_index, u32 raw) { | 271 | bool via_header_index, u32 raw) { |
| 242 | const auto handle{Tegra::Texture::TexturePair(raw, via_header_index)}; | 272 | const auto handle{Tegra::Texture::TexturePair(raw, via_header_index)}; |
| 243 | const GPUVAddr descriptor_addr{tic_addr + handle.first * sizeof(Tegra::Texture::TICEntry)}; | 273 | const GPUVAddr descriptor_addr{tic_addr + handle.first * sizeof(Tegra::Texture::TICEntry)}; |
| 244 | Tegra::Texture::TICEntry entry; | 274 | Tegra::Texture::TICEntry entry; |
| 245 | gpu_memory->ReadBlock(descriptor_addr, &entry, sizeof(entry)); | 275 | gpu_memory->ReadBlock(descriptor_addr, &entry, sizeof(entry)); |
| 246 | const Shader::TextureType result{ConvertType(entry)}; | 276 | return entry; |
| 247 | texture_types.emplace(raw, result); | ||
| 248 | return result; | ||
| 249 | } | 277 | } |
| 250 | 278 | ||
| 251 | GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_, | 279 | GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_, |
| @@ -305,8 +333,27 @@ u32 GraphicsEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) { | |||
| 305 | Shader::TextureType GraphicsEnvironment::ReadTextureType(u32 handle) { | 333 | Shader::TextureType GraphicsEnvironment::ReadTextureType(u32 handle) { |
| 306 | const auto& regs{maxwell3d->regs}; | 334 | const auto& regs{maxwell3d->regs}; |
| 307 | const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding}; | 335 | const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding}; |
| 308 | return ReadTextureTypeImpl(regs.tex_header.Address(), regs.tex_header.limit, via_header_index, | 336 | auto entry = |
| 309 | handle); | 337 | ReadTextureInfo(regs.tex_header.Address(), regs.tex_header.limit, via_header_index, handle); |
| 338 | const Shader::TextureType result{ConvertTextureType(entry)}; | ||
| 339 | texture_types.emplace(handle, result); | ||
| 340 | return result; | ||
| 341 | } | ||
| 342 | |||
| 343 | Shader::TexturePixelFormat GraphicsEnvironment::ReadTexturePixelFormat(u32 handle) { | ||
| 344 | const auto& regs{maxwell3d->regs}; | ||
| 345 | const bool via_header_index{regs.sampler_binding == Maxwell::SamplerBinding::ViaHeaderBinding}; | ||
| 346 | auto entry = | ||
| 347 | ReadTextureInfo(regs.tex_header.Address(), regs.tex_header.limit, via_header_index, handle); | ||
| 348 | const Shader::TexturePixelFormat result(ConvertTexturePixelFormat(entry)); | ||
| 349 | texture_pixel_formats.emplace(handle, result); | ||
| 350 | return result; | ||
| 351 | } | ||
| 352 | |||
| 353 | u32 GraphicsEnvironment::ReadViewportTransformState() { | ||
| 354 | const auto& regs{maxwell3d->regs}; | ||
| 355 | viewport_transform_state = regs.viewport_scale_offset_enabled; | ||
| 356 | return viewport_transform_state; | ||
| 310 | } | 357 | } |
| 311 | 358 | ||
| 312 | ComputeEnvironment::ComputeEnvironment(Tegra::Engines::KeplerCompute& kepler_compute_, | 359 | ComputeEnvironment::ComputeEnvironment(Tegra::Engines::KeplerCompute& kepler_compute_, |
| @@ -337,21 +384,41 @@ u32 ComputeEnvironment::ReadCbufValue(u32 cbuf_index, u32 cbuf_offset) { | |||
| 337 | Shader::TextureType ComputeEnvironment::ReadTextureType(u32 handle) { | 384 | Shader::TextureType ComputeEnvironment::ReadTextureType(u32 handle) { |
| 338 | const auto& regs{kepler_compute->regs}; | 385 | const auto& regs{kepler_compute->regs}; |
| 339 | const auto& qmd{kepler_compute->launch_description}; | 386 | const auto& qmd{kepler_compute->launch_description}; |
| 340 | return ReadTextureTypeImpl(regs.tic.Address(), regs.tic.limit, qmd.linked_tsc != 0, handle); | 387 | auto entry = ReadTextureInfo(regs.tic.Address(), regs.tic.limit, qmd.linked_tsc != 0, handle); |
| 388 | const Shader::TextureType result{ConvertTextureType(entry)}; | ||
| 389 | texture_types.emplace(handle, result); | ||
| 390 | return result; | ||
| 391 | } | ||
| 392 | |||
| 393 | Shader::TexturePixelFormat ComputeEnvironment::ReadTexturePixelFormat(u32 handle) { | ||
| 394 | const auto& regs{kepler_compute->regs}; | ||
| 395 | const auto& qmd{kepler_compute->launch_description}; | ||
| 396 | auto entry = ReadTextureInfo(regs.tic.Address(), regs.tic.limit, qmd.linked_tsc != 0, handle); | ||
| 397 | const Shader::TexturePixelFormat result(ConvertTexturePixelFormat(entry)); | ||
| 398 | texture_pixel_formats.emplace(handle, result); | ||
| 399 | return result; | ||
| 400 | } | ||
| 401 | |||
| 402 | u32 ComputeEnvironment::ReadViewportTransformState() { | ||
| 403 | return viewport_transform_state; | ||
| 341 | } | 404 | } |
| 342 | 405 | ||
| 343 | void FileEnvironment::Deserialize(std::ifstream& file) { | 406 | void FileEnvironment::Deserialize(std::ifstream& file) { |
| 344 | u64 code_size{}; | 407 | u64 code_size{}; |
| 345 | u64 num_texture_types{}; | 408 | u64 num_texture_types{}; |
| 409 | u64 num_texture_pixel_formats{}; | ||
| 346 | u64 num_cbuf_values{}; | 410 | u64 num_cbuf_values{}; |
| 347 | file.read(reinterpret_cast<char*>(&code_size), sizeof(code_size)) | 411 | file.read(reinterpret_cast<char*>(&code_size), sizeof(code_size)) |
| 348 | .read(reinterpret_cast<char*>(&num_texture_types), sizeof(num_texture_types)) | 412 | .read(reinterpret_cast<char*>(&num_texture_types), sizeof(num_texture_types)) |
| 413 | .read(reinterpret_cast<char*>(&num_texture_pixel_formats), | ||
| 414 | sizeof(num_texture_pixel_formats)) | ||
| 349 | .read(reinterpret_cast<char*>(&num_cbuf_values), sizeof(num_cbuf_values)) | 415 | .read(reinterpret_cast<char*>(&num_cbuf_values), sizeof(num_cbuf_values)) |
| 350 | .read(reinterpret_cast<char*>(&local_memory_size), sizeof(local_memory_size)) | 416 | .read(reinterpret_cast<char*>(&local_memory_size), sizeof(local_memory_size)) |
| 351 | .read(reinterpret_cast<char*>(&texture_bound), sizeof(texture_bound)) | 417 | .read(reinterpret_cast<char*>(&texture_bound), sizeof(texture_bound)) |
| 352 | .read(reinterpret_cast<char*>(&start_address), sizeof(start_address)) | 418 | .read(reinterpret_cast<char*>(&start_address), sizeof(start_address)) |
| 353 | .read(reinterpret_cast<char*>(&read_lowest), sizeof(read_lowest)) | 419 | .read(reinterpret_cast<char*>(&read_lowest), sizeof(read_lowest)) |
| 354 | .read(reinterpret_cast<char*>(&read_highest), sizeof(read_highest)) | 420 | .read(reinterpret_cast<char*>(&read_highest), sizeof(read_highest)) |
| 421 | .read(reinterpret_cast<char*>(&viewport_transform_state), sizeof(viewport_transform_state)) | ||
| 355 | .read(reinterpret_cast<char*>(&stage), sizeof(stage)); | 422 | .read(reinterpret_cast<char*>(&stage), sizeof(stage)); |
| 356 | code = std::make_unique<u64[]>(Common::DivCeil(code_size, sizeof(u64))); | 423 | code = std::make_unique<u64[]>(Common::DivCeil(code_size, sizeof(u64))); |
| 357 | file.read(reinterpret_cast<char*>(code.get()), code_size); | 424 | file.read(reinterpret_cast<char*>(code.get()), code_size); |
| @@ -362,6 +429,13 @@ void FileEnvironment::Deserialize(std::ifstream& file) { | |||
| 362 | .read(reinterpret_cast<char*>(&type), sizeof(type)); | 429 | .read(reinterpret_cast<char*>(&type), sizeof(type)); |
| 363 | texture_types.emplace(key, type); | 430 | texture_types.emplace(key, type); |
| 364 | } | 431 | } |
| 432 | for (size_t i = 0; i < num_texture_pixel_formats; ++i) { | ||
| 433 | u32 key; | ||
| 434 | Shader::TexturePixelFormat format; | ||
| 435 | file.read(reinterpret_cast<char*>(&key), sizeof(key)) | ||
| 436 | .read(reinterpret_cast<char*>(&format), sizeof(format)); | ||
| 437 | texture_pixel_formats.emplace(key, format); | ||
| 438 | } | ||
| 365 | for (size_t i = 0; i < num_cbuf_values; ++i) { | 439 | for (size_t i = 0; i < num_cbuf_values; ++i) { |
| 366 | u64 key; | 440 | u64 key; |
| 367 | u32 value; | 441 | u32 value; |
| @@ -409,6 +483,18 @@ Shader::TextureType FileEnvironment::ReadTextureType(u32 handle) { | |||
| 409 | return it->second; | 483 | return it->second; |
| 410 | } | 484 | } |
| 411 | 485 | ||
| 486 | Shader::TexturePixelFormat FileEnvironment::ReadTexturePixelFormat(u32 handle) { | ||
| 487 | const auto it{texture_pixel_formats.find(handle)}; | ||
| 488 | if (it == texture_pixel_formats.end()) { | ||
| 489 | throw Shader::LogicError("Uncached read texture pixel format"); | ||
| 490 | } | ||
| 491 | return it->second; | ||
| 492 | } | ||
| 493 | |||
| 494 | u32 FileEnvironment::ReadViewportTransformState() { | ||
| 495 | return viewport_transform_state; | ||
| 496 | } | ||
| 497 | |||
| 412 | u32 FileEnvironment::LocalMemorySize() const { | 498 | u32 FileEnvironment::LocalMemorySize() const { |
| 413 | return local_memory_size; | 499 | return local_memory_size; |
| 414 | } | 500 | } |
diff --git a/src/video_core/shader_environment.h b/src/video_core/shader_environment.h index 8b3b8e9f5..bb55b029f 100644 --- a/src/video_core/shader_environment.h +++ b/src/video_core/shader_environment.h | |||
| @@ -63,14 +63,15 @@ public: | |||
| 63 | protected: | 63 | protected: |
| 64 | std::optional<u64> TryFindSize(); | 64 | std::optional<u64> TryFindSize(); |
| 65 | 65 | ||
| 66 | Shader::TextureType ReadTextureTypeImpl(GPUVAddr tic_addr, u32 tic_limit, bool via_header_index, | 66 | Tegra::Texture::TICEntry ReadTextureInfo(GPUVAddr tic_addr, u32 tic_limit, |
| 67 | u32 raw); | 67 | bool via_header_index, u32 raw); |
| 68 | 68 | ||
| 69 | Tegra::MemoryManager* gpu_memory{}; | 69 | Tegra::MemoryManager* gpu_memory{}; |
| 70 | GPUVAddr program_base{}; | 70 | GPUVAddr program_base{}; |
| 71 | 71 | ||
| 72 | std::vector<u64> code; | 72 | std::vector<u64> code; |
| 73 | std::unordered_map<u32, Shader::TextureType> texture_types; | 73 | std::unordered_map<u32, Shader::TextureType> texture_types; |
| 74 | std::unordered_map<u32, Shader::TexturePixelFormat> texture_pixel_formats; | ||
| 74 | std::unordered_map<u64, u32> cbuf_values; | 75 | std::unordered_map<u64, u32> cbuf_values; |
| 75 | 76 | ||
| 76 | u32 local_memory_size{}; | 77 | u32 local_memory_size{}; |
| @@ -85,6 +86,8 @@ protected: | |||
| 85 | u32 cached_highest = 0; | 86 | u32 cached_highest = 0; |
| 86 | u32 initial_offset = 0; | 87 | u32 initial_offset = 0; |
| 87 | 88 | ||
| 89 | u32 viewport_transform_state = 1; | ||
| 90 | |||
| 88 | bool has_unbound_instructions = false; | 91 | bool has_unbound_instructions = false; |
| 89 | }; | 92 | }; |
| 90 | 93 | ||
| @@ -102,6 +105,10 @@ public: | |||
| 102 | 105 | ||
| 103 | Shader::TextureType ReadTextureType(u32 handle) override; | 106 | Shader::TextureType ReadTextureType(u32 handle) override; |
| 104 | 107 | ||
| 108 | Shader::TexturePixelFormat ReadTexturePixelFormat(u32 handle) override; | ||
| 109 | |||
| 110 | u32 ReadViewportTransformState() override; | ||
| 111 | |||
| 105 | private: | 112 | private: |
| 106 | Tegra::Engines::Maxwell3D* maxwell3d{}; | 113 | Tegra::Engines::Maxwell3D* maxwell3d{}; |
| 107 | size_t stage_index{}; | 114 | size_t stage_index{}; |
| @@ -120,6 +127,10 @@ public: | |||
| 120 | 127 | ||
| 121 | Shader::TextureType ReadTextureType(u32 handle) override; | 128 | Shader::TextureType ReadTextureType(u32 handle) override; |
| 122 | 129 | ||
| 130 | Shader::TexturePixelFormat ReadTexturePixelFormat(u32 handle) override; | ||
| 131 | |||
| 132 | u32 ReadViewportTransformState() override; | ||
| 133 | |||
| 123 | private: | 134 | private: |
| 124 | Tegra::Engines::KeplerCompute* kepler_compute{}; | 135 | Tegra::Engines::KeplerCompute* kepler_compute{}; |
| 125 | }; | 136 | }; |
| @@ -143,6 +154,10 @@ public: | |||
| 143 | 154 | ||
| 144 | [[nodiscard]] Shader::TextureType ReadTextureType(u32 handle) override; | 155 | [[nodiscard]] Shader::TextureType ReadTextureType(u32 handle) override; |
| 145 | 156 | ||
| 157 | [[nodiscard]] Shader::TexturePixelFormat ReadTexturePixelFormat(u32 handle) override; | ||
| 158 | |||
| 159 | [[nodiscard]] u32 ReadViewportTransformState() override; | ||
| 160 | |||
| 146 | [[nodiscard]] u32 LocalMemorySize() const override; | 161 | [[nodiscard]] u32 LocalMemorySize() const override; |
| 147 | 162 | ||
| 148 | [[nodiscard]] u32 SharedMemorySize() const override; | 163 | [[nodiscard]] u32 SharedMemorySize() const override; |
| @@ -156,6 +171,7 @@ public: | |||
| 156 | private: | 171 | private: |
| 157 | std::unique_ptr<u64[]> code; | 172 | std::unique_ptr<u64[]> code; |
| 158 | std::unordered_map<u32, Shader::TextureType> texture_types; | 173 | std::unordered_map<u32, Shader::TextureType> texture_types; |
| 174 | std::unordered_map<u32, Shader::TexturePixelFormat> texture_pixel_formats; | ||
| 159 | std::unordered_map<u64, u32> cbuf_values; | 175 | std::unordered_map<u64, u32> cbuf_values; |
| 160 | std::array<u32, 3> workgroup_size{}; | 176 | std::array<u32, 3> workgroup_size{}; |
| 161 | u32 local_memory_size{}; | 177 | u32 local_memory_size{}; |
| @@ -164,6 +180,7 @@ private: | |||
| 164 | u32 read_lowest{}; | 180 | u32 read_lowest{}; |
| 165 | u32 read_highest{}; | 181 | u32 read_highest{}; |
| 166 | u32 initial_offset{}; | 182 | u32 initial_offset{}; |
| 183 | u32 viewport_transform_state = 1; | ||
| 167 | }; | 184 | }; |
| 168 | 185 | ||
| 169 | void SerializePipeline(std::span<const char> key, std::span<const GenericEnvironment* const> envs, | 186 | void SerializePipeline(std::span<const char> key, std::span<const GenericEnvironment* const> envs, |
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index 1223df5a0..e8c908b42 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp | |||
| @@ -516,7 +516,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr | |||
| 516 | const u32 num_blocks_per_layer = NumBlocks(level_size, tile_size); | 516 | const u32 num_blocks_per_layer = NumBlocks(level_size, tile_size); |
| 517 | const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block; | 517 | const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block; |
| 518 | 518 | ||
| 519 | UNIMPLEMENTED_IF(info.tile_width_spacing > 0); | ||
| 520 | UNIMPLEMENTED_IF(copy.image_offset.x != 0); | 519 | UNIMPLEMENTED_IF(copy.image_offset.x != 0); |
| 521 | UNIMPLEMENTED_IF(copy.image_offset.y != 0); | 520 | UNIMPLEMENTED_IF(copy.image_offset.y != 0); |
| 522 | UNIMPLEMENTED_IF(copy.image_offset.z != 0); | 521 | UNIMPLEMENTED_IF(copy.image_offset.z != 0); |
diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt index 29d506c47..5cc1fbf32 100644 --- a/src/yuzu/CMakeLists.txt +++ b/src/yuzu/CMakeLists.txt | |||
| @@ -315,7 +315,7 @@ target_include_directories(yuzu PRIVATE ../../externals/Vulkan-Headers/include) | |||
| 315 | if (NOT WIN32) | 315 | if (NOT WIN32) |
| 316 | target_include_directories(yuzu PRIVATE ${Qt5Gui_PRIVATE_INCLUDE_DIRS}) | 316 | target_include_directories(yuzu PRIVATE ${Qt5Gui_PRIVATE_INCLUDE_DIRS}) |
| 317 | endif() | 317 | endif() |
| 318 | if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") | 318 | if (UNIX AND NOT APPLE) |
| 319 | target_link_libraries(yuzu PRIVATE Qt::DBus) | 319 | target_link_libraries(yuzu PRIVATE Qt::DBus) |
| 320 | endif() | 320 | endif() |
| 321 | 321 | ||
| @@ -385,6 +385,6 @@ if (NOT APPLE) | |||
| 385 | target_compile_definitions(yuzu PRIVATE HAS_OPENGL) | 385 | target_compile_definitions(yuzu PRIVATE HAS_OPENGL) |
| 386 | endif() | 386 | endif() |
| 387 | 387 | ||
| 388 | if (ARCHITECTURE_x86_64) | 388 | if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64) |
| 389 | target_link_libraries(yuzu PRIVATE dynarmic) | 389 | target_link_libraries(yuzu PRIVATE dynarmic) |
| 390 | endif() | 390 | endif() |
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp index 6acfb7b06..d88efacd7 100644 --- a/src/yuzu/bootmanager.cpp +++ b/src/yuzu/bootmanager.cpp | |||
| @@ -401,224 +401,127 @@ void GRenderWindow::closeEvent(QCloseEvent* event) { | |||
| 401 | } | 401 | } |
| 402 | 402 | ||
| 403 | int GRenderWindow::QtKeyToSwitchKey(Qt::Key qt_key) { | 403 | int GRenderWindow::QtKeyToSwitchKey(Qt::Key qt_key) { |
| 404 | switch (qt_key) { | 404 | static constexpr std::array<std::pair<Qt::Key, Settings::NativeKeyboard::Keys>, 106> key_map = { |
| 405 | case Qt::Key_A: | 405 | std::pair<Qt::Key, Settings::NativeKeyboard::Keys>{Qt::Key_A, Settings::NativeKeyboard::A}, |
| 406 | return Settings::NativeKeyboard::A; | 406 | {Qt::Key_A, Settings::NativeKeyboard::A}, |
| 407 | case Qt::Key_B: | 407 | {Qt::Key_B, Settings::NativeKeyboard::B}, |
| 408 | return Settings::NativeKeyboard::B; | 408 | {Qt::Key_C, Settings::NativeKeyboard::C}, |
| 409 | case Qt::Key_C: | 409 | {Qt::Key_D, Settings::NativeKeyboard::D}, |
| 410 | return Settings::NativeKeyboard::C; | 410 | {Qt::Key_E, Settings::NativeKeyboard::E}, |
| 411 | case Qt::Key_D: | 411 | {Qt::Key_F, Settings::NativeKeyboard::F}, |
| 412 | return Settings::NativeKeyboard::D; | 412 | {Qt::Key_G, Settings::NativeKeyboard::G}, |
| 413 | case Qt::Key_E: | 413 | {Qt::Key_H, Settings::NativeKeyboard::H}, |
| 414 | return Settings::NativeKeyboard::E; | 414 | {Qt::Key_I, Settings::NativeKeyboard::I}, |
| 415 | case Qt::Key_F: | 415 | {Qt::Key_J, Settings::NativeKeyboard::J}, |
| 416 | return Settings::NativeKeyboard::F; | 416 | {Qt::Key_K, Settings::NativeKeyboard::K}, |
| 417 | case Qt::Key_G: | 417 | {Qt::Key_L, Settings::NativeKeyboard::L}, |
| 418 | return Settings::NativeKeyboard::G; | 418 | {Qt::Key_M, Settings::NativeKeyboard::M}, |
| 419 | case Qt::Key_H: | 419 | {Qt::Key_N, Settings::NativeKeyboard::N}, |
| 420 | return Settings::NativeKeyboard::H; | 420 | {Qt::Key_O, Settings::NativeKeyboard::O}, |
| 421 | case Qt::Key_I: | 421 | {Qt::Key_P, Settings::NativeKeyboard::P}, |
| 422 | return Settings::NativeKeyboard::I; | 422 | {Qt::Key_Q, Settings::NativeKeyboard::Q}, |
| 423 | case Qt::Key_J: | 423 | {Qt::Key_R, Settings::NativeKeyboard::R}, |
| 424 | return Settings::NativeKeyboard::J; | 424 | {Qt::Key_S, Settings::NativeKeyboard::S}, |
| 425 | case Qt::Key_K: | 425 | {Qt::Key_T, Settings::NativeKeyboard::T}, |
| 426 | return Settings::NativeKeyboard::K; | 426 | {Qt::Key_U, Settings::NativeKeyboard::U}, |
| 427 | case Qt::Key_L: | 427 | {Qt::Key_V, Settings::NativeKeyboard::V}, |
| 428 | return Settings::NativeKeyboard::L; | 428 | {Qt::Key_W, Settings::NativeKeyboard::W}, |
| 429 | case Qt::Key_M: | 429 | {Qt::Key_X, Settings::NativeKeyboard::X}, |
| 430 | return Settings::NativeKeyboard::M; | 430 | {Qt::Key_Y, Settings::NativeKeyboard::Y}, |
| 431 | case Qt::Key_N: | 431 | {Qt::Key_Z, Settings::NativeKeyboard::Z}, |
| 432 | return Settings::NativeKeyboard::N; | 432 | {Qt::Key_1, Settings::NativeKeyboard::N1}, |
| 433 | case Qt::Key_O: | 433 | {Qt::Key_2, Settings::NativeKeyboard::N2}, |
| 434 | return Settings::NativeKeyboard::O; | 434 | {Qt::Key_3, Settings::NativeKeyboard::N3}, |
| 435 | case Qt::Key_P: | 435 | {Qt::Key_4, Settings::NativeKeyboard::N4}, |
| 436 | return Settings::NativeKeyboard::P; | 436 | {Qt::Key_5, Settings::NativeKeyboard::N5}, |
| 437 | case Qt::Key_Q: | 437 | {Qt::Key_6, Settings::NativeKeyboard::N6}, |
| 438 | return Settings::NativeKeyboard::Q; | 438 | {Qt::Key_7, Settings::NativeKeyboard::N7}, |
| 439 | case Qt::Key_R: | 439 | {Qt::Key_8, Settings::NativeKeyboard::N8}, |
| 440 | return Settings::NativeKeyboard::R; | 440 | {Qt::Key_9, Settings::NativeKeyboard::N9}, |
| 441 | case Qt::Key_S: | 441 | {Qt::Key_0, Settings::NativeKeyboard::N0}, |
| 442 | return Settings::NativeKeyboard::S; | 442 | {Qt::Key_Return, Settings::NativeKeyboard::Return}, |
| 443 | case Qt::Key_T: | 443 | {Qt::Key_Escape, Settings::NativeKeyboard::Escape}, |
| 444 | return Settings::NativeKeyboard::T; | 444 | {Qt::Key_Backspace, Settings::NativeKeyboard::Backspace}, |
| 445 | case Qt::Key_U: | 445 | {Qt::Key_Tab, Settings::NativeKeyboard::Tab}, |
| 446 | return Settings::NativeKeyboard::U; | 446 | {Qt::Key_Space, Settings::NativeKeyboard::Space}, |
| 447 | case Qt::Key_V: | 447 | {Qt::Key_Minus, Settings::NativeKeyboard::Minus}, |
| 448 | return Settings::NativeKeyboard::V; | 448 | {Qt::Key_Plus, Settings::NativeKeyboard::Plus}, |
| 449 | case Qt::Key_W: | 449 | {Qt::Key_questiondown, Settings::NativeKeyboard::Plus}, |
| 450 | return Settings::NativeKeyboard::W; | 450 | {Qt::Key_BracketLeft, Settings::NativeKeyboard::OpenBracket}, |
| 451 | case Qt::Key_X: | 451 | {Qt::Key_BraceLeft, Settings::NativeKeyboard::OpenBracket}, |
| 452 | return Settings::NativeKeyboard::X; | 452 | {Qt::Key_BracketRight, Settings::NativeKeyboard::CloseBracket}, |
| 453 | case Qt::Key_Y: | 453 | {Qt::Key_BraceRight, Settings::NativeKeyboard::CloseBracket}, |
| 454 | return Settings::NativeKeyboard::Y; | 454 | {Qt::Key_Bar, Settings::NativeKeyboard::Pipe}, |
| 455 | case Qt::Key_Z: | 455 | {Qt::Key_Dead_Tilde, Settings::NativeKeyboard::Tilde}, |
| 456 | return Settings::NativeKeyboard::Z; | 456 | {Qt::Key_Ntilde, Settings::NativeKeyboard::Semicolon}, |
| 457 | case Qt::Key_1: | 457 | {Qt::Key_Semicolon, Settings::NativeKeyboard::Semicolon}, |
| 458 | return Settings::NativeKeyboard::N1; | 458 | {Qt::Key_Apostrophe, Settings::NativeKeyboard::Quote}, |
| 459 | case Qt::Key_2: | 459 | {Qt::Key_Dead_Grave, Settings::NativeKeyboard::Backquote}, |
| 460 | return Settings::NativeKeyboard::N2; | 460 | {Qt::Key_Comma, Settings::NativeKeyboard::Comma}, |
| 461 | case Qt::Key_3: | 461 | {Qt::Key_Period, Settings::NativeKeyboard::Period}, |
| 462 | return Settings::NativeKeyboard::N3; | 462 | {Qt::Key_Slash, Settings::NativeKeyboard::Slash}, |
| 463 | case Qt::Key_4: | 463 | {Qt::Key_CapsLock, Settings::NativeKeyboard::CapsLockKey}, |
| 464 | return Settings::NativeKeyboard::N4; | 464 | {Qt::Key_F1, Settings::NativeKeyboard::F1}, |
| 465 | case Qt::Key_5: | 465 | {Qt::Key_F2, Settings::NativeKeyboard::F2}, |
| 466 | return Settings::NativeKeyboard::N5; | 466 | {Qt::Key_F3, Settings::NativeKeyboard::F3}, |
| 467 | case Qt::Key_6: | 467 | {Qt::Key_F4, Settings::NativeKeyboard::F4}, |
| 468 | return Settings::NativeKeyboard::N6; | 468 | {Qt::Key_F5, Settings::NativeKeyboard::F5}, |
| 469 | case Qt::Key_7: | 469 | {Qt::Key_F6, Settings::NativeKeyboard::F6}, |
| 470 | return Settings::NativeKeyboard::N7; | 470 | {Qt::Key_F7, Settings::NativeKeyboard::F7}, |
| 471 | case Qt::Key_8: | 471 | {Qt::Key_F8, Settings::NativeKeyboard::F8}, |
| 472 | return Settings::NativeKeyboard::N8; | 472 | {Qt::Key_F9, Settings::NativeKeyboard::F9}, |
| 473 | case Qt::Key_9: | 473 | {Qt::Key_F10, Settings::NativeKeyboard::F10}, |
| 474 | return Settings::NativeKeyboard::N9; | 474 | {Qt::Key_F11, Settings::NativeKeyboard::F11}, |
| 475 | case Qt::Key_0: | 475 | {Qt::Key_F12, Settings::NativeKeyboard::F12}, |
| 476 | return Settings::NativeKeyboard::N0; | 476 | {Qt::Key_Print, Settings::NativeKeyboard::PrintScreen}, |
| 477 | case Qt::Key_Return: | 477 | {Qt::Key_ScrollLock, Settings::NativeKeyboard::ScrollLockKey}, |
| 478 | return Settings::NativeKeyboard::Return; | 478 | {Qt::Key_Pause, Settings::NativeKeyboard::Pause}, |
| 479 | case Qt::Key_Escape: | 479 | {Qt::Key_Insert, Settings::NativeKeyboard::Insert}, |
| 480 | return Settings::NativeKeyboard::Escape; | 480 | {Qt::Key_Home, Settings::NativeKeyboard::Home}, |
| 481 | case Qt::Key_Backspace: | 481 | {Qt::Key_PageUp, Settings::NativeKeyboard::PageUp}, |
| 482 | return Settings::NativeKeyboard::Backspace; | 482 | {Qt::Key_Delete, Settings::NativeKeyboard::Delete}, |
| 483 | case Qt::Key_Tab: | 483 | {Qt::Key_End, Settings::NativeKeyboard::End}, |
| 484 | return Settings::NativeKeyboard::Tab; | 484 | {Qt::Key_PageDown, Settings::NativeKeyboard::PageDown}, |
| 485 | case Qt::Key_Space: | 485 | {Qt::Key_Right, Settings::NativeKeyboard::Right}, |
| 486 | return Settings::NativeKeyboard::Space; | 486 | {Qt::Key_Left, Settings::NativeKeyboard::Left}, |
| 487 | case Qt::Key_Minus: | 487 | {Qt::Key_Down, Settings::NativeKeyboard::Down}, |
| 488 | return Settings::NativeKeyboard::Minus; | 488 | {Qt::Key_Up, Settings::NativeKeyboard::Up}, |
| 489 | case Qt::Key_Plus: | 489 | {Qt::Key_NumLock, Settings::NativeKeyboard::NumLockKey}, |
| 490 | case Qt::Key_questiondown: | 490 | // Numpad keys are missing here |
| 491 | return Settings::NativeKeyboard::Plus; | 491 | {Qt::Key_F13, Settings::NativeKeyboard::F13}, |
| 492 | case Qt::Key_BracketLeft: | 492 | {Qt::Key_F14, Settings::NativeKeyboard::F14}, |
| 493 | case Qt::Key_BraceLeft: | 493 | {Qt::Key_F15, Settings::NativeKeyboard::F15}, |
| 494 | return Settings::NativeKeyboard::OpenBracket; | 494 | {Qt::Key_F16, Settings::NativeKeyboard::F16}, |
| 495 | case Qt::Key_BracketRight: | 495 | {Qt::Key_F17, Settings::NativeKeyboard::F17}, |
| 496 | case Qt::Key_BraceRight: | 496 | {Qt::Key_F18, Settings::NativeKeyboard::F18}, |
| 497 | return Settings::NativeKeyboard::CloseBracket; | 497 | {Qt::Key_F19, Settings::NativeKeyboard::F19}, |
| 498 | case Qt::Key_Bar: | 498 | {Qt::Key_F20, Settings::NativeKeyboard::F20}, |
| 499 | return Settings::NativeKeyboard::Pipe; | 499 | {Qt::Key_F21, Settings::NativeKeyboard::F21}, |
| 500 | case Qt::Key_Dead_Tilde: | 500 | {Qt::Key_F22, Settings::NativeKeyboard::F22}, |
| 501 | return Settings::NativeKeyboard::Tilde; | 501 | {Qt::Key_F23, Settings::NativeKeyboard::F23}, |
| 502 | case Qt::Key_Ntilde: | 502 | {Qt::Key_F24, Settings::NativeKeyboard::F24}, |
| 503 | case Qt::Key_Semicolon: | 503 | // {Qt::..., Settings::NativeKeyboard::KPComma}, |
| 504 | return Settings::NativeKeyboard::Semicolon; | 504 | // {Qt::..., Settings::NativeKeyboard::Ro}, |
| 505 | case Qt::Key_Apostrophe: | 505 | {Qt::Key_Hiragana_Katakana, Settings::NativeKeyboard::KatakanaHiragana}, |
| 506 | return Settings::NativeKeyboard::Quote; | 506 | {Qt::Key_yen, Settings::NativeKeyboard::Yen}, |
| 507 | case Qt::Key_Dead_Grave: | 507 | {Qt::Key_Henkan, Settings::NativeKeyboard::Henkan}, |
| 508 | return Settings::NativeKeyboard::Backquote; | 508 | {Qt::Key_Muhenkan, Settings::NativeKeyboard::Muhenkan}, |
| 509 | case Qt::Key_Comma: | 509 | // {Qt::..., Settings::NativeKeyboard::NumPadCommaPc98}, |
| 510 | return Settings::NativeKeyboard::Comma; | 510 | {Qt::Key_Hangul, Settings::NativeKeyboard::HangulEnglish}, |
| 511 | case Qt::Key_Period: | 511 | {Qt::Key_Hangul_Hanja, Settings::NativeKeyboard::Hanja}, |
| 512 | return Settings::NativeKeyboard::Period; | 512 | {Qt::Key_Katakana, Settings::NativeKeyboard::KatakanaKey}, |
| 513 | case Qt::Key_Slash: | 513 | {Qt::Key_Hiragana, Settings::NativeKeyboard::HiraganaKey}, |
| 514 | return Settings::NativeKeyboard::Slash; | 514 | {Qt::Key_Zenkaku_Hankaku, Settings::NativeKeyboard::ZenkakuHankaku}, |
| 515 | case Qt::Key_CapsLock: | 515 | // Modifier keys are handled by the modifier property |
| 516 | return Settings::NativeKeyboard::CapsLock; | 516 | }; |
| 517 | case Qt::Key_F1: | 517 | |
| 518 | return Settings::NativeKeyboard::F1; | 518 | for (const auto& [qkey, nkey] : key_map) { |
| 519 | case Qt::Key_F2: | 519 | if (qt_key == qkey) { |
| 520 | return Settings::NativeKeyboard::F2; | 520 | return nkey; |
| 521 | case Qt::Key_F3: | 521 | } |
| 522 | return Settings::NativeKeyboard::F3; | ||
| 523 | case Qt::Key_F4: | ||
| 524 | return Settings::NativeKeyboard::F4; | ||
| 525 | case Qt::Key_F5: | ||
| 526 | return Settings::NativeKeyboard::F5; | ||
| 527 | case Qt::Key_F6: | ||
| 528 | return Settings::NativeKeyboard::F6; | ||
| 529 | case Qt::Key_F7: | ||
| 530 | return Settings::NativeKeyboard::F7; | ||
| 531 | case Qt::Key_F8: | ||
| 532 | return Settings::NativeKeyboard::F8; | ||
| 533 | case Qt::Key_F9: | ||
| 534 | return Settings::NativeKeyboard::F9; | ||
| 535 | case Qt::Key_F10: | ||
| 536 | return Settings::NativeKeyboard::F10; | ||
| 537 | case Qt::Key_F11: | ||
| 538 | return Settings::NativeKeyboard::F11; | ||
| 539 | case Qt::Key_F12: | ||
| 540 | return Settings::NativeKeyboard::F12; | ||
| 541 | case Qt::Key_Print: | ||
| 542 | return Settings::NativeKeyboard::PrintScreen; | ||
| 543 | case Qt::Key_ScrollLock: | ||
| 544 | return Settings::NativeKeyboard::ScrollLock; | ||
| 545 | case Qt::Key_Pause: | ||
| 546 | return Settings::NativeKeyboard::Pause; | ||
| 547 | case Qt::Key_Insert: | ||
| 548 | return Settings::NativeKeyboard::Insert; | ||
| 549 | case Qt::Key_Home: | ||
| 550 | return Settings::NativeKeyboard::Home; | ||
| 551 | case Qt::Key_PageUp: | ||
| 552 | return Settings::NativeKeyboard::PageUp; | ||
| 553 | case Qt::Key_Delete: | ||
| 554 | return Settings::NativeKeyboard::Delete; | ||
| 555 | case Qt::Key_End: | ||
| 556 | return Settings::NativeKeyboard::End; | ||
| 557 | case Qt::Key_PageDown: | ||
| 558 | return Settings::NativeKeyboard::PageDown; | ||
| 559 | case Qt::Key_Right: | ||
| 560 | return Settings::NativeKeyboard::Right; | ||
| 561 | case Qt::Key_Left: | ||
| 562 | return Settings::NativeKeyboard::Left; | ||
| 563 | case Qt::Key_Down: | ||
| 564 | return Settings::NativeKeyboard::Down; | ||
| 565 | case Qt::Key_Up: | ||
| 566 | return Settings::NativeKeyboard::Up; | ||
| 567 | case Qt::Key_NumLock: | ||
| 568 | return Settings::NativeKeyboard::NumLock; | ||
| 569 | // Numpad keys are missing here | ||
| 570 | case Qt::Key_F13: | ||
| 571 | return Settings::NativeKeyboard::F13; | ||
| 572 | case Qt::Key_F14: | ||
| 573 | return Settings::NativeKeyboard::F14; | ||
| 574 | case Qt::Key_F15: | ||
| 575 | return Settings::NativeKeyboard::F15; | ||
| 576 | case Qt::Key_F16: | ||
| 577 | return Settings::NativeKeyboard::F16; | ||
| 578 | case Qt::Key_F17: | ||
| 579 | return Settings::NativeKeyboard::F17; | ||
| 580 | case Qt::Key_F18: | ||
| 581 | return Settings::NativeKeyboard::F18; | ||
| 582 | case Qt::Key_F19: | ||
| 583 | return Settings::NativeKeyboard::F19; | ||
| 584 | case Qt::Key_F20: | ||
| 585 | return Settings::NativeKeyboard::F20; | ||
| 586 | case Qt::Key_F21: | ||
| 587 | return Settings::NativeKeyboard::F21; | ||
| 588 | case Qt::Key_F22: | ||
| 589 | return Settings::NativeKeyboard::F22; | ||
| 590 | case Qt::Key_F23: | ||
| 591 | return Settings::NativeKeyboard::F23; | ||
| 592 | case Qt::Key_F24: | ||
| 593 | return Settings::NativeKeyboard::F24; | ||
| 594 | // case Qt::: | ||
| 595 | // return Settings::NativeKeyboard::KPComma; | ||
| 596 | // case Qt::: | ||
| 597 | // return Settings::NativeKeyboard::Ro; | ||
| 598 | case Qt::Key_Hiragana_Katakana: | ||
| 599 | return Settings::NativeKeyboard::KatakanaHiragana; | ||
| 600 | case Qt::Key_yen: | ||
| 601 | return Settings::NativeKeyboard::Yen; | ||
| 602 | case Qt::Key_Henkan: | ||
| 603 | return Settings::NativeKeyboard::Henkan; | ||
| 604 | case Qt::Key_Muhenkan: | ||
| 605 | return Settings::NativeKeyboard::Muhenkan; | ||
| 606 | // case Qt::: | ||
| 607 | // return Settings::NativeKeyboard::NumPadCommaPc98; | ||
| 608 | case Qt::Key_Hangul: | ||
| 609 | return Settings::NativeKeyboard::HangulEnglish; | ||
| 610 | case Qt::Key_Hangul_Hanja: | ||
| 611 | return Settings::NativeKeyboard::Hanja; | ||
| 612 | case Qt::Key_Katakana: | ||
| 613 | return Settings::NativeKeyboard::KatakanaKey; | ||
| 614 | case Qt::Key_Hiragana: | ||
| 615 | return Settings::NativeKeyboard::HiraganaKey; | ||
| 616 | case Qt::Key_Zenkaku_Hankaku: | ||
| 617 | return Settings::NativeKeyboard::ZenkakuHankaku; | ||
| 618 | // Modifier keys are handled by the modifier property | ||
| 619 | default: | ||
| 620 | return Settings::NativeKeyboard::None; | ||
| 621 | } | 522 | } |
| 523 | |||
| 524 | return Settings::NativeKeyboard::None; | ||
| 622 | } | 525 | } |
| 623 | 526 | ||
| 624 | int GRenderWindow::QtModifierToSwitchModifier(Qt::KeyboardModifiers qt_modifiers) { | 527 | int GRenderWindow::QtModifierToSwitchModifier(Qt::KeyboardModifiers qt_modifiers) { |
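Note: the hunk above replaces a long per-key switch in QtKeyToSwitchKey with a lookup table searched linearly. A minimal, Qt-free sketch of that pattern follows; the enumerators here are illustrative stand-ins, not the real Qt::Key or Settings::NativeKeyboard values.

#include <array>
#include <utility>

enum class QtKey { Key_A, Key_Down, Key_Up, Key_Unmapped };
enum class NativeKey { A, Down, Up, None };

NativeKey QtKeyToSwitchKey(QtKey qt_key) {
    // One entry per supported key, mirroring the diff's key_map initializer.
    static constexpr std::array<std::pair<QtKey, NativeKey>, 3> key_map{{
        {QtKey::Key_A, NativeKey::A},
        {QtKey::Key_Down, NativeKey::Down},
        {QtKey::Key_Up, NativeKey::Up},
    }};
    for (const auto& [qkey, nkey] : key_map) {
        if (qt_key == qkey) {
            return nkey;
        }
    }
    return NativeKey::None; // unmapped keys fall through to None, as before
}

The table keeps the data separate from the control flow, so adding a key is a one-line edit instead of a two-line case/return pair.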
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp index 195074bf2..343f3b8e5 100644 --- a/src/yuzu/configuration/config.cpp +++ b/src/yuzu/configuration/config.cpp | |||
| @@ -819,6 +819,9 @@ void Config::ReadUIGamelistValues() { | |||
| 819 | qt_config->beginGroup(QStringLiteral("UIGameList")); | 819 | qt_config->beginGroup(QStringLiteral("UIGameList")); |
| 820 | 820 | ||
| 821 | ReadBasicSetting(UISettings::values.show_add_ons); | 821 | ReadBasicSetting(UISettings::values.show_add_ons); |
| 822 | ReadBasicSetting(UISettings::values.show_compat); | ||
| 823 | ReadBasicSetting(UISettings::values.show_size); | ||
| 824 | ReadBasicSetting(UISettings::values.show_types); | ||
| 822 | ReadBasicSetting(UISettings::values.game_icon_size); | 825 | ReadBasicSetting(UISettings::values.game_icon_size); |
| 823 | ReadBasicSetting(UISettings::values.folder_icon_size); | 826 | ReadBasicSetting(UISettings::values.folder_icon_size); |
| 824 | ReadBasicSetting(UISettings::values.row_1_text_id); | 827 | ReadBasicSetting(UISettings::values.row_1_text_id); |
| @@ -1414,6 +1417,9 @@ void Config::SaveUIGamelistValues() { | |||
| 1414 | qt_config->beginGroup(QStringLiteral("UIGameList")); | 1417 | qt_config->beginGroup(QStringLiteral("UIGameList")); |
| 1415 | 1418 | ||
| 1416 | WriteBasicSetting(UISettings::values.show_add_ons); | 1419 | WriteBasicSetting(UISettings::values.show_add_ons); |
| 1420 | WriteBasicSetting(UISettings::values.show_compat); | ||
| 1421 | WriteBasicSetting(UISettings::values.show_size); | ||
| 1422 | WriteBasicSetting(UISettings::values.show_types); | ||
| 1417 | WriteBasicSetting(UISettings::values.game_icon_size); | 1423 | WriteBasicSetting(UISettings::values.game_icon_size); |
| 1418 | WriteBasicSetting(UISettings::values.folder_icon_size); | 1424 | WriteBasicSetting(UISettings::values.folder_icon_size); |
| 1419 | WriteBasicSetting(UISettings::values.row_1_text_id); | 1425 | WriteBasicSetting(UISettings::values.row_1_text_id); |
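The three new settings are persisted through the same ReadBasicSetting/WriteBasicSetting helpers as the existing ones, keyed by each setting's label. A stand-in sketch of that pairing, with the backing store reduced to a plain map (the store type and accessor names here are assumptions, not yuzu's actual Config internals):

#include <map>
#include <string>

struct BoolSetting {
    bool value;
    bool default_value;
    std::string label; // the ini key, e.g. "show_compat"
};

using ConfigStore = std::map<std::string, bool>;

void WriteBasicSetting(ConfigStore& store, const BoolSetting& setting) {
    store[setting.label] = setting.value;
}

void ReadBasicSetting(const ConfigStore& store, BoolSetting& setting) {
    // Missing keys fall back to the compiled-in default.
    const auto it = store.find(setting.label);
    setting.value = it != store.end() ? it->second : setting.default_value;
}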
diff --git a/src/yuzu/configuration/configure_ui.cpp b/src/yuzu/configuration/configure_ui.cpp index 48f71b53c..2ebb80302 100644 --- a/src/yuzu/configuration/configure_ui.cpp +++ b/src/yuzu/configuration/configure_ui.cpp | |||
| @@ -72,6 +72,9 @@ ConfigureUi::ConfigureUi(Core::System& system_, QWidget* parent) | |||
| 72 | 72 | ||
| 73 | // Force game list reload if any of the relevant settings are changed. | 73 | // Force game list reload if any of the relevant settings are changed. |
| 74 | connect(ui->show_add_ons, &QCheckBox::stateChanged, this, &ConfigureUi::RequestGameListUpdate); | 74 | connect(ui->show_add_ons, &QCheckBox::stateChanged, this, &ConfigureUi::RequestGameListUpdate); |
| 75 | connect(ui->show_compat, &QCheckBox::stateChanged, this, &ConfigureUi::RequestGameListUpdate); | ||
| 76 | connect(ui->show_size, &QCheckBox::stateChanged, this, &ConfigureUi::RequestGameListUpdate); | ||
| 77 | connect(ui->show_types, &QCheckBox::stateChanged, this, &ConfigureUi::RequestGameListUpdate); | ||
| 75 | connect(ui->game_icon_size_combobox, QOverload<int>::of(&QComboBox::currentIndexChanged), this, | 78 | connect(ui->game_icon_size_combobox, QOverload<int>::of(&QComboBox::currentIndexChanged), this, |
| 76 | &ConfigureUi::RequestGameListUpdate); | 79 | &ConfigureUi::RequestGameListUpdate); |
| 77 | connect(ui->folder_icon_size_combobox, QOverload<int>::of(&QComboBox::currentIndexChanged), | 80 | connect(ui->folder_icon_size_combobox, QOverload<int>::of(&QComboBox::currentIndexChanged), |
| @@ -109,6 +112,9 @@ void ConfigureUi::ApplyConfiguration() { | |||
| 109 | UISettings::values.theme = | 112 | UISettings::values.theme = |
| 110 | ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString(); | 113 | ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString(); |
| 111 | UISettings::values.show_add_ons = ui->show_add_ons->isChecked(); | 114 | UISettings::values.show_add_ons = ui->show_add_ons->isChecked(); |
| 115 | UISettings::values.show_compat = ui->show_compat->isChecked(); | ||
| 116 | UISettings::values.show_size = ui->show_size->isChecked(); | ||
| 117 | UISettings::values.show_types = ui->show_types->isChecked(); | ||
| 112 | UISettings::values.game_icon_size = ui->game_icon_size_combobox->currentData().toUInt(); | 118 | UISettings::values.game_icon_size = ui->game_icon_size_combobox->currentData().toUInt(); |
| 113 | UISettings::values.folder_icon_size = ui->folder_icon_size_combobox->currentData().toUInt(); | 119 | UISettings::values.folder_icon_size = ui->folder_icon_size_combobox->currentData().toUInt(); |
| 114 | UISettings::values.row_1_text_id = ui->row_1_text_combobox->currentData().toUInt(); | 120 | UISettings::values.row_1_text_id = ui->row_1_text_combobox->currentData().toUInt(); |
| @@ -129,6 +135,9 @@ void ConfigureUi::SetConfiguration() { | |||
| 129 | ui->language_combobox->setCurrentIndex( | 135 | ui->language_combobox->setCurrentIndex( |
| 130 | ui->language_combobox->findData(UISettings::values.language)); | 136 | ui->language_combobox->findData(UISettings::values.language)); |
| 131 | ui->show_add_ons->setChecked(UISettings::values.show_add_ons.GetValue()); | 137 | ui->show_add_ons->setChecked(UISettings::values.show_add_ons.GetValue()); |
| 138 | ui->show_compat->setChecked(UISettings::values.show_compat.GetValue()); | ||
| 139 | ui->show_size->setChecked(UISettings::values.show_size.GetValue()); | ||
| 140 | ui->show_types->setChecked(UISettings::values.show_types.GetValue()); | ||
| 132 | ui->game_icon_size_combobox->setCurrentIndex( | 141 | ui->game_icon_size_combobox->setCurrentIndex( |
| 133 | ui->game_icon_size_combobox->findData(UISettings::values.game_icon_size.GetValue())); | 142 | ui->game_icon_size_combobox->findData(UISettings::values.game_icon_size.GetValue())); |
| 134 | ui->folder_icon_size_combobox->setCurrentIndex( | 143 | ui->folder_icon_size_combobox->setCurrentIndex( |
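Each new checkbox gets the same three-step wiring as show_add_ons: connect its stateChanged signal to RequestGameListUpdate, restore the saved value in SetConfiguration, and copy the widget state back in ApplyConfiguration. A Qt-free sketch of the lifecycle, with QCheckBox replaced by a stand-in:

#include <functional>
#include <iostream>

struct CheckBox {
    bool checked = false;
    std::function<void(bool)> state_changed; // QCheckBox::stateChanged analogue
    void setChecked(bool value) {
        checked = value;
        if (state_changed) {
            state_changed(value);
        }
    }
    bool isChecked() const { return checked; }
};

int main() {
    bool show_compat = false; // UISettings::values.show_compat stand-in
    CheckBox box;
    box.state_changed = [](bool) { std::cout << "RequestGameListUpdate\n"; };

    box.setChecked(show_compat);   // SetConfiguration: settings -> widget
    box.setChecked(true);          // user toggles the checkbox
    show_compat = box.isChecked(); // ApplyConfiguration: widget -> settings
}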
diff --git a/src/yuzu/configuration/configure_ui.ui b/src/yuzu/configuration/configure_ui.ui index a50df7f6f..10bb27312 100644 --- a/src/yuzu/configuration/configure_ui.ui +++ b/src/yuzu/configuration/configure_ui.ui | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | <x>0</x> | 7 | <x>0</x> |
| 8 | <y>0</y> | 8 | <y>0</y> |
| 9 | <width>363</width> | 9 | <width>363</width> |
| 10 | <height>507</height> | 10 | <height>562</height> |
| 11 | </rect> | 11 | </rect> |
| 12 | </property> | 12 | </property> |
| 13 | <property name="windowTitle"> | 13 | <property name="windowTitle"> |
| @@ -77,6 +77,13 @@ | |||
| 77 | <item> | 77 | <item> |
| 78 | <layout class="QVBoxLayout" name="GeneralVerticalLayout"> | 78 | <layout class="QVBoxLayout" name="GeneralVerticalLayout"> |
| 79 | <item> | 79 | <item> |
| 80 | <widget class="QCheckBox" name="show_compat"> | ||
| 81 | <property name="text"> | ||
| 82 | <string>Show Compatibility List</string> | ||
| 83 | </property> | ||
| 84 | </widget> | ||
| 85 | </item> | ||
| 86 | <item> | ||
| 80 | <widget class="QCheckBox" name="show_add_ons"> | 87 | <widget class="QCheckBox" name="show_add_ons"> |
| 81 | <property name="text"> | 88 | <property name="text"> |
| 82 | <string>Show Add-Ons Column</string> | 89 | <string>Show Add-Ons Column</string> |
| @@ -84,6 +91,20 @@ | |||
| 84 | </widget> | 91 | </widget> |
| 85 | </item> | 92 | </item> |
| 86 | <item> | 93 | <item> |
| 94 | <widget class="QCheckBox" name="show_size"> | ||
| 95 | <property name="text"> | ||
| 96 | <string>Show Size Column</string> | ||
| 97 | </property> | ||
| 98 | </widget> | ||
| 99 | </item> | ||
| 100 | <item> | ||
| 101 | <widget class="QCheckBox" name="show_types"> | ||
| 102 | <property name="text"> | ||
| 103 | <string>Show File Types Column</string> | ||
| 104 | </property> | ||
| 105 | </widget> | ||
| 106 | </item> | ||
| 107 | <item> | ||
| 87 | <layout class="QHBoxLayout" name="game_icon_size_qhbox_layout_2"> | 108 | <layout class="QHBoxLayout" name="game_icon_size_qhbox_layout_2"> |
| 88 | <item> | 109 | <item> |
| 89 | <widget class="QLabel" name="game_icon_size_label"> | 110 | <widget class="QLabel" name="game_icon_size_label"> |
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp index b127badc2..5c33c1b0f 100644 --- a/src/yuzu/game_list.cpp +++ b/src/yuzu/game_list.cpp | |||
| @@ -335,6 +335,7 @@ GameList::GameList(FileSys::VirtualFilesystem vfs_, FileSys::ManualContentProvid | |||
| 335 | RetranslateUI(); | 335 | RetranslateUI(); |
| 336 | 336 | ||
| 337 | tree_view->setColumnHidden(COLUMN_ADD_ONS, !UISettings::values.show_add_ons); | 337 | tree_view->setColumnHidden(COLUMN_ADD_ONS, !UISettings::values.show_add_ons); |
| 338 | tree_view->setColumnHidden(COLUMN_COMPATIBILITY, !UISettings::values.show_compat); | ||
| 338 | item_model->setSortRole(GameListItemPath::SortRole); | 339 | item_model->setSortRole(GameListItemPath::SortRole); |
| 339 | 340 | ||
| 340 | connect(main_window, &GMainWindow::UpdateThemedIcons, this, &GameList::OnUpdateThemedIcons); | 341 | connect(main_window, &GMainWindow::UpdateThemedIcons, this, &GameList::OnUpdateThemedIcons); |
| @@ -786,6 +787,9 @@ void GameList::PopulateAsync(QVector<UISettings::GameDir>& game_dirs) { | |||
| 786 | 787 | ||
| 787 | // Update the columns in case UISettings has changed | 788 | // Update the columns in case UISettings has changed |
| 788 | tree_view->setColumnHidden(COLUMN_ADD_ONS, !UISettings::values.show_add_ons); | 789 | tree_view->setColumnHidden(COLUMN_ADD_ONS, !UISettings::values.show_add_ons); |
| 790 | tree_view->setColumnHidden(COLUMN_COMPATIBILITY, !UISettings::values.show_compat); | ||
| 791 | tree_view->setColumnHidden(COLUMN_FILE_TYPE, !UISettings::values.show_types); | ||
| 792 | tree_view->setColumnHidden(COLUMN_SIZE, !UISettings::values.show_size); | ||
| 789 | 793 | ||
| 790 | // Delete any rows that might already exist if we're repopulating | 794 | // Delete any rows that might already exist if we're repopulating |
| 791 | item_model->removeRows(0, item_model->rowCount()); | 795 | item_model->removeRows(0, item_model->rowCount()); |
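The game list applies the settings by toggling column visibility both at construction and on every repopulate. A sketch of that update, noting the negation between the "show" settings and the "hidden" API; the column indices below are assumptions, not the real enumerators:

#include <bitset>

enum Column { COLUMN_COMPATIBILITY = 1, COLUMN_FILE_TYPE = 3, COLUMN_SIZE = 4 };

struct TreeView {
    std::bitset<8> hidden;
    void setColumnHidden(int column, bool hide) { hidden[column] = hide; }
};

void UpdateColumns(TreeView& tree_view, bool show_compat, bool show_types, bool show_size) {
    // The settings say "show", QTreeView's API says "hidden", hence the !.
    tree_view.setColumnHidden(COLUMN_COMPATIBILITY, !show_compat);
    tree_view.setColumnHidden(COLUMN_FILE_TYPE, !show_types);
    tree_view.setColumnHidden(COLUMN_SIZE, !show_size);
}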
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index ccae2b828..d95915016 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | #ifdef __APPLE__ | 9 | #ifdef __APPLE__ |
| 10 | #include <unistd.h> // for chdir | 10 | #include <unistd.h> // for chdir |
| 11 | #endif | 11 | #endif |
| 12 | #ifdef __linux__ | 12 | #ifdef __unix__ |
| 13 | #include <csignal> | 13 | #include <csignal> |
| 14 | #include <sys/socket.h> | 14 | #include <sys/socket.h> |
| 15 | #endif | 15 | #endif |
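This commit widens every __linux__ guard in the frontend to __unix__, so the signal and D-Bus plumbing also compiles on the BSDs (note that __unix__ is typically not defined on macOS, which keeps its separate __APPLE__ path above). A minimal sketch of the widened guard only; the real code uses socketpair plus a QSocketNotifier rather than raw std::signal handlers:

#if defined(__unix__)
#include <csignal>

// Install the same handler for interrupt and terminate requests.
static void InstallSigInterrupts(void (*handler)(int)) {
    std::signal(SIGINT, handler);  // Ctrl+C
    std::signal(SIGTERM, handler); // polite kill
}
#endif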
| @@ -275,7 +275,7 @@ static void OverrideWindowsFont() { | |||
| 275 | #endif | 275 | #endif |
| 276 | 276 | ||
| 277 | bool GMainWindow::CheckDarkMode() { | 277 | bool GMainWindow::CheckDarkMode() { |
| 278 | #ifdef __linux__ | 278 | #ifdef __unix__ |
| 279 | const QPalette test_palette(qApp->palette()); | 279 | const QPalette test_palette(qApp->palette()); |
| 280 | const QColor text_color = test_palette.color(QPalette::Active, QPalette::Text); | 280 | const QColor text_color = test_palette.color(QPalette::Active, QPalette::Text); |
| 281 | const QColor window_color = test_palette.color(QPalette::Active, QPalette::Window); | 281 | const QColor window_color = test_palette.color(QPalette::Active, QPalette::Window); |
| @@ -283,7 +283,7 @@ bool GMainWindow::CheckDarkMode() { | |||
| 283 | #else | 283 | #else |
| 284 | // TODO: Windows | 284 | // TODO: Windows |
| 285 | return false; | 285 | return false; |
| 286 | #endif // __linux__ | 286 | #endif // __unix__ |
| 287 | } | 287 | } |
| 288 | 288 | ||
| 289 | GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan) | 289 | GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan) |
| @@ -291,7 +291,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan | |||
| 291 | input_subsystem{std::make_shared<InputCommon::InputSubsystem>()}, config{std::move(config_)}, | 291 | input_subsystem{std::make_shared<InputCommon::InputSubsystem>()}, config{std::move(config_)}, |
| 292 | vfs{std::make_shared<FileSys::RealVfsFilesystem>()}, | 292 | vfs{std::make_shared<FileSys::RealVfsFilesystem>()}, |
| 293 | provider{std::make_unique<FileSys::ManualContentProvider>()} { | 293 | provider{std::make_unique<FileSys::ManualContentProvider>()} { |
| 294 | #ifdef __linux__ | 294 | #ifdef __unix__ |
| 295 | SetupSigInterrupts(); | 295 | SetupSigInterrupts(); |
| 296 | #endif | 296 | #endif |
| 297 | system->Initialize(); | 297 | system->Initialize(); |
| @@ -342,6 +342,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan | |||
| 342 | const auto override_build = | 342 | const auto override_build = |
| 343 | fmt::format(fmt::runtime(std::string(Common::g_title_bar_format_idle)), build_id); | 343 | fmt::format(fmt::runtime(std::string(Common::g_title_bar_format_idle)), build_id); |
| 344 | const auto yuzu_build_version = override_build.empty() ? yuzu_build : override_build; | 344 | const auto yuzu_build_version = override_build.empty() ? yuzu_build : override_build; |
| 345 | const auto processor_count = std::thread::hardware_concurrency(); | ||
| 345 | 346 | ||
| 346 | LOG_INFO(Frontend, "yuzu Version: {}", yuzu_build_version); | 347 | LOG_INFO(Frontend, "yuzu Version: {}", yuzu_build_version); |
| 347 | LogRuntimes(); | 348 | LogRuntimes(); |
| @@ -361,6 +362,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan | |||
| 361 | } | 362 | } |
| 362 | LOG_INFO(Frontend, "Host CPU: {}", cpu_string); | 363 | LOG_INFO(Frontend, "Host CPU: {}", cpu_string); |
| 363 | #endif | 364 | #endif |
| 365 | LOG_INFO(Frontend, "Host CPU Threads: {}", processor_count); | ||
| 364 | LOG_INFO(Frontend, "Host OS: {}", PrettyProductName().toStdString()); | 366 | LOG_INFO(Frontend, "Host OS: {}", PrettyProductName().toStdString()); |
| 365 | LOG_INFO(Frontend, "Host RAM: {:.2f} GiB", | 367 | LOG_INFO(Frontend, "Host RAM: {:.2f} GiB", |
| 366 | Common::GetMemInfo().TotalPhysicalMemory / f64{1_GiB}); | 368 | Common::GetMemInfo().TotalPhysicalMemory / f64{1_GiB}); |
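The new "Host CPU Threads" line comes from std::thread::hardware_concurrency(). A self-contained example; note the function is allowed to return 0 when the count cannot be determined, so code that needs a positive value should guard for that:

#include <iostream>
#include <thread>

int main() {
    const unsigned int processor_count = std::thread::hardware_concurrency();
    // 0 means "unknown"; logging it verbatim (as the diff does) is fine,
    // but sizing a thread pool from it would need a fallback.
    std::cout << "Host CPU Threads: " << processor_count << '\n';
}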
| @@ -509,7 +511,7 @@ GMainWindow::~GMainWindow() { | |||
| 509 | delete render_window; | 511 | delete render_window; |
| 510 | } | 512 | } |
| 511 | 513 | ||
| 512 | #ifdef __linux__ | 514 | #ifdef __unix__ |
| 513 | ::close(sig_interrupt_fds[0]); | 515 | ::close(sig_interrupt_fds[0]); |
| 514 | ::close(sig_interrupt_fds[1]); | 516 | ::close(sig_interrupt_fds[1]); |
| 515 | #endif | 517 | #endif |
| @@ -1379,7 +1381,7 @@ void GMainWindow::OnDisplayTitleBars(bool show) { | |||
| 1379 | } | 1381 | } |
| 1380 | 1382 | ||
| 1381 | void GMainWindow::SetupPrepareForSleep() { | 1383 | void GMainWindow::SetupPrepareForSleep() { |
| 1382 | #ifdef __linux__ | 1384 | #ifdef __unix__ |
| 1383 | auto bus = QDBusConnection::systemBus(); | 1385 | auto bus = QDBusConnection::systemBus(); |
| 1384 | if (bus.isConnected()) { | 1386 | if (bus.isConnected()) { |
| 1385 | const bool success = bus.connect( | 1387 | const bool success = bus.connect( |
| @@ -1393,7 +1395,7 @@ void GMainWindow::SetupPrepareForSleep() { | |||
| 1393 | } else { | 1395 | } else { |
| 1394 | LOG_WARNING(Frontend, "QDBusConnection system bus is not connected"); | 1396 | LOG_WARNING(Frontend, "QDBusConnection system bus is not connected"); |
| 1395 | } | 1397 | } |
| 1396 | #endif // __linux__ | 1398 | #endif // __unix__ |
| 1397 | } | 1399 | } |
| 1398 | 1400 | ||
| 1399 | void GMainWindow::OnPrepareForSleep(bool prepare_sleep) { | 1401 | void GMainWindow::OnPrepareForSleep(bool prepare_sleep) { |
| @@ -1415,7 +1417,7 @@ void GMainWindow::OnPrepareForSleep(bool prepare_sleep) { | |||
| 1415 | } | 1417 | } |
| 1416 | } | 1418 | } |
| 1417 | 1419 | ||
| 1418 | #ifdef __linux__ | 1420 | #ifdef __unix__ |
| 1419 | static std::optional<QDBusObjectPath> HoldWakeLockLinux(u32 window_id = 0) { | 1421 | static std::optional<QDBusObjectPath> HoldWakeLockLinux(u32 window_id = 0) { |
| 1420 | if (!QDBusConnection::sessionBus().isConnected()) { | 1422 | if (!QDBusConnection::sessionBus().isConnected()) { |
| 1421 | return {}; | 1423 | return {}; |
| @@ -1500,14 +1502,14 @@ void GMainWindow::OnSigInterruptNotifierActivated() { | |||
| 1500 | 1502 | ||
| 1501 | emit SigInterrupt(); | 1503 | emit SigInterrupt(); |
| 1502 | } | 1504 | } |
| 1503 | #endif // __linux__ | 1505 | #endif // __unix__ |
| 1504 | 1506 | ||
| 1505 | void GMainWindow::PreventOSSleep() { | 1507 | void GMainWindow::PreventOSSleep() { |
| 1506 | #ifdef _WIN32 | 1508 | #ifdef _WIN32 |
| 1507 | SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_DISPLAY_REQUIRED); | 1509 | SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_DISPLAY_REQUIRED); |
| 1508 | #elif defined(HAVE_SDL2) | 1510 | #elif defined(HAVE_SDL2) |
| 1509 | SDL_DisableScreenSaver(); | 1511 | SDL_DisableScreenSaver(); |
| 1510 | #ifdef __linux__ | 1512 | #ifdef __unix__ |
| 1511 | auto reply = HoldWakeLockLinux(winId()); | 1513 | auto reply = HoldWakeLockLinux(winId()); |
| 1512 | if (reply) { | 1514 | if (reply) { |
| 1513 | wake_lock = std::move(reply.value()); | 1515 | wake_lock = std::move(reply.value()); |
| @@ -1521,7 +1523,7 @@ void GMainWindow::AllowOSSleep() { | |||
| 1521 | SetThreadExecutionState(ES_CONTINUOUS); | 1523 | SetThreadExecutionState(ES_CONTINUOUS); |
| 1522 | #elif defined(HAVE_SDL2) | 1524 | #elif defined(HAVE_SDL2) |
| 1523 | SDL_EnableScreenSaver(); | 1525 | SDL_EnableScreenSaver(); |
| 1524 | #ifdef __linux__ | 1526 | #ifdef __unix__ |
| 1525 | if (!wake_lock.path().isEmpty()) { | 1527 | if (!wake_lock.path().isEmpty()) { |
| 1526 | ReleaseWakeLockLinux(wake_lock); | 1528 | ReleaseWakeLockLinux(wake_lock); |
| 1527 | } | 1529 | } |
| @@ -2018,38 +2020,50 @@ static bool RomFSRawCopy(QProgressDialog& dialog, const FileSys::VirtualDir& src | |||
| 2018 | return true; | 2020 | return true; |
| 2019 | } | 2021 | } |
| 2020 | 2022 | ||
| 2023 | QString GMainWindow::GetGameListErrorRemoving(InstalledEntryType type) const { | ||
| 2024 | switch (type) { | ||
| 2025 | case InstalledEntryType::Game: | ||
| 2026 | return tr("Error Removing Contents"); | ||
| 2027 | case InstalledEntryType::Update: | ||
| 2028 | return tr("Error Removing Update"); | ||
| 2029 | case InstalledEntryType::AddOnContent: | ||
| 2030 | return tr("Error Removing DLC"); | ||
| 2031 | default: | ||
| 2032 | return QStringLiteral("Error Removing <Invalid Type>"); | ||
| 2033 | } | ||
| 2034 | } | ||
| 2021 | void GMainWindow::OnGameListRemoveInstalledEntry(u64 program_id, InstalledEntryType type) { | 2035 | void GMainWindow::OnGameListRemoveInstalledEntry(u64 program_id, InstalledEntryType type) { |
| 2022 | const QString entry_type = [type] { | 2036 | const QString entry_question = [type] { |
| 2023 | switch (type) { | 2037 | switch (type) { |
| 2024 | case InstalledEntryType::Game: | 2038 | case InstalledEntryType::Game: |
| 2025 | return tr("Contents"); | 2039 | return tr("Remove Installed Game Contents?"); |
| 2026 | case InstalledEntryType::Update: | 2040 | case InstalledEntryType::Update: |
| 2027 | return tr("Update"); | 2041 | return tr("Remove Installed Game Update?"); |
| 2028 | case InstalledEntryType::AddOnContent: | 2042 | case InstalledEntryType::AddOnContent: |
| 2029 | return tr("DLC"); | 2043 | return tr("Remove Installed Game DLC?"); |
| 2030 | default: | 2044 | default: |
| 2031 | return QString{}; | 2045 | return QStringLiteral("Remove Installed Game <Invalid Type>?"); |
| 2032 | } | 2046 | } |
| 2033 | }(); | 2047 | }(); |
| 2034 | 2048 | ||
| 2035 | if (QMessageBox::question( | 2049 | if (QMessageBox::question(this, tr("Remove Entry"), entry_question, |
| 2036 | this, tr("Remove Entry"), tr("Remove Installed Game %1?").arg(entry_type), | 2050 | QMessageBox::Yes | QMessageBox::No, |
| 2037 | QMessageBox::Yes | QMessageBox::No, QMessageBox::No) != QMessageBox::Yes) { | 2051 | QMessageBox::No) != QMessageBox::Yes) { |
| 2038 | return; | 2052 | return; |
| 2039 | } | 2053 | } |
| 2040 | 2054 | ||
| 2041 | switch (type) { | 2055 | switch (type) { |
| 2042 | case InstalledEntryType::Game: | 2056 | case InstalledEntryType::Game: |
| 2043 | RemoveBaseContent(program_id, entry_type); | 2057 | RemoveBaseContent(program_id, type); |
| 2044 | [[fallthrough]]; | 2058 | [[fallthrough]]; |
| 2045 | case InstalledEntryType::Update: | 2059 | case InstalledEntryType::Update: |
| 2046 | RemoveUpdateContent(program_id, entry_type); | 2060 | RemoveUpdateContent(program_id, type); |
| 2047 | if (type != InstalledEntryType::Game) { | 2061 | if (type != InstalledEntryType::Game) { |
| 2048 | break; | 2062 | break; |
| 2049 | } | 2063 | } |
| 2050 | [[fallthrough]]; | 2064 | [[fallthrough]]; |
| 2051 | case InstalledEntryType::AddOnContent: | 2065 | case InstalledEntryType::AddOnContent: |
| 2052 | RemoveAddOnContent(program_id, entry_type); | 2066 | RemoveAddOnContent(program_id, type); |
| 2053 | break; | 2067 | break; |
| 2054 | } | 2068 | } |
| 2055 | Common::FS::RemoveDirRecursively(Common::FS::GetYuzuPath(Common::FS::YuzuPath::CacheDir) / | 2069 | Common::FS::RemoveDirRecursively(Common::FS::GetYuzuPath(Common::FS::YuzuPath::CacheDir) / |
| @@ -2057,7 +2071,7 @@ void GMainWindow::OnGameListRemoveInstalledEntry(u64 program_id, InstalledEntryT | |||
| 2057 | game_list->PopulateAsync(UISettings::values.game_dirs); | 2071 | game_list->PopulateAsync(UISettings::values.game_dirs); |
| 2058 | } | 2072 | } |
| 2059 | 2073 | ||
| 2060 | void GMainWindow::RemoveBaseContent(u64 program_id, const QString& entry_type) { | 2074 | void GMainWindow::RemoveBaseContent(u64 program_id, InstalledEntryType type) { |
| 2061 | const auto& fs_controller = system->GetFileSystemController(); | 2075 | const auto& fs_controller = system->GetFileSystemController(); |
| 2062 | const auto res = fs_controller.GetUserNANDContents()->RemoveExistingEntry(program_id) || | 2076 | const auto res = fs_controller.GetUserNANDContents()->RemoveExistingEntry(program_id) || |
| 2063 | fs_controller.GetSDMCContents()->RemoveExistingEntry(program_id); | 2077 | fs_controller.GetSDMCContents()->RemoveExistingEntry(program_id); |
| @@ -2067,12 +2081,12 @@ void GMainWindow::RemoveBaseContent(u64 program_id, const QString& entry_type) { | |||
| 2067 | tr("Successfully removed the installed base game.")); | 2081 | tr("Successfully removed the installed base game.")); |
| 2068 | } else { | 2082 | } else { |
| 2069 | QMessageBox::warning( | 2083 | QMessageBox::warning( |
| 2070 | this, tr("Error Removing %1").arg(entry_type), | 2084 | this, GetGameListErrorRemoving(type), |
| 2071 | tr("The base game is not installed in the NAND and cannot be removed.")); | 2085 | tr("The base game is not installed in the NAND and cannot be removed.")); |
| 2072 | } | 2086 | } |
| 2073 | } | 2087 | } |
| 2074 | 2088 | ||
| 2075 | void GMainWindow::RemoveUpdateContent(u64 program_id, const QString& entry_type) { | 2089 | void GMainWindow::RemoveUpdateContent(u64 program_id, InstalledEntryType type) { |
| 2076 | const auto update_id = program_id | 0x800; | 2090 | const auto update_id = program_id | 0x800; |
| 2077 | const auto& fs_controller = system->GetFileSystemController(); | 2091 | const auto& fs_controller = system->GetFileSystemController(); |
| 2078 | const auto res = fs_controller.GetUserNANDContents()->RemoveExistingEntry(update_id) || | 2092 | const auto res = fs_controller.GetUserNANDContents()->RemoveExistingEntry(update_id) || |
| @@ -2082,12 +2096,12 @@ void GMainWindow::RemoveUpdateContent(u64 program_id, const QString& entry_type) | |||
| 2082 | QMessageBox::information(this, tr("Successfully Removed"), | 2096 | QMessageBox::information(this, tr("Successfully Removed"), |
| 2083 | tr("Successfully removed the installed update.")); | 2097 | tr("Successfully removed the installed update.")); |
| 2084 | } else { | 2098 | } else { |
| 2085 | QMessageBox::warning(this, tr("Error Removing %1").arg(entry_type), | 2099 | QMessageBox::warning(this, GetGameListErrorRemoving(type), |
| 2086 | tr("There is no update installed for this title.")); | 2100 | tr("There is no update installed for this title.")); |
| 2087 | } | 2101 | } |
| 2088 | } | 2102 | } |
| 2089 | 2103 | ||
| 2090 | void GMainWindow::RemoveAddOnContent(u64 program_id, const QString& entry_type) { | 2104 | void GMainWindow::RemoveAddOnContent(u64 program_id, InstalledEntryType type) { |
| 2091 | u32 count{}; | 2105 | u32 count{}; |
| 2092 | const auto& fs_controller = system->GetFileSystemController(); | 2106 | const auto& fs_controller = system->GetFileSystemController(); |
| 2093 | const auto dlc_entries = system->GetContentProvider().ListEntriesFilter( | 2107 | const auto dlc_entries = system->GetContentProvider().ListEntriesFilter( |
| @@ -2105,7 +2119,7 @@ void GMainWindow::RemoveAddOnContent(u64 program_id, const QString& entry_type) | |||
| 2105 | } | 2119 | } |
| 2106 | 2120 | ||
| 2107 | if (count == 0) { | 2121 | if (count == 0) { |
| 2108 | QMessageBox::warning(this, tr("Error Removing %1").arg(entry_type), | 2122 | QMessageBox::warning(this, GetGameListErrorRemoving(type), |
| 2109 | tr("There are no DLC installed for this title.")); | 2123 | tr("There are no DLC installed for this title.")); |
| 2110 | return; | 2124 | return; |
| 2111 | } | 2125 | } |
| @@ -4084,7 +4098,7 @@ void GMainWindow::SetDiscordEnabled([[maybe_unused]] bool state) { | |||
| 4084 | } | 4098 | } |
| 4085 | 4099 | ||
| 4086 | void GMainWindow::changeEvent(QEvent* event) { | 4100 | void GMainWindow::changeEvent(QEvent* event) { |
| 4087 | #ifdef __linux__ | 4101 | #ifdef __unix__ |
| 4088 | // PaletteChange event appears to only reach so far into the GUI, explicitly asking to | 4102 | // PaletteChange event appears to only reach so far into the GUI, explicitly asking to |
| 4089 | // UpdateUITheme is a decent work around | 4103 | // UpdateUITheme is a decent work around |
| 4090 | if (event->type() == QEvent::PaletteChange) { | 4104 | if (event->type() == QEvent::PaletteChange) { |
| @@ -4099,7 +4113,7 @@ void GMainWindow::changeEvent(QEvent* event) { | |||
| 4099 | } | 4113 | } |
| 4100 | last_window_color = window_color; | 4114 | last_window_color = window_color; |
| 4101 | } | 4115 | } |
| 4102 | #endif // __linux__ | 4116 | #endif // __unix__ |
| 4103 | QWidget::changeEvent(event); | 4117 | QWidget::changeEvent(event); |
| 4104 | } | 4118 | } |
| 4105 | 4119 | ||
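The removal refactor above drops the pre-formatted QString entry_type and passes the InstalledEntryType enum through instead, with GetGameListErrorRemoving mapping it to a translatable title. The dispatch keeps its deliberate fallthrough: removing a base game cascades through its update and DLC, while removing only an update stops after the update. A sketch of that control flow, with the Remove* bodies reduced to prints:

#include <cstdint>
#include <iostream>

enum class InstalledEntryType { Game, Update, AddOnContent };

void RemoveInstalledEntry(std::uint64_t program_id, InstalledEntryType type) {
    switch (type) {
    case InstalledEntryType::Game:
        std::cout << "remove base content " << program_id << '\n';
        [[fallthrough]];
    case InstalledEntryType::Update:
        std::cout << "remove update content " << program_id << '\n';
        if (type != InstalledEntryType::Game) {
            break; // a plain Update removal does not touch DLC
        }
        [[fallthrough]];
    case InstalledEntryType::AddOnContent:
        std::cout << "remove add-on content " << program_id << '\n';
        break;
    }
}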
diff --git a/src/yuzu/main.h b/src/yuzu/main.h index f7aa8e417..b73f550dd 100644 --- a/src/yuzu/main.h +++ b/src/yuzu/main.h | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | #include "yuzu/compatibility_list.h" | 15 | #include "yuzu/compatibility_list.h" |
| 16 | #include "yuzu/hotkeys.h" | 16 | #include "yuzu/hotkeys.h" |
| 17 | 17 | ||
| 18 | #ifdef __linux__ | 18 | #ifdef __unix__ |
| 19 | #include <QVariant> | 19 | #include <QVariant> |
| 20 | #include <QtDBus/QDBusInterface> | 20 | #include <QtDBus/QDBusInterface> |
| 21 | #include <QtDBus/QtDBus> | 21 | #include <QtDBus/QtDBus> |
| @@ -255,7 +255,7 @@ private: | |||
| 255 | void changeEvent(QEvent* event) override; | 255 | void changeEvent(QEvent* event) override; |
| 256 | void closeEvent(QCloseEvent* event) override; | 256 | void closeEvent(QCloseEvent* event) override; |
| 257 | 257 | ||
| 258 | #ifdef __linux__ | 258 | #ifdef __unix__ |
| 259 | void SetupSigInterrupts(); | 259 | void SetupSigInterrupts(); |
| 260 | static void HandleSigInterrupt(int); | 260 | static void HandleSigInterrupt(int); |
| 261 | void OnSigInterruptNotifierActivated(); | 261 | void OnSigInterruptNotifierActivated(); |
| @@ -324,9 +324,10 @@ private slots: | |||
| 324 | void OnMouseActivity(); | 324 | void OnMouseActivity(); |
| 325 | 325 | ||
| 326 | private: | 326 | private: |
| 327 | void RemoveBaseContent(u64 program_id, const QString& entry_type); | 327 | QString GetGameListErrorRemoving(InstalledEntryType type) const; |
| 328 | void RemoveUpdateContent(u64 program_id, const QString& entry_type); | 328 | void RemoveBaseContent(u64 program_id, InstalledEntryType type); |
| 329 | void RemoveAddOnContent(u64 program_id, const QString& entry_type); | 329 | void RemoveUpdateContent(u64 program_id, InstalledEntryType type); |
| 330 | void RemoveAddOnContent(u64 program_id, InstalledEntryType type); | ||
| 330 | void RemoveTransferableShaderCache(u64 program_id, GameListRemoveTarget target); | 331 | void RemoveTransferableShaderCache(u64 program_id, GameListRemoveTarget target); |
| 331 | void RemoveAllTransferableShaderCaches(u64 program_id); | 332 | void RemoveAllTransferableShaderCaches(u64 program_id); |
| 332 | void RemoveCustomConfiguration(u64 program_id, const std::string& game_path); | 333 | void RemoveCustomConfiguration(u64 program_id, const std::string& game_path); |
| @@ -435,7 +436,7 @@ private: | |||
| 435 | // True if TAS recording dialog is visible | 436 | // True if TAS recording dialog is visible |
| 436 | bool is_tas_recording_dialog_active{}; | 437 | bool is_tas_recording_dialog_active{}; |
| 437 | 438 | ||
| 438 | #ifdef __linux__ | 439 | #ifdef __unix__ |
| 439 | QSocketNotifier* sig_interrupt_notifier; | 440 | QSocketNotifier* sig_interrupt_notifier; |
| 440 | static std::array<int, 3> sig_interrupt_fds; | 441 | static std::array<int, 3> sig_interrupt_fds; |
| 441 | 442 | ||
diff --git a/src/yuzu/main.ui b/src/yuzu/main.ui index 74d49dbd4..e670acc30 100644 --- a/src/yuzu/main.ui +++ b/src/yuzu/main.ui | |||
| @@ -55,7 +55,6 @@ | |||
| 55 | <addaction name="separator"/> | 55 | <addaction name="separator"/> |
| 56 | <addaction name="menu_recent_files"/> | 56 | <addaction name="menu_recent_files"/> |
| 57 | <addaction name="separator"/> | 57 | <addaction name="separator"/> |
| 58 | <addaction name="separator"/> | ||
| 59 | <addaction name="action_Load_Amiibo"/> | 58 | <addaction name="action_Load_Amiibo"/> |
| 60 | <addaction name="separator"/> | 59 | <addaction name="separator"/> |
| 61 | <addaction name="action_Open_yuzu_Folder"/> | 60 | <addaction name="action_Open_yuzu_Folder"/> |
diff --git a/src/yuzu/uisettings.h b/src/yuzu/uisettings.h index 753797efc..452038cd9 100644 --- a/src/yuzu/uisettings.h +++ b/src/yuzu/uisettings.h | |||
| @@ -129,6 +129,13 @@ struct Values { | |||
| 129 | Settings::Setting<bool> favorites_expanded{true, "favorites_expanded"}; | 129 | Settings::Setting<bool> favorites_expanded{true, "favorites_expanded"}; |
| 130 | QVector<u64> favorited_ids; | 130 | QVector<u64> favorited_ids; |
| 131 | 131 | ||
| 132 | // Compatibility List | ||
| 133 | Settings::Setting<bool> show_compat{false, "show_compat"}; | ||
| 134 | |||
| 135 | // Size & File Types Column | ||
| 136 | Settings::Setting<bool> show_size{true, "show_size"}; | ||
| 137 | Settings::Setting<bool> show_types{true, "show_types"}; | ||
| 138 | |||
| 132 | bool configuration_applied; | 139 | bool configuration_applied; |
| 133 | bool reset_to_defaults; | 140 | bool reset_to_defaults; |
| 134 | Settings::Setting<bool> disable_web_applet{true, "disable_web_applet"}; | 141 | Settings::Setting<bool> disable_web_applet{true, "disable_web_applet"}; |
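Each new UISettings entry pairs a default value with its config-file label; show_compat ships hidden (false) while the size and file-type columns ship visible (true). A minimal stand-in for the Settings::Setting<T> shape used above (the real class carries more machinery, e.g. ranged and switchable variants):

#include <string>
#include <utility>

template <typename T>
class Setting {
public:
    Setting(const T& default_value_, std::string label_)
        : value{default_value_}, default_value{default_value_}, label{std::move(label_)} {}

    const T& GetValue() const { return value; }
    void SetValue(const T& new_value) { value = new_value; }
    const T& GetDefault() const { return default_value; }
    const std::string& GetLabel() const { return label; }

private:
    T value;
    T default_value;   // kept so a missing config key can fall back
    std::string label; // the ini key read/written by Config
};

// Mirroring the diff's defaults:
inline Setting<bool> show_compat{false, "show_compat"};
inline Setting<bool> show_size{true, "show_size"};
inline Setting<bool> show_types{true, "show_types"};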